Diffstat (limited to 'tools/testing/selftests/bpf/prog_tests')
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c      115
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/log_buf.c                 3
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/perf_link.c              15
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/send_signal.c           133
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/sock_create.c           348
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/sock_post_bind.c        426
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/subskeleton.c            76
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/tailcalls.c             120
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/task_kfunc.c             80
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/task_local_storage.c    278
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c      25
11 files changed, 1554 insertions(+), 65 deletions(-)
diff --git a/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
new file mode 100644
index 000000000000..848d8fc9171f
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Google */
+
+#include <test_progs.h>
+#include <bpf/libbpf.h>
+#include <bpf/btf.h>
+#include "kmem_cache_iter.skel.h"
+
+#define SLAB_NAME_MAX 32
+
+struct kmem_cache_result {
+ char name[SLAB_NAME_MAX];
+ long obj_size;
+};
+
+static void subtest_kmem_cache_iter_check_task_struct(struct kmem_cache_iter *skel)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .flags = 0, /* Run it with the current task */
+ );
+ int prog_fd = bpf_program__fd(skel->progs.check_task_struct);
+
+ /* Get task_struct and check if it's from a slab cache */
+ ASSERT_OK(bpf_prog_test_run_opts(prog_fd, &opts), "prog_test_run");
+
+ /* The BPF program should set 'found' variable */
+ ASSERT_EQ(skel->bss->task_struct_found, 1, "task_struct_found");
+}
+
+static void subtest_kmem_cache_iter_check_slabinfo(struct kmem_cache_iter *skel)
+{
+ FILE *fp;
+ int map_fd;
+ char name[SLAB_NAME_MAX];
+ unsigned long objsize;
+ char rest_of_line[1000];
+ struct kmem_cache_result r;
+ int seen = 0;
+
+ fp = fopen("/proc/slabinfo", "r");
+ if (fp == NULL) {
+ /* CONFIG_SLUB_DEBUG is not enabled */
+ return;
+ }
+
+ map_fd = bpf_map__fd(skel->maps.slab_result);
+
+ /* Ignore first two lines for header */
+ fscanf(fp, "slabinfo - version: %*d.%*d\n");
+ fscanf(fp, "# %*s %*s %*s %*s %*s %*s : %[^\n]\n", rest_of_line);
+
+ /* Compare name and objsize only - others can change frequently */
+ while (fscanf(fp, "%s %*u %*u %lu %*u %*u : %[^\n]\n",
+ name, &objsize, rest_of_line) == 3) {
+ int ret = bpf_map_lookup_elem(map_fd, &seen, &r);
+
+ if (!ASSERT_OK(ret, "kmem_cache_lookup"))
+ break;
+
+ ASSERT_STREQ(r.name, name, "kmem_cache_name");
+ ASSERT_EQ(r.obj_size, objsize, "kmem_cache_objsize");
+
+ seen++;
+ }
+
+ ASSERT_EQ(skel->bss->kmem_cache_seen, seen, "kmem_cache_seen_eq");
+
+ fclose(fp);
+}
+
+void test_kmem_cache_iter(void)
+{
+ DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+ struct kmem_cache_iter *skel = NULL;
+ union bpf_iter_link_info linfo = {};
+ struct bpf_link *link;
+ char buf[256];
+ int iter_fd;
+
+ skel = kmem_cache_iter__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "kmem_cache_iter__open_and_load"))
+ return;
+
+ opts.link_info = &linfo;
+ opts.link_info_len = sizeof(linfo);
+
+ link = bpf_program__attach_iter(skel->progs.slab_info_collector, &opts);
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
+ goto destroy;
+
+ iter_fd = bpf_iter_create(bpf_link__fd(link));
+ if (!ASSERT_GE(iter_fd, 0, "iter_create"))
+ goto free_link;
+
+ memset(buf, 0, sizeof(buf));
+ while (read(iter_fd, buf, sizeof(buf)) > 0) {
+ /* Read out all contents */
+ printf("%s", buf);
+ }
+
+ /* Next reads should return 0 */
+ ASSERT_EQ(read(iter_fd, buf, sizeof(buf)), 0, "read");
+
+ if (test__start_subtest("check_task_struct"))
+ subtest_kmem_cache_iter_check_task_struct(skel);
+ if (test__start_subtest("check_slabinfo"))
+ subtest_kmem_cache_iter_check_slabinfo(skel);
+
+ close(iter_fd);
+
+free_link:
+ bpf_link__destroy(link);
+destroy:
+ kmem_cache_iter__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/log_buf.c b/tools/testing/selftests/bpf/prog_tests/log_buf.c
index 27676a04d0b6..169ce689b97c 100644
--- a/tools/testing/selftests/bpf/prog_tests/log_buf.c
+++ b/tools/testing/selftests/bpf/prog_tests/log_buf.c
@@ -169,7 +169,6 @@ static void bpf_prog_load_log_buf(void)
ASSERT_GE(fd, 0, "good_fd1");
if (fd >= 0)
close(fd);
- fd = -1;
/* log_level == 2 should always fill log_buf, even for good prog */
log_buf[0] = '\0';
@@ -180,7 +179,6 @@ static void bpf_prog_load_log_buf(void)
ASSERT_GE(fd, 0, "good_fd2");
if (fd >= 0)
close(fd);
- fd = -1;
/* log_level == 0 should fill log_buf for bad prog */
log_buf[0] = '\0';
@@ -191,7 +189,6 @@ static void bpf_prog_load_log_buf(void)
ASSERT_LT(fd, 0, "bad_fd");
if (fd >= 0)
close(fd);
- fd = -1;
free(log_buf);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/perf_link.c b/tools/testing/selftests/bpf/prog_tests/perf_link.c
index 3a25f1c743a1..d940ff87fa08 100644
--- a/tools/testing/selftests/bpf/prog_tests/perf_link.c
+++ b/tools/testing/selftests/bpf/prog_tests/perf_link.c
@@ -4,8 +4,12 @@
#include <pthread.h>
#include <sched.h>
#include <test_progs.h>
+#include "testing_helpers.h"
#include "test_perf_link.skel.h"
+#define BURN_TIMEOUT_MS 100
+#define BURN_TIMEOUT_NS (BURN_TIMEOUT_MS * 1000000)
+
static void burn_cpu(void)
{
volatile int j = 0;
@@ -32,6 +36,7 @@ void serial_test_perf_link(void)
int run_cnt_before, run_cnt_after;
struct bpf_link_info info;
__u32 info_len = sizeof(info);
+ __u64 timeout_time_ns;
/* create perf event */
memset(&attr, 0, sizeof(attr));
@@ -63,8 +68,14 @@ void serial_test_perf_link(void)
ASSERT_GT(info.prog_id, 0, "link_prog_id");
/* ensure we get at least one perf_event prog execution */
- burn_cpu();
- ASSERT_GT(skel->bss->run_cnt, 0, "run_cnt");
+ timeout_time_ns = get_time_ns() + BURN_TIMEOUT_NS;
+ while (true) {
+ burn_cpu();
+ if (skel->bss->run_cnt > 0)
+ break;
+ if (!ASSERT_LT(get_time_ns(), timeout_time_ns, "run_cnt_timeout"))
+ break;
+ }
/* perf_event is still active, but we close link and BPF program
* shouldn't be executed anymore
diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c
index 6cc69900b310..1aed94ec14ef 100644
--- a/tools/testing/selftests/bpf/prog_tests/send_signal.c
+++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c
@@ -8,17 +8,25 @@ static int sigusr1_received;
static void sigusr1_handler(int signum)
{
- sigusr1_received = 1;
+ sigusr1_received = 8;
+}
+
+static void sigusr1_siginfo_handler(int s, siginfo_t *i, void *v)
+{
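+ /* pick up the cookie delivered in si_value; the parent later checks for the value 8 */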
+ sigusr1_received = (int)(long long)i->si_value.sival_ptr;
}
static void test_send_signal_common(struct perf_event_attr *attr,
- bool signal_thread)
+ bool signal_thread, bool remote)
{
struct test_send_signal_kern *skel;
+ struct sigaction sa;
int pipe_c2p[2], pipe_p2c[2];
int err = -1, pmu_fd = -1;
+ volatile int j = 0;
char buf[256];
pid_t pid;
+ int old_prio;
if (!ASSERT_OK(pipe(pipe_c2p), "pipe_c2p"))
return;
@@ -39,11 +47,14 @@ static void test_send_signal_common(struct perf_event_attr *attr,
}
if (pid == 0) {
- int old_prio;
- volatile int j = 0;
-
/* install signal handler and notify parent */
- ASSERT_NEQ(signal(SIGUSR1, sigusr1_handler), SIG_ERR, "signal");
+ if (remote) {
+ sa.sa_sigaction = sigusr1_siginfo_handler;
+ sa.sa_flags = SA_RESTART | SA_SIGINFO;
+ ASSERT_NEQ(sigaction(SIGUSR1, &sa, NULL), -1, "sigaction");
+ } else {
+ ASSERT_NEQ(signal(SIGUSR1, sigusr1_handler), SIG_ERR, "signal");
+ }
close(pipe_c2p[0]); /* close read */
close(pipe_p2c[1]); /* close write */
@@ -52,10 +63,12 @@ static void test_send_signal_common(struct perf_event_attr *attr,
* that if an interrupt happens, the underlying task
* is this process.
*/
- errno = 0;
- old_prio = getpriority(PRIO_PROCESS, 0);
- ASSERT_OK(errno, "getpriority");
- ASSERT_OK(setpriority(PRIO_PROCESS, 0, -20), "setpriority");
+ if (!remote) {
+ errno = 0;
+ old_prio = getpriority(PRIO_PROCESS, 0);
+ ASSERT_OK(errno, "getpriority");
+ ASSERT_OK(setpriority(PRIO_PROCESS, 0, -20), "setpriority");
+ }
/* notify parent signal handler is installed */
ASSERT_EQ(write(pipe_c2p[1], buf, 1), 1, "pipe_write");
@@ -66,20 +79,25 @@ static void test_send_signal_common(struct perf_event_attr *attr,
/* wait a little for signal handler */
for (int i = 0; i < 1000000000 && !sigusr1_received; i++) {
j /= i + j + 1;
- if (!attr)
- /* trigger the nanosleep tracepoint program. */
- usleep(1);
+ if (remote)
+ sleep(1);
+ else
+ if (!attr)
+ /* trigger the nanosleep tracepoint program. */
+ usleep(1);
}
- buf[0] = sigusr1_received ? '2' : '0';
- ASSERT_EQ(sigusr1_received, 1, "sigusr1_received");
+ buf[0] = sigusr1_received;
+
+ ASSERT_EQ(sigusr1_received, 8, "sigusr1_received");
ASSERT_EQ(write(pipe_c2p[1], buf, 1), 1, "pipe_write");
/* wait for parent notification and exit */
ASSERT_EQ(read(pipe_p2c[0], buf, 1), 1, "pipe_read");
/* restore the old priority */
- ASSERT_OK(setpriority(PRIO_PROCESS, 0, old_prio), "setpriority");
+ if (!remote)
+ ASSERT_OK(setpriority(PRIO_PROCESS, 0, old_prio), "setpriority");
close(pipe_c2p[1]);
close(pipe_p2c[0]);
@@ -93,6 +111,17 @@ static void test_send_signal_common(struct perf_event_attr *attr,
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
goto skel_open_load_failure;
+ /* boost with a high priority so we get a higher chance
+ * that if an interrupt happens, the underlying task
+ * is this process.
+ */
+ if (remote) {
+ errno = 0;
+ old_prio = getpriority(PRIO_PROCESS, 0);
+ ASSERT_OK(errno, "getpriority");
+ ASSERT_OK(setpriority(PRIO_PROCESS, 0, -20), "setpriority");
+ }
+
if (!attr) {
err = test_send_signal_kern__attach(skel);
if (!ASSERT_OK(err, "skel_attach")) {
@@ -100,8 +129,12 @@ static void test_send_signal_common(struct perf_event_attr *attr,
goto destroy_skel;
}
} else {
- pmu_fd = syscall(__NR_perf_event_open, attr, pid, -1 /* cpu */,
- -1 /* group id */, 0 /* flags */);
+ if (!remote)
+ pmu_fd = syscall(__NR_perf_event_open, attr, pid, -1 /* cpu */,
+ -1 /* group id */, 0 /* flags */);
+ else
+ pmu_fd = syscall(__NR_perf_event_open, attr, getpid(), -1 /* cpu */,
+ -1 /* group id */, 0 /* flags */);
if (!ASSERT_GE(pmu_fd, 0, "perf_event_open")) {
err = -1;
goto destroy_skel;
@@ -119,11 +152,30 @@ static void test_send_signal_common(struct perf_event_attr *attr,
/* trigger the bpf send_signal */
skel->bss->signal_thread = signal_thread;
skel->bss->sig = SIGUSR1;
- skel->bss->pid = pid;
+ if (!remote) {
+ skel->bss->target_pid = 0;
+ skel->bss->pid = pid;
+ } else {
+ skel->bss->target_pid = pid;
+ skel->bss->pid = getpid();
+ }
/* notify child that bpf program can send_signal now */
ASSERT_EQ(write(pipe_p2c[1], buf, 1), 1, "pipe_write");
+ /* For the remote test, the BPF program is triggered from this
+ * process but the other process/thread is signaled.
+ */
+ if (remote) {
+ if (!attr) {
+ for (int i = 0; i < 10; i++)
+ usleep(1);
+ } else {
+ for (int i = 0; i < 100000000; i++)
+ j /= i + 1;
+ }
+ }
+
/* wait for result */
err = read(pipe_c2p[0], buf, 1);
if (!ASSERT_GE(err, 0, "reading pipe"))
@@ -133,7 +185,7 @@ static void test_send_signal_common(struct perf_event_attr *attr,
goto disable_pmu;
}
- ASSERT_EQ(buf[0], '2', "incorrect result");
+ ASSERT_EQ(buf[0], 8, "incorrect result");
/* notify child safe to exit */
ASSERT_EQ(write(pipe_p2c[1], buf, 1), 1, "pipe_write");
@@ -142,18 +194,21 @@ disable_pmu:
close(pmu_fd);
destroy_skel:
test_send_signal_kern__destroy(skel);
+ /* restore the old priority */
+ if (remote)
+ ASSERT_OK(setpriority(PRIO_PROCESS, 0, old_prio), "setpriority");
skel_open_load_failure:
close(pipe_c2p[0]);
close(pipe_p2c[1]);
wait(NULL);
}
-static void test_send_signal_tracepoint(bool signal_thread)
+static void test_send_signal_tracepoint(bool signal_thread, bool remote)
{
- test_send_signal_common(NULL, signal_thread);
+ test_send_signal_common(NULL, signal_thread, remote);
}
-static void test_send_signal_perf(bool signal_thread)
+static void test_send_signal_perf(bool signal_thread, bool remote)
{
struct perf_event_attr attr = {
.freq = 1,
@@ -162,10 +217,10 @@ static void test_send_signal_perf(bool signal_thread)
.config = PERF_COUNT_SW_CPU_CLOCK,
};
- test_send_signal_common(&attr, signal_thread);
+ test_send_signal_common(&attr, signal_thread, remote);
}
-static void test_send_signal_nmi(bool signal_thread)
+static void test_send_signal_nmi(bool signal_thread, bool remote)
{
struct perf_event_attr attr = {
.sample_period = 1,
@@ -191,21 +246,35 @@ static void test_send_signal_nmi(bool signal_thread)
close(pmu_fd);
}
- test_send_signal_common(&attr, signal_thread);
+ test_send_signal_common(&attr, signal_thread, remote);
}
void test_send_signal(void)
{
if (test__start_subtest("send_signal_tracepoint"))
- test_send_signal_tracepoint(false);
+ test_send_signal_tracepoint(false, false);
if (test__start_subtest("send_signal_perf"))
- test_send_signal_perf(false);
+ test_send_signal_perf(false, false);
if (test__start_subtest("send_signal_nmi"))
- test_send_signal_nmi(false);
+ test_send_signal_nmi(false, false);
if (test__start_subtest("send_signal_tracepoint_thread"))
- test_send_signal_tracepoint(true);
+ test_send_signal_tracepoint(true, false);
if (test__start_subtest("send_signal_perf_thread"))
- test_send_signal_perf(true);
+ test_send_signal_perf(true, false);
if (test__start_subtest("send_signal_nmi_thread"))
- test_send_signal_nmi(true);
+ test_send_signal_nmi(true, false);
+
+ /* Signal remote thread and thread group */
+ if (test__start_subtest("send_signal_tracepoint_remote"))
+ test_send_signal_tracepoint(false, true);
+ if (test__start_subtest("send_signal_perf_remote"))
+ test_send_signal_perf(false, true);
+ if (test__start_subtest("send_signal_nmi_remote"))
+ test_send_signal_nmi(false, true);
+ if (test__start_subtest("send_signal_tracepoint_thread_remote"))
+ test_send_signal_tracepoint(true, true);
+ if (test__start_subtest("send_signal_perf_thread_remote"))
+ test_send_signal_perf(true, true);
+ if (test__start_subtest("send_signal_nmi_thread_remote"))
+ test_send_signal_nmi(true, true);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/sock_create.c b/tools/testing/selftests/bpf/prog_tests/sock_create.c
new file mode 100644
index 000000000000..187ffc5e60c4
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/sock_create.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <test_progs.h>
+#include "cgroup_helpers.h"
+
+static char bpf_log_buf[4096];
+static bool verbose;
+
+enum sock_create_test_error {
+ OK = 0,
+ DENY_CREATE,
+};
+
+static struct sock_create_test {
+ const char *descr;
+ const struct bpf_insn insns[64];
+ enum bpf_attach_type attach_type;
+ enum bpf_attach_type expected_attach_type;
+
+ int domain;
+ int type;
+ int protocol;
+
+ int optname;
+ int optval;
+ enum sock_create_test_error error;
+} tests[] = {
+ {
+ .descr = "AF_INET set priority",
+ .insns = {
+ /* r3 = 123 (priority) */
+ BPF_MOV64_IMM(BPF_REG_3, 123),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct bpf_sock, priority)),
+
+ /* return 1 */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+ .attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+
+ .domain = AF_INET,
+ .type = SOCK_DGRAM,
+
+ .optname = SO_PRIORITY,
+ .optval = 123,
+ },
+ {
+ .descr = "AF_INET6 set priority",
+ .insns = {
+ /* r3 = 123 (priority) */
+ BPF_MOV64_IMM(BPF_REG_3, 123),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct bpf_sock, priority)),
+
+ /* return 1 */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+ .attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+
+ .domain = AF_INET6,
+ .type = SOCK_DGRAM,
+
+ .optname = SO_PRIORITY,
+ .optval = 123,
+ },
+ {
+ .descr = "AF_INET set mark",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+
+ /* get uid of process */
+ BPF_EMIT_CALL(BPF_FUNC_get_current_uid_gid),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffffffff),
+
+ /* if uid is 0, use given mark(666), else use uid as the mark */
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 666),
+
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct bpf_sock, mark)),
+
+ /* return 1 */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+ .attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+
+ .domain = AF_INET,
+ .type = SOCK_DGRAM,
+
+ .optname = SO_MARK,
+ .optval = 666,
+ },
+ {
+ .descr = "AF_INET6 set mark",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+
+ /* get uid of process */
+ BPF_EMIT_CALL(BPF_FUNC_get_current_uid_gid),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffffffff),
+
+ /* if uid is 0, use given mark(666), else use uid as the mark */
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 666),
+
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct bpf_sock, mark)),
+
+ /* return 1 */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+ .attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+
+ .domain = AF_INET6,
+ .type = SOCK_DGRAM,
+
+ .optname = SO_MARK,
+ .optval = 666,
+ },
+ {
+ .descr = "AF_INET bound to iface",
+ .insns = {
+ /* r3 = 1 (lo interface) */
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct bpf_sock, bound_dev_if)),
+
+ /* return 1 */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+ .attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+
+ .domain = AF_INET,
+ .type = SOCK_DGRAM,
+
+ .optname = SO_BINDTOIFINDEX,
+ .optval = 1,
+ },
+ {
+ .descr = "AF_INET6 bound to iface",
+ .insns = {
+ /* r3 = 1 (lo interface) */
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct bpf_sock, bound_dev_if)),
+
+ /* return 1 */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+ .attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+
+ .domain = AF_INET6,
+ .type = SOCK_DGRAM,
+
+ .optname = SO_BINDTOIFINDEX,
+ .optval = 1,
+ },
+ {
+ .descr = "block AF_INET, SOCK_DGRAM, IPPROTO_ICMP socket",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1), /* r0 = verdict */
+
+ /* sock->family == AF_INET */
+ BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1,
+ offsetof(struct bpf_sock, family)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, AF_INET, 5),
+
+ /* sock->type == SOCK_DGRAM */
+ BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1,
+ offsetof(struct bpf_sock, type)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, SOCK_DGRAM, 3),
+
+ /* sock->protocol == IPPROTO_ICMP */
+ BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1,
+ offsetof(struct bpf_sock, protocol)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, IPPROTO_ICMP, 1),
+
+ /* return 0 (block) */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+ .attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+
+ .domain = AF_INET,
+ .type = SOCK_DGRAM,
+ .protocol = IPPROTO_ICMP,
+
+ .error = DENY_CREATE,
+ },
+ {
+ .descr = "block AF_INET6, SOCK_DGRAM, IPPROTO_ICMPV6 socket",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1), /* r0 = verdict */
+
+ /* sock->family == AF_INET6 */
+ BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1,
+ offsetof(struct bpf_sock, family)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, AF_INET6, 5),
+
+ /* sock->type == SOCK_DGRAM */
+ BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1,
+ offsetof(struct bpf_sock, type)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, SOCK_DGRAM, 3),
+
+ /* sock->protocol == IPPROTO_ICMPV6 */
+ BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1,
+ offsetof(struct bpf_sock, protocol)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, IPPROTO_ICMPV6, 1),
+
+ /* return 0 (block) */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+ .attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+
+ .domain = AF_INET6,
+ .type = SOCK_DGRAM,
+ .protocol = IPPROTO_ICMPV6,
+
+ .error = DENY_CREATE,
+ },
+ {
+ .descr = "load w/o expected_attach_type (compat mode)",
+ .insns = {
+ /* return 1 */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = 0,
+ .attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+
+ .domain = AF_INET,
+ .type = SOCK_STREAM,
+ },
+};
+
+static int load_prog(const struct bpf_insn *insns,
+ enum bpf_attach_type expected_attach_type)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .expected_attach_type = expected_attach_type,
+ .log_level = 2,
+ .log_buf = bpf_log_buf,
+ .log_size = sizeof(bpf_log_buf),
+ );
+ int fd, insns_cnt = 0;
+
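+ /* count instructions up to and including the terminating BPF_EXIT */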
+ for (;
+ insns[insns_cnt].code != (BPF_JMP | BPF_EXIT);
+ insns_cnt++) {
+ }
+ insns_cnt++;
+
+ fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns,
+ insns_cnt, &opts);
+ if (verbose && fd < 0)
+ fprintf(stderr, "%s\n", bpf_log_buf);
+
+ return fd;
+}
+
+static int run_test(int cgroup_fd, struct sock_create_test *test)
+{
+ int sock_fd, err, prog_fd, optval, ret = -1;
+ socklen_t optlen = sizeof(optval);
+
+ prog_fd = load_prog(test->insns, test->expected_attach_type);
+ if (prog_fd < 0) {
+ log_err("Failed to load BPF program");
+ return -1;
+ }
+
+ err = bpf_prog_attach(prog_fd, cgroup_fd, test->attach_type, 0);
+ if (err < 0) {
+ log_err("Failed to attach BPF program");
+ goto close_prog_fd;
+ }
+
+ sock_fd = socket(test->domain, test->type, test->protocol);
+ if (sock_fd < 0) {
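+ /* a failed socket() call is the expected outcome for DENY_CREATE tests */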
+ if (test->error == DENY_CREATE)
+ ret = 0;
+ else
+ log_err("Failed to create socket");
+
+ goto detach_prog;
+ }
+
+ if (test->optname) {
+ err = getsockopt(sock_fd, SOL_SOCKET, test->optname, &optval, &optlen);
+ if (err) {
+ log_err("Failed to call getsockopt");
+ goto cleanup;
+ }
+
+ if (optval != test->optval) {
+ errno = 0;
+ log_err("getsockopt returned unexpected optval");
+ goto cleanup;
+ }
+ }
+
+ ret = test->error != OK;
+
+cleanup:
+ close(sock_fd);
+detach_prog:
+ bpf_prog_detach2(prog_fd, cgroup_fd, test->attach_type);
+close_prog_fd:
+ close(prog_fd);
+ return ret;
+}
+
+void test_sock_create(void)
+{
+ int cgroup_fd, i;
+
+ cgroup_fd = test__join_cgroup("/sock_create");
+ if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup"))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ if (!test__start_subtest(tests[i].descr))
+ continue;
+
+ ASSERT_OK(run_test(cgroup_fd, &tests[i]), tests[i].descr);
+ }
+
+ close(cgroup_fd);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/sock_post_bind.c b/tools/testing/selftests/bpf/prog_tests/sock_post_bind.c
new file mode 100644
index 000000000000..788135c9c673
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/sock_post_bind.c
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <test_progs.h>
+#include "cgroup_helpers.h"
+
+#define TEST_NS "sock_post_bind"
+
+static char bpf_log_buf[4096];
+
+static struct sock_post_bind_test {
+ const char *descr;
+ /* BPF prog properties */
+ const struct bpf_insn insns[64];
+ enum bpf_attach_type attach_type;
+ enum bpf_attach_type expected_attach_type;
+ /* Socket properties */
+ int domain;
+ int type;
+ /* Endpoint to bind() to */
+ const char *ip;
+ unsigned short port;
+ unsigned short port_retry;
+
+ /* Expected test result */
+ enum {
+ ATTACH_REJECT,
+ BIND_REJECT,
+ SUCCESS,
+ RETRY_SUCCESS,
+ RETRY_REJECT
+ } result;
+} tests[] = {
+ {
+ .descr = "attach type mismatch bind4 vs bind6",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
+ .attach_type = BPF_CGROUP_INET6_POST_BIND,
+ .result = ATTACH_REJECT,
+ },
+ {
+ .descr = "attach type mismatch bind6 vs bind4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET6_POST_BIND,
+ .attach_type = BPF_CGROUP_INET4_POST_BIND,
+ .result = ATTACH_REJECT,
+ },
+ {
+ .descr = "attach type mismatch default vs bind4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = 0,
+ .attach_type = BPF_CGROUP_INET4_POST_BIND,
+ .result = ATTACH_REJECT,
+ },
+ {
+ .descr = "attach type mismatch bind6 vs sock_create",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET6_POST_BIND,
+ .attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+ .result = ATTACH_REJECT,
+ },
+ {
+ .descr = "bind4 reject all",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
+ .attach_type = BPF_CGROUP_INET4_POST_BIND,
+ .domain = AF_INET,
+ .type = SOCK_STREAM,
+ .ip = "0.0.0.0",
+ .result = BIND_REJECT,
+ },
+ {
+ .descr = "bind6 reject all",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET6_POST_BIND,
+ .attach_type = BPF_CGROUP_INET6_POST_BIND,
+ .domain = AF_INET6,
+ .type = SOCK_STREAM,
+ .ip = "::",
+ .result = BIND_REJECT,
+ },
+ {
+ .descr = "bind6 deny specific IP & port",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+
+ /* if (ip == expected && port == expected) */
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock, src_ip6[3])),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
+ __bpf_constant_ntohl(0x00000001), 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock, src_port)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x2001, 2),
+
+ /* return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_A(1),
+
+ /* else return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET6_POST_BIND,
+ .attach_type = BPF_CGROUP_INET6_POST_BIND,
+ .domain = AF_INET6,
+ .type = SOCK_STREAM,
+ .ip = "::1",
+ .port = 8193,
+ .result = BIND_REJECT,
+ },
+ {
+ .descr = "bind4 allow specific IP & port",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+
+ /* if (ip == expected && port == expected) */
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock, src_ip4)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
+ __bpf_constant_ntohl(0x7F000001), 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock, src_port)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x1002, 2),
+
+ /* return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_A(1),
+
+ /* else return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
+ .attach_type = BPF_CGROUP_INET4_POST_BIND,
+ .domain = AF_INET,
+ .type = SOCK_STREAM,
+ .ip = "127.0.0.1",
+ .port = 4098,
+ .result = SUCCESS,
+ },
+ {
+ .descr = "bind4 deny specific IP & port of TCP, and retry",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+
+ /* if (ip == expected && port == expected) */
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock, src_ip4)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
+ __bpf_constant_ntohl(0x7F000001), 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock, src_port)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x1002, 2),
+
+ /* return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_A(1),
+
+ /* else return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
+ .attach_type = BPF_CGROUP_INET4_POST_BIND,
+ .domain = AF_INET,
+ .type = SOCK_STREAM,
+ .ip = "127.0.0.1",
+ .port = 4098,
+ .port_retry = 5000,
+ .result = RETRY_SUCCESS,
+ },
+ {
+ .descr = "bind4 deny specific IP & port of UDP, and retry",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+
+ /* if (ip == expected && port == expected) */
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock, src_ip4)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
+ __bpf_constant_ntohl(0x7F000001), 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock, src_port)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x1002, 2),
+
+ /* return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_A(1),
+
+ /* else return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
+ .attach_type = BPF_CGROUP_INET4_POST_BIND,
+ .domain = AF_INET,
+ .type = SOCK_DGRAM,
+ .ip = "127.0.0.1",
+ .port = 4098,
+ .port_retry = 5000,
+ .result = RETRY_SUCCESS,
+ },
+ {
+ .descr = "bind6 deny specific IP & port, and retry",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+
+ /* if (ip == expected && port == expected) */
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock, src_ip6[3])),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
+ __bpf_constant_ntohl(0x00000001), 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock, src_port)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x2001, 2),
+
+ /* return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_A(1),
+
+ /* else return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET6_POST_BIND,
+ .attach_type = BPF_CGROUP_INET6_POST_BIND,
+ .domain = AF_INET6,
+ .type = SOCK_STREAM,
+ .ip = "::1",
+ .port = 8193,
+ .port_retry = 9000,
+ .result = RETRY_SUCCESS,
+ },
+ {
+ .descr = "bind4 allow all",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
+ .attach_type = BPF_CGROUP_INET4_POST_BIND,
+ .domain = AF_INET,
+ .type = SOCK_STREAM,
+ .ip = "0.0.0.0",
+ .result = SUCCESS,
+ },
+ {
+ .descr = "bind6 allow all",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET6_POST_BIND,
+ .attach_type = BPF_CGROUP_INET6_POST_BIND,
+ .domain = AF_INET6,
+ .type = SOCK_STREAM,
+ .ip = "::",
+ .result = SUCCESS,
+ },
+};
+
+static int load_prog(const struct bpf_insn *insns,
+ enum bpf_attach_type expected_attach_type)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .expected_attach_type = expected_attach_type,
+ .log_level = 2,
+ .log_buf = bpf_log_buf,
+ .log_size = sizeof(bpf_log_buf),
+ );
+ int fd, insns_cnt = 0;
+
+ for (;
+ insns[insns_cnt].code != (BPF_JMP | BPF_EXIT);
+ insns_cnt++) {
+ }
+ insns_cnt++;
+
+ fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns,
+ insns_cnt, &opts);
+ if (fd < 0)
+ fprintf(stderr, "%s\n", bpf_log_buf);
+
+ return fd;
+}
+
+static int bind_sock(int domain, int type, const char *ip,
+ unsigned short port, unsigned short port_retry)
+{
+ struct sockaddr_storage addr;
+ struct sockaddr_in6 *addr6;
+ struct sockaddr_in *addr4;
+ int sockfd = -1;
+ socklen_t len;
+ int res = SUCCESS;
+
+ sockfd = socket(domain, type, 0);
+ if (sockfd < 0)
+ goto err;
+
+ memset(&addr, 0, sizeof(addr));
+
+ if (domain == AF_INET) {
+ len = sizeof(struct sockaddr_in);
+ addr4 = (struct sockaddr_in *)&addr;
+ addr4->sin_family = domain;
+ addr4->sin_port = htons(port);
+ if (inet_pton(domain, ip, (void *)&addr4->sin_addr) != 1)
+ goto err;
+ } else if (domain == AF_INET6) {
+ len = sizeof(struct sockaddr_in6);
+ addr6 = (struct sockaddr_in6 *)&addr;
+ addr6->sin6_family = domain;
+ addr6->sin6_port = htons(port);
+ if (inet_pton(domain, ip, (void *)&addr6->sin6_addr) != 1)
+ goto err;
+ } else {
+ goto err;
+ }
+
+ if (bind(sockfd, (const struct sockaddr *)&addr, len) == -1) {
+ /* sys_bind() may fail for different reasons, errno has to be
+ * checked to confirm that BPF program rejected it.
+ */
+ if (errno != EPERM)
+ goto err;
+ if (port_retry)
+ goto retry;
+ res = BIND_REJECT;
+ goto out;
+ }
+
+ goto out;
+retry:
+ if (domain == AF_INET)
+ addr4->sin_port = htons(port_retry);
+ else
+ addr6->sin6_port = htons(port_retry);
+ if (bind(sockfd, (const struct sockaddr *)&addr, len) == -1) {
+ if (errno != EPERM)
+ goto err;
+ res = RETRY_REJECT;
+ } else {
+ res = RETRY_SUCCESS;
+ }
+ goto out;
+err:
+ res = -1;
+out:
+ close(sockfd);
+ return res;
+}
+
+static int run_test(int cgroup_fd, struct sock_post_bind_test *test)
+{
+ int err, prog_fd, res, ret = 0;
+
+ prog_fd = load_prog(test->insns, test->expected_attach_type);
+ if (prog_fd < 0)
+ goto err;
+
+ err = bpf_prog_attach(prog_fd, cgroup_fd, test->attach_type, 0);
+ if (err < 0) {
+ if (test->result == ATTACH_REJECT)
+ goto out;
+ else
+ goto err;
+ }
+
+ res = bind_sock(test->domain, test->type, test->ip, test->port,
+ test->port_retry);
+ if (res > 0 && test->result == res)
+ goto out;
+err:
+ ret = -1;
+out:
+ /* Detaching w/o checking return code: best effort attempt. */
+ if (prog_fd != -1)
+ bpf_prog_detach(cgroup_fd, test->attach_type);
+ close(prog_fd);
+ return ret;
+}
+
+void test_sock_post_bind(void)
+{
+ struct netns_obj *ns;
+ int cgroup_fd;
+ int i;
+
+ cgroup_fd = test__join_cgroup("/post_bind");
+ if (!ASSERT_OK_FD(cgroup_fd, "join_cgroup"))
+ return;
+
+ ns = netns_new(TEST_NS, true);
+ if (!ASSERT_OK_PTR(ns, "netns_new"))
+ goto cleanup;
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ if (!test__start_subtest(tests[i].descr))
+ continue;
+
+ ASSERT_OK(run_test(cgroup_fd, &tests[i]), tests[i].descr);
+ }
+
+cleanup:
+ netns_free(ns);
+ close(cgroup_fd);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/subskeleton.c b/tools/testing/selftests/bpf/prog_tests/subskeleton.c
index 9c31b7004f9c..fdf13ed0152a 100644
--- a/tools/testing/selftests/bpf/prog_tests/subskeleton.c
+++ b/tools/testing/selftests/bpf/prog_tests/subskeleton.c
@@ -46,7 +46,8 @@ static int subskeleton_lib_subresult(struct bpf_object *obj)
return result;
}
-void test_subskeleton(void)
+/* initialize and load through skeleton, then instantiate subskeleton out of it */
+static void subtest_skel_subskeleton(void)
{
int err, result;
struct test_subskeleton *skel;
@@ -76,3 +77,76 @@ void test_subskeleton(void)
cleanup:
test_subskeleton__destroy(skel);
}
+
+/* initialize and load through generic bpf_object API, then instantiate subskeleton out of it */
+static void subtest_obj_subskeleton(void)
+{
+ int err, result;
+ const void *elf_bytes;
+ size_t elf_bytes_sz = 0, rodata_sz = 0, bss_sz = 0;
+ struct bpf_object *obj;
+ const struct bpf_map *map;
+ const struct bpf_program *prog;
+ struct bpf_link *link = NULL;
+ struct test_subskeleton__rodata *rodata;
+ struct test_subskeleton__bss *bss;
+
+ elf_bytes = test_subskeleton__elf_bytes(&elf_bytes_sz);
+ if (!ASSERT_OK_PTR(elf_bytes, "elf_bytes"))
+ return;
+
+ obj = bpf_object__open_mem(elf_bytes, elf_bytes_sz, NULL);
+ if (!ASSERT_OK_PTR(obj, "obj_open_mem"))
+ return;
+
+ map = bpf_object__find_map_by_name(obj, ".rodata");
+ if (!ASSERT_OK_PTR(map, "rodata_map_by_name"))
+ goto cleanup;
+
+ rodata = bpf_map__initial_value(map, &rodata_sz);
+ if (!ASSERT_OK_PTR(rodata, "rodata_get"))
+ goto cleanup;
+
+ rodata->rovar1 = 10;
+ rodata->var1 = 1;
+ subskeleton_lib_setup(obj);
+
+ err = bpf_object__load(obj);
+ if (!ASSERT_OK(err, "obj_load"))
+ goto cleanup;
+
+ prog = bpf_object__find_program_by_name(obj, "handler1");
+ if (!ASSERT_OK_PTR(prog, "prog_by_name"))
+ goto cleanup;
+
+ link = bpf_program__attach(prog);
+ if (!ASSERT_OK_PTR(link, "prog_attach"))
+ goto cleanup;
+
+ /* trigger tracepoint */
+ usleep(1);
+
+ map = bpf_object__find_map_by_name(obj, ".bss");
+ if (!ASSERT_OK_PTR(map, "bss_map_by_name"))
+ goto cleanup;
+
+ bss = bpf_map__initial_value(map, &bss_sz);
+ if (!ASSERT_OK_PTR(bss, "bss_get"))
+ goto cleanup;
+
+ result = subskeleton_lib_subresult(obj) * 10;
+ ASSERT_EQ(bss->out1, result, "out1");
+
+cleanup:
+ bpf_link__destroy(link);
+ bpf_object__close(obj);
+}
+
+
+void test_subskeleton(void)
+{
+ if (test__start_subtest("skel_subskel"))
+ subtest_skel_subskeleton();
+ if (test__start_subtest("obj_subskel"))
+ subtest_obj_subskeleton();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
index 21c5a37846ad..40f22454cf05 100644
--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
@@ -1496,8 +1496,8 @@ static void test_tailcall_bpf2bpf_hierarchy_3(void)
RUN_TESTS(tailcall_bpf2bpf_hierarchy3);
}
-/* test_tailcall_freplace checks that the attached freplace prog is OK to
- * update the prog_array map.
+/* test_tailcall_freplace checks that updating the prog_array map with the
+ * freplace prog fails, whether or not the freplace prog is attached to its target.
*/
static void test_tailcall_freplace(void)
{
@@ -1505,7 +1505,7 @@ static void test_tailcall_freplace(void)
struct bpf_link *freplace_link = NULL;
struct bpf_program *freplace_prog;
struct tc_bpf2bpf *tc_skel = NULL;
- int prog_fd, map_fd;
+ int prog_fd, tc_prog_fd, map_fd;
char buff[128] = {};
int err, key;
@@ -1523,9 +1523,10 @@ static void test_tailcall_freplace(void)
if (!ASSERT_OK_PTR(tc_skel, "tc_bpf2bpf__open_and_load"))
goto out;
- prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
+ tc_prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
freplace_prog = freplace_skel->progs.entry_freplace;
- err = bpf_program__set_attach_target(freplace_prog, prog_fd, "subprog");
+ err = bpf_program__set_attach_target(freplace_prog, tc_prog_fd,
+ "subprog_tc");
if (!ASSERT_OK(err, "set_attach_target"))
goto out;
@@ -1533,27 +1534,116 @@ static void test_tailcall_freplace(void)
if (!ASSERT_OK(err, "tailcall_freplace__load"))
goto out;
- freplace_link = bpf_program__attach_freplace(freplace_prog, prog_fd,
- "subprog");
+ map_fd = bpf_map__fd(freplace_skel->maps.jmp_table);
+ prog_fd = bpf_program__fd(freplace_prog);
+ key = 0;
+ err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
+ ASSERT_ERR(err, "update jmp_table failure");
+
+ freplace_link = bpf_program__attach_freplace(freplace_prog, tc_prog_fd,
+ "subprog_tc");
if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
goto out;
- map_fd = bpf_map__fd(freplace_skel->maps.jmp_table);
- prog_fd = bpf_program__fd(freplace_prog);
+ err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
+ ASSERT_ERR(err, "update jmp_table failure");
+
+out:
+ bpf_link__destroy(freplace_link);
+ tailcall_freplace__destroy(freplace_skel);
+ tc_bpf2bpf__destroy(tc_skel);
+}
+
+/* test_tailcall_bpf2bpf_freplace checks that attaching an freplace prog to a
+ * tail callee prog fails, and that updating an extended prog into the
+ * prog_array map also fails.
+ */
+static void test_tailcall_bpf2bpf_freplace(void)
+{
+ struct tailcall_freplace *freplace_skel = NULL;
+ struct bpf_link *freplace_link = NULL;
+ struct tc_bpf2bpf *tc_skel = NULL;
+ char buff[128] = {};
+ int prog_fd, map_fd;
+ int err, key;
+
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = buff,
+ .data_size_in = sizeof(buff),
+ .repeat = 1,
+ );
+
+ tc_skel = tc_bpf2bpf__open_and_load();
+ if (!ASSERT_OK_PTR(tc_skel, "tc_bpf2bpf__open_and_load"))
+ goto out;
+
+ prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
+ freplace_skel = tailcall_freplace__open();
+ if (!ASSERT_OK_PTR(freplace_skel, "tailcall_freplace__open"))
+ goto out;
+
+ err = bpf_program__set_attach_target(freplace_skel->progs.entry_freplace,
+ prog_fd, "subprog_tc");
+ if (!ASSERT_OK(err, "set_attach_target"))
+ goto out;
+
+ err = tailcall_freplace__load(freplace_skel);
+ if (!ASSERT_OK(err, "tailcall_freplace__load"))
+ goto out;
+
+ /* OK to attach then detach freplace prog. */
+
+ freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
+ prog_fd, "subprog_tc");
+ if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
+ goto out;
+
+ err = bpf_link__destroy(freplace_link);
+ if (!ASSERT_OK(err, "destroy link"))
+ goto out;
+
+ /* OK to update prog_array map then delete element from the map. */
+
key = 0;
+ map_fd = bpf_map__fd(freplace_skel->maps.jmp_table);
err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
if (!ASSERT_OK(err, "update jmp_table"))
goto out;
- prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
- err = bpf_prog_test_run_opts(prog_fd, &topts);
- ASSERT_OK(err, "test_run");
- ASSERT_EQ(topts.retval, 34, "test_run retval");
+ err = bpf_map_delete_elem(map_fd, &key);
+ if (!ASSERT_OK(err, "delete_elem from jmp_table"))
+ goto out;
+
+ /* Fail to attach a tail callee prog with freplace prog. */
+
+ err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
+ if (!ASSERT_OK(err, "update jmp_table"))
+ goto out;
+
+ freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
+ prog_fd, "subprog_tc");
+ if (!ASSERT_ERR_PTR(freplace_link, "attach_freplace failure"))
+ goto out;
+
+ err = bpf_map_delete_elem(map_fd, &key);
+ if (!ASSERT_OK(err, "delete_elem from jmp_table"))
+ goto out;
+
+ /* Fail to update an extended prog to prog_array map. */
+
+ freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
+ prog_fd, "subprog_tc");
+ if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
+ goto out;
+
+ err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
+ if (!ASSERT_ERR(err, "update jmp_table failure"))
+ goto out;
out:
bpf_link__destroy(freplace_link);
- tc_bpf2bpf__destroy(tc_skel);
tailcall_freplace__destroy(freplace_skel);
+ tc_bpf2bpf__destroy(tc_skel);
}
void test_tailcalls(void)
@@ -1606,4 +1696,6 @@ void test_tailcalls(void)
test_tailcall_bpf2bpf_hierarchy_3();
if (test__start_subtest("tailcall_freplace"))
test_tailcall_freplace();
+ if (test__start_subtest("tailcall_bpf2bpf_freplace"))
+ test_tailcall_bpf2bpf_freplace();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/task_kfunc.c b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c
index d4579f735398..83b90335967a 100644
--- a/tools/testing/selftests/bpf/prog_tests/task_kfunc.c
+++ b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c
@@ -68,6 +68,74 @@ cleanup:
task_kfunc_success__destroy(skel);
}
+static int run_vpid_test(void *prog_name)
+{
+ struct task_kfunc_success *skel;
+ struct bpf_program *prog;
+ int prog_fd, err = 0;
+
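+ /* must be running as PID 1 of the new PID namespace set up by the parent's clone() */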
+ if (getpid() != 1)
+ return 1;
+
+ skel = open_load_task_kfunc_skel();
+ if (!skel)
+ return 2;
+
+ if (skel->bss->err) {
+ err = 3;
+ goto cleanup;
+ }
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!prog) {
+ err = 4;
+ goto cleanup;
+ }
+
+ prog_fd = bpf_program__fd(prog);
+ if (prog_fd < 0) {
+ err = 5;
+ goto cleanup;
+ }
+
+ if (bpf_prog_test_run_opts(prog_fd, NULL)) {
+ err = 6;
+ goto cleanup;
+ }
+
+ if (skel->bss->err)
+ err = 7 + skel->bss->err;
+cleanup:
+ task_kfunc_success__destroy(skel);
+ return err;
+}
+
+static void run_vpid_success_test(const char *prog_name)
+{
+ const int stack_size = 1024 * 1024;
+ int child_pid, wstatus;
+ char *stack;
+
+ stack = (char *)malloc(stack_size);
+ if (!ASSERT_OK_PTR(stack, "clone_stack"))
+ return;
+
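+ /* pass the top of the buffer since the stack grows down; CLONE_NEWPID gives the child its own PID namespace */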
+ child_pid = clone(run_vpid_test, stack + stack_size,
+ CLONE_NEWPID | SIGCHLD, (void *)prog_name);
+ if (!ASSERT_GT(child_pid, -1, "child_pid"))
+ goto cleanup;
+
+ if (!ASSERT_GT(waitpid(child_pid, &wstatus, 0), -1, "waitpid"))
+ goto cleanup;
+
+ if (WEXITSTATUS(wstatus) > 7)
+ ASSERT_OK(WEXITSTATUS(wstatus) - 7, "vpid_test_failure");
+ else
+ ASSERT_OK(WEXITSTATUS(wstatus), "run_vpid_test_err");
+cleanup:
+ free(stack);
+}
+
static const char * const success_tests[] = {
"test_task_acquire_release_argument",
"test_task_acquire_release_current",
@@ -83,6 +151,11 @@ static const char * const success_tests[] = {
"test_task_kfunc_flavor_relo_not_found",
};
+static const char * const vpid_success_tests[] = {
+ "test_task_from_vpid_current",
+ "test_task_from_vpid_invalid",
+};
+
void test_task_kfunc(void)
{
int i;
@@ -94,5 +167,12 @@ void test_task_kfunc(void)
run_success_test(success_tests[i]);
}
+ for (i = 0; i < ARRAY_SIZE(vpid_success_tests); i++) {
+ if (!test__start_subtest(vpid_success_tests[i]))
+ continue;
+
+ run_vpid_success_test(vpid_success_tests[i]);
+ }
+
RUN_TESTS(task_kfunc_failure);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
index c33c05161a9e..00cc9d0aee5d 100644
--- a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
+++ b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
@@ -7,12 +7,20 @@
#include <pthread.h>
#include <sys/syscall.h> /* For SYS_xxx definitions */
#include <sys/types.h>
+#include <sys/eventfd.h>
+#include <sys/mman.h>
#include <test_progs.h>
+#include <bpf/btf.h>
#include "task_local_storage_helpers.h"
#include "task_local_storage.skel.h"
#include "task_local_storage_exit_creds.skel.h"
#include "task_ls_recursion.skel.h"
#include "task_storage_nodeadlock.skel.h"
+#include "uptr_test_common.h"
+#include "task_ls_uptr.skel.h"
+#include "uptr_update_failure.skel.h"
+#include "uptr_failure.skel.h"
+#include "uptr_map_failure.skel.h"
static void test_sys_enter_exit(void)
{
@@ -227,6 +235,259 @@ done:
sched_setaffinity(getpid(), sizeof(old), &old);
}
+static struct user_data udata __attribute__((aligned(16))) = {
+ .a = 1,
+ .b = 2,
+};
+
+static struct user_data udata2 __attribute__((aligned(16))) = {
+ .a = 3,
+ .b = 4,
+};
+
+static void check_udata2(int expected)
+{
+ udata2.result = udata2.nested_result = 0;
+ usleep(1);
+ ASSERT_EQ(udata2.result, expected, "udata2.result");
+ ASSERT_EQ(udata2.nested_result, expected, "udata2.nested_result");
+}
+
+static void test_uptr_basic(void)
+{
+ int map_fd, parent_task_fd, ev_fd;
+ struct value_type value = {};
+ struct task_ls_uptr *skel;
+ pid_t child_pid, my_tid;
+ __u64 ev_dummy_data = 1;
+ int err;
+
+ my_tid = syscall(SYS_gettid);
+ parent_task_fd = sys_pidfd_open(my_tid, 0);
+ if (!ASSERT_OK_FD(parent_task_fd, "parent_task_fd"))
+ return;
+
+ ev_fd = eventfd(0, 0);
+ if (!ASSERT_OK_FD(ev_fd, "ev_fd")) {
+ close(parent_task_fd);
+ return;
+ }
+
+ skel = task_ls_uptr__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ goto out;
+
+ map_fd = bpf_map__fd(skel->maps.datamap);
+ value.udata = &udata;
+ value.nested.udata = &udata;
+ err = bpf_map_update_elem(map_fd, &parent_task_fd, &value, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "update_elem(udata)"))
+ goto out;
+
+ err = task_ls_uptr__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto out;
+
+ child_pid = fork();
+ if (!ASSERT_NEQ(child_pid, -1, "fork"))
+ goto out;
+
+ /* Call syscall in the child process, but access the map value of
+ * the parent process in the BPF program to check if the user kptr
+ * is translated/mapped correctly.
+ */
+ if (child_pid == 0) {
+ /* child */
+
+ /* Overwrite the user_data in the child process to check if
+ * the BPF program accesses the user_data of the parent.
+ */
+ udata.a = 0;
+ udata.b = 0;
+
+ /* Wait for the parent to set child_pid */
+ read(ev_fd, &ev_dummy_data, sizeof(ev_dummy_data));
+ exit(0);
+ }
+
+ skel->bss->parent_pid = my_tid;
+ skel->bss->target_pid = child_pid;
+
+ write(ev_fd, &ev_dummy_data, sizeof(ev_dummy_data));
+
+ err = waitpid(child_pid, NULL, 0);
+ ASSERT_EQ(err, child_pid, "waitpid");
+ ASSERT_EQ(udata.result, MAGIC_VALUE + udata.a + udata.b, "udata.result");
+ ASSERT_EQ(udata.nested_result, MAGIC_VALUE + udata.a + udata.b, "udata.nested_result");
+
+ skel->bss->target_pid = my_tid;
+
+ /* update_elem: uptr changes from udata to udata2 */
+ value.udata = &udata2;
+ value.nested.udata = &udata2;
+ err = bpf_map_update_elem(map_fd, &parent_task_fd, &value, BPF_EXIST);
+ if (!ASSERT_OK(err, "update_elem(udata2)"))
+ goto out;
+ check_udata2(MAGIC_VALUE + udata2.a + udata2.b);
+
+ /* update_elem: uptr changes from udata2 uptr to NULL */
+ memset(&value, 0, sizeof(value));
+ err = bpf_map_update_elem(map_fd, &parent_task_fd, &value, BPF_EXIST);
+ if (!ASSERT_OK(err, "update_elem(udata2)"))
+ goto out;
+ check_udata2(0);
+
+ /* update_elem: uptr changes from NULL to udata2 */
+ value.udata = &udata2;
+ value.nested.udata = &udata2;
+ err = bpf_map_update_elem(map_fd, &parent_task_fd, &value, BPF_EXIST);
+ if (!ASSERT_OK(err, "update_elem(udata2)"))
+ goto out;
+ check_udata2(MAGIC_VALUE + udata2.a + udata2.b);
+
+ /* Check if user programs can access the value of user kptrs
+ * through bpf_map_lookup_elem(). Make sure the kernel value is not
+ * leaked.
+ */
+ err = bpf_map_lookup_elem(map_fd, &parent_task_fd, &value);
+ if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
+ goto out;
+ ASSERT_EQ(value.udata, NULL, "value.udata");
+ ASSERT_EQ(value.nested.udata, NULL, "value.nested.udata");
+
+ /* delete_elem */
+ err = bpf_map_delete_elem(map_fd, &parent_task_fd);
+ ASSERT_OK(err, "delete_elem(udata2)");
+ check_udata2(0);
+
+ /* update_elem: add uptr back to test map_free */
+ value.udata = &udata2;
+ value.nested.udata = &udata2;
+ err = bpf_map_update_elem(map_fd, &parent_task_fd, &value, BPF_NOEXIST);
+ ASSERT_OK(err, "update_elem(udata2)");
+
+out:
+ task_ls_uptr__destroy(skel);
+ close(ev_fd);
+ close(parent_task_fd);
+}
+
+static void test_uptr_across_pages(void)
+{
+ int page_size = getpagesize();
+ struct value_type value = {};
+ struct task_ls_uptr *skel;
+ int err, task_fd, map_fd;
+ void *mem;
+
+ task_fd = sys_pidfd_open(getpid(), 0);
+ if (!ASSERT_OK_FD(task_fd, "task_fd"))
+ return;
+
+ mem = mmap(NULL, page_size * 2, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (!ASSERT_OK_PTR(mem, "mmap(page_size * 2)")) {
+ close(task_fd);
+ return;
+ }
+
+ skel = task_ls_uptr__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ goto out;
+
+ map_fd = bpf_map__fd(skel->maps.datamap);
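+ /* a uptr that straddles a page boundary is expected to be rejected with EOPNOTSUPP */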
+ value.udata = mem + page_size - offsetof(struct user_data, b);
+ err = bpf_map_update_elem(map_fd, &task_fd, &value, 0);
+ if (!ASSERT_ERR(err, "update_elem(udata)"))
+ goto out;
+ ASSERT_EQ(errno, EOPNOTSUPP, "errno");
+
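+ /* the same struct placed fully within one page should be accepted */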
+ value.udata = mem + page_size - sizeof(struct user_data);
+ err = bpf_map_update_elem(map_fd, &task_fd, &value, 0);
+ ASSERT_OK(err, "update_elem(udata)");
+
+out:
+ task_ls_uptr__destroy(skel);
+ close(task_fd);
+ munmap(mem, page_size * 2);
+}
+
+static void test_uptr_update_failure(void)
+{
+ struct value_lock_type value = {};
+ struct uptr_update_failure *skel;
+ int err, task_fd, map_fd;
+
+ task_fd = sys_pidfd_open(getpid(), 0);
+ if (!ASSERT_OK_FD(task_fd, "task_fd"))
+ return;
+
+ skel = uptr_update_failure__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ goto out;
+
+ map_fd = bpf_map__fd(skel->maps.datamap);
+
+ value.udata = &udata;
+ err = bpf_map_update_elem(map_fd, &task_fd, &value, BPF_F_LOCK);
+ if (!ASSERT_ERR(err, "update_elem(udata, BPF_F_LOCK)"))
+ goto out;
+ ASSERT_EQ(errno, EOPNOTSUPP, "errno");
+
+ err = bpf_map_update_elem(map_fd, &task_fd, &value, BPF_EXIST);
+ if (!ASSERT_ERR(err, "update_elem(udata, BPF_EXIST)"))
+ goto out;
+ ASSERT_EQ(errno, ENOENT, "errno");
+
+ err = bpf_map_update_elem(map_fd, &task_fd, &value, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "update_elem(udata, BPF_NOEXIST)"))
+ goto out;
+
+ value.udata = &udata2;
+ err = bpf_map_update_elem(map_fd, &task_fd, &value, BPF_NOEXIST);
+ if (!ASSERT_ERR(err, "update_elem(udata2, BPF_NOEXIST)"))
+ goto out;
+ ASSERT_EQ(errno, EEXIST, "errno");
+
+out:
+ uptr_update_failure__destroy(skel);
+ close(task_fd);
+}
+
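+/* Recreate the skeleton's map by hand with its BTF value type and check that
+ * map creation fails with the expected errno.
+ */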
+static void test_uptr_map_failure(const char *map_name, int expected_errno)
+{
+ LIBBPF_OPTS(bpf_map_create_opts, create_attr);
+ struct uptr_map_failure *skel;
+ struct bpf_map *map;
+ struct btf *btf;
+ int map_fd, err;
+
+ skel = uptr_map_failure__open();
+ if (!ASSERT_OK_PTR(skel, "uptr_map_failure__open"))
+ return;
+
+ map = bpf_object__find_map_by_name(skel->obj, map_name);
+ btf = bpf_object__btf(skel->obj);
+ err = btf__load_into_kernel(btf);
+ if (!ASSERT_OK(err, "btf__load_into_kernel"))
+ goto done;
+
+ create_attr.map_flags = bpf_map__map_flags(map);
+ create_attr.btf_fd = btf__fd(btf);
+ create_attr.btf_key_type_id = bpf_map__btf_key_type_id(map);
+ create_attr.btf_value_type_id = bpf_map__btf_value_type_id(map);
+ map_fd = bpf_map_create(bpf_map__type(map), map_name,
+ bpf_map__key_size(map), bpf_map__value_size(map),
+ 0, &create_attr);
+ if (ASSERT_ERR_FD(map_fd, "map_create"))
+ ASSERT_EQ(errno, expected_errno, "errno");
+ else
+ close(map_fd);
+
+done:
+ uptr_map_failure__destroy(skel);
+}
+
void test_task_local_storage(void)
{
if (test__start_subtest("sys_enter_exit"))
@@ -237,4 +498,21 @@ void test_task_local_storage(void)
test_recursion();
if (test__start_subtest("nodeadlock"))
test_nodeadlock();
+ if (test__start_subtest("uptr_basic"))
+ test_uptr_basic();
+ if (test__start_subtest("uptr_across_pages"))
+ test_uptr_across_pages();
+ if (test__start_subtest("uptr_update_failure"))
+ test_uptr_update_failure();
+ if (test__start_subtest("uptr_map_failure_e2big")) {
+ if (getpagesize() == PAGE_SIZE)
+ test_uptr_map_failure("large_uptr_map", E2BIG);
+ else
+ test__skip();
+ }
+ if (test__start_subtest("uptr_map_failure_size0"))
+ test_uptr_map_failure("empty_uptr_map", EINVAL);
+ if (test__start_subtest("uptr_map_failure_kstruct"))
+ test_uptr_map_failure("kstruct_uptr_map", EINVAL);
+ RUN_TESTS(uptr_failure);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
index 844f6fc8487b..2c39902b8a09 100644
--- a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
@@ -836,10 +836,10 @@ uprobe_consumer_test(struct uprobe_multi_consumers *skel,
return 0;
}
-static void consumer_test(struct uprobe_multi_consumers *skel,
- unsigned long before, unsigned long after)
+static int consumer_test(struct uprobe_multi_consumers *skel,
+ unsigned long before, unsigned long after)
{
- int err, idx;
+ int err, idx, ret = -1;
printf("consumer_test before %lu after %lu\n", before, after);
@@ -869,32 +869,29 @@ static void consumer_test(struct uprobe_multi_consumers *skel,
fmt = "prog 0/1: uprobe";
} else {
/*
- * uprobe return is tricky ;-)
- *
* to trigger uretprobe consumer, the uretprobe needs to be installed,
* which means one of the 'return' uprobes was alive when probe was hit:
*
* idxs: 2/3 uprobe return in 'installed' mask
- *
- * in addition if 'after' state removes everything that was installed in
- * 'before' state, then uprobe kernel object goes away and return uprobe
- * is not installed and we won't hit it even if it's in 'after' state.
*/
unsigned long had_uretprobes = before & 0b1100; /* is uretprobe installed */
- unsigned long probe_preserved = before & after; /* did uprobe go away */
- if (had_uretprobes && probe_preserved && test_bit(idx, after))
+ if (had_uretprobes && test_bit(idx, after))
val++;
fmt = "idx 2/3: uretprobe";
}
- ASSERT_EQ(skel->bss->uprobe_result[idx], val, fmt);
+ if (!ASSERT_EQ(skel->bss->uprobe_result[idx], val, fmt))
+ goto cleanup;
skel->bss->uprobe_result[idx] = 0;
}
+ ret = 0;
+
cleanup:
for (idx = 0; idx < 4; idx++)
uprobe_detach(skel, idx);
+ return ret;
}
static void test_consumers(void)
@@ -946,9 +943,11 @@ static void test_consumers(void)
for (before = 0; before < 16; before++) {
for (after = 0; after < 16; after++)
- consumer_test(skel, before, after);
+ if (consumer_test(skel, before, after))
+ goto out;
}
+out:
uprobe_multi_consumers__destroy(skel);
}