author    Jakub Kicinski <kuba@kernel.org>    2023-12-18 16:46:07 -0800
committer Jakub Kicinski <kuba@kernel.org>    2023-12-18 16:46:08 -0800
commit    c49b292d031e385abf764ded32cd953c77e73f2d
tree      3f13748b32a3c273c5315286a3acaf45447a5437 /tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c
parent    Merge tag 'wireless-next-2023-12-18' of git://git.kernel.org/pub/scm/linux/ke...
parent    bpf: Ensure precise is reset to false in __mark_reg_const_zero()
Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:

====================
pull-request: bpf-next 2023-12-18

This PR is larger than usual and contains changes in various parts of the
kernel. The main changes are:

1) Fix kCFI bugs in BPF, from Peter Zijlstra.

   End result: all forms of indirect calls from BPF into kernel and from
   kernel into BPF work with CFI enabled. This allows BPF to work with
   CONFIG_FINEIBT=y.

2) Introduce BPF token object, from Andrii Nakryiko.

   It adds an ability to delegate a subset of BPF features from a
   privileged daemon (e.g., systemd) through special mount options for
   userns-bound BPF FS to a trusted unprivileged application. The design
   accommodates suggestions from Christian Brauner and Paul Moore.

   Example:

   $ sudo mkdir -p /sys/fs/bpf/token
   $ sudo mount -t bpf bpffs /sys/fs/bpf/token \
     -o delegate_cmds=prog_load:MAP_CREATE \
     -o delegate_progs=kprobe \
     -o delegate_attachs=xdp

3) Various verifier improvements and fixes, from Andrii Nakryiko and
   Andrei Matei.

   - Complete precision tracking support for register spills
   - Fix verification of possibly-zero-sized stack accesses
   - Fix access to uninit stack slots
   - Track aligned STACK_ZERO cases as imprecise spilled registers. It
     improves the verifier "instructions processed" metric from single
     digit to 50-60% for some programs.
   - Fix verifier retval logic

4) Support for VLAN tag in XDP hints, from Larysa Zaremba.

5) Allocate BPF trampoline via bpf_prog_pack mechanism, from Song Liu.

   End result: better memory utilization and lower I$ miss for calls to
   BPF via BPF trampoline.

6) Fix race between BPF prog accessing inner map and parallel delete,
   from Hou Tao.

7) Add bpf_xdp_get_xfrm_state() kfunc, from Daniel Xu.

   It allows BPF to interact with the IPSEC infra. The intent is to
   support software RSS (via XDP) for the upcoming ipsec pcpu work.
   Experiments on AWS demonstrate single tunnel pcpu ipsec reaching
   line rate on 100G ENA nics.

8) Expand bpf_cgrp_storage to support the cgroup1 non-attach case, from
   Yafang Shao. A sketch of the BPF-side usage follows this message.

9) BPF file verification via fsverity, from Song Liu.

   It allows BPF progs to get the fsverity digest.

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (164 commits)
  bpf: Ensure precise is reset to false in __mark_reg_const_zero()
  selftests/bpf: Add more uprobe multi fail tests
  bpf: Fail uprobe multi link with negative offset
  selftests/bpf: Test the release of map btf
  s390/bpf: Fix indirect trampoline generation
  selftests/bpf: Temporarily disable dummy_struct_ops test on s390
  x86/cfi,bpf: Fix bpf_exception_cb() signature
  bpf: Fix dtor CFI
  cfi: Add CFI_NOSEAL()
  x86/cfi,bpf: Fix bpf_struct_ops CFI
  x86/cfi,bpf: Fix bpf_callback_t CFI
  x86/cfi,bpf: Fix BPF JIT call
  cfi: Flip headers
  selftests/bpf: Add test for abnormal cnt during multi-kprobe attachment
  selftests/bpf: Don't use libbpf_get_error() in kprobe_multi_test
  selftests/bpf: Add test for abnormal cnt during multi-uprobe attachment
  bpf: Limit the number of kprobes when attaching program to multiple kprobes
  bpf: Limit the number of uprobes when attaching program to multiple uprobes
  bpf: xdp: Register generic_kfunc_set with XDP programs
  selftests/bpf: utilize string values for delegate_xxx mount options
  ...
====================

Link: https://lore.kernel.org/r/20231219000520.34178-1-alexei.starovoitov@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
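For item 8, the diff on this page wires an is_cgroup1/target_hid pair into
each selftest skeleton. Below is a minimal sketch of the BPF-program side of
that switch, assuming a counter map named map_a and a tp_btf/sys_enter hook
(both illustrative, not the selftest's verbatim source); the
bpf_task_get_cgroup1()/bpf_cgroup_release() kfuncs are the ones this series
relies on:

  /* Illustrative sketch, not the selftest's verbatim source. */
  #include <vmlinux.h>
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  char _license[] SEC("license") = "GPL";

  struct {
  	__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
  	__uint(map_flags, BPF_F_NO_PREALLOC);
  	__type(key, int);
  	__type(value, long);
  } map_a SEC(".maps");

  /* Set from userspace (cf. CGROUP_MODE_SET() below) before attach. */
  bool is_cgroup1;
  int target_hid;

  struct cgroup *bpf_task_get_cgroup1(struct task_struct *task,
  				    int hierarchy_id) __ksym;
  void bpf_cgroup_release(struct cgroup *cgrp) __ksym;

  static void bump(struct cgroup *cgrp)
  {
  	long *ptr;

  	/* Create-or-lookup this cgroup's local-storage slot. */
  	ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0,
  				   BPF_LOCAL_STORAGE_GET_F_CREATE);
  	if (ptr)
  		__sync_fetch_and_add(ptr, 1);
  }

  SEC("tp_btf/sys_enter")
  int BPF_PROG(on_enter, struct pt_regs *regs, long id)
  {
  	struct task_struct *task = bpf_get_current_task_btf();
  	struct cgroup *cgrp;

  	if (is_cgroup1) {
  		/* cgroup1: look the task up in the hierarchy chosen by
  		 * target_hid; the kfunc returns a referenced pointer.
  		 */
  		cgrp = bpf_task_get_cgroup1(task, target_hid);
  		if (!cgrp)
  			return 0;
  		bump(cgrp);
  		bpf_cgroup_release(cgrp);
  		return 0;
  	}

  	/* cgroup2: the default-hierarchy cgroup is reachable directly. */
  	bump(task->cgroups->dfl_cgrp);
  	return 0;
  }

Userspace then sets skel->bss->is_cgroup1 and skel->bss->target_hid (exactly
what the CGROUP_MODE_SET() macro in the diff below does) before attaching.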
Diffstat (limited to 'tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c')
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c | 98
1 file changed, 97 insertions(+), 1 deletion(-)
diff --git a/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c b/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c
index 63e776f4176e..747761572098 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c
@@ -19,6 +19,21 @@ struct socket_cookie {
__u64 cookie_value;
};
+static bool is_cgroup1;
+static int target_hid;
+
+#define CGROUP_MODE_SET(skel) \
+{ \
+ skel->bss->is_cgroup1 = is_cgroup1; \
+ skel->bss->target_hid = target_hid; \
+}
+
+static void cgroup_mode_value_init(bool cgroup, int hid)
+{
+ is_cgroup1 = cgroup;
+ target_hid = hid;
+}
+
static void test_tp_btf(int cgroup_fd)
{
struct cgrp_ls_tp_btf *skel;
@@ -29,6 +44,8 @@ static void test_tp_btf(int cgroup_fd)
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
return;
+ CGROUP_MODE_SET(skel);
+
/* populate a value in map_b */
err = bpf_map_update_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd, &val1, BPF_ANY);
if (!ASSERT_OK(err, "map_update_elem"))
@@ -130,6 +147,8 @@ static void test_recursion(int cgroup_fd)
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
return;
+ CGROUP_MODE_SET(skel);
+
err = cgrp_ls_recursion__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto out;
@@ -165,6 +184,8 @@ static void test_cgroup_iter_sleepable(int cgroup_fd, __u64 cgroup_id)
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
+ CGROUP_MODE_SET(skel);
+
bpf_program__set_autoload(skel->progs.cgroup_iter, true);
err = cgrp_ls_sleepable__load(skel);
if (!ASSERT_OK(err, "skel_load"))
@@ -202,6 +223,7 @@ static void test_yes_rcu_lock(__u64 cgroup_id)
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
+ CGROUP_MODE_SET(skel);
skel->bss->target_pid = syscall(SYS_gettid);
bpf_program__set_autoload(skel->progs.yes_rcu_lock, true);
@@ -229,6 +251,8 @@ static void test_no_rcu_lock(void)
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
+ CGROUP_MODE_SET(skel);
+
bpf_program__set_autoload(skel->progs.no_rcu_lock, true);
err = cgrp_ls_sleepable__load(skel);
ASSERT_ERR(err, "skel_load");
@@ -236,7 +260,25 @@ static void test_no_rcu_lock(void)
cgrp_ls_sleepable__destroy(skel);
}
-void test_cgrp_local_storage(void)
+static void test_cgrp1_no_rcu_lock(void)
+{
+ struct cgrp_ls_sleepable *skel;
+ int err;
+
+ skel = cgrp_ls_sleepable__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ CGROUP_MODE_SET(skel);
+
+ bpf_program__set_autoload(skel->progs.cgrp1_no_rcu_lock, true);
+ err = cgrp_ls_sleepable__load(skel);
+ ASSERT_OK(err, "skel_load");
+
+ cgrp_ls_sleepable__destroy(skel);
+}
+
+static void cgrp2_local_storage(void)
{
__u64 cgroup_id;
int cgroup_fd;
@@ -245,6 +287,8 @@ void test_cgrp_local_storage(void)
if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /cgrp_local_storage"))
return;
+ cgroup_mode_value_init(0, -1);
+
cgroup_id = get_cgroup_id("/cgrp_local_storage");
if (test__start_subtest("tp_btf"))
test_tp_btf(cgroup_fd);
@@ -263,3 +307,55 @@ void test_cgrp_local_storage(void)
close(cgroup_fd);
}
+
+static void cgrp1_local_storage(void)
+{
+ int cgrp1_fd, cgrp1_hid, cgrp1_id, err;
+
+ /* Setup cgroup1 hierarchy */
+ err = setup_classid_environment();
+ if (!ASSERT_OK(err, "setup_classid_environment"))
+ return;
+
+ err = join_classid();
+ if (!ASSERT_OK(err, "join_cgroup1"))
+ goto cleanup;
+
+ cgrp1_fd = open_classid();
+ if (!ASSERT_GE(cgrp1_fd, 0, "cgroup1 fd"))
+ goto cleanup;
+
+ cgrp1_id = get_classid_cgroup_id();
+ if (!ASSERT_GE(cgrp1_id, 0, "cgroup1 id"))
+ goto close_fd;
+
+ cgrp1_hid = get_cgroup1_hierarchy_id("net_cls");
+ if (!ASSERT_GE(cgrp1_hid, 0, "cgroup1 hid"))
+ goto close_fd;
+
+ cgroup_mode_value_init(1, cgrp1_hid);
+
+ if (test__start_subtest("cgrp1_tp_btf"))
+ test_tp_btf(cgrp1_fd);
+ if (test__start_subtest("cgrp1_recursion"))
+ test_recursion(cgrp1_fd);
+ if (test__start_subtest("cgrp1_negative"))
+ test_negative();
+ if (test__start_subtest("cgrp1_iter_sleepable"))
+ test_cgroup_iter_sleepable(cgrp1_fd, cgrp1_id);
+ if (test__start_subtest("cgrp1_yes_rcu_lock"))
+ test_yes_rcu_lock(cgrp1_id);
+ if (test__start_subtest("cgrp1_no_rcu_lock"))
+ test_cgrp1_no_rcu_lock();
+
+close_fd:
+ close(cgrp1_fd);
+cleanup:
+ cleanup_classid_environment();
+}
+
+void test_cgrp_local_storage(void)
+{
+ cgrp2_local_storage();
+ cgrp1_local_storage();
+}
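
The cgrp1_local_storage() setup above leans on cgroup_helpers routines such
as get_cgroup1_hierarchy_id(). As a standalone illustration of what that
lookup involves (an assumption-laden sketch, not the helper's actual
implementation), one can parse /proc/self/cgroup, whose cgroup1 lines have
the form hierarchy-ID:controller-list:path:

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  /* Illustrative stand-in for a helper like get_cgroup1_hierarchy_id():
   * return the hierarchy ID of the cgroup1 mount whose controller list
   * contains @subsys (e.g. "net_cls"), or -1 if none is found.
   */
  static int lookup_cgroup1_hierarchy_id(const char *subsys)
  {
  	char buf[256], *ctrl, *saveptr;
  	int found = -1;
  	FILE *f;

  	f = fopen("/proc/self/cgroup", "r");
  	if (!f)
  		return -1;

  	/* cgroup1 lines look like "4:net_cls,net_prio:/path"; the
  	 * cgroup2 line is "0::/path" and never matches below.
  	 */
  	while (found < 0 && fgets(buf, sizeof(buf), f)) {
  		char *c1 = strchr(buf, ':');
  		char *c2 = c1 ? strchr(c1 + 1, ':') : NULL;

  		if (!c2)
  			continue;
  		*c1 = *c2 = '\0';

  		/* Walk the comma-separated controller list. */
  		for (ctrl = strtok_r(c1 + 1, ",", &saveptr); ctrl;
  		     ctrl = strtok_r(NULL, ",", &saveptr)) {
  			if (!strcmp(ctrl, subsys)) {
  				found = atoi(buf);
  				break;
  			}
  		}
  	}
  	fclose(f);
  	return found;
  }

The resulting ID is what feeds cgroup_mode_value_init(1, cgrp1_hid) in the
test above.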