author    Linus Torvalds <torvalds@linux-foundation.org>  2025-04-12 12:48:10 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2025-04-12 12:48:10 -0700
commit    b676ac484f847bbe5c7d29603f41475b64fefe55 (patch)
tree      f986e0d55d3068ca4552ecca0a425f4d5db9a820 /tools/testing/selftests/bpf
parent    Merge tag 'pwm/for-6.15-rc2-fixes' of git://git.kernel.org/pub/scm/linux/kern... (diff)
parent    bpf: Convert ringbuf map to rqspinlock (diff)
Merge tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Pull bpf fixes from Alexei Starovoitov:

 - Follow-up fixes for the resilient spinlock (Kumar Kartikeya Dwivedi):
     - Make the res_spin_lock test less verbose, since it was spamming
       BPF CI on failure, and make the check for AA deadlock stronger
     - Fix a rebasing mistake and use the architecture-provided
       res_smp_cond_load_acquire
     - Convert BPF maps (queue_stack and ringbuf) to the resilient
       spinlock to address long-standing syzbot reports

 - Make sure that classic BPF load instructions from SKF_[NET|LL]_OFF
   offsets work when the skb is fragmented (Willem de Bruijn)

* tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  bpf: Convert ringbuf map to rqspinlock
  bpf: Convert queue_stack map to rqspinlock
  bpf: Use architecture provided res_smp_cond_load_acquire
  selftests/bpf: Make res_spin_lock AA test condition stronger
  selftests/net: test sk_filter support for SKF_NET_OFF on frags
  bpf: support SKF_NET_OFF and SKF_LL_OFF on skb frags
  selftests/bpf: Make res_spin_lock test less verbose
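One of the pulled changes teaches classic BPF's absolute load instruction to
work when the byte addressed via the special SKF_NET_OFF or SKF_LL_OFF offsets
sits in an skb fragment rather than in the linear head. As a minimal sketch of
what such a filter looks like from userspace (illustrative, not taken from the
patch; offset 9 relative to the network header is the IPv4 protocol field):

    #include <linux/filter.h>
    #include <netinet/in.h>
    #include <sys/socket.h>

    /* Accept only TCP by reading the IPv4 protocol byte, addressed
     * relative to the network header via SKF_NET_OFF. Before the fix,
     * such a load could fail when the referenced byte lived in an skb
     * fragment; afterwards it is expected to work either way.
     */
    static int attach_tcp_only_filter(int fd)
    {
    	struct sock_filter insns[] = {
    		BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 9),
    		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, IPPROTO_TCP, 0, 1),
    		BPF_STMT(BPF_RET | BPF_K, 0xffff),	/* keep packet */
    		BPF_STMT(BPF_RET | BPF_K, 0),		/* drop packet */
    	};
    	struct sock_fprog prog = {
    		.len = sizeof(insns) / sizeof(insns[0]),
    		.filter = insns,
    	};

    	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
    			  &prog, sizeof(prog));
    }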
Diffstat (limited to 'tools/testing/selftests/bpf')
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/res_spin_lock.c |  7 +++++--
-rw-r--r--  tools/testing/selftests/bpf/progs/res_spin_lock.c      | 10 +++++++---
2 files changed, 12 insertions(+), 5 deletions(-)
diff --git a/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c b/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c
index 115287ba441b..0703e987df89 100644
--- a/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c
+++ b/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c
@@ -25,8 +25,11 @@ static void *spin_lock_thread(void *arg)
 	while (!READ_ONCE(skip)) {
 		err = bpf_prog_test_run_opts(prog_fd, &topts);
-		ASSERT_OK(err, "test_run");
-		ASSERT_OK(topts.retval, "test_run retval");
+		if (err || topts.retval) {
+			ASSERT_OK(err, "test_run");
+			ASSERT_OK(topts.retval, "test_run retval");
+			break;
+		}
 	}
 	pthread_exit(arg);
 }
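The fix keeps the two ASSERT_OK checks but enters them only on failure,
breaking out of the loop so a failing kernel logs one assertion per thread
instead of one per iteration. A hedged sketch of the same pattern as a
standalone libbpf driver (the option values and helper name are illustrative,
not the selftest's):

    #include <pthread.h>
    #include <stdbool.h>
    #include <bpf/bpf.h>

    static volatile bool skip;

    /* Run the program repeatedly, but stop at the first failure so a
     * broken kernel produces one report instead of an unbounded stream.
     */
    static void *run_until_failure(void *arg)
    {
    	int err, prog_fd = *(int *)arg;
    	LIBBPF_OPTS(bpf_test_run_opts, topts, .repeat = 1000);

    	while (!skip) {
    		err = bpf_prog_test_run_opts(prog_fd, &topts);
    		if (err || topts.retval)
    			break;	/* report once in the caller, then bail */
    	}
    	pthread_exit(arg);
    }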
diff --git a/tools/testing/selftests/bpf/progs/res_spin_lock.c b/tools/testing/selftests/bpf/progs/res_spin_lock.c
index b33385dfbd35..22c4fb8b9266 100644
--- a/tools/testing/selftests/bpf/progs/res_spin_lock.c
+++ b/tools/testing/selftests/bpf/progs/res_spin_lock.c
@@ -38,13 +38,14 @@ int res_spin_lock_test(struct __sk_buff *ctx)
 	r = bpf_res_spin_lock(&elem1->lock);
 	if (r)
 		return r;
-	if (!bpf_res_spin_lock(&elem2->lock)) {
+	r = bpf_res_spin_lock(&elem2->lock);
+	if (!r) {
 		bpf_res_spin_unlock(&elem2->lock);
 		bpf_res_spin_unlock(&elem1->lock);
 		return -1;
 	}
 	bpf_res_spin_unlock(&elem1->lock);
-	return 0;
+	return r != -EDEADLK;
 }
 
 SEC("tc")
@@ -124,12 +125,15 @@ int res_spin_lock_test_held_lock_max(struct __sk_buff *ctx)
 	/* Trigger AA, after exhausting entries in the held lock table. This
 	 * time, only the timeout can save us, as AA detection won't succeed.
 	 */
-	if (!bpf_res_spin_lock(locks[34])) {
+	ret = bpf_res_spin_lock(locks[34]);
+	if (!ret) {
 		bpf_res_spin_unlock(locks[34]);
 		ret = 1;
 		goto end;
 	}
+	ret = ret != -ETIMEDOUT ? 2 : 0;
+
 end:
 	for (i = i - 1; i >= 0; i--)
 		bpf_res_spin_unlock(locks[i]);
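With this change the program reports three distinct outcomes through its
retval: 0 when the final acquisition failed with the expected -ETIMEDOUT,
1 when it wrongly succeeded past the held-lock table limit, and 2 when it
failed with some other error. A hedged sketch of how a runner might decode
these (the mapping is read off the hunk above; the function is hypothetical):

    /* Map the program's retval to a diagnosis, following the hunk
     * above: 0 is the expected timeout rescue, 1 means the final lock
     * acquisition wrongly succeeded, 2 means it failed with the wrong
     * error code.
     */
    static const char *held_lock_max_result(int retval)
    {
    	switch (retval) {
    	case 0:
    		return "ok: timeout rescued the AA attempt";
    	case 1:
    		return "bug: lock succeeded past held-lock table limit";
    	case 2:
    		return "bug: lock failed, but not with -ETIMEDOUT";
    	default:
    		return "unexpected retval";
    	}
    }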