1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
|
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2026 Meta Platforms, Inc. and affiliates. */
#define _GNU_SOURCE
#include <sched.h>
#include <pthread.h>
#include <test_progs.h>
#include "timer_start_delete_race.skel.h"
/*
* Test for race between bpf_timer_start() and map element deletion.
*
* The race scenario:
* - CPU 1: bpf_timer_start() proceeds to bpf_async_process() and is about
* to call hrtimer_start() but hasn't yet
* - CPU 2: map_delete_elem() calls __bpf_async_cancel_and_free(), since
* timer is not scheduled yet hrtimer_try_to_cancel() is a nop,
* then calls bpf_async_refcount_put() dropping refcnt to zero
* and scheduling call_rcu_tasks_trace()
* - CPU 1: continues and calls hrtimer_start()
* - After RCU tasks trace grace period: memory is freed
* - Timer callback fires on freed memory: UAF!
*
* This test stresses this race by having two threads:
* - Thread 1: repeatedly starts timers
* - Thread 2: repeatedly deletes map elements
*
* KASAN should detect use-after-free.
*/
#define ITERATIONS 1000
/*
 * State shared between the main test function and the two worker threads.
 *
 * NOTE(review): volatile is not a synchronization primitive; it works here
 * only as a best-effort start/stop flag between threads — confirm this
 * matches the selftest suite's conventions.
 */
struct ctx {
	struct timer_start_delete_race *skel; /* loaded skeleton, owned by main */
	volatile bool start; /* set by main to release both workers at once */
	volatile bool stop;  /* set by main to make workers bail out early */
	int errors;          /* bumped by workers on bpf_prog_test_run failure */
};
/*
 * Worker thread: pin itself to CPU 0, wait for ctx->start, then run the
 * start_timer BPF program up to ITERATIONS times to repeatedly arm the
 * timer and widen the race window against the deleting thread.
 *
 * arg is a struct ctx *; returns NULL. On any prog-run failure the shared
 * error counter is bumped and the loop ends early.
 */
static void *start_timer_thread(void *arg)
{
	struct ctx *ctx = arg;
	cpu_set_t cpuset;
	int fd, i;

	/* Pin to CPU 0 so the two racing threads run on distinct CPUs. */
	CPU_ZERO(&cpuset);
	CPU_SET(0, &cpuset);
	pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);

	/* Park until the main thread releases both workers together. */
	while (!ctx->start && !ctx->stop)
		usleep(1);
	if (ctx->stop)
		return NULL;

	fd = bpf_program__fd(ctx->skel->progs.start_timer);
	for (i = 0; i < ITERATIONS && !ctx->stop; i++) {
		LIBBPF_OPTS(bpf_test_run_opts, opts);
		int err;

		err = bpf_prog_test_run_opts(fd, &opts);
		if (err || opts.retval) {
			/*
			 * Both worker threads update ctx->errors concurrently;
			 * a plain ++ would be a data race (UB). Use an atomic
			 * RMW instead.
			 */
			__atomic_fetch_add(&ctx->errors, 1, __ATOMIC_RELAXED);
			break;
		}
	}
	return NULL;
}
/*
 * Worker thread: pin itself to CPU 1, wait for ctx->start, then run the
 * delete_elem BPF program up to ITERATIONS times so map-element deletion
 * races against the timer-starting thread on CPU 0.
 *
 * arg is a struct ctx *; returns NULL. On any prog-run failure the shared
 * error counter is bumped and the loop ends early.
 */
static void *delete_elem_thread(void *arg)
{
	struct ctx *ctx = arg;
	cpu_set_t cpuset;
	int fd, i;

	/* Pin to CPU 1, distinct from the timer-starting thread on CPU 0. */
	CPU_ZERO(&cpuset);
	CPU_SET(1, &cpuset);
	pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);

	/* Park until the main thread releases both workers together. */
	while (!ctx->start && !ctx->stop)
		usleep(1);
	if (ctx->stop)
		return NULL;

	fd = bpf_program__fd(ctx->skel->progs.delete_elem);
	for (i = 0; i < ITERATIONS && !ctx->stop; i++) {
		LIBBPF_OPTS(bpf_test_run_opts, opts);
		int err;

		err = bpf_prog_test_run_opts(fd, &opts);
		if (err || opts.retval) {
			/*
			 * Both worker threads update ctx->errors concurrently;
			 * a plain ++ would be a data race (UB). Use an atomic
			 * RMW instead.
			 */
			__atomic_fetch_add(&ctx->errors, 1, __ATOMIC_RELAXED);
			break;
		}
	}
	return NULL;
}
/*
 * Test entry point: load the skeleton, spawn one timer-starting worker and
 * one element-deleting worker pinned to different CPUs, release them
 * simultaneously via ctx.start, and join both. On a buggy kernel KASAN
 * reports the use-after-free (or the kernel crashes); otherwise we only
 * check that the BPF programs themselves ran without error.
 */
void test_timer_start_delete_race(void)
{
	struct timer_start_delete_race *skel;
	struct ctx ctx = {};
	pthread_t tid[2];
	int ret;

	skel = timer_start_delete_race__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
		return;
	ctx.skel = skel;

	ret = pthread_create(&tid[0], NULL, start_timer_thread, &ctx);
	if (!ASSERT_OK(ret, "create start_timer_thread")) {
		ctx.stop = true;
		goto cleanup;
	}

	ret = pthread_create(&tid[1], NULL, delete_elem_thread, &ctx);
	if (!ASSERT_OK(ret, "create delete_elem_thread")) {
		/* Tell the already-running first worker to bail out. */
		ctx.stop = true;
		pthread_join(tid[0], NULL);
		goto cleanup;
	}

	/* Both workers are parked on ctx.start; release them together. */
	ctx.start = true;

	pthread_join(tid[0], NULL);
	pthread_join(tid[1], NULL);

	ASSERT_EQ(ctx.errors, 0, "thread_errors");
	/* Either KASAN will catch UAF or kernel will crash or nothing happens */
cleanup:
	timer_start_delete_race__destroy(skel);
}
|