| author | Linus Torvalds <torvalds@linux-foundation.org> | 2025-07-19 10:40:30 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-07-19 10:40:30 -0700 |
| commit | bf61759db409ce21a8f2a5bb442b7c35905a713d (patch) | |
| tree | 41477119a61d32ad1a1d1e6e05b1608753d7f4c7 /kernel | |
| parent | Merge tag 'cgroup-for-6.16-rc6-fixes' of git://git.kernel.org/pub/scm/linux/k... (diff) | |
| parent | sched_ext: idle: Handle migration-disabled tasks in idle selection (diff) | |
Merge tag 'sched_ext-for-6.16-rc6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext
Pull sched_ext fixes from Tejun Heo:
- Fix handling of migration disabled tasks in default idle selection
- update_locked_rq() called __this_cpu_write() spuriously with NULL
  when @rq was not locked. As the writes were spurious, they didn't
  break anything directly. However, the function could be called in a
  preemptible context, leading to a context warning from
  __this_cpu_write(). Skip the spurious NULL writes (see the sketch
  after this list).
- Selftest fix on UP
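
For context on the update_locked_rq() fix, here is a minimal sketch of the per-CPU tracking pattern it addresses. The shapes below are assumed from the commit message and diff, not copied verbatim from kernel/sched/ext.c:

```c
/*
 * Sketch (assumed shape, not verbatim kernel source): ext.c tracks
 * which rq the current CPU holds in a per-CPU variable so callbacks
 * can be sanity-checked against the locking context.
 */
static DEFINE_PER_CPU(struct rq *, locked_rq);

static void update_locked_rq(struct rq *rq)
{
	if (rq)
		lockdep_assert_rq_held(rq);
	/*
	 * __this_cpu_write() requires that the caller cannot migrate,
	 * so reaching it from preemptible context trips the
	 * CONFIG_DEBUG_PREEMPT check. The fix below makes SCX_CALL_OP()
	 * and SCX_CALL_OP_RET() skip the call entirely when no rq is
	 * locked, rather than unconditionally writing NULL on entry
	 * and exit.
	 */
	__this_cpu_write(locked_rq, rq);
}
```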
* tag 'sched_ext-for-6.16-rc6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext:
sched_ext: idle: Handle migration-disabled tasks in idle selection
sched/ext: Prevent update_locked_rq() calls with NULL rq
selftests/sched_ext: Fix exit selftest hang on UP
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched/ext.c | 12 |
|---|---|---|
| -rw-r--r-- | kernel/sched/ext_idle.c | 2 |
2 files changed, 9 insertions, 5 deletions
```diff
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index b498d867ba21..7dd5cbcb7a06 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -1272,7 +1272,8 @@ static inline struct rq *scx_locked_rq(void)
 
 #define SCX_CALL_OP(sch, mask, op, rq, args...)			\
 do {								\
-	update_locked_rq(rq);					\
+	if (rq)							\
+		update_locked_rq(rq);				\
 	if (mask) {						\
 		scx_kf_allow(mask);				\
 		(sch)->ops.op(args);				\
@@ -1280,14 +1281,16 @@ do {								\
 	} else {						\
 		(sch)->ops.op(args);				\
 	}							\
-	update_locked_rq(NULL);					\
+	if (rq)							\
+		update_locked_rq(NULL);				\
 } while (0)
 
 #define SCX_CALL_OP_RET(sch, mask, op, rq, args...)		\
 ({								\
 	__typeof__((sch)->ops.op(args)) __ret;			\
 								\
-	update_locked_rq(rq);					\
+	if (rq)							\
+		update_locked_rq(rq);				\
 	if (mask) {						\
 		scx_kf_allow(mask);				\
 		__ret = (sch)->ops.op(args);			\
@@ -1295,7 +1298,8 @@ do {								\
 	} else {						\
 		__ret = (sch)->ops.op(args);			\
 	}							\
-	update_locked_rq(NULL);					\
+	if (rq)							\
+		update_locked_rq(NULL);				\
 	__ret;							\
 })
 
diff --git a/kernel/sched/ext_idle.c b/kernel/sched/ext_idle.c
index 6d29d3cbc670..001fb88a8481 100644
--- a/kernel/sched/ext_idle.c
+++ b/kernel/sched/ext_idle.c
@@ -903,7 +903,7 @@ s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 	 * selection optimizations and simply check whether the previously
 	 * used CPU is idle and within the allowed cpumask.
 	 */
-	if (p->nr_cpus_allowed == 1) {
+	if (p->nr_cpus_allowed == 1 || is_migration_disabled(p)) {
 		if (cpumask_test_cpu(prev_cpu, allowed ?: p->cpus_ptr) &&
 		    scx_idle_test_and_clear_cpu(prev_cpu))
 			cpu = prev_cpu;
```
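
A note on the ext_idle.c hunk: migrate_disable() pins a task to its current CPU without shrinking p->cpus_ptr or p->nr_cpus_allowed, so the affinity-of-one fast path used to miss such tasks. A hedged sketch of the combined check (the helper name is illustrative, not a kernel API; it assumes the scheduler-internal kernel/sched/sched.h definitions of struct task_struct accessors and is_migration_disabled()):

```c
/*
 * Illustrative helper (hypothetical name, not in the kernel tree):
 * a task must stay on its current CPU either when its affinity mask
 * allows exactly one CPU, or when it ran migrate_disable(), which
 * leaves nr_cpus_allowed untouched. After this fix, both cases take
 * the same prev_cpu-only path in select_cpu_from_kfunc().
 */
static inline bool scx_idle_task_pinned(struct task_struct *p)
{
	return p->nr_cpus_allowed == 1 || is_migration_disabled(p);
}
```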
