| field | value |
|---|---|
| author | Tejun Heo <tj@kernel.org> (2024-07-12 08:20:33 -1000) |
| committer | Tejun Heo <tj@kernel.org> (2024-07-12 08:20:33 -1000) |
| commit | f47a818950dd5e5d16eb6e9c1713bb0bc61649cd |
| tree | fcd6043b68c4db74531942a7e5a79d94028a64f5 /kernel/sched/ext.c |
| parent | sched_ext: Unpin and repin rq lock from balance_scx() |
sched_ext: s/SCX_RQ_BALANCING/SCX_RQ_IN_BALANCE/ and add SCX_RQ_IN_WAKEUP
SCX_RQ_BALANCING is used to mark that the rq is currently in balance().
Rename it to SCX_RQ_IN_BALANCE and add SCX_RQ_IN_WAKEUP which marks whether
the rq is currently enqueueing for a wakeup. This will be used to implement
direct dispatching to the local DSQ of another CPU.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: David Vernet <void@manifault.com>
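
The flag definitions themselves live in the scheduler-internal headers rather than in kernel/sched/ext.c, so they are outside this diff. As a rough sketch of the rq flag set after this patch (the bit positions are illustrative assumptions, not the actual header contents):

```c
/*
 * Illustrative sketch only: the real enum is declared outside this diff
 * (in the scheduler-internal headers) and the bit positions here are
 * assumed for the example, not taken from the kernel source.
 */
enum scx_rq_flags {
	SCX_RQ_IN_WAKEUP	= 1 << 2,	/* rq is enqueueing a task for a wakeup */
	SCX_RQ_IN_BALANCE	= 1 << 3,	/* rq is inside balance_one(); was SCX_RQ_BALANCING */
};
```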
Diffstat (limited to 'kernel/sched/ext.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/sched/ext.c | 13 |

1 file changed, 9 insertions(+), 4 deletions(-)
```diff
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index d4f801cd2548..57d6ea65f857 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -1827,6 +1827,9 @@ static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags
 {
 	int sticky_cpu = p->scx.sticky_cpu;
 
+	if (enq_flags & ENQUEUE_WAKEUP)
+		rq->scx.flags |= SCX_RQ_IN_WAKEUP;
+
 	enq_flags |= rq->scx.extra_enq_flags;
 
 	if (sticky_cpu >= 0)
@@ -1843,7 +1846,7 @@ static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags
 
 	if (p->scx.flags & SCX_TASK_QUEUED) {
 		WARN_ON_ONCE(!task_runnable(p));
-		return;
+		goto out;
 	}
 
 	set_task_runnable(rq, p);
@@ -1858,6 +1861,8 @@ static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags
 		touch_core_sched(rq, p);
 
 	do_enqueue_task(rq, p, enq_flags, sticky_cpu);
+out:
+	rq->scx.flags &= ~SCX_RQ_IN_WAKEUP;
 }
 
 static void ops_dequeue(struct task_struct *p, u64 deq_flags)
@@ -2420,7 +2425,7 @@ static int balance_one(struct rq *rq, struct task_struct *prev, bool local)
 	bool has_tasks = false;
 
 	lockdep_assert_rq_held(rq);
-	rq->scx.flags |= SCX_RQ_BALANCING;
+	rq->scx.flags |= SCX_RQ_IN_BALANCE;
 
 	if (static_branch_unlikely(&scx_ops_cpu_preempt) &&
 	    unlikely(rq->scx.cpu_released)) {
@@ -2514,7 +2519,7 @@ static int balance_one(struct rq *rq, struct task_struct *prev, bool local)
 has_tasks:
 	has_tasks = true;
 out:
-	rq->scx.flags &= ~SCX_RQ_BALANCING;
+	rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
 	return has_tasks;
 }
 
@@ -5063,7 +5068,7 @@ static bool can_skip_idle_kick(struct rq *rq)
 	 * The race window is small and we don't and can't guarantee that @rq is
 	 * only kicked while idle anyway. Skip only when sure.
 	 */
-	return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_BALANCING);
+	return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE);
 }
 
 static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *pseqs)
```
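
Since the commit message notes that SCX_RQ_IN_WAKEUP is groundwork for direct dispatch to another CPU's local DSQ, a later consumer could conceivably test both flags on the target rq before deciding how to dispatch. The helper below is purely hypothetical; neither its name nor any such check appears in this patch:

```c
/*
 * Hypothetical helper, not part of this patch: with both flags in place,
 * a future direct-dispatch path could ask whether the target rq is
 * currently in the middle of a wakeup enqueue or a balance pass.
 */
static inline bool scx_rq_in_wakeup_or_balance(struct rq *rq)
{
	return rq->scx.flags & (SCX_RQ_IN_WAKEUP | SCX_RQ_IN_BALANCE);
}
```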
