From 894d1b3db41cf7e6ae0304429a1747b3c3f390bc Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 9 Oct 2024 16:53:34 -0700
Subject: locking/mutex: Remove wakeups from under mutex::wait_lock

In preparation to nest mutex::wait_lock under rq::lock, we need to
remove wakeups from under it. Do this by utilizing wake_qs to defer
the wakeup until after the lock is dropped.

[Heavily changed after 55f036ca7e74 ("locking: WW mutex cleanup") and
08295b3b5bee ("locking: Implement an algorithm choice for Wound-Wait
mutexes")]
[jstultz: rebased to mainline, added extra wake_up_q & init to avoid
hangs, similar to Connor's rework of this patch]

Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Juri Lelli
Signed-off-by: John Stultz
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Metin Kaya
Acked-by: Davidlohr Bueso
Tested-by: K Prateek Nayak
Tested-by: Metin Kaya
Link: https://lore.kernel.org/r/20241009235352.1614323-2-jstultz@google.com
---
 kernel/locking/rtmutex_api.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

(limited to 'kernel/locking/rtmutex_api.c')

diff --git a/kernel/locking/rtmutex_api.c b/kernel/locking/rtmutex_api.c
index a6974d044593..2bc14c049a64 100644
--- a/kernel/locking/rtmutex_api.c
+++ b/kernel/locking/rtmutex_api.c
@@ -275,6 +275,7 @@ void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
  * @lock: the rt_mutex to take
  * @waiter: the pre-initialized rt_mutex_waiter
  * @task: the task to prepare
+ * @wake_q: the wake_q to wake tasks after we release the wait_lock
  *
  * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
  * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
@@ -291,7 +292,8 @@ void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
  */
 int __sched __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
 					struct rt_mutex_waiter *waiter,
-					struct task_struct *task)
+					struct task_struct *task,
+					struct wake_q_head *wake_q)
 {
 	int ret;
 
@@ -302,7 +304,7 @@ int __sched __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
 
 	/* We enforce deadlock detection for futexes */
 	ret = task_blocks_on_rt_mutex(lock, waiter, task, NULL,
-				      RT_MUTEX_FULL_CHAINWALK);
+				      RT_MUTEX_FULL_CHAINWALK, wake_q);
 
 	if (ret && !rt_mutex_owner(lock)) {
 		/*
@@ -341,12 +343,16 @@ int __sched rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
 				      struct task_struct *task)
 {
 	int ret;
+	DEFINE_WAKE_Q(wake_q);
 
 	raw_spin_lock_irq(&lock->wait_lock);
-	ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
+	ret = __rt_mutex_start_proxy_lock(lock, waiter, task, &wake_q);
 	if (unlikely(ret))
 		remove_waiter(lock, waiter);
+	preempt_disable();
 	raw_spin_unlock_irq(&lock->wait_lock);
+	wake_up_q(&wake_q);
+	preempt_enable();
 
 	return ret;
 }
--
cgit v1.2.3
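
For readers unfamiliar with the idiom this patch applies, the following is a
minimal, self-contained sketch of deferring wakeups with a wake_q so they
happen only after wait_lock is released. It is not code from this patch:
struct my_lock, my_lock_release() and the top_waiter field are hypothetical
stand-ins, while DEFINE_WAKE_Q(), wake_q_add(), wake_up_q() and the raw
spinlock and preemption primitives are the real kernel APIs.

#include <linux/sched.h>
#include <linux/sched/wake_q.h>
#include <linux/spinlock.h>
#include <linux/preempt.h>

/* Hypothetical lock type, used only for this sketch. */
struct my_lock {
	raw_spinlock_t		wait_lock;
	struct task_struct	*top_waiter;	/* illustrative waiter tracking */
};

static void my_lock_release(struct my_lock *lock)
{
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irq(&lock->wait_lock);

	/*
	 * Do not call wake_up_process() here: that would take rq::lock
	 * while wait_lock is held, which is exactly the nesting this
	 * series wants to eliminate. Instead, only queue the task on
	 * the wake_q and defer the actual wakeup.
	 */
	if (lock->top_waiter)
		wake_q_add(&wake_q, lock->top_waiter);

	/*
	 * Keep preemption disabled across the unlock so the waker is
	 * not scheduled away before it has issued the deferred wakeups.
	 */
	preempt_disable();
	raw_spin_unlock_irq(&lock->wait_lock);

	/* Safe now: rq::lock is taken without wait_lock held. */
	wake_up_q(&wake_q);
	preempt_enable();
}

This mirrors the shape of the rt_mutex_start_proxy_lock() hunk above:
DEFINE_WAKE_Q() initializes the queue, the wake_q head is threaded down to
wherever a wakeup would previously have been issued under wait_lock, and the
preempt_disable()/preempt_enable() pair brackets the unlock so the deferred
wake_up_q() runs promptly once the lock is dropped.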