|  |  |  |
|---|---|---|
| author | Paolo Bonzini <pbonzini@redhat.com> | 2025-05-27 12:17:06 -0400 |
| committer | Paolo Bonzini <pbonzini@redhat.com> | 2025-05-28 06:29:17 -0400 |
| commit | 8e86e73626527e5a69bf5263d6bbe9c2a86b4319 | |
| tree | d56a6bc701bded71bf3befc233df4f73bd0589a9 /kernel | |
| parent | Merge tag 'kvm-x86-svm-6.16' of https://github.com/kvm-x86/linux into HEAD | |
| parent | rust: add helper for mutex_trylock | |
Merge branch 'kvm-lockdep-common' into HEAD
Introduce new mutex locking functions mutex_trylock_nest_lock() and
mutex_lock_killable_nest_lock() and use them to clean up locking
of all vCPUs for a VM.
For x86, this removes some complex code that was used instead
of lockdep's "nest_lock" feature.
For ARM and RISC-V, this removes a lockdep warning when the VM is
configured to have more than MAX_LOCK_DEPTH vCPUs, and removes a fair
amount of duplicate code by sharing the logic across all architectures.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
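To illustrate the pattern this merge enables, here is a minimal sketch of a lock-all-vCPUs helper built on mutex_lock_killable_nest_lock(), which the commit message names. It assumes the existing kvm_for_each_vcpu() iterator and per-vCPU vcpu->mutex from KVM; the helper name and body are illustrative, not a verbatim copy of the merged code.

```c
#include <linux/kvm_host.h>
#include <linux/mutex.h>

/*
 * Sketch (assumption, not the verbatim kernel helper): acquire every
 * vCPU mutex while holding kvm->lock, telling lockdep via "nest_lock"
 * that all of them nest under the same outer lock.  Lockdep then
 * tracks one nesting relationship instead of one acquisition per
 * vCPU, so the depth stays constant no matter how many vCPUs exist.
 */
static int lock_all_vcpus_sketch(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i, j;
	int r;

	lockdep_assert_held(&kvm->lock);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		r = mutex_lock_killable_nest_lock(&vcpu->mutex, &kvm->lock);
		if (r)
			goto out_unlock;
	}
	return 0;

out_unlock:
	/* Drop only the mutexes taken so far. */
	kvm_for_each_vcpu(j, vcpu, kvm) {
		if (j == i)
			break;
		mutex_unlock(&vcpu->mutex);
	}
	return r;
}
```

Because every vCPU mutex is annotated as nested under kvm->lock, this avoids the lockdep warning mentioned above when a VM has more than MAX_LOCK_DEPTH vCPUs.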
Diffstat (limited to 'kernel')
|  |  |  |
|---|---|---|
| -rw-r--r-- | kernel/locking/mutex.c | 21 |

1 file changed, 15 insertions(+), 6 deletions(-)
```diff
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 555e2b3a665a..234923121ff0 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -808,11 +808,12 @@ _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
 
 int __sched
-mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
+_mutex_lock_killable(struct mutex *lock, unsigned int subclass,
+		     struct lockdep_map *nest)
 {
-	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
+	return __mutex_lock(lock, TASK_KILLABLE, subclass, nest, _RET_IP_);
 }
-EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
+EXPORT_SYMBOL_GPL(_mutex_lock_killable);
 
 int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
@@ -1062,6 +1063,7 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
 
 #endif
 
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
 /**
  * mutex_trylock - try to acquire the mutex, without waiting
  * @lock: the mutex to be acquired
@@ -1078,17 +1080,24 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
  */
 int __sched mutex_trylock(struct mutex *lock)
 {
+	MUTEX_WARN_ON(lock->magic != lock);
+	return __mutex_trylock(lock);
+}
+EXPORT_SYMBOL(mutex_trylock);
+#else
+int __sched _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock)
+{
 	bool locked;
 
 	MUTEX_WARN_ON(lock->magic != lock);
-
 	locked = __mutex_trylock(lock);
 	if (locked)
-		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+		mutex_acquire_nest(&lock->dep_map, 0, 1, nest_lock, _RET_IP_);
 
 	return locked;
 }
-EXPORT_SYMBOL(mutex_trylock);
+EXPORT_SYMBOL(_mutex_trylock_nest_lock);
+#endif
 
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
 int __sched
```
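The diff above shows only the out-of-line kernel/ side: callers reach _mutex_lock_killable() and _mutex_trylock_nest_lock() through wrapper macros in include/linux/mutex.h, which fall outside the 'kernel' diffstat filter. A sketch of what those wrappers plausibly look like; since the header change is not shown here, the exact text below is an assumption:

```c
/*
 * Sketch of the header-side wrappers (assumption: the real macros in
 * include/linux/mutex.h may differ in detail).  With lockdep enabled,
 * the outer lock's dep_map is forwarded so the acquisition is recorded
 * as nested; without lockdep, the nest_lock argument is simply dropped.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC

#define mutex_trylock_nest_lock(lock, nest_lock)			\
(									\
	typecheck(struct lockdep_map *, &(nest_lock)->dep_map),		\
	_mutex_trylock_nest_lock(lock, &(nest_lock)->dep_map)		\
)

#define mutex_lock_killable_nest_lock(lock, nest_lock)			\
(									\
	typecheck(struct lockdep_map *, &(nest_lock)->dep_map),		\
	_mutex_lock_killable(lock, 0, &(nest_lock)->dep_map)		\
)

#else

#define mutex_trylock_nest_lock(lock, nest_lock) mutex_trylock(lock)
#define mutex_lock_killable_nest_lock(lock, nest_lock) \
	mutex_lock_killable(lock)

#endif
```

The design mirrors the existing mutex_lock_nest_lock(): the nest_lock parameter is any outer lock with a dep_map, and the same source line compiles to the plain trylock/killable-lock path when lockdep is configured out.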
