| author | Linus Torvalds <torvalds@linux-foundation.org> | 2025-07-29 18:11:32 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-07-29 18:11:32 -0700 |
| commit | 72b8944f147e151e845d976e7f48beff38967499 | |
| tree | 9e4b484be7381d01702f37ef36aa71026e1b27d8 /kernel | |
| parent | Merge tag 'perf-core-2025-07-28' of git://git.kernel.org/pub/scm/linux/kernel... | |
| parent | Merge tag 'lockdep-for-tip.2025.07.16' of git://git.kernel.org/pub/scm/linux/... | |
Merge tag 'locking-core-2025-07-29' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
"Locking primitives:
- Mark devm_mutex_init() as __must_check and fix drivers that didn't
  check the return code (Thomas Weißschuh); see the usage sketch
  after this quoted message
- Reorganize <linux/local_lock.h>, moving the this_cpu_ptr() notation
  from the internal header into the main header (Sebastian Andrzej Siewior)
- Remove OWNER_SPINNABLE in rwsem (Jinliang Zheng)
- Remove redundant #ifdefs in the mutex code (Ran Xiaokai)
Lockdep:
- Avoid returning a struct by value from lock_stats(); a toy model
  follows the diff below (Arnd Bergmann)
- Change `static const` into enum for LOCKF_*_IRQ_*; the underlying
  x-macro pattern is sketched after the shortlog (Arnd Bergmann)
- Temporarily use synchronize_rcu_expedited() in
  lockdep_unregister_key() to speed things up (Breno Leitao)
Rust runtime:
- Add #[must_use] to Lock::try_lock() (Jason Devers)"
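For the devm_mutex_init() item above, here is a minimal sketch of the call pattern that __must_check now enforces. This is not code from the merge: the driver, struct example_priv, and example_probe() are hypothetical, but the fixed drivers (leds-lp8860, spi-nxp-fspi) follow the same shape.

```c
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* Hypothetical driver state; only the mutex matters here. */
struct example_priv {
	struct mutex lock;
};

static int example_probe(struct platform_device *pdev)
{
	struct example_priv *priv;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/*
	 * devm_mutex_init() is now __must_check: calling it as a bare
	 * statement triggers a compiler warning, so propagate the error.
	 */
	ret = devm_mutex_init(&pdev->dev, &priv->lock);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, priv);
	return 0;
}
```

Since __must_check maps to __attribute__((warn_unused_result)), any remaining unchecked caller now fails a -Werror build rather than silently continuing with an uninitialized mutex.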
* tag 'locking-core-2025-07-29' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
lockdep: Speed up lockdep_unregister_key() with expedited RCU synchronization
locking/mutex: Remove redundant #ifdefs
locking/lockdep: Change 'static const' variables to enum values
locking/lockdep: Avoid struct return in lock_stats()
locking/rwsem: Use OWNER_NONSPINNABLE directly instead of OWNER_SPINNABLE
rust: sync: Add #[must_use] to Lock::try_lock()
locking/mutex: Mark devm_mutex_init() as __must_check
leds: lp8860: Check return value of devm_mutex_init()
spi: spi-nxp-fspi: Check return value of devm_mutex_init()
local_lock: Move this_cpu_ptr() notation from internal to main header
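A note on the 'static const' to enum change in the shortlog: lockdep builds its IRQ masks with an x-macro include trick. Below is a standalone, hypothetical sketch of that pattern; FLAG_*, FOR_EACH_STATE() and FLAG_IRQ_MASK are invented stand-ins, and in lockdep the per-state list comes from repeatedly #including "lockdep_states.h" rather than a local macro. One concrete difference the change buys: enum values are integer constant expressions, so unlike 'static const unsigned long' objects they are usable in case labels and other constant contexts.

```c
#include <stdio.h>

/* Invented flags standing in for the LOCKF_* bits. */
#define FLAG_HARDIRQ (1U << 0)
#define FLAG_SOFTIRQ (1U << 1)

/* Stand-in for lockdep_states.h: one X(...) per lock state. */
#define FOR_EACH_STATE(X) X(HARDIRQ) X(SOFTIRQ)

enum {
	/* Each expansion emits "FLAG_x |"; the trailing 0 closes the OR chain. */
#define STATE(s) FLAG_##s |
	FLAG_IRQ_MASK = FOR_EACH_STATE(STATE) 0,
#undef STATE
};

int main(void)
{
	/* A 'static const unsigned long' could not appear as a case label. */
	switch (FLAG_HARDIRQ | FLAG_SOFTIRQ) {
	case FLAG_IRQ_MASK:
		printf("mask = %#x\n", (unsigned int)FLAG_IRQ_MASK);
	}
	return 0;
}
```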
Diffstat (limited to 'kernel')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/locking/lockdep.c | 39 |
| -rw-r--r-- | kernel/locking/lockdep_internals.h | 18 |
| -rw-r--r-- | kernel/locking/lockdep_proc.c | 2 |
| -rw-r--r-- | kernel/locking/mutex.c | 4 |
| -rw-r--r-- | kernel/locking/rwsem.c | 4 |
5 files changed, 34 insertions(+), 33 deletions(-)
```diff
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index dd2bbf73718b..2d4c5bab5af8 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -297,33 +297,30 @@ static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
 	dst->nr += src->nr;
 }
 
-struct lock_class_stats lock_stats(struct lock_class *class)
+void lock_stats(struct lock_class *class, struct lock_class_stats *stats)
 {
-	struct lock_class_stats stats;
 	int cpu, i;
 
-	memset(&stats, 0, sizeof(struct lock_class_stats));
+	memset(stats, 0, sizeof(struct lock_class_stats));
 	for_each_possible_cpu(cpu) {
 		struct lock_class_stats *pcs =
 			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
 
-		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
-			stats.contention_point[i] += pcs->contention_point[i];
+		for (i = 0; i < ARRAY_SIZE(stats->contention_point); i++)
+			stats->contention_point[i] += pcs->contention_point[i];
 
-		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
-			stats.contending_point[i] += pcs->contending_point[i];
+		for (i = 0; i < ARRAY_SIZE(stats->contending_point); i++)
+			stats->contending_point[i] += pcs->contending_point[i];
 
-		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
-		lock_time_add(&pcs->write_waittime, &stats.write_waittime);
+		lock_time_add(&pcs->read_waittime, &stats->read_waittime);
+		lock_time_add(&pcs->write_waittime, &stats->write_waittime);
 
-		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
-		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
+		lock_time_add(&pcs->read_holdtime, &stats->read_holdtime);
+		lock_time_add(&pcs->write_holdtime, &stats->write_holdtime);
 
-		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
-			stats.bounces[i] += pcs->bounces[i];
+		for (i = 0; i < ARRAY_SIZE(stats->bounces); i++)
+			stats->bounces[i] += pcs->bounces[i];
 	}
-
-	return stats;
 }
 
 void clear_lock_stats(struct lock_class *class)
@@ -6619,8 +6616,16 @@ void lockdep_unregister_key(struct lock_class_key *key)
 	if (need_callback)
 		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
 
-	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
-	synchronize_rcu();
+	/*
+	 * Wait until is_dynamic_key() has finished accessing k->hash_entry.
+	 *
+	 * Some operations like __qdisc_destroy() will call this in a debug
+	 * kernel, and the network traffic is disabled while waiting, hence
+	 * the delay of the wait matters in debugging cases. Currently use a
+	 * synchronize_rcu_expedited() to speed up the wait at the cost of
+	 * system IPIs. TODO: Replace RCU with hazptr for this.
+	 */
+	synchronize_rcu_expedited();
 }
 EXPORT_SYMBOL_GPL(lockdep_unregister_key);
diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
index 82156caf77d1..0e5e6ffe91a3 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -47,29 +47,31 @@ enum {
 	__LOCKF(USED_READ)
 };
 
+enum {
 #define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE |
-static const unsigned long LOCKF_ENABLED_IRQ =
+	LOCKF_ENABLED_IRQ =
 #include "lockdep_states.h"
-	0;
+	0,
 #undef LOCKDEP_STATE
 
 #define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE |
-static const unsigned long LOCKF_USED_IN_IRQ =
+	LOCKF_USED_IN_IRQ =
 #include "lockdep_states.h"
-	0;
+	0,
 #undef LOCKDEP_STATE
 
 #define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE##_READ |
-static const unsigned long LOCKF_ENABLED_IRQ_READ =
+	LOCKF_ENABLED_IRQ_READ =
 #include "lockdep_states.h"
-	0;
+	0,
 #undef LOCKDEP_STATE
 
 #define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE##_READ |
-static const unsigned long LOCKF_USED_IN_IRQ_READ =
+	LOCKF_USED_IN_IRQ_READ =
 #include "lockdep_states.h"
-	0;
+	0,
 #undef LOCKDEP_STATE
+};
 
 #define LOCKF_ENABLED_IRQ_ALL	(LOCKF_ENABLED_IRQ | LOCKF_ENABLED_IRQ_READ)
 #define LOCKF_USED_IN_IRQ_ALL	(LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ)
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index b52c07c4707c..1916db9aa46b 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -657,7 +657,7 @@ static int lock_stat_open(struct inode *inode, struct file *file)
 			if (!test_bit(idx, lock_classes_in_use))
 				continue;
 			iter->class = class;
-			iter->stats = lock_stats(class);
+			lock_stats(class, &iter->stats);
 			iter++;
 		}
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 80d778fedd60..de7d6702cd96 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -191,9 +191,7 @@ static void
 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 		   struct list_head *list)
 {
-#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
 	hung_task_set_blocker(lock, BLOCKER_TYPE_MUTEX);
-#endif
 	debug_mutex_add_waiter(lock, waiter, current);
 
 	list_add_tail(&waiter->list, list);
@@ -209,9 +207,7 @@ __mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
 		__mutex_clear_flag(lock, MUTEX_FLAGS);
 
 	debug_mutex_remove_waiter(lock, waiter, current);
-#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
 	hung_task_clear_blocker();
-#endif
 }
 
 /*
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 2ddb827e3bea..8572dba95af4 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -727,8 +727,6 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 	return ret;
 }
 
-#define OWNER_SPINNABLE		(OWNER_NULL | OWNER_WRITER | OWNER_READER)
-
 static inline enum owner_state
 rwsem_owner_state(struct task_struct *owner, unsigned long flags)
 {
@@ -835,7 +833,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 		enum owner_state owner_state;
 
 		owner_state = rwsem_spin_on_owner(sem);
-		if (!(owner_state & OWNER_SPINNABLE))
+		if (owner_state == OWNER_NONSPINNABLE)
 			break;
 
 		/*
```
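On the lock_stats() change in the first lockdep.c hunk, here is a toy model of the signature change outside the kernel. struct stats and both functions are invented for illustration; the patch's own rationale is not quoted on this page, but the real struct lock_class_stats carries several arrays, so the by-value form materializes and copies the whole struct while the out-parameter form fills caller-owned storage in place.

```c
#include <string.h>

/* Invented stand-in for struct lock_class_stats, which holds arrays of
 * contention points, wait/hold times and bounce counters. */
struct stats {
	unsigned long contention_point[4];
	unsigned long bounces[4];
};

/* Old shape: the struct is built in the callee and copied back
 * (typically through a hidden return slot in the caller's frame). */
static struct stats get_stats_by_value(void)
{
	struct stats s;

	memset(&s, 0, sizeof(s));
	s.bounces[0] = 1;
	return s;
}

/* New shape: the caller owns the storage and the callee fills it in
 * place, mirroring lock_stats(class, &iter->stats) in lockdep_proc.c. */
static void get_stats(struct stats *out)
{
	memset(out, 0, sizeof(*out));
	out->bounces[0] = 1;
}

int main(void)
{
	struct stats a = get_stats_by_value();
	struct stats b;

	get_stats(&b);
	return (int)(a.bounces[0] + b.bounces[0]) - 2; /* returns 0 */
}
```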

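The mutex.c hunk can drop its #ifdef guards only because hung_task_set_blocker()/hung_task_clear_blocker() stay callable when CONFIG_DETECT_HUNG_TASK_BLOCKER is off. The usual kernel convention for that is header-provided no-op stubs; the schematic below uses simplified, assumed signatures rather than the exact declarations from the real header.

```c
/* Schematic only: simplified signatures, not the exact kernel header. */
#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
void hung_task_set_blocker(void *lock, unsigned long type);
void hung_task_clear_blocker(void);
#else
/* With the feature compiled out, the calls collapse to nothing. */
static inline void hung_task_set_blocker(void *lock, unsigned long type) { }
static inline void hung_task_clear_blocker(void) { }
#endif
```

Call sites such as __mutex_add_waiter() then compile unconditionally, which is what makes the removed #ifdefs redundant.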