| author | Thomas Gleixner <tglx@linutronix.de> | 2018-03-14 20:37:31 +0100 |
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2018-03-14 20:37:31 +0100 |
| commit | b0d8bef8ed805ca92eb91e86acf3ce89cbebc8ce (patch) | |
| tree | 8838391a76f0cf75ffcd31166996d03d19c580a6 /kernel/locking | |
| parent | softirq: Consolidate common code in tasklet_[hi]_action() (diff) | |
| parent | Merge tag 'nfs-for-4.16-4' of git://git.linux-nfs.org/projects/trondmy/linux-nfs (diff) | |
Merge branch 'linus' into irq/core to pick up dependencies.
Diffstat (limited to 'kernel/locking')
| -rw-r--r-- | kernel/locking/qspinlock.c | 21 |
| -rw-r--r-- | kernel/locking/rtmutex.c | 5 |
2 files changed, 18 insertions, 8 deletions
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 38ece035039e..d880296245c5 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -379,6 +379,14 @@ queue:
 	tail = encode_tail(smp_processor_id(), idx);
 
 	node += idx;
+
+	/*
+	 * Ensure that we increment the head node->count before initialising
+	 * the actual node. If the compiler is kind enough to reorder these
+	 * stores, then an IRQ could overwrite our assignments.
+	 */
+	barrier();
+
 	node->locked = 0;
 	node->next = NULL;
 	pv_init_node(node);
@@ -408,14 +416,15 @@ queue:
 	 */
 	if (old & _Q_TAIL_MASK) {
 		prev = decode_tail(old);
+
 		/*
-		 * The above xchg_tail() is also a load of @lock which
-		 * generates, through decode_tail(), a pointer. The address
-		 * dependency matches the RELEASE of xchg_tail() such that
-		 * the subsequent access to @prev happens after.
+		 * We must ensure that the stores to @node are observed before
+		 * the write to prev->next. The address dependency from
+		 * xchg_tail is not sufficient to ensure this because the read
+		 * component of xchg_tail is unordered with respect to the
+		 * initialisation of @node.
 		 */
-
-		WRITE_ONCE(prev->next, node);
+		smp_store_release(&prev->next, node);
 
 		pv_wait_node(node, prev);
 		arch_mcs_spin_lock_contended(&node->locked);
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 65cc0cb984e6..940633c63254 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1616,11 +1616,12 @@ bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
 void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
 {
 	DEFINE_WAKE_Q(wake_q);
+	unsigned long flags;
 	bool postunlock;
 
-	raw_spin_lock_irq(&lock->wait_lock);
+	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 	postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
-	raw_spin_unlock_irq(&lock->wait_lock);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
 	if (postunlock)
 		rt_mutex_postunlock(&wake_q);
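The two qspinlock hunks guard against different reorderings: barrier() stops the compiler from reordering the node->count increment against the node initialisation, so an IRQ taken on the same CPU cannot overwrite the fields, while smp_store_release() orders the node initialisation before the node becomes visible to other CPUs through prev->next. As a rough user-space sketch of the second point — the struct, the function name and the C11-atomics framing are illustrative assumptions, not the kernel's own code — a release store is what makes the published pointer carry the initialised fields with it:

```c
#include <stdatomic.h>
#include <stddef.h>

struct mcs_node {
	_Atomic(struct mcs_node *) next;
	atomic_int locked;
};

/*
 * Illustrative analogue of the patched path: initialise @node, then
 * publish it to the previous tail. The release store guarantees that a
 * CPU observing prev->next == node also observes the initialised fields;
 * this is the role smp_store_release(&prev->next, node) plays in the
 * patch, and what the plain WRITE_ONCE() store did not guarantee.
 */
static void mcs_enqueue_behind(struct mcs_node *prev, struct mcs_node *node)
{
	atomic_store_explicit(&node->locked, 0, memory_order_relaxed);
	atomic_store_explicit(&node->next, NULL, memory_order_relaxed);

	/* Publish: pairs with the read of ->next on the CPU that later
	 * hands the lock over to this node. */
	atomic_store_explicit(&prev->next, node, memory_order_release);
}

int main(void)
{
	struct mcs_node tail_node, new_node;

	atomic_init(&tail_node.next, NULL);
	atomic_init(&tail_node.locked, 1);
	atomic_init(&new_node.next, NULL);
	atomic_init(&new_node.locked, 1);

	mcs_enqueue_behind(&tail_node, &new_node);
	return 0;
}
```

The rtmutex hunk is independent of this: switching to raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() preserves the caller's interrupt state instead of unconditionally re-enabling interrupts, so rt_mutex_futex_unlock() stays correct when called with interrupts already disabled.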
