// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Queued read/write locks
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <trace/events/lock.h>

/**
 * queued_read_lock_slowpath - acquire read lock of a queued rwlock
 * @lock: Pointer to queued rwlock structure
 */
void __lockfunc queued_read_lock_slowpath(struct qrwlock *lock)
{
	/*
	 * Readers come here when they cannot get the lock without waiting
	 */
	if (unlikely(in_interrupt())) {
		/*
		 * Readers in interrupt context will get the lock immediately
		 * if the writer is just waiting (not holding the lock yet),
		 * so spin with ACQUIRE semantics until the lock is available
		 * without waiting in the queue.
		 */
		atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
		return;
	}
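	/*
	 * The fast path has already speculatively added _QR_BIAS to the
	 * lock count. Back that increment out before queuing so that a
	 * pending writer is not held up by a reader that has not yet
	 * reached the head of the wait queue.
	 */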
	atomic_sub(_QR_BIAS, &lock->cnts);

	trace_contention_begin(lock, LCB_F_SPIN | LCB_F_READ);

	/*
	 * Put the reader into the wait queue
	 */
	arch_spin_lock(&lock->wait_lock);
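	/*
	 * Holding wait_lock means this reader is now at the head of the
	 * queue, so re-add _QR_BIAS to mark its presence before waiting
	 * for any current write holder to release the lock.
	 */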
	atomic_add(_QR_BIAS, &lock->cnts);

	/*
	 * The ACQUIRE semantics of the following spinning code ensure
	 * that accesses can't leak upwards out of our subsequent critical
	 * section in the case that the lock is currently held for write.
	 */
	atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));

	/*
	 * Signal the next one in queue to become queue head
	 */
	arch_spin_unlock(&lock->wait_lock);
	trace_contention_end(lock, 0);
}
EXPORT_SYMBOL(queued_read_lock_slowpath);

/**
 * queued_write_lock_slowpath - acquire write lock of a queued rwlock
 * @lock: Pointer to queued rwlock structure
 */
void __lockfunc queued_write_lock_slowpath(struct qrwlock *lock)
{
	int cnts;

	trace_contention_begin(lock, LCB_F_SPIN | LCB_F_WRITE);

	/* Put the writer into the wait queue */
	arch_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock directly if no reader is present */
	if (!(cnts = atomic_read(&lock->cnts)) &&
	    atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED))
		goto unlock;

	/* Set the waiting flag to notify readers that a writer is pending */
	atomic_or(_QW_WAITING, &lock->cnts);

	/* When no more readers or writers, set the locked flag */
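	/*
	 * The cmpxchg can fail when a fast-path or interrupt-context
	 * reader transiently bumps the count between the relaxed read
	 * observing _QW_WAITING and the cmpxchg itself, so retry until
	 * the transition from _QW_WAITING to _QW_LOCKED succeeds.
	 */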
	do {
		cnts = atomic_cond_read_relaxed(&lock->cnts, VAL == _QW_WAITING);
	} while (!atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED));
unlock:
	arch_spin_unlock(&lock->wait_lock);

	trace_contention_end(lock, 0);
}
EXPORT_SYMBOL(queued_write_lock_slowpath);