/* SPDX-License-Identifier: GPL-2.0 */

/*
 * 'Generic' ticket-lock implementation.
 *
 * It relies on atomic_fetch_add() having well-defined forward progress
 * guarantees under contention. If your architecture cannot provide this, stick
 * to a test-and-set lock.
 *
 * It also relies on atomic_fetch_add() being safe vs smp_store_release() on a
 * sub-word of the value. This is generally true for anything LL/SC although
 * you'd be hard pressed to find anything useful in architecture specifications
 * about this. If your architecture cannot do this you might be better off with
 * a test-and-set.
 *
 * It further assumes atomic_*_release() + atomic_*_acquire() is RCpc and hence
 * uses atomic_fetch_add() which is RCsc to create an RCsc hot path, along with
 * a full fence after the spin to upgrade the otherwise-RCpc
 * atomic_cond_read_acquire().
 *
 * The implementation uses smp_cond_load_acquire() to spin, so if the
 * architecture has WFE-like instructions to sleep instead of poll for word
 * modifications, be sure to implement that (see ARM64 for example).
 */

#ifndef __ASM_GENERIC_SPINLOCK_H
#define __ASM_GENERIC_SPINLOCK_H

#include <linux/atomic.h>
#include <asm-generic/spinlock_types.h>

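/*
 * Lock word layout, sketched for orientation (descriptive only; the
 * "next"/"owner" names are conventional ticket-lock terminology, not
 * identifiers used below): the 32-bit atomic_t packs two 16-bit tickets,
 *
 *	 31            16 15             0
 *	+----------------+----------------+
 *	|      next      |      owner     |
 *	+----------------+----------------+
 *
 * Lockers draw a "next" ticket with atomic_fetch_add(1<<16, ...) and wait
 * until the "owner" half catches up; unlock advances "owner" by one.
 */
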
static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	u32 val = atomic_fetch_add(1<<16, lock);
	u16 ticket = val >> 16;

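	/*
	 * Uncontended fast path: our freshly drawn ticket already matches
	 * the owner half, and the RCsc atomic_fetch_add() above supplies
	 * all the ordering we need.
	 */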
	if (ticket == (u16)val)
		return;

	/*
	 * atomic_cond_read_acquire() is RCpc, but rather than defining a
	 * custom cond_read_rcsc() here we just emit a full fence. We only
	 * need the prior reads before subsequent writes ordering from
	 * smp_mb(), but as atomic_cond_read_acquire() just emits reads and we
	 * have no outstanding writes due to the atomic_fetch_add() the extra
	 * orderings are free.
	 */
	atomic_cond_read_acquire(lock, ticket == (u16)VAL);
	smp_mb();
}

static __always_inline bool arch_spin_trylock(arch_spinlock_t *lock)
{
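	/* The lock is free iff the "next" and "owner" halves are equal. */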
	u32 old = atomic_read(lock);

	if ((old >> 16) != (old & 0xffff))
		return false;

	return atomic_try_cmpxchg(lock, &old, old + (1<<16)); /* SC, for RCsc */
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
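	/*
	 * The release store must touch only the "owner" (low 16 bits)
	 * half of the lock word; on big-endian that half lives at the
	 * higher byte address, hence the IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)
	 * offset below.
	 */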
	u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
	u32 val = atomic_read(lock);

	smp_store_release(ptr, (u16)val + 1);
}

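/*
 * Minimal usage sketch (illustrative only; kernel code should go through
 * the generic spinlock wrappers in <linux/spinlock.h> rather than calling
 * these arch hooks directly):
 *
 *	arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	arch_spin_lock(&lock);
 *	... critical section ...
 *	arch_spin_unlock(&lock);
 *
 * or, non-blocking:
 *
 *	if (arch_spin_trylock(&lock)) {
 *		... critical section ...
 *		arch_spin_unlock(&lock);
 *	}
 */
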
static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	u32 val = lock.counter;

	return ((val >> 16) == (val & 0xffff));
}

static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	arch_spinlock_t val = READ_ONCE(*lock);

	return !arch_spin_value_unlocked(val);
}

static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	u32 val = atomic_read(lock);

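	/*
	 * "next" exactly one ahead of "owner" means held with no waiters;
	 * two or more ahead means at least one CPU is spinning.
	 */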
	return (s16)((val >> 16) - (val & 0xffff)) > 1;
}

#include <asm/qrwlock.h>

#endif /* __ASM_GENERIC_SPINLOCK_H */