/* SPDX-License-Identifier: GPL-2.0 */
/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for i486+
 *
 * Written by David Howells (dhowells@redhat.com).
 *
 * Derived from asm-x86/semaphore.h
 *
 *
 * The MSW of the count is the negated number of active writers and waiting
 * lockers, and the LSW is the total number of active locks.
 *
 * The lock count is initialized to 0 (no active and no waiting lockers).
 *
 * When a writer subtracts WRITE_BIAS, the count becomes 0xffff0001 for the
 * case of an uncontended lock; since XADD hands back the old value, the
 * fast path can tell it won by seeing a zero active mask in what it got
 * back. Readers increment by 1 and see a positive value when uncontended,
 * or a negative value if there are writers (and maybe readers) waiting, in
 * which case the reader goes to sleep.
 *
 * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
 * be extended to 65534 by manually checking the whole MSW rather than relying
 * on the S flag.
 *
 * The value of ACTIVE_BIAS supports up to 65535 active processes.
 *
 * This should be totally fair - if anything is waiting, a process that wants a
 * lock will go to the back of the queue. When the currently active lock is
 * released, if there's a writer at the front of the queue, then that and only
 * that will be woken up; if there's a bunch of consecutive readers at the
 * front, then they'll all be woken up, but no other readers will be.
 */
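
/*
 * Worked example of the 32-bit count values this scheme produces
 * (illustrative only; the bias constants themselves are defined below):
 *
 *	0x00000000	unlocked, nobody waiting
 *	0x00000001	one reader active
 *	0x00000002	two readers active
 *	0xffff0001	one active lock, one writer-or-waiter in the MSW
 *			(e.g. a single writer holding the lock)
 *	0xfffe0001	one active lock, two writers/waiters in the MSW
 *			(e.g. a writer holding it with one task queued)
 */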

#ifndef _ASM_X86_RWSEM_H
#define _ASM_X86_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__
#include <asm/asm.h>

/*
 * The bias values and the counter type limit the number of
 * potential readers/writers to 32767 for 32 bits and 2147483647
 * for 64 bits.
 */

#ifdef CONFIG_X86_64
# define RWSEM_ACTIVE_MASK		0xffffffffL
#else
# define RWSEM_ACTIVE_MASK		0x0000ffffL
#endif

#define RWSEM_UNLOCKED_VALUE		0x00000000L
#define RWSEM_ACTIVE_BIAS		0x00000001L
#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
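
/*
 * For example, on 32-bit the definitions above give (illustrative
 * arithmetic only):
 *
 *	RWSEM_WAITING_BIAS      = -0x00010000
 *	RWSEM_ACTIVE_WRITE_BIAS = -0x00010000 + 1 = -0x0000ffff
 *	                        = 0xffff0001 as a 32-bit pattern
 *
 * so a writer's single XADD moves an unlocked count from 0x00000000
 * straight to 0xffff0001. On 64-bit the same shape holds with a 32-bit
 * active mask: RWSEM_WAITING_BIAS = -0x100000000.
 */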

/*
 * lock for reading
 */
#define ____down_read(sem, slow_path)				\
({								\
	struct rw_semaphore *ret;				\
	asm volatile("# beginning down_read\n\t"		\
		     LOCK_PREFIX _ASM_INC "(%[sem])\n\t"	\
		     /* adds 0x00000001 */			\
		     " jns 1f\n"				\
		     " call " slow_path "\n"			\
		     "1:\n\t"					\
		     "# ending down_read\n\t"			\
		     : "+m" (sem->count), "=a" (ret),		\
		       ASM_CALL_CONSTRAINT			\
		     : [sem] "a" (sem)				\
		     : "memory", "cc");				\
	ret;							\
})

static inline void __down_read(struct rw_semaphore *sem)
{
	____down_read(sem, "call_rwsem_down_read_failed");
}

static inline int __down_read_killable(struct rw_semaphore *sem)
{
	if (IS_ERR(____down_read(sem, "call_rwsem_down_read_failed_killable")))
		return -EINTR;
	return 0;
}
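
/*
 * A minimal C sketch of the fast path above, for illustration only: the
 * _sketch name is hypothetical and the block is not built. xadd() here is
 * the kernel's atomic exchange-and-add (the asm uses LOCK INC, which
 * behaves the same for a +1 increment), and rwsem_down_read_failed() is
 * the slow path that the call_rwsem_down_read_failed thunk ends up in.
 */
#if 0
static inline void down_read_sketch(struct rw_semaphore *sem, long *count)
{
	/* One more active reader; xadd() returns the old count. */
	long new = xadd(count, RWSEM_ACTIVE_READ_BIAS) + RWSEM_ACTIVE_READ_BIAS;

	/* Negative new count: a writer is active or queued, so sleep. */
	if (new < 0)
		rwsem_down_read_failed(sem);
}
#endif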

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
static inline bool __down_read_trylock(struct rw_semaphore *sem)
{
	long result, tmp;
	asm volatile("# beginning __down_read_trylock\n\t"
		     " mov %[count],%[result]\n\t"
		     "1:\n\t"
		     " mov %[result],%[tmp]\n\t"
		     " add %[inc],%[tmp]\n\t"
		     " jle 2f\n\t"
		     LOCK_PREFIX " cmpxchg %[tmp],%[count]\n\t"
		     " jnz 1b\n\t"
		     "2:\n\t"
		     "# ending __down_read_trylock\n\t"
		     : [count] "+m" (sem->count), [result] "=&a" (result),
		       [tmp] "=&r" (tmp)
		     : [inc] "er" (RWSEM_ACTIVE_READ_BIAS)
		     : "memory", "cc");
	return result >= 0;
}
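
/*
 * Illustrative C equivalent of the cmpxchg loop above (hypothetical
 * helper, not built): "old" starts from a racy load and is refreshed
 * from cmpxchg()'s return value whenever another CPU won the race.
 */
#if 0
static inline bool down_read_trylock_sketch(long *count)
{
	long old = *count, new, seen;

	for (;;) {
		new = old + RWSEM_ACTIVE_READ_BIAS;
		if (new <= 0)
			return false;	/* writer active or queued */
		seen = cmpxchg(count, old, new);
		if (seen == old)
			return true;	/* we installed the new count */
		old = seen;		/* lost the race; retry */
	}
}
#endif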

/*
 * lock for writing
 */
#define ____down_write(sem, slow_path)				\
({								\
	long tmp;						\
	struct rw_semaphore *ret;				\
								\
	asm volatile("# beginning down_write\n\t"		\
		     LOCK_PREFIX " xadd %[tmp],(%[sem])\n\t"	\
		     /* adds 0xffff0001, returns the old value */ \
		     " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
		     /* was the active mask 0 before? */	\
		     " jz 1f\n"					\
		     " call " slow_path "\n"			\
		     "1:\n"					\
		     "# ending down_write"			\
		     : "+m" (sem->count), [tmp] "=d" (tmp),	\
		       "=a" (ret), ASM_CALL_CONSTRAINT		\
		     : [sem] "a" (sem), "[tmp]" (RWSEM_ACTIVE_WRITE_BIAS) \
		     : "memory", "cc");				\
	ret;							\
})

static inline void __down_write(struct rw_semaphore *sem)
{
	____down_write(sem, "call_rwsem_down_write_failed");
}

static inline int __down_write_killable(struct rw_semaphore *sem)
{
	if (IS_ERR(____down_write(sem, "call_rwsem_down_write_failed_killable")))
		return -EINTR;

	return 0;
}
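
/*
 * C sketch of the write fast path above (hypothetical name, not built):
 * the whole write bias goes in with one XADD, and the *old* value's
 * active mask tells us whether anybody held the lock at that instant.
 */
#if 0
static inline void down_write_sketch(struct rw_semaphore *sem, long *count)
{
	long old = xadd(count, RWSEM_ACTIVE_WRITE_BIAS);

	/* Nonzero active mask: readers or a writer hold it; queue up. */
	if (old & RWSEM_ACTIVE_MASK)
		rwsem_down_write_failed(sem);
}
#endif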

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
static inline bool __down_write_trylock(struct rw_semaphore *sem)
{
	bool result;
	long tmp0, tmp1;
	asm volatile("# beginning __down_write_trylock\n\t"
		     " mov %[count],%[tmp0]\n\t"
		     "1:\n\t"
		     " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
		     /* was the active mask 0 before? */
		     " jnz 2f\n\t"
		     " mov %[tmp0],%[tmp1]\n\t"
		     " add %[inc],%[tmp1]\n\t"
		     LOCK_PREFIX " cmpxchg %[tmp1],%[count]\n\t"
		     " jnz 1b\n\t"
		     "2:\n\t"
		     CC_SET(e)
		     "# ending __down_write_trylock\n\t"
		     : [count] "+m" (sem->count), [tmp0] "=&a" (tmp0),
		       [tmp1] "=&r" (tmp1), CC_OUT(e) (result)
		     : [inc] "er" (RWSEM_ACTIVE_WRITE_BIAS)
		     : "memory");
	return result;
}
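
/*
 * Illustrative C equivalent of the loop above (hypothetical helper, not
 * built): the lock may only be claimed while the active mask is zero, and
 * the claim must be one atomic cmpxchg so no reader can slip in between
 * the check and the update.
 */
#if 0
static inline bool down_write_trylock_sketch(long *count)
{
	long old = *count, seen;

	for (;;) {
		if (old & RWSEM_ACTIVE_MASK)
			return false;	/* somebody holds the lock */
		seen = cmpxchg(count, old, old + RWSEM_ACTIVE_WRITE_BIAS);
		if (seen == old)
			return true;	/* claimed for writing */
		old = seen;		/* raced; re-examine */
	}
}
#endif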

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;
	asm volatile("# beginning __up_read\n\t"
		     LOCK_PREFIX " xadd %[tmp],(%[sem])\n\t"
		     /* subtracts 1, returns the old value */
		     " jns 1f\n\t"
		     " call call_rwsem_wake\n" /* expects old value in %edx */
		     "1:\n"
		     "# ending __up_read\n"
		     : "+m" (sem->count), [tmp] "=d" (tmp)
		     : [sem] "a" (sem), "[tmp]" (-RWSEM_ACTIVE_READ_BIAS)
		     : "memory", "cc");
}
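
/*
 * C sketch of the release above (hypothetical name, not built): drop one
 * active reader, and if the resulting count is negative there are queued
 * waiters, so let rwsem_wake() decide whether any of them can run now.
 */
#if 0
static inline void up_read_sketch(struct rw_semaphore *sem, long *count)
{
	long old = xadd(count, -RWSEM_ACTIVE_READ_BIAS);

	/* jns in the asm: skip the wakeup while the new count is >= 0. */
	if (old - RWSEM_ACTIVE_READ_BIAS < 0)
		rwsem_wake(sem);
}
#endif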

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	long tmp;
	asm volatile("# beginning __up_write\n\t"
		     LOCK_PREFIX " xadd %[tmp],(%[sem])\n\t"
		     /* subtracts 0xffff0001, returns the old value */
		     " jns 1f\n\t"
		     " call call_rwsem_wake\n" /* expects old value in %edx */
		     "1:\n\t"
		     "# ending __up_write\n"
		     : "+m" (sem->count), [tmp] "=d" (tmp)
		     : [sem] "a" (sem), "[tmp]" (-RWSEM_ACTIVE_WRITE_BIAS)
		     : "memory", "cc");
}
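
/*
 * Worked example for the release above (32-bit, illustrative): an
 * uncontended writer takes the count from 0xffff0001 back to 0x00000000,
 * which is non-negative, so no wakeup is needed. With one waiter queued
 * the count is 0xfffe0001; releasing yields 0xffff0000, which is
 * negative, so call_rwsem_wake runs and the task at the head of the
 * queue is woken.
 */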

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	asm volatile("# beginning __downgrade_write\n\t"
		     LOCK_PREFIX _ASM_ADD "%[inc],(%[sem])\n\t"
		     /*
		      * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
		      * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
		      */
		     " jns 1f\n\t"
		     " call call_rwsem_downgrade_wake\n"
		     "1:\n\t"
		     "# ending __downgrade_write\n"
		     : "+m" (sem->count)
		     : [sem] "a" (sem), [inc] "er" (-RWSEM_WAITING_BIAS)
		     : "memory", "cc");
}
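
/*
 * Worked example for the downgrade above (32-bit, illustrative): adding
 * -RWSEM_WAITING_BIAS (+0x00010000) turns a writer's 0xffff0001 into
 * 0x00000001, i.e. one active reader. With a waiter queued, 0xfffe0001
 * becomes 0xffff0001 instead, which is still negative, so
 * call_rwsem_downgrade_wake runs to wake any readers queued at the front.
 */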

#endif /* __KERNEL__ */
#endif /* _ASM_X86_RWSEM_H */