/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H
#define __LINUX_INSIDE_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * Here's the role of the various spinlock/rwlock-related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types_raw.h:
 *                        The raw types and initializers
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types_raw.h:
 *                        The raw types and initializers
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. versions for UP
 *                        builds (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
#include <linux/cleanup.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>


/*
 * Must define these before including other files; inline functions need them.
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)		\
	".subsection 1\n\t"			\
	extra					\
	".ifndef " LOCK_SECTION_NAME "\n\t"	\
	LOCK_SECTION_NAME ":\n\t"		\
	".endif\n"

#define LOCK_SECTION_END			\
	".previous\n\t"

#define __lockfunc __section(".spinlock.text")

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				 struct lock_class_key *key, short inner);

# define raw_spin_lock_init(lock)					\
do {									\
	static struct lock_class_key __key;				\
									\
	__raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN);	\
} while (0)

#else
# define raw_spin_lock_init(lock)					\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
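
/*
 * Example (illustrative; "my_dev"/"dev" are hypothetical): a raw spinlock
 * must be initialized before first use, either statically or at runtime:
 *
 *	static DEFINE_RAW_SPINLOCK(boot_lock);
 *
 *	struct my_dev {
 *		raw_spinlock_t lock;
 *	};
 *
 *	raw_spin_lock_init(&dev->lock);
 */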

#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif /* arch_spin_is_contended */

/*
 * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
 * between program-order earlier lock acquisitions and program-order later
 * memory accesses.
 *
 * This guarantees that the following two properties hold:
 *
 * 1) Given the snippet:
 *
 *	{ X = 0;  Y = 0; }
 *
 *	CPU0				CPU1
 *
 *	WRITE_ONCE(X, 1);		WRITE_ONCE(Y, 1);
 *	spin_lock(S);			smp_mb();
 *	smp_mb__after_spinlock();	r1 = READ_ONCE(X);
 *	r0 = READ_ONCE(Y);
 *	spin_unlock(S);
 *
 *    it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0)
 *    and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments
 *    preceding the call to smp_mb__after_spinlock() in __schedule() and in
 *    try_to_wake_up().
 *
 * 2) Given the snippet:
 *
 *	{ X = 0;  Y = 0; }
 *
 *	CPU0			CPU1				CPU2
 *
 *	spin_lock(S);		spin_lock(S);			r1 = READ_ONCE(Y);
 *	WRITE_ONCE(X, 1);	smp_mb__after_spinlock();	smp_rmb();
 *	spin_unlock(S);		r0 = READ_ONCE(X);		r2 = READ_ONCE(X);
 *				WRITE_ONCE(Y, 1);
 *				spin_unlock(S);
 *
 *    it is forbidden that CPU0's critical section executes before CPU1's
 *    critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1)
 *    and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments
 *    preceding the calls to smp_rmb() in try_to_wake_up() for similar
 *    snippets but "projected" onto two CPUs.
 *
 * Property (2) upgrades the lock to an RCsc lock.
 *
 * Since most load-store architectures implement ACQUIRE with an smp_mb() after
 * the LL/SC loop, they need no further barriers. Similarly, all our TSO
 * architectures imply an smp_mb() for each atomic instruction and equally
 * need no more.
 *
 * Architectures that can implement ACQUIRE better need to take care.
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()	kcsan_mb()
#endif
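
/*
 * Condensed usage sketch of property (1) above, in the style of
 * try_to_wake_up(): the waker needs a full barrier between taking the lock
 * and reading the sleeper's condition ("cond" is an illustrative name):
 *
 *	spin_lock(&lock);
 *	smp_mb__after_spinlock();
 *	if (READ_ONCE(cond))
 *		...perform the wakeup...
 */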

#ifdef CONFIG_DEBUG_SPINLOCK
extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
extern int do_raw_spin_trylock(raw_spinlock_t *lock);
extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
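/*
 * Non-debug fast paths.  The mmiowb_spin_lock()/mmiowb_spin_unlock() hooks
 * (from asm/mmiowb.h) let architectures that need it order MMIO writes made
 * inside the critical section before the lock is released.
 */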
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
	mmiowb_spin_lock();
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	int ret = arch_spin_trylock(&(lock)->raw_lock);

	if (ret)
		mmiowb_spin_lock();

	return ret;
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	mmiowb_spin_unlock();
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPTION is set.  The
 * various methods are defined as NOPs when they are not required.
 */
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	do {								\
		typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
		_raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	} while (0)
#else
/*
 * Always evaluate the 'subclass' argument so that the compiler does not
 * warn about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)		\
	_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
#endif
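
/*
 * Illustrative use of the _nested annotation: taking two locks of the same
 * lock class in a fixed order would otherwise trigger a false-positive
 * lockdep report ("a"/"b" are hypothetical objects):
 *
 *	raw_spin_lock(&a->lock);
 *	raw_spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	raw_spin_unlock(&b->lock);
 *	raw_spin_unlock(&a->lock);
 */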

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)			\
	do {							\
		typecheck(unsigned long, flags);		\
		flags = _raw_spin_lock_irqsave(lock);		\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	do {							\
		typecheck(unsigned long, flags);		\
		flags = _raw_spin_lock_irqsave(lock);		\
	} while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_raw_spin_lock_irqsave(lock, flags);	\
	} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)			\
	do {							\
		typecheck(unsigned long, flags);		\
		_raw_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})
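
/*
 * Typical trylock pattern (illustrative): on failure the macro itself
 * restores the saved interrupt state, so the caller only unlocks (and
 * restores) on success:
 *
 *	unsigned long flags;
 *
 *	if (raw_spin_trylock_irqsave(&lock, flags)) {
 *		...critical section...
 *		raw_spin_unlock_irqrestore(&lock, flags);
 *	}
 */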

#ifndef CONFIG_PREEMPT_RT
/* Include rwlock functions for !RT */
#include <linux/rwlock.h>
#endif

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/* Non PREEMPT_RT kernel, map to raw spinlocks: */
#ifndef CONFIG_PREEMPT_RT

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#ifdef CONFIG_DEBUG_SPINLOCK

# define spin_lock_init(lock)					\
do {								\
	static struct lock_class_key __key;			\
								\
	__raw_spin_lock_init(spinlock_check(lock),		\
			     #lock, &__key, LD_WAIT_CONFIG);	\
} while (0)

#else

# define spin_lock_init(_lock)			\
do {						\
	spinlock_check(_lock);			\
	*(_lock) = __SPIN_LOCK_UNLOCKED(_lock);	\
} while (0)

#endif
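
/*
 * Example (illustrative; "foo" is a hypothetical type): spinlocks embedded
 * in dynamically allocated objects are initialized at runtime; static locks
 * use DEFINE_SPINLOCK():
 *
 *	static DEFINE_SPINLOCK(foo_list_lock);
 *
 *	struct foo *foo = kmalloc(sizeof(*foo), GFP_KERNEL);
 *
 *	spin_lock_init(&foo->lock);
 */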

static __always_inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}
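
/*
 * Canonical pairing (illustrative): use the irqsave/irqrestore variants in
 * code that may run with interrupts either enabled or disabled, so the
 * previous interrupt state is preserved across the critical section:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&lock, flags);
 *	...critical section, local interrupts disabled...
 *	spin_unlock_irqrestore(&lock, flags);
 */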

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)			\
({								\
	raw_spin_trylock_irqsave(spinlock_check(lock), flags);	\
})

/**
 * spin_is_locked() - Check whether a spinlock is locked.
 * @lock: Pointer to the spinlock.
 *
 * This function is NOT required to provide any memory ordering
 * guarantees; it could be used for debugging purposes or, when
 * additional synchronization is needed, accompanied by other
 * constructs (memory barriers) that enforce the synchronization.
 *
 * Returns: 1 if @lock is locked, 0 otherwise.
 *
 * Note that the function only tells you that the spinlock is
 * seen to be locked, not that it is locked on your CPU.
 *
 * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n,
 * the return value is always 0 (see include/linux/spinlock_up.h).
 * Therefore you should not rely heavily on the return value.
 */
static __always_inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}
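
/*
 * Given the caveats above, a sanity check is about the only sound use
 * (sketch; "dev" is hypothetical, and lockdep_assert_held() is preferable
 * when lockdep is enabled):
 *
 *	WARN_ON_ONCE(!spin_is_locked(&dev->lock));
 */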

static __always_inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)

#else  /* !CONFIG_PREEMPT_RT */
# include <linux/spinlock_rt.h>
#endif /* CONFIG_PREEMPT_RT */

/*
 * Pull the atomic_t declaration:
 * (asm/atomic.h on some architectures, e.g. MIPS, needs the above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
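
/*
 * Typical teardown pattern (illustrative; "obj" and its members are
 * hypothetical): drop a reference and, only when it hits zero, take the
 * list lock to unlink and free the object:
 *
 *	if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		spin_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */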

extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
					unsigned long *flags);
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
		__cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))

extern int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock);
#define atomic_dec_and_raw_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_raw_lock(atomic, lock))

extern int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
					    unsigned long *flags);
#define atomic_dec_and_raw_lock_irqsave(atomic, lock, flags) \
		__cond_lock(lock, _atomic_dec_and_raw_lock_irqsave(atomic, lock, &(flags)))

int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
			     size_t max_size, unsigned int cpu_mult,
			     gfp_t gfp, const char *name,
			     struct lock_class_key *key);

#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp)    \
	({								     \
		static struct lock_class_key key;			     \
		int ret;						     \
									     \
		ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size,   \
					       cpu_mult, gfp, #locks, &key); \
		ret;							     \
	})

void free_bucket_spinlocks(spinlock_t *locks);
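
/*
 * Usage sketch for the bucket-spinlock helpers (illustrative; a hash table
 * protecting each bucket with locks[hash & mask]):
 *
 *	spinlock_t *locks;
 *	unsigned int mask;
 *	int ret;
 *
 *	ret = alloc_bucket_spinlocks(&locks, &mask, 1024, 0, GFP_KERNEL);
 *	if (!ret) {
 *		spin_lock(&locks[hash & mask]);
 *		...
 *		spin_unlock(&locks[hash & mask]);
 *		free_bucket_spinlocks(locks);
 *	}
 */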

DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t,
		    raw_spin_lock(_T->lock),
		    raw_spin_unlock(_T->lock))

DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t,
		    raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING),
		    raw_spin_unlock(_T->lock))

DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
		    raw_spin_lock_irq(_T->lock),
		    raw_spin_unlock_irq(_T->lock))

DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
		    raw_spin_lock_irqsave(_T->lock, _T->flags),
		    raw_spin_unlock_irqrestore(_T->lock, _T->flags),
		    unsigned long flags)

DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
		    spin_lock(_T->lock),
		    spin_unlock(_T->lock))

DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
		    spin_lock_irq(_T->lock),
		    spin_unlock_irq(_T->lock))

DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
		    spin_lock_irqsave(_T->lock, _T->flags),
		    spin_unlock_irqrestore(_T->lock, _T->flags),
		    unsigned long flags)
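
/*
 * The guard classes above enable scope-based locking via linux/cleanup.h
 * (illustrative; "dev" is hypothetical):
 *
 *	guard(spinlock_irqsave)(&dev->lock);
 *	...the lock is dropped and IRQ state restored at scope exit...
 *
 * or, for an explicit scope:
 *
 *	scoped_guard(spinlock, &dev->lock) {
 *		...
 *	}
 */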

#undef __LINUX_INSIDE_SPINLOCK_H
#endif /* __LINUX_SPINLOCK_H */