1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef __LINUX_SEQLOCK_H |
3 | #define __LINUX_SEQLOCK_H |
4 | |
5 | /* |
6 | * seqcount_t / seqlock_t - a reader-writer consistency mechanism with |
7 | * lockless readers (read-only retry loops), and no writer starvation. |
8 | * |
9 | * See Documentation/locking/seqlock.rst |
10 | * |
11 | * Copyrights: |
12 | * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli |
13 | * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH |
14 | */ |
15 | |
16 | #include <linux/compiler.h> |
17 | #include <linux/kcsan-checks.h> |
18 | #include <linux/lockdep.h> |
19 | #include <linux/mutex.h> |
20 | #include <linux/preempt.h> |
21 | #include <linux/spinlock.h> |
22 | |
23 | #include <asm/processor.h> |
24 | |
25 | /* |
26 | * The seqlock seqcount_t interface does not prescribe a precise sequence of |
27 | * read begin/retry/end. For readers, typically there is a call to |
28 | * read_seqcount_begin() and read_seqcount_retry(), however, there are more |
29 | * esoteric cases which do not follow this pattern. |
30 | * |
31 | * As a consequence, we take the following best-effort approach for raw usage |
32 | * via seqcount_t under KCSAN: upon beginning a seq-reader critical section, |
33 | * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as |
34 | * atomics; if there is a matching read_seqcount_retry() call, no following |
35 | * memory operations are considered atomic. Usage of the seqlock_t interface |
36 | * is not affected. |
37 | */ |
38 | #define KCSAN_SEQLOCK_REGION_MAX 1000 |
39 | |
40 | /* |
41 | * Sequence counters (seqcount_t) |
42 | * |
43 | * This is the raw counting mechanism, without any writer protection. |
44 | * |
45 | * Write side critical sections must be serialized and non-preemptible. |
46 | * |
47 | * If readers can be invoked from hardirq or softirq contexts, |
48 | * interrupts or bottom halves must also be respectively disabled before |
49 | * entering the write section. |
50 | * |
51 | * This mechanism can't be used if the protected data contains pointers, |
52 | * as the writer can invalidate a pointer that a reader is following. |
53 | * |
54 | * If the write serialization mechanism is one of the common kernel |
55 | * locking primitives, use a sequence counter with associated lock |
56 | * (seqcount_LOCKNAME_t) instead. |
57 | * |
58 | * If it's desired to automatically handle the sequence counter writer |
59 | * serialization and non-preemptibility requirements, use a sequential |
60 | * lock (seqlock_t) instead. |
61 | * |
62 | * See Documentation/locking/seqlock.rst |
63 | */ |
64 | typedef struct seqcount { |
65 | unsigned sequence; |
66 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
67 | struct lockdep_map dep_map; |
68 | #endif |
69 | } seqcount_t; |
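
/*
 * Illustrative usage sketch (hypothetical "foo" structure, not from the
 * kernel sources). The writer below is assumed to be serialized and kept
 * non-preemptible by external means; with a real spinlock as shown, a
 * seqcount_spinlock_t (see below) would normally be preferred:
 *
 *	struct foo {
 *		seqcount_t	seq;
 *		spinlock_t	lock;
 *		int		a, b;
 *	};
 *
 *	void foo_write(struct foo *f, int a, int b)
 *	{
 *		spin_lock(&f->lock);
 *		write_seqcount_begin(&f->seq);
 *		f->a = a;
 *		f->b = b;
 *		write_seqcount_end(&f->seq);
 *		spin_unlock(&f->lock);
 *	}
 *
 *	void foo_read(struct foo *f, int *a, int *b)
 *	{
 *		unsigned int seq;
 *
 *		do {
 *			seq = read_seqcount_begin(&f->seq);
 *			*a = f->a;
 *			*b = f->b;
 *		} while (read_seqcount_retry(&f->seq, seq));
 *	}
 */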
70 | |
static inline void __seqcount_init(seqcount_t *s, const char *name,
				   struct lock_class_key *key)
{
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	lockdep_init_map(&s->dep_map, name, key, 0);
	s->sequence = 0;
}
80 | |
81 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
82 | |
83 | # define SEQCOUNT_DEP_MAP_INIT(lockname) \ |
84 | .dep_map = { .name = #lockname } |
85 | |
86 | /** |
87 | * seqcount_init() - runtime initializer for seqcount_t |
88 | * @s: Pointer to the seqcount_t instance |
89 | */ |
90 | # define seqcount_init(s) \ |
91 | do { \ |
92 | static struct lock_class_key __key; \ |
93 | __seqcount_init((s), #s, &__key); \ |
94 | } while (0) |
95 | |
96 | static inline void seqcount_lockdep_reader_access(const seqcount_t *s) |
97 | { |
98 | seqcount_t *l = (seqcount_t *)s; |
99 | unsigned long flags; |
100 | |
101 | local_irq_save(flags); |
102 | seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_); |
103 | seqcount_release(&l->dep_map, _RET_IP_); |
104 | local_irq_restore(flags); |
105 | } |
106 | |
107 | #else |
108 | # define SEQCOUNT_DEP_MAP_INIT(lockname) |
109 | # define seqcount_init(s) __seqcount_init(s, NULL, NULL) |
110 | # define seqcount_lockdep_reader_access(x) |
111 | #endif |
112 | |
113 | /** |
114 | * SEQCNT_ZERO() - static initializer for seqcount_t |
115 | * @name: Name of the seqcount_t instance |
116 | */ |
117 | #define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) } |
118 | |
119 | /* |
120 | * Sequence counters with associated locks (seqcount_LOCKNAME_t) |
121 | * |
122 | * A sequence counter which associates the lock used for writer |
123 | * serialization at initialization time. This enables lockdep to validate |
124 | * that the write side critical section is properly serialized. |
125 | * |
126 | * For associated locks which do not implicitly disable preemption, |
127 | * preemption protection is enforced in the write side function. |
128 | * |
 * Lockdep is never used in any of the raw write variants.
130 | * |
131 | * See Documentation/locking/seqlock.rst |
132 | */ |
133 | |
134 | /* |
135 | * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot |
136 | * disable preemption. It can lead to higher latencies, and the write side |
137 | * sections will not be able to acquire locks which become sleeping locks |
138 | * (e.g. spinlock_t). |
139 | * |
140 | * To remain preemptible while avoiding a possible livelock caused by the |
141 | * reader preempting the writer, use a different technique: let the reader |
142 | * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the |
143 | * case, acquire then release the associated LOCKNAME writer serialization |
144 | * lock. This will allow any possibly-preempted writer to make progress |
145 | * until the end of its writer serialization lock critical section. |
146 | * |
147 | * This lock-unlock technique must be implemented for all of PREEMPT_RT |
148 | * sleeping locks. See Documentation/locking/locktypes.rst |
149 | */ |
150 | #if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT) |
151 | #define __SEQ_LOCK(expr) expr |
152 | #else |
153 | #define __SEQ_LOCK(expr) |
154 | #endif |
155 | |
156 | /* |
 * typedef seqcount_LOCKNAME_t - sequence counter with an associated LOCKNAME lock
158 | * @seqcount: The real sequence counter |
159 | * @lock: Pointer to the associated lock |
160 | * |
161 | * A plain sequence counter with external writer synchronization by |
162 | * LOCKNAME @lock. The lock is associated to the sequence counter in the |
163 | * static initializer or init function. This enables lockdep to validate |
164 | * that the write side critical section is properly serialized. |
165 | * |
166 | * LOCKNAME: raw_spinlock, spinlock, rwlock or mutex |
167 | */ |
168 | |
169 | /* |
170 | * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t |
171 | * @s: Pointer to the seqcount_LOCKNAME_t instance |
172 | * @lock: Pointer to the associated lock |
173 | */ |
174 | |
175 | #define seqcount_LOCKNAME_init(s, _lock, lockname) \ |
176 | do { \ |
177 | seqcount_##lockname##_t *____s = (s); \ |
178 | seqcount_init(&____s->seqcount); \ |
179 | __SEQ_LOCK(____s->lock = (_lock)); \ |
180 | } while (0) |
181 | |
182 | #define seqcount_raw_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, raw_spinlock) |
183 | #define seqcount_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, spinlock) |
184 | #define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock) |
185 | #define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex) |
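
/*
 * Illustrative sketch (hypothetical "foo" structure): associating the
 * counter with its serializing lock at init time lets lockdep verify that
 * the lock is held in write side sections, and lets PREEMPT_RT readers
 * break a potential reader-preempts-writer livelock (see above). If the
 * associated lock does not disable preemption, write_seqcount_begin()
 * disables it automatically:
 *
 *	struct foo {
 *		spinlock_t		lock;
 *		seqcount_spinlock_t	seq;
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		spin_lock_init(&f->lock);
 *		seqcount_spinlock_init(&f->seq, &f->lock);
 *	}
 *
 *	void foo_update(struct foo *f)
 *	{
 *		spin_lock(&f->lock);
 *		write_seqcount_begin(&f->seq);
 *		... update the protected data ...
 *		write_seqcount_end(&f->seq);
 *		spin_unlock(&f->lock);
 *	}
 */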
186 | |
187 | /* |
188 | * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers |
189 | * seqprop_LOCKNAME_*() - Property accessors for seqcount_LOCKNAME_t |
190 | * |
191 | * @lockname: "LOCKNAME" part of seqcount_LOCKNAME_t |
192 | * @locktype: LOCKNAME canonical C data type |
193 | * @preemptible: preemptibility of above locktype |
194 | * @lockbase: prefix for associated lock/unlock |
195 | */ |
196 | #define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase) \ |
197 | typedef struct seqcount_##lockname { \ |
198 | seqcount_t seqcount; \ |
199 | __SEQ_LOCK(locktype *lock); \ |
200 | } seqcount_##lockname##_t; \ |
201 | \ |
202 | static __always_inline seqcount_t * \ |
203 | __seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \ |
204 | { \ |
205 | return &s->seqcount; \ |
206 | } \ |
207 | \ |
208 | static __always_inline const seqcount_t * \ |
209 | __seqprop_##lockname##_const_ptr(const seqcount_##lockname##_t *s) \ |
210 | { \ |
211 | return &s->seqcount; \ |
212 | } \ |
213 | \ |
214 | static __always_inline unsigned \ |
215 | __seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \ |
216 | { \ |
217 | unsigned seq = READ_ONCE(s->seqcount.sequence); \ |
218 | \ |
219 | if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \ |
220 | return seq; \ |
221 | \ |
222 | if (preemptible && unlikely(seq & 1)) { \ |
223 | __SEQ_LOCK(lockbase##_lock(s->lock)); \ |
224 | __SEQ_LOCK(lockbase##_unlock(s->lock)); \ |
225 | \ |
226 | /* \ |
227 | * Re-read the sequence counter since the (possibly \ |
228 | * preempted) writer made progress. \ |
229 | */ \ |
230 | seq = READ_ONCE(s->seqcount.sequence); \ |
231 | } \ |
232 | \ |
233 | return seq; \ |
234 | } \ |
235 | \ |
236 | static __always_inline bool \ |
237 | __seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s) \ |
238 | { \ |
239 | if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \ |
240 | return preemptible; \ |
241 | \ |
242 | /* PREEMPT_RT relies on the above LOCK+UNLOCK */ \ |
243 | return false; \ |
244 | } \ |
245 | \ |
246 | static __always_inline void \ |
247 | __seqprop_##lockname##_assert(const seqcount_##lockname##_t *s) \ |
248 | { \ |
249 | __SEQ_LOCK(lockdep_assert_held(s->lock)); \ |
250 | } |
251 | |
252 | /* |
253 | * __seqprop() for seqcount_t |
254 | */ |
255 | |
256 | static inline seqcount_t *__seqprop_ptr(seqcount_t *s) |
257 | { |
258 | return s; |
259 | } |
260 | |
261 | static inline const seqcount_t *__seqprop_const_ptr(const seqcount_t *s) |
262 | { |
263 | return s; |
264 | } |
265 | |
266 | static inline unsigned __seqprop_sequence(const seqcount_t *s) |
267 | { |
268 | return READ_ONCE(s->sequence); |
269 | } |
270 | |
271 | static inline bool __seqprop_preemptible(const seqcount_t *s) |
272 | { |
273 | return false; |
274 | } |
275 | |
276 | static inline void __seqprop_assert(const seqcount_t *s) |
277 | { |
278 | lockdep_assert_preemption_disabled(); |
279 | } |
280 | |
281 | #define __SEQ_RT IS_ENABLED(CONFIG_PREEMPT_RT) |
282 | |
283 | SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, raw_spin) |
284 | SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, spin) |
285 | SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, read) |
286 | SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex) |
287 | |
288 | /* |
289 | * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t |
290 | * @name: Name of the seqcount_LOCKNAME_t instance |
291 | * @lock: Pointer to the associated LOCKNAME |
292 | */ |
293 | |
294 | #define SEQCOUNT_LOCKNAME_ZERO(seq_name, assoc_lock) { \ |
295 | .seqcount = SEQCNT_ZERO(seq_name.seqcount), \ |
296 | __SEQ_LOCK(.lock = (assoc_lock)) \ |
297 | } |
298 | |
299 | #define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock) |
300 | #define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock) |
301 | #define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock) |
302 | #define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock) |
303 | #define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock) |
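
/*
 * For illustration, a sketch of static initialization with an associated
 * lock (hypothetical names):
 *
 *	static DEFINE_SPINLOCK(foo_lock);
 *	static seqcount_spinlock_t foo_seq =
 *		SEQCNT_SPINLOCK_ZERO(foo_seq, &foo_lock);
 */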
304 | |
305 | #define __seqprop_case(s, lockname, prop) \ |
306 | seqcount_##lockname##_t: __seqprop_##lockname##_##prop |
307 | |
308 | #define __seqprop(s, prop) _Generic(*(s), \ |
309 | seqcount_t: __seqprop_##prop, \ |
310 | __seqprop_case((s), raw_spinlock, prop), \ |
311 | __seqprop_case((s), spinlock, prop), \ |
312 | __seqprop_case((s), rwlock, prop), \ |
313 | __seqprop_case((s), mutex, prop)) |
314 | |
315 | #define seqprop_ptr(s) __seqprop(s, ptr)(s) |
316 | #define seqprop_const_ptr(s) __seqprop(s, const_ptr)(s) |
317 | #define seqprop_sequence(s) __seqprop(s, sequence)(s) |
318 | #define seqprop_preemptible(s) __seqprop(s, preemptible)(s) |
319 | #define seqprop_assert(s) __seqprop(s, assert)(s) |
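
/*
 * For illustration: given a "seqcount_spinlock_t *s", the _Generic()
 * selection above makes seqprop_sequence(s) expand to
 * __seqprop_spinlock_sequence(s), while a plain "seqcount_t *s" resolves
 * to __seqprop_sequence(s). The read/write APIs below are written once in
 * terms of these seqprop_*() accessors and therefore work unchanged for
 * every sequence counter variant.
 */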
320 | |
321 | /** |
322 | * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier |
323 | * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants |
324 | * |
325 | * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb() |
326 | * barrier. Callers should ensure that smp_rmb() or equivalent ordering is |
327 | * provided before actually loading any of the variables that are to be |
328 | * protected in this critical section. |
329 | * |
330 | * Use carefully, only in critical code, and comment how the barrier is |
331 | * provided. |
332 | * |
333 | * Return: count to be passed to read_seqcount_retry() |
334 | */ |
335 | #define __read_seqcount_begin(s) \ |
336 | ({ \ |
337 | unsigned __seq; \ |
338 | \ |
339 | while ((__seq = seqprop_sequence(s)) & 1) \ |
340 | cpu_relax(); \ |
341 | \ |
342 | kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \ |
343 | __seq; \ |
344 | }) |
345 | |
346 | /** |
347 | * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep |
348 | * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants |
349 | * |
350 | * Return: count to be passed to read_seqcount_retry() |
351 | */ |
352 | #define raw_read_seqcount_begin(s) \ |
353 | ({ \ |
354 | unsigned _seq = __read_seqcount_begin(s); \ |
355 | \ |
356 | smp_rmb(); \ |
357 | _seq; \ |
358 | }) |
359 | |
360 | /** |
361 | * read_seqcount_begin() - begin a seqcount_t read critical section |
362 | * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants |
363 | * |
364 | * Return: count to be passed to read_seqcount_retry() |
365 | */ |
366 | #define read_seqcount_begin(s) \ |
367 | ({ \ |
368 | seqcount_lockdep_reader_access(seqprop_const_ptr(s)); \ |
369 | raw_read_seqcount_begin(s); \ |
370 | }) |
371 | |
372 | /** |
373 | * raw_read_seqcount() - read the raw seqcount_t counter value |
374 | * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants |
375 | * |
376 | * raw_read_seqcount opens a read critical section of the given |
377 | * seqcount_t, without any lockdep checking, and without checking or |
378 | * masking the sequence counter LSB. Calling code is responsible for |
379 | * handling that. |
380 | * |
381 | * Return: count to be passed to read_seqcount_retry() |
382 | */ |
383 | #define raw_read_seqcount(s) \ |
384 | ({ \ |
385 | unsigned __seq = seqprop_sequence(s); \ |
386 | \ |
387 | smp_rmb(); \ |
388 | kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \ |
389 | __seq; \ |
390 | }) |
391 | |
392 | /** |
393 | * raw_seqcount_begin() - begin a seqcount_t read critical section w/o |
394 | * lockdep and w/o counter stabilization |
395 | * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants |
396 | * |
397 | * raw_seqcount_begin opens a read critical section of the given |
398 | * seqcount_t. Unlike read_seqcount_begin(), this function will not wait |
399 | * for the count to stabilize. If a writer is active when it begins, it |
400 | * will fail the read_seqcount_retry() at the end of the read critical |
401 | * section instead of stabilizing at the beginning of it. |
402 | * |
403 | * Use this only in special kernel hot paths where the read section is |
404 | * small and has a high probability of success through other external |
405 | * means. It will save a single branching instruction. |
406 | * |
407 | * Return: count to be passed to read_seqcount_retry() |
408 | */ |
409 | #define raw_seqcount_begin(s) \ |
410 | ({ \ |
411 | /* \ |
412 | * If the counter is odd, let read_seqcount_retry() fail \ |
413 | * by decrementing the counter. \ |
414 | */ \ |
415 | raw_read_seqcount(s) & ~1; \ |
416 | }) |
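
/*
 * Illustrative sketch (hypothetical hot-path lookup): since the count is
 * not stabilized, a writer running concurrently simply makes the final
 * retry check fail and the caller falls back to a slower path:
 *
 *	seq = raw_seqcount_begin(&obj->seq);
 *	... speculatively load the protected fields ...
 *	if (read_seqcount_retry(&obj->seq, seq))
 *		goto slow_path;
 */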
417 | |
418 | /** |
419 | * __read_seqcount_retry() - end a seqcount_t read section w/o barrier |
420 | * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants |
421 | * @start: count, from read_seqcount_begin() |
422 | * |
423 | * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb() |
424 | * barrier. Callers should ensure that smp_rmb() or equivalent ordering is |
425 | * provided before actually loading any of the variables that are to be |
426 | * protected in this critical section. |
427 | * |
428 | * Use carefully, only in critical code, and comment how the barrier is |
429 | * provided. |
430 | * |
431 | * Return: true if a read section retry is required, else false |
432 | */ |
433 | #define __read_seqcount_retry(s, start) \ |
434 | do___read_seqcount_retry(seqprop_const_ptr(s), start) |
435 | |
static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	kcsan_atomic_next(0);
	return unlikely(READ_ONCE(s->sequence) != start);
}
441 | |
442 | /** |
443 | * read_seqcount_retry() - end a seqcount_t read critical section |
444 | * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants |
445 | * @start: count, from read_seqcount_begin() |
446 | * |
447 | * read_seqcount_retry closes the read critical section of given |
448 | * seqcount_t. If the critical section was invalid, it must be ignored |
449 | * (and typically retried). |
450 | * |
451 | * Return: true if a read section retry is required, else false |
452 | */ |
453 | #define read_seqcount_retry(s, start) \ |
454 | do_read_seqcount_retry(seqprop_const_ptr(s), start) |
455 | |
456 | static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start) |
457 | { |
458 | smp_rmb(); |
459 | return do___read_seqcount_retry(s, start); |
460 | } |
461 | |
462 | /** |
463 | * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep |
464 | * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants |
465 | * |
466 | * Context: check write_seqcount_begin() |
467 | */ |
468 | #define raw_write_seqcount_begin(s) \ |
469 | do { \ |
470 | if (seqprop_preemptible(s)) \ |
471 | preempt_disable(); \ |
472 | \ |
473 | do_raw_write_seqcount_begin(seqprop_ptr(s)); \ |
474 | } while (0) |
475 | |
476 | static inline void do_raw_write_seqcount_begin(seqcount_t *s) |
477 | { |
478 | kcsan_nestable_atomic_begin(); |
479 | s->sequence++; |
480 | smp_wmb(); |
481 | } |
482 | |
483 | /** |
484 | * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep |
485 | * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants |
486 | * |
487 | * Context: check write_seqcount_end() |
488 | */ |
489 | #define raw_write_seqcount_end(s) \ |
490 | do { \ |
491 | do_raw_write_seqcount_end(seqprop_ptr(s)); \ |
492 | \ |
493 | if (seqprop_preemptible(s)) \ |
494 | preempt_enable(); \ |
495 | } while (0) |
496 | |
497 | static inline void do_raw_write_seqcount_end(seqcount_t *s) |
498 | { |
499 | smp_wmb(); |
500 | s->sequence++; |
501 | kcsan_nestable_atomic_end(); |
502 | } |
503 | |
504 | /** |
505 | * write_seqcount_begin_nested() - start a seqcount_t write section with |
506 | * custom lockdep nesting level |
507 | * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants |
508 | * @subclass: lockdep nesting level |
509 | * |
510 | * See Documentation/locking/lockdep-design.rst |
511 | * Context: check write_seqcount_begin() |
512 | */ |
513 | #define write_seqcount_begin_nested(s, subclass) \ |
514 | do { \ |
515 | seqprop_assert(s); \ |
516 | \ |
517 | if (seqprop_preemptible(s)) \ |
518 | preempt_disable(); \ |
519 | \ |
520 | do_write_seqcount_begin_nested(seqprop_ptr(s), subclass); \ |
521 | } while (0) |
522 | |
523 | static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass) |
524 | { |
525 | seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_); |
526 | do_raw_write_seqcount_begin(s); |
527 | } |
528 | |
529 | /** |
530 | * write_seqcount_begin() - start a seqcount_t write side critical section |
531 | * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants |
532 | * |
533 | * Context: sequence counter write side sections must be serialized and |
534 | * non-preemptible. Preemption will be automatically disabled if and |
535 | * only if the seqcount write serialization lock is associated, and |
536 | * preemptible. If readers can be invoked from hardirq or softirq |
537 | * context, interrupts or bottom halves must be respectively disabled. |
538 | */ |
539 | #define write_seqcount_begin(s) \ |
540 | do { \ |
541 | seqprop_assert(s); \ |
542 | \ |
543 | if (seqprop_preemptible(s)) \ |
544 | preempt_disable(); \ |
545 | \ |
546 | do_write_seqcount_begin(seqprop_ptr(s)); \ |
547 | } while (0) |
548 | |
static inline void do_write_seqcount_begin(seqcount_t *s)
{
	do_write_seqcount_begin_nested(s, 0);
}
553 | |
554 | /** |
555 | * write_seqcount_end() - end a seqcount_t write side critical section |
556 | * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants |
557 | * |
558 | * Context: Preemption will be automatically re-enabled if and only if |
559 | * the seqcount write serialization lock is associated, and preemptible. |
560 | */ |
561 | #define write_seqcount_end(s) \ |
562 | do { \ |
563 | do_write_seqcount_end(seqprop_ptr(s)); \ |
564 | \ |
565 | if (seqprop_preemptible(s)) \ |
566 | preempt_enable(); \ |
567 | } while (0) |
568 | |
569 | static inline void do_write_seqcount_end(seqcount_t *s) |
570 | { |
571 | seqcount_release(&s->dep_map, _RET_IP_); |
572 | do_raw_write_seqcount_end(s); |
573 | } |
574 | |
575 | /** |
576 | * raw_write_seqcount_barrier() - do a seqcount_t write barrier |
577 | * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants |
578 | * |
579 | * This can be used to provide an ordering guarantee instead of the usual |
580 | * consistency guarantee. It is one wmb cheaper, because it can collapse |
581 | * the two back-to-back wmb()s. |
582 | * |
583 | * Note that writes surrounding the barrier should be declared atomic (e.g. |
584 | * via WRITE_ONCE): a) to ensure the writes become visible to other threads |
585 | * atomically, avoiding compiler optimizations; b) to document which writes are |
586 | * meant to propagate to the reader critical section. This is necessary because |
587 | * neither writes before nor after the barrier are enclosed in a seq-writer |
588 | * critical section that would ensure readers are aware of ongoing writes:: |
589 | * |
590 | * seqcount_t seq; |
591 | * bool X = true, Y = false; |
592 | * |
593 | * void read(void) |
594 | * { |
595 | * bool x, y; |
596 | * |
597 | * do { |
598 | * int s = read_seqcount_begin(&seq); |
599 | * |
600 | * x = X; y = Y; |
601 | * |
602 | * } while (read_seqcount_retry(&seq, s)); |
603 | * |
604 | * BUG_ON(!x && !y); |
605 | * } |
606 | * |
607 | * void write(void) |
608 | * { |
609 | * WRITE_ONCE(Y, true); |
610 | * |
 *	raw_write_seqcount_barrier(&seq);
612 | * |
613 | * WRITE_ONCE(X, false); |
614 | * } |
615 | */ |
616 | #define raw_write_seqcount_barrier(s) \ |
617 | do_raw_write_seqcount_barrier(seqprop_ptr(s)) |
618 | |
619 | static inline void do_raw_write_seqcount_barrier(seqcount_t *s) |
620 | { |
621 | kcsan_nestable_atomic_begin(); |
622 | s->sequence++; |
623 | smp_wmb(); |
624 | s->sequence++; |
625 | kcsan_nestable_atomic_end(); |
626 | } |
627 | |
628 | /** |
629 | * write_seqcount_invalidate() - invalidate in-progress seqcount_t read |
630 | * side operations |
631 | * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants |
632 | * |
633 | * After write_seqcount_invalidate, no seqcount_t read side operations |
634 | * will complete successfully and see data older than this. |
635 | */ |
636 | #define write_seqcount_invalidate(s) \ |
637 | do_write_seqcount_invalidate(seqprop_ptr(s)) |
638 | |
static inline void do_write_seqcount_invalidate(seqcount_t *s)
{
	smp_wmb();
	kcsan_nestable_atomic_begin();
	s->sequence += 2;
	kcsan_nestable_atomic_end();
}
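
/*
 * Illustrative sketch (hypothetical code): a writer that unhashes an
 * object can use write_seqcount_invalidate() to force in-flight lockless
 * readers of that object to retry, without opening a full write section:
 *
 *	spin_lock(&obj->lock);
 *	hlist_del_rcu(&obj->node);
 *	write_seqcount_invalidate(&obj->seq);
 *	spin_unlock(&obj->lock);
 */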
646 | |
647 | /* |
648 | * Latch sequence counters (seqcount_latch_t) |
649 | * |
650 | * A sequence counter variant where the counter even/odd value is used to |
651 | * switch between two copies of protected data. This allows the read path, |
652 | * typically NMIs, to safely interrupt the write side critical section. |
653 | * |
654 | * As the write sections are fully preemptible, no special handling for |
655 | * PREEMPT_RT is needed. |
656 | */ |
657 | typedef struct { |
658 | seqcount_t seqcount; |
659 | } seqcount_latch_t; |
660 | |
661 | /** |
662 | * SEQCNT_LATCH_ZERO() - static initializer for seqcount_latch_t |
663 | * @seq_name: Name of the seqcount_latch_t instance |
664 | */ |
665 | #define SEQCNT_LATCH_ZERO(seq_name) { \ |
666 | .seqcount = SEQCNT_ZERO(seq_name.seqcount), \ |
667 | } |
668 | |
669 | /** |
670 | * seqcount_latch_init() - runtime initializer for seqcount_latch_t |
671 | * @s: Pointer to the seqcount_latch_t instance |
672 | */ |
673 | #define seqcount_latch_init(s) seqcount_init(&(s)->seqcount) |
674 | |
675 | /** |
676 | * raw_read_seqcount_latch() - pick even/odd latch data copy |
677 | * @s: Pointer to seqcount_latch_t |
678 | * |
679 | * See raw_write_seqcount_latch() for details and a full reader/writer |
680 | * usage example. |
681 | * |
682 | * Return: sequence counter raw value. Use the lowest bit as an index for |
683 | * picking which data copy to read. The full counter must then be checked |
684 | * with raw_read_seqcount_latch_retry(). |
685 | */ |
686 | static __always_inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s) |
687 | { |
688 | /* |
689 | * Pairs with the first smp_wmb() in raw_write_seqcount_latch(). |
690 | * Due to the dependent load, a full smp_rmb() is not needed. |
691 | */ |
692 | return READ_ONCE(s->seqcount.sequence); |
693 | } |
694 | |
695 | /** |
696 | * raw_read_seqcount_latch_retry() - end a seqcount_latch_t read section |
697 | * @s: Pointer to seqcount_latch_t |
698 | * @start: count, from raw_read_seqcount_latch() |
699 | * |
700 | * Return: true if a read section retry is required, else false |
701 | */ |
702 | static __always_inline int |
703 | raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start) |
704 | { |
705 | smp_rmb(); |
706 | return unlikely(READ_ONCE(s->seqcount.sequence) != start); |
707 | } |
708 | |
709 | /** |
710 | * raw_write_seqcount_latch() - redirect latch readers to even/odd copy |
711 | * @s: Pointer to seqcount_latch_t |
712 | * |
713 | * The latch technique is a multiversion concurrency control method that allows |
714 | * queries during non-atomic modifications. If you can guarantee queries never |
715 | * interrupt the modification -- e.g. the concurrency is strictly between CPUs |
716 | * -- you most likely do not need this. |
717 | * |
718 | * Where the traditional RCU/lockless data structures rely on atomic |
719 | * modifications to ensure queries observe either the old or the new state the |
720 | * latch allows the same for non-atomic updates. The trade-off is doubling the |
721 | * cost of storage; we have to maintain two copies of the entire data |
722 | * structure. |
723 | * |
724 | * Very simply put: we first modify one copy and then the other. This ensures |
725 | * there is always one copy in a stable state, ready to give us an answer. |
726 | * |
727 | * The basic form is a data structure like:: |
728 | * |
729 | * struct latch_struct { |
730 | * seqcount_latch_t seq; |
731 | * struct data_struct data[2]; |
732 | * }; |
733 | * |
734 | * Where a modification, which is assumed to be externally serialized, does the |
735 | * following:: |
736 | * |
737 | * void latch_modify(struct latch_struct *latch, ...) |
738 | * { |
739 | * smp_wmb(); // Ensure that the last data[1] update is visible |
740 | * latch->seq.sequence++; |
741 | * smp_wmb(); // Ensure that the seqcount update is visible |
742 | * |
743 | * modify(latch->data[0], ...); |
744 | * |
745 | * smp_wmb(); // Ensure that the data[0] update is visible |
746 | * latch->seq.sequence++; |
747 | * smp_wmb(); // Ensure that the seqcount update is visible |
748 | * |
749 | * modify(latch->data[1], ...); |
750 | * } |
751 | * |
752 | * The query will have a form like:: |
753 | * |
754 | * struct entry *latch_query(struct latch_struct *latch, ...) |
755 | * { |
756 | * struct entry *entry; |
757 | * unsigned seq, idx; |
758 | * |
759 | * do { |
760 | * seq = raw_read_seqcount_latch(&latch->seq); |
761 | * |
762 | * idx = seq & 0x01; |
763 | * entry = data_query(latch->data[idx], ...); |
764 | * |
765 | * // This includes needed smp_rmb() |
766 | * } while (raw_read_seqcount_latch_retry(&latch->seq, seq)); |
767 | * |
768 | * return entry; |
769 | * } |
770 | * |
771 | * So during the modification, queries are first redirected to data[1]. Then we |
772 | * modify data[0]. When that is complete, we redirect queries back to data[0] |
773 | * and we can modify data[1]. |
774 | * |
775 | * NOTE: |
776 | * |
777 | * The non-requirement for atomic modifications does _NOT_ include |
778 | * the publishing of new entries in the case where data is a dynamic |
779 | * data structure. |
780 | * |
781 | * An iteration might start in data[0] and get suspended long enough |
782 | * to miss an entire modification sequence, once it resumes it might |
783 | * observe the new entry. |
784 | * |
785 | * NOTE2: |
786 | * |
 * When data is a dynamic data structure, one should use regular RCU
 * patterns to manage the lifetimes of the objects within.
789 | */ |
790 | static inline void raw_write_seqcount_latch(seqcount_latch_t *s) |
791 | { |
792 | smp_wmb(); /* prior stores before incrementing "sequence" */ |
793 | s->seqcount.sequence++; |
794 | smp_wmb(); /* increment "sequence" before following stores */ |
795 | } |
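
/*
 * For illustration: with this helper, the latch_modify() example above
 * reduces to the following sketch (writers still externally serialized):
 *
 *	void latch_modify(struct latch_struct *latch, ...)
 *	{
 *		raw_write_seqcount_latch(&latch->seq);
 *		modify(latch->data[0], ...);
 *		raw_write_seqcount_latch(&latch->seq);
 *		modify(latch->data[1], ...);
 *	}
 */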
796 | |
797 | /* |
798 | * Sequential locks (seqlock_t) |
799 | * |
800 | * Sequence counters with an embedded spinlock for writer serialization |
801 | * and non-preemptibility. |
802 | * |
803 | * For more info, see: |
804 | * - Comments on top of seqcount_t |
805 | * - Documentation/locking/seqlock.rst |
806 | */ |
807 | typedef struct { |
808 | /* |
809 | * Make sure that readers don't starve writers on PREEMPT_RT: use |
810 | * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK(). |
811 | */ |
812 | seqcount_spinlock_t seqcount; |
813 | spinlock_t lock; |
814 | } seqlock_t; |
815 | |
816 | #define __SEQLOCK_UNLOCKED(lockname) \ |
817 | { \ |
818 | .seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock), \ |
819 | .lock = __SPIN_LOCK_UNLOCKED(lockname) \ |
820 | } |
821 | |
822 | /** |
823 | * seqlock_init() - dynamic initializer for seqlock_t |
824 | * @sl: Pointer to the seqlock_t instance |
825 | */ |
826 | #define seqlock_init(sl) \ |
827 | do { \ |
828 | spin_lock_init(&(sl)->lock); \ |
829 | seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock); \ |
830 | } while (0) |
831 | |
832 | /** |
833 | * DEFINE_SEQLOCK(sl) - Define a statically allocated seqlock_t |
834 | * @sl: Name of the seqlock_t instance |
835 | */ |
836 | #define DEFINE_SEQLOCK(sl) \ |
837 | seqlock_t sl = __SEQLOCK_UNLOCKED(sl) |
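
/*
 * Illustrative sketch (hypothetical "foo" data): seqlock_t bundles the
 * sequence counter with its spinlock, so write sections are serialized
 * and non-preemptible automatically:
 *
 *	static DEFINE_SEQLOCK(foo_seqlock);
 *
 *	void foo_write(int a, int b)
 *	{
 *		write_seqlock(&foo_seqlock);
 *		foo_a = a;
 *		foo_b = b;
 *		write_sequnlock(&foo_seqlock);
 *	}
 *
 *	void foo_read(int *a, int *b)
 *	{
 *		unsigned int seq;
 *
 *		do {
 *			seq = read_seqbegin(&foo_seqlock);
 *			*a = foo_a;
 *			*b = foo_b;
 *		} while (read_seqretry(&foo_seqlock, seq));
 *	}
 */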
838 | |
839 | /** |
840 | * read_seqbegin() - start a seqlock_t read side critical section |
841 | * @sl: Pointer to seqlock_t |
842 | * |
843 | * Return: count, to be passed to read_seqretry() |
844 | */ |
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
	unsigned ret = read_seqcount_begin(&sl->seqcount);

	kcsan_atomic_next(0);  /* non-raw usage, assume closing read_seqretry() */
	kcsan_flat_atomic_begin();
	return ret;
}
853 | |
854 | /** |
855 | * read_seqretry() - end a seqlock_t read side section |
856 | * @sl: Pointer to seqlock_t |
857 | * @start: count, from read_seqbegin() |
858 | * |
859 | * read_seqretry closes the read side critical section of given seqlock_t. |
860 | * If the critical section was invalid, it must be ignored (and typically |
861 | * retried). |
862 | * |
863 | * Return: true if a read section retry is required, else false |
864 | */ |
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
	/*
	 * Assume not nested: read_seqretry() may be called multiple times when
	 * completing a read critical section.
	 */
	kcsan_flat_atomic_end();

	return read_seqcount_retry(&sl->seqcount, start);
}
875 | |
876 | /* |
877 | * For all seqlock_t write side functions, use the internal |
878 | * do_write_seqcount_begin() instead of generic write_seqcount_begin(). |
879 | * This way, no redundant lockdep_assert_held() checks are added. |
880 | */ |
881 | |
882 | /** |
883 | * write_seqlock() - start a seqlock_t write side critical section |
884 | * @sl: Pointer to seqlock_t |
885 | * |
886 | * write_seqlock opens a write side critical section for the given |
887 | * seqlock_t. It also implicitly acquires the spinlock_t embedded inside |
888 | * that sequential lock. All seqlock_t write side sections are thus |
889 | * automatically serialized and non-preemptible. |
890 | * |
891 | * Context: if the seqlock_t read section, or other write side critical |
892 | * sections, can be invoked from hardirq or softirq contexts, use the |
893 | * _irqsave or _bh variants of this function instead. |
894 | */ |
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
}
900 | |
901 | /** |
902 | * write_sequnlock() - end a seqlock_t write side critical section |
903 | * @sl: Pointer to seqlock_t |
904 | * |
905 | * write_sequnlock closes the (serialized and non-preemptible) write side |
906 | * critical section of given seqlock_t. |
907 | */ |
static inline void write_sequnlock(seqlock_t *sl)
{
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock(&sl->lock);
}
913 | |
914 | /** |
915 | * write_seqlock_bh() - start a softirqs-disabled seqlock_t write section |
916 | * @sl: Pointer to seqlock_t |
917 | * |
918 | * _bh variant of write_seqlock(). Use only if the read side section, or |
919 | * other write side sections, can be invoked from softirq contexts. |
920 | */ |
static inline void write_seqlock_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
}
926 | |
927 | /** |
928 | * write_sequnlock_bh() - end a softirqs-disabled seqlock_t write section |
929 | * @sl: Pointer to seqlock_t |
930 | * |
931 | * write_sequnlock_bh closes the serialized, non-preemptible, and |
932 | * softirqs-disabled, seqlock_t write side critical section opened with |
933 | * write_seqlock_bh(). |
934 | */ |
static inline void write_sequnlock_bh(seqlock_t *sl)
{
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock_bh(&sl->lock);
}
940 | |
941 | /** |
942 | * write_seqlock_irq() - start a non-interruptible seqlock_t write section |
943 | * @sl: Pointer to seqlock_t |
944 | * |
945 | * _irq variant of write_seqlock(). Use only if the read side section, or |
946 | * other write sections, can be invoked from hardirq contexts. |
947 | */ |
static inline void write_seqlock_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
}
953 | |
954 | /** |
955 | * write_sequnlock_irq() - end a non-interruptible seqlock_t write section |
956 | * @sl: Pointer to seqlock_t |
957 | * |
958 | * write_sequnlock_irq closes the serialized and non-interruptible |
959 | * seqlock_t write side section opened with write_seqlock_irq(). |
960 | */ |
static inline void write_sequnlock_irq(seqlock_t *sl)
{
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock_irq(&sl->lock);
}
966 | |
static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
	return flags;
}
975 | |
976 | /** |
977 | * write_seqlock_irqsave() - start a non-interruptible seqlock_t write |
978 | * section |
979 | * @lock: Pointer to seqlock_t |
980 | * @flags: Stack-allocated storage for saving caller's local interrupt |
981 | * state, to be passed to write_sequnlock_irqrestore(). |
982 | * |
983 | * _irqsave variant of write_seqlock(). Use it only if the read side |
984 | * section, or other write sections, can be invoked from hardirq context. |
985 | */ |
986 | #define write_seqlock_irqsave(lock, flags) \ |
987 | do { flags = __write_seqlock_irqsave(lock); } while (0) |
988 | |
989 | /** |
990 | * write_sequnlock_irqrestore() - end non-interruptible seqlock_t write |
991 | * section |
992 | * @sl: Pointer to seqlock_t |
993 | * @flags: Caller's saved interrupt state, from write_seqlock_irqsave() |
994 | * |
995 | * write_sequnlock_irqrestore closes the serialized and non-interruptible |
996 | * seqlock_t write section previously opened with write_seqlock_irqsave(). |
997 | */ |
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock_irqrestore(&sl->lock, flags);
}
1004 | |
1005 | /** |
1006 | * read_seqlock_excl() - begin a seqlock_t locking reader section |
1007 | * @sl: Pointer to seqlock_t |
1008 | * |
1009 | * read_seqlock_excl opens a seqlock_t locking reader critical section. A |
1010 | * locking reader exclusively locks out *both* other writers *and* other |
1011 | * locking readers, but it does not update the embedded sequence number. |
1012 | * |
1013 | * Locking readers act like a normal spin_lock()/spin_unlock(). |
1014 | * |
1015 | * Context: if the seqlock_t write section, *or other read sections*, can |
1016 | * be invoked from hardirq or softirq contexts, use the _irqsave or _bh |
1017 | * variant of this function instead. |
1018 | * |
1019 | * The opened read section must be closed with read_sequnlock_excl(). |
1020 | */ |
static inline void read_seqlock_excl(seqlock_t *sl)
{
	spin_lock(&sl->lock);
}
1025 | |
1026 | /** |
1027 | * read_sequnlock_excl() - end a seqlock_t locking reader critical section |
1028 | * @sl: Pointer to seqlock_t |
1029 | */ |
static inline void read_sequnlock_excl(seqlock_t *sl)
{
	spin_unlock(&sl->lock);
}
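
/*
 * Illustrative sketch (hypothetical): a reader that cannot simply retry,
 * for example because it copies the data somewhere with side effects, can
 * lock out writers instead of looping:
 *
 *	read_seqlock_excl(&foo_seqlock);
 *	... read or copy out the protected data ...
 *	read_sequnlock_excl(&foo_seqlock);
 */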
1034 | |
1035 | /** |
1036 | * read_seqlock_excl_bh() - start a seqlock_t locking reader section with |
1037 | * softirqs disabled |
1038 | * @sl: Pointer to seqlock_t |
1039 | * |
1040 | * _bh variant of read_seqlock_excl(). Use this variant only if the |
1041 | * seqlock_t write side section, *or other read sections*, can be invoked |
1042 | * from softirq contexts. |
1043 | */ |
static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
}
1048 | |
1049 | /** |
1050 | * read_sequnlock_excl_bh() - stop a seqlock_t softirq-disabled locking |
1051 | * reader section |
1052 | * @sl: Pointer to seqlock_t |
1053 | */ |
static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
	spin_unlock_bh(&sl->lock);
}
1058 | |
1059 | /** |
1060 | * read_seqlock_excl_irq() - start a non-interruptible seqlock_t locking |
1061 | * reader section |
1062 | * @sl: Pointer to seqlock_t |
1063 | * |
1064 | * _irq variant of read_seqlock_excl(). Use this only if the seqlock_t |
1065 | * write side section, *or other read sections*, can be invoked from a |
1066 | * hardirq context. |
1067 | */ |
static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
}
1072 | |
1073 | /** |
1074 | * read_sequnlock_excl_irq() - end an interrupts-disabled seqlock_t |
1075 | * locking reader section |
1076 | * @sl: Pointer to seqlock_t |
1077 | */ |
static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
	spin_unlock_irq(&sl->lock);
}
1082 | |
1083 | static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl) |
1084 | { |
1085 | unsigned long flags; |
1086 | |
1087 | spin_lock_irqsave(&sl->lock, flags); |
1088 | return flags; |
1089 | } |
1090 | |
1091 | /** |
1092 | * read_seqlock_excl_irqsave() - start a non-interruptible seqlock_t |
1093 | * locking reader section |
1094 | * @lock: Pointer to seqlock_t |
1095 | * @flags: Stack-allocated storage for saving caller's local interrupt |
1096 | * state, to be passed to read_sequnlock_excl_irqrestore(). |
1097 | * |
1098 | * _irqsave variant of read_seqlock_excl(). Use this only if the seqlock_t |
1099 | * write side section, *or other read sections*, can be invoked from a |
1100 | * hardirq context. |
1101 | */ |
1102 | #define read_seqlock_excl_irqsave(lock, flags) \ |
1103 | do { flags = __read_seqlock_excl_irqsave(lock); } while (0) |
1104 | |
1105 | /** |
1106 | * read_sequnlock_excl_irqrestore() - end non-interruptible seqlock_t |
1107 | * locking reader section |
1108 | * @sl: Pointer to seqlock_t |
1109 | * @flags: Caller saved interrupt state, from read_seqlock_excl_irqsave() |
1110 | */ |
static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
	spin_unlock_irqrestore(&sl->lock, flags);
}
1116 | |
1117 | /** |
1118 | * read_seqbegin_or_lock() - begin a seqlock_t lockless or locking reader |
1119 | * @lock: Pointer to seqlock_t |
1120 | * @seq : Marker and return parameter. If the passed value is even, the |
1121 | * reader will become a *lockless* seqlock_t reader as in read_seqbegin(). |
1122 | * If the passed value is odd, the reader will become a *locking* reader |
1123 | * as in read_seqlock_excl(). In the first call to this function, the |
1124 | * caller *must* initialize and pass an even value to @seq; this way, a |
1125 | * lockless read can be optimistically tried first. |
1126 | * |
1127 | * read_seqbegin_or_lock is an API designed to optimistically try a normal |
1128 | * lockless seqlock_t read section first. If an odd counter is found, the |
1129 | * lockless read trial has failed, and the next read iteration transforms |
1130 | * itself into a full seqlock_t locking reader. |
1131 | * |
 * This is typically used to avoid seqlock_t lockless reader starvation
 * (too many retry loops) in the case of a sharp spike in write side
 * activity.
1135 | * |
1136 | * Context: if the seqlock_t write section, *or other read sections*, can |
1137 | * be invoked from hardirq or softirq contexts, use the _irqsave or _bh |
1138 | * variant of this function instead. |
1139 | * |
1140 | * Check Documentation/locking/seqlock.rst for template example code. |
1141 | * |
1142 | * Return: the encountered sequence counter value, through the @seq |
1143 | * parameter, which is overloaded as a return parameter. This returned |
 * value must be checked with need_seqretry(). If the read section needs to
 * be retried, this returned value must also be passed as the @seq
1146 | * parameter of the next read_seqbegin_or_lock() iteration. |
1147 | */ |
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl(lock);
}
1155 | |
1156 | /** |
1157 | * need_seqretry() - validate seqlock_t "locking or lockless" read section |
1158 | * @lock: Pointer to seqlock_t |
1159 | * @seq: sequence count, from read_seqbegin_or_lock() |
1160 | * |
1161 | * Return: true if a read section retry is required, false otherwise |
1162 | */ |
static inline int need_seqretry(seqlock_t *lock, int seq)
{
	return !(seq & 1) && read_seqretry(lock, seq);
}
1167 | |
1168 | /** |
1169 | * done_seqretry() - end seqlock_t "locking or lockless" reader section |
1170 | * @lock: Pointer to seqlock_t |
1171 | * @seq: count, from read_seqbegin_or_lock() |
1172 | * |
1173 | * done_seqretry finishes the seqlock_t read side critical section started |
1174 | * with read_seqbegin_or_lock() and validated by need_seqretry(). |
1175 | */ |
static inline void done_seqretry(seqlock_t *lock, int seq)
{
	if (seq & 1)
		read_sequnlock_excl(lock);
}
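
/*
 * Illustrative sketch (hypothetical reader): start as a lockless reader
 * and, if that pass fails, force @seq odd so the next iteration becomes a
 * locking reader:
 *
 *	int seq = 0;
 *
 *   retry:
 *	read_seqbegin_or_lock(&foo_seqlock, &seq);
 *
 *	... read side critical section ...
 *
 *	if (need_seqretry(&foo_seqlock, seq)) {
 *		seq = 1;
 *		goto retry;
 *	}
 *	done_seqretry(&foo_seqlock, seq);
 */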
1181 | |
1182 | /** |
1183 | * read_seqbegin_or_lock_irqsave() - begin a seqlock_t lockless reader, or |
1184 | * a non-interruptible locking reader |
1185 | * @lock: Pointer to seqlock_t |
1186 | * @seq: Marker and return parameter. Check read_seqbegin_or_lock(). |
1187 | * |
1188 | * This is the _irqsave variant of read_seqbegin_or_lock(). Use it only if |
1189 | * the seqlock_t write section, *or other read sections*, can be invoked |
1190 | * from hardirq context. |
1191 | * |
1192 | * Note: Interrupts will be disabled only for "locking reader" mode. |
1193 | * |
1194 | * Return: |
1195 | * |
1196 | * 1. The saved local interrupts state in case of a locking reader, to |
1197 | * be passed to done_seqretry_irqrestore(). |
1198 | * |
1199 | * 2. The encountered sequence counter value, returned through @seq |
1200 | * overloaded as a return parameter. Check read_seqbegin_or_lock(). |
1201 | */ |
static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
{
	unsigned long flags = 0;

	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl_irqsave(lock, flags);

	return flags;
}
1214 | |
1215 | /** |
1216 | * done_seqretry_irqrestore() - end a seqlock_t lockless reader, or a |
1217 | * non-interruptible locking reader section |
1218 | * @lock: Pointer to seqlock_t |
1219 | * @seq: Count, from read_seqbegin_or_lock_irqsave() |
1220 | * @flags: Caller's saved local interrupt state in case of a locking |
1221 | * reader, also from read_seqbegin_or_lock_irqsave() |
1222 | * |
1223 | * This is the _irqrestore variant of done_seqretry(). The read section |
1224 | * must've been opened with read_seqbegin_or_lock_irqsave(), and validated |
1225 | * by need_seqretry(). |
1226 | */ |
static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
{
	if (seq & 1)
		read_sequnlock_excl_irqrestore(lock, flags);
}
1233 | #endif /* __LINUX_SEQLOCK_H */ |
1234 | |