/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * Copyright IBM Corporation, 2011
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <linux/slab.h>
#include <trace/events/rcu.h>

/*
 * Grace-period counter management.
 *
 * The two least significant bits contain the control flags.
 * The most significant bits contain the grace-period sequence counter.
 *
 * When both control flags are zero, no grace period is in progress.
 * When either bit is non-zero, a grace period has started and is in
 * progress. When the grace period completes, the control flags are reset
 * to 0 and the grace-period sequence counter is incremented.
 *
 * However, some specific RCU usages make use of custom values.
 *
 * SRCU special control values:
 *
 *	SRCU_SNP_INIT_SEQ	: Invalid/init value set when SRCU node
 *				  is initialized.
 *
 *	SRCU_STATE_IDLE		: No SRCU gp is in progress.
 *
 *	SRCU_STATE_SCAN1	: State set by rcu_seq_start(). Indicates
 *				  we are scanning the readers on the slot
 *				  defined as inactive (there might well
 *				  be pending readers that will use that
 *				  index, but their number is bounded).
 *
 *	SRCU_STATE_SCAN2	: State set manually via rcu_seq_set_state().
 *				  Indicates we are flipping the readers
 *				  index and then scanning the readers on the
 *				  slot newly designated as inactive (again,
 *				  the number of pending readers that will use
 *				  this inactive index is bounded).
 *
 * RCU polled GP special control value:
 *
 *	RCU_GET_STATE_COMPLETED	: State value indicating that the polled
 *				  GP has already completed. This value covers
 *				  both the state and the counter of the
 *				  grace-period sequence number.
 */

#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

/* Low-order bit definition for polled grace-period APIs. */
#define RCU_GET_STATE_COMPLETED	0x1
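
/*
 * Illustrative sketch (not part of the kernel header): how a gp_seq
 * value decomposes under the definitions above. With RCU_SEQ_CTR_SHIFT
 * of 2, the low two bits hold state and the remaining bits hold the
 * counter:
 *
 *	gp_seq == 0x8: counter 2, state 0 (idle, two full GPs completed)
 *	gp_seq == 0x9: counter 2, state 1 (third grace period in progress)
 *	gp_seq == 0xC: counter 3, state 0 (that grace period has ended)
 *
 * So rcu_seq_ctr(0x9) == 2 and rcu_seq_state(0x9) == 1.
 */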

extern int sysctl_sched_rt_runtime;

/*
 * Return the counter portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline unsigned long rcu_seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;
}

/*
 * Return the state portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline int rcu_seq_state(unsigned long s)
{
	return s & RCU_SEQ_STATE_MASK;
}

/*
 * Set the state portion of the pointed-to sequence number.
 * The caller is responsible for preventing conflicting updates.
 */
static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
{
	WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
	WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
}

/* Adjust sequence number for start of update-side operation. */
static inline void rcu_seq_start(unsigned long *sp)
{
	WRITE_ONCE(*sp, *sp + 1);
	smp_mb(); /* Ensure update-side operation after counter increment. */
	WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
}

/* Compute the end-of-grace-period value for the specified sequence number. */
static inline unsigned long rcu_seq_endval(unsigned long *sp)
{
	return (*sp | RCU_SEQ_STATE_MASK) + 1;
}

/* Adjust sequence number for end of update-side operation. */
static inline void rcu_seq_end(unsigned long *sp)
{
	smp_mb(); /* Ensure update-side operation before counter increment. */
	WARN_ON_ONCE(!rcu_seq_state(*sp));
	WRITE_ONCE(*sp, rcu_seq_endval(sp));
}
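
/*
 * Illustrative sketch (not part of the kernel header): the state
 * transitions produced by rcu_seq_start() and rcu_seq_end() on a
 * caller-owned counter. Starting from the idle value 0x8:
 *
 *	rcu_seq_start(&seq);	// seq: 0x8 -> 0x9 (state goes 0 -> 1)
 *	...			// grace-period work happens here
 *	rcu_seq_end(&seq);	// seq: 0x9 -> 0xC (state cleared,
 *				//	counter advances 2 -> 3)
 *
 * rcu_seq_endval() computes that 0xC: (0x9 | 0x3) + 1 == 0xC.
 */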

/*
 * rcu_seq_snap - Take a snapshot of the update side's sequence number.
 *
 * This function returns the earliest value of the grace-period sequence number
 * that will indicate that a full grace period has elapsed since the current
 * time. Once the grace-period sequence number has reached this value, it will
 * be safe to invoke all callbacks that have been registered prior to the
 * current time. This value is the current grace-period number plus two to the
 * power of the number of low-order bits reserved for state, then rounded up to
 * the next value in which the state bits are all zero.
 */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
	unsigned long s;

	s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
	smp_mb(); /* Above access must not bleed into critical section. */
	return s;
}
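
/*
 * Illustrative sketch (not part of the kernel header): the usual
 * snapshot-then-poll pattern built from rcu_seq_snap() and the
 * rcu_seq_done() helper defined below. "seq" is a hypothetical
 * caller-owned grace-period counter.
 *
 *	unsigned long s = rcu_seq_snap(&seq);
 *	...
 *	if (rcu_seq_done(&seq, s))
 *		;	// a full GP has elapsed since the snapshot
 *
 * Worked arithmetic with the two-bit state field: if seq == 0x8 (idle),
 * the snapshot is (0x8 + 7) & ~3 == 0xC, i.e. wait for the next full
 * GP. If seq == 0x9 (GP in progress), the snapshot is
 * (0x9 + 7) & ~3 == 0x10: the in-progress GP cannot count, so a
 * further complete GP must elapse.
 */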

/* Return the current value of the update side's sequence number, with no ordering. */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
	return READ_ONCE(*sp);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not the
 * corresponding update-side operation has started.
 */
static inline bool rcu_seq_started(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp));
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred.
 */
static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_GE(READ_ONCE(*sp), s);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred, but do not allow the
 * (ULONG_MAX / 2) safety-factor/guard-band.
 */
static inline bool rcu_seq_done_exact(unsigned long *sp, unsigned long s)
{
	unsigned long cur_s = READ_ONCE(*sp);

	return ULONG_CMP_GE(cur_s, s) || ULONG_CMP_LT(cur_s, s - (2 * RCU_SEQ_STATE_MASK + 1));
}
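
/*
 * Illustrative note (an assumption of this sketch, not kernel text):
 * rcu_seq_done_exact() shrinks the not-done window to the two grace
 * periods preceding the snapshot. With s == 0x10, it returns false
 * only for cur_s in [0x9, 0xF]; anything at or above 0x10, or far
 * enough below to read as counter wrap (below 0x9), counts as done.
 */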

/*
 * Has a grace period completed since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
}

/*
 * Has a grace period started since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
			    new);
}

/*
 * Roughly how many full grace periods have elapsed between the collection
 * of the two specified grace periods?
 */
static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
{
	unsigned long rnd_diff;

	if (old == new)
		return 0;
	/*
	 * Compute the number of grace periods (still shifted up), plus
	 * one if either of new and old is not an exact grace period.
	 */
	rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
		   ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
		   ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
	if (ULONG_CMP_GE(RCU_SEQ_STATE_MASK, rnd_diff))
		return 1; /* Definitely no grace period has elapsed. */
	return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
}

/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part
 * of the RCU API. They live in this shared header because they are used
 * by all RCU implementations.
 */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY	0
# define STATE_RCU_HEAD_QUEUED	1

extern const struct debug_obj_descr rcuhead_debug_descr;

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	int r1;

	r1 = debug_object_activate(head, &rcuhead_debug_descr);
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_READY,
				  STATE_RCU_HEAD_QUEUED);
	return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_QUEUED,
				  STATE_RCU_HEAD_READY);
	debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

static inline void debug_rcu_head_callback(struct rcu_head *rhp)
{
	if (unlikely(!rhp->func))
		kmem_dump_obj(rhp);
}
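
/*
 * Illustrative sketch (not part of the kernel header): how a queuing
 * path and a callback-invocation path would bracket an rcu_head with
 * the hooks above. The surrounding logic is hypothetical and heavily
 * elided.
 *
 *	// queuing side, e.g. inside a call_rcu()-like primitive:
 *	if (debug_rcu_head_queue(head))
 *		return;		// probable double call_rcu(), complain
 *	head->func = func;
 *	...
 *
 *	// invocation side, just before running the callback:
 *	debug_rcu_head_unqueue(head);
 *	debug_rcu_head_callback(head);	// dump the object if ->func is NULL
 *	head->func(head);
 */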

extern int rcu_cpu_stall_suppress_at_boot;

static inline bool rcu_stall_is_suppressed_at_boot(void)
{
	return rcu_cpu_stall_suppress_at_boot && !rcu_inkernel_boot_has_ended();
}

#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_ftrace_dump;
extern int rcu_cpu_stall_suppress;
extern int rcu_cpu_stall_timeout;
extern int rcu_exp_cpu_stall_timeout;
extern int rcu_cpu_stall_cputime;
extern bool rcu_exp_stall_task_details __read_mostly;
int rcu_jiffies_till_stall_check(void);
int rcu_exp_jiffies_till_stall_check(void);

static inline bool rcu_stall_is_suppressed(void)
{
	return rcu_stall_is_suppressed_at_boot() || rcu_cpu_stall_suppress;
}

#define rcu_ftrace_dump_stall_suppress() \
do { \
	if (!rcu_cpu_stall_suppress) \
		rcu_cpu_stall_suppress = 3; \
} while (0)

#define rcu_ftrace_dump_stall_unsuppress() \
do { \
	if (rcu_cpu_stall_suppress == 3) \
		rcu_cpu_stall_suppress = 0; \
} while (0)

#else /* #ifdef CONFIG_RCU_STALL_COMMON */

static inline bool rcu_stall_is_suppressed(void)
{
	return rcu_stall_is_suppressed_at_boot();
}
#define rcu_ftrace_dump_stall_suppress()
#define rcu_ftrace_dump_stall_unsuppress()
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x) tracepoint_string(x)

/*
 * Dump the ftrace buffer, but only one time per callsite per boot.
 */
#define rcu_ftrace_dump(oops_dump_mode) \
do { \
	static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
	\
	if (!atomic_read(&___rfd_beenhere) && \
	    !atomic_xchg(&___rfd_beenhere, 1)) { \
		tracing_off(); \
		rcu_ftrace_dump_stall_suppress(); \
		ftrace_dump(oops_dump_mode); \
		rcu_ftrace_dump_stall_unsuppress(); \
	} \
} while (0)
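
/*
 * Illustrative sketch (not part of the kernel header): a typical
 * debug-path use of rcu_ftrace_dump(). The value 3 stored by
 * rcu_ftrace_dump_stall_suppress() temporarily suppresses RCU CPU
 * stall warnings so the dump itself cannot trigger one; the matching
 * unsuppress then restores the unsuppressed state.
 *
 *	if (WARN_ON_ONCE(in_inconsistent_state))	// hypothetical condition
 *		rcu_ftrace_dump(DUMP_ALL);	// fires at most once per callsite
 */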

void rcu_early_boot_tests(void);
void rcu_test_sync_prims(void);

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#if !defined(CONFIG_TINY_RCU)

#include <linux/rcu_node_tree.h>

extern int rcu_num_lvls;
extern int num_rcu_lvl[];
extern int rcu_num_nodes;
static bool rcu_fanout_exact;
static int rcu_fanout_leaf;

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
 */
static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
{
	int i;

	for (i = 0; i < RCU_NUM_LVLS; i++)
		levelspread[i] = INT_MIN;
	if (rcu_fanout_exact) {
		levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
		for (i = rcu_num_lvls - 2; i >= 0; i--)
			levelspread[i] = RCU_FANOUT;
	} else {
		int ccur;
		int cprv;

		cprv = nr_cpu_ids;
		for (i = rcu_num_lvls - 1; i >= 0; i--) {
			ccur = levelcnt[i];
			levelspread[i] = (cprv + ccur - 1) / ccur;
			cprv = ccur;
		}
	}
}
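
/*
 * Illustrative worked example (the numbers are assumptions, not kernel
 * defaults): balancing a two-level tree for 96 possible CPUs with
 * levelcnt == {1, 6}, i.e. six leaf rcu_node structures under one root.
 * The balanced branch walks the levels bottom-up, dividing (rounding
 * up) the count below by the count at each level:
 *
 *	i == 1: levelspread[1] = (96 + 6 - 1) / 6 == 16 CPUs per leaf
 *	i == 0: levelspread[0] = (6 + 1 - 1) / 1 == 6 leaves under root
 *
 * With rcu_fanout_exact set, the leaf level would instead get
 * rcu_fanout_leaf and every other level RCU_FANOUT, balanced or not.
 */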

extern void rcu_init_geometry(void);

/* Returns a pointer to the first leaf rcu_node structure. */
#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])

/* Is this rcu_node a leaf? */
#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)

/* Is this rcu_node the last leaf? */
#define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])

/*
 * Do a full breadth-first scan of the {s,}rcu_node structures for the
 * specified state structure (for SRCU) or the only rcu_state structure
 * (for RCU).
 */
#define _rcu_for_each_node_breadth_first(sp, rnp) \
	for ((rnp) = &(sp)->node[0]; \
	     (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
#define rcu_for_each_node_breadth_first(rnp) \
	_rcu_for_each_node_breadth_first(&rcu_state, rnp)
#define srcu_for_each_node_breadth_first(ssp, rnp) \
	_rcu_for_each_node_breadth_first(ssp->srcu_sup, rnp)

/*
 * Scan the leaves of the rcu_node hierarchy for the rcu_state structure.
 * Note that if there is a singleton rcu_node tree with but one rcu_node
 * structure, this loop -will- visit the rcu_node structure. It is still
 * a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rnp) \
	for ((rnp) = rcu_first_leaf_node(); \
	     (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
	for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
	     (cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = cpumask_next((cpu), cpu_possible_mask))

/*
 * Iterate over all CPUs in a leaf RCU node's specified mask.
 */
#define rcu_find_next_bit(rnp, cpu, mask) \
	((rnp)->grplo + find_next_bit(&(mask), BITS_PER_LONG, (cpu)))
#define for_each_leaf_node_cpu_mask(rnp, cpu, mask) \
	for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
	     (cpu) = rcu_find_next_bit((rnp), 0, (mask)); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = rcu_find_next_bit((rnp), (cpu) + 1 - (rnp->grplo), (mask)))
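
/*
 * Illustrative sketch (not part of the kernel header): combining the
 * iterators above to visit every possible CPU, leaf by leaf. "rnp"
 * and "cpu" are hypothetical locals in tree-RCU internal code.
 *
 *	struct rcu_node *rnp;
 *	int cpu;
 *
 *	rcu_for_each_leaf_node(rnp) {
 *		for_each_leaf_node_possible_cpu(rnp, cpu) {
 *			// rnp->grplo <= cpu <= rnp->grphi here
 *		}
 *	}
 */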

#endif /* !defined(CONFIG_TINY_RCU) */

#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_TASKS_RCU_GENERIC)

/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and most importantly transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, one should use
 * these wrappers rather than directly calling raw_spin_{lock,unlock}* on ->lock.
 */
#define raw_spin_lock_rcu_node(p) \
do { \
	raw_spin_lock(&ACCESS_PRIVATE(p, lock)); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_rcu_node(p) \
do { \
	lockdep_assert_irqs_disabled(); \
	raw_spin_unlock(&ACCESS_PRIVATE(p, lock)); \
} while (0)

#define raw_spin_lock_irq_rcu_node(p) \
do { \
	raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irq_rcu_node(p) \
do { \
	lockdep_assert_irqs_disabled(); \
	raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock)); \
} while (0)

#define raw_spin_lock_irqsave_rcu_node(p, flags) \
do { \
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags) \
do { \
	lockdep_assert_irqs_disabled(); \
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags); \
} while (0)

#define raw_spin_trylock_rcu_node(p) \
({ \
	bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock)); \
	\
	if (___locked) \
		smp_mb__after_unlock_lock(); \
	___locked; \
})

#define raw_lockdep_assert_held_rcu_node(p) \
	lockdep_assert_held(&ACCESS_PRIVATE(p, lock))
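
/*
 * Illustrative sketch (not part of the kernel header): the intended
 * usage pattern for the wrappers above inside tree-RCU code. "rnp"
 * is a hypothetical rcu_node pointer.
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *	// fully ordered against the previous holder's critical section,
 *	// courtesy of smp_mb__after_unlock_lock()
 *	raw_lockdep_assert_held_rcu_node(rnp);
 *	...
 *	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 */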

#endif // #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_TASKS_RCU_GENERIC)

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) { return true; }
static inline bool rcu_gp_is_expedited(void) { return false; }
static inline bool rcu_async_should_hurry(void) { return false; }
static inline void rcu_expedite_gp(void) { }
static inline void rcu_unexpedite_gp(void) { }
static inline void rcu_async_hurry(void) { }
static inline void rcu_async_relax(void) { }
static inline bool rcu_cpu_online(int cpu) { return true; }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void); /* Internal RCU use. */
bool rcu_gp_is_expedited(void); /* Internal RCU use. */
bool rcu_async_should_hurry(void); /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcu_async_hurry(void);
void rcu_async_relax(void);
void rcupdate_announce_bootup_oddness(void);
bool rcu_cpu_online(int cpu);
#ifdef CONFIG_TASKS_RCU_GENERIC
void show_rcu_tasks_gp_kthreads(void);
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void show_rcu_tasks_gp_kthreads(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_TASKS_RCU
struct task_struct *get_rcu_tasks_gp_kthread(void);
#endif // #ifdef CONFIG_TASKS_RCU

#ifdef CONFIG_TASKS_RUDE_RCU
struct task_struct *get_rcu_tasks_rude_gp_kthread(void);
#endif // #ifdef CONFIG_TASKS_RUDE_RCU

#define RCU_SCHEDULER_INACTIVE	0
#define RCU_SCHEDULER_INIT	1
#define RCU_SCHEDULER_RUNNING	2

enum rcutorture_type {
	RCU_FLAVOR,
	RCU_TASKS_FLAVOR,
	RCU_TASKS_RUDE_FLAVOR,
	RCU_TASKS_TRACING_FLAVOR,
	RCU_TRIVIAL_FLAVOR,
	SRCU_FLAVOR,
	INVALID_RCU_FLAVOR
};

#if defined(CONFIG_RCU_LAZY)
unsigned long rcu_lazy_get_jiffies_till_flush(void);
void rcu_lazy_set_jiffies_till_flush(unsigned long j);
#else
static inline unsigned long rcu_lazy_get_jiffies_till_flush(void) { return 0; }
static inline void rcu_lazy_set_jiffies_till_flush(unsigned long j) { }
#endif

#if defined(CONFIG_TREE_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gp_seq);
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
void rcu_gp_set_torture_wait(int duration);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
					  int *flags, unsigned long *gp_seq)
{
	*flags = 0;
	*gp_seq = 0;
}
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif
static inline void rcu_gp_set_torture_wait(int duration) { }
#endif

#ifdef CONFIG_TINY_SRCU

static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
					   struct srcu_struct *sp, int *flags,
					   unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = sp->srcu_idx;
}

#elif defined(CONFIG_TREE_SRCU)

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gp_seq);

#endif

#ifdef CONFIG_TINY_RCU
static inline bool rcu_dynticks_zero_in_eqs(int cpu, int *vp) { return false; }
static inline unsigned long rcu_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long
srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
static inline bool rcu_check_boost_fail(unsigned long gp_state, int *cpup) { return true; }
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
static inline void rcu_fwd_progress_check(unsigned long j) { }
static inline void rcu_gp_slow_register(atomic_t *rgssp) { }
static inline void rcu_gp_slow_unregister(atomic_t *rgssp) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_dynticks_zero_in_eqs(int cpu, int *vp);
unsigned long rcu_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
bool rcu_check_boost_fail(unsigned long gp_state, int *cpup);
void show_rcu_gp_kthreads(void);
int rcu_get_gp_kthreads_prio(void);
void rcu_fwd_progress_check(unsigned long j);
void rcu_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
#ifdef CONFIG_RCU_EXP_KTHREAD
extern struct kthread_worker *rcu_exp_gp_kworker;
extern struct kthread_worker *rcu_exp_par_gp_kworker;
#else /* !CONFIG_RCU_EXP_KTHREAD */
extern struct workqueue_struct *rcu_par_gp_wq;
#endif /* CONFIG_RCU_EXP_KTHREAD */
void rcu_gp_slow_register(atomic_t *rgssp);
void rcu_gp_slow_unregister(atomic_t *rgssp);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_RCU_NOCB_CPU
void rcu_bind_current_to_nocb(void);
#else
static inline void rcu_bind_current_to_nocb(void) { }
#endif

#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RCU)
void show_rcu_tasks_classic_gp_kthread(void);
#else
static inline void show_rcu_tasks_classic_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RUDE_RCU)
void show_rcu_tasks_rude_gp_kthread(void);
#else
static inline void show_rcu_tasks_rude_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_TRACE_RCU)
void show_rcu_tasks_trace_gp_kthread(void);
#else
static inline void show_rcu_tasks_trace_gp_kthread(void) {}
#endif

#ifdef CONFIG_TINY_RCU
static inline bool rcu_cpu_beenfullyonline(int cpu) { return true; }
#else
bool rcu_cpu_beenfullyonline(int cpu);
#endif

#ifdef CONFIG_RCU_STALL_COMMON
int rcu_stall_notifier_call_chain(unsigned long val, void *v);
#else // #ifdef CONFIG_RCU_STALL_COMMON
static inline int rcu_stall_notifier_call_chain(unsigned long val, void *v) { return NOTIFY_DONE; }
#endif // #else // #ifdef CONFIG_RCU_STALL_COMMON

#endif /* __LINUX_RCU_H */