/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/cache.h>
#include <linux/kthread.h>
#include <linux/spinlock.h>
#include <linux/rtmutex.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/swait.h>
#include <linux/rcu_node_tree.h>

#include "rcu_segcblist.h"

/* Communicate arguments to a workqueue handler. */
struct rcu_exp_work {
	unsigned long rew_s;
#ifdef CONFIG_RCU_EXP_KTHREAD
	struct kthread_work rew_work;
#else
	struct work_struct rew_work;
#endif /* CONFIG_RCU_EXP_KTHREAD */
};
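
/*
 * The handler recovers its rcu_exp_work from the embedded work item, so
 * only rew_work's address need pass through the workqueue layer.  A
 * minimal sketch of that pattern (handler name hypothetical):
 *
 *	static void example_exp_handler(struct work_struct *wp)
 *	{
 *		struct rcu_exp_work *rewp =
 *			container_of(wp, struct rcu_exp_work, rew_work);
 *
 *		do_something_with(rewp->rew_s);	// Hypothetical consumer.
 *	}
 */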

/* RCU's kthread states for tracing. */
#define RCU_KTHREAD_STOPPED	0
#define RCU_KTHREAD_RUNNING	1
#define RCU_KTHREAD_WAITING	2
#define RCU_KTHREAD_OFFCPU	3
#define RCU_KTHREAD_YIELDING	4
#define RCU_KTHREAD_MAX		4

/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
	raw_spinlock_t __private lock;	/* Root rcu_node's lock protects */
					/*  some rcu_state fields as well as */
					/*  following. */
	unsigned long gp_seq;	/* Track rsp->gp_seq. */
	unsigned long gp_seq_needed; /* Track furthest future GP request. */
	unsigned long completedqs; /* All QSes done for this node. */
	unsigned long qsmask;	/* CPUs or groups that need to switch in */
				/*  order for current grace period to proceed.*/
				/*  In leaf rcu_node, each bit corresponds to */
				/*  an rcu_data structure, otherwise, each */
				/*  bit corresponds to a child rcu_node */
				/*  structure. */
	unsigned long rcu_gp_init_mask;	/* Mask of offline CPUs at GP init. */
	unsigned long qsmaskinit;
				/* Per-GP initial value for qsmask. */
				/*  Initialized from ->qsmaskinitnext at the */
				/*  beginning of each grace period. */
	unsigned long qsmaskinitnext;
				/* Online CPUs for next grace period. */
	unsigned long expmask;	/* CPUs or groups that need to check in */
				/*  to allow the current expedited GP */
				/*  to complete. */
	unsigned long expmaskinit;
				/* Per-GP initial values for expmask. */
				/*  Initialized from ->expmaskinitnext at the */
				/*  beginning of each expedited GP. */
	unsigned long expmaskinitnext;
				/* Online CPUs for next expedited GP. */
				/*  Any CPU that has ever been online will */
				/*  have its bit set. */
	unsigned long cbovldmask;
				/* CPUs experiencing callback overload. */
	unsigned long ffmask;	/* Fully functional CPUs. */
	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
				/*  Only one bit will be set in this mask. */
	int	grplo;		/* lowest-numbered CPU here. */
	int	grphi;		/* highest-numbered CPU here. */
	u8	grpnum;		/* group number for next level up. */
	u8	level;		/* root is at level 0. */
	bool	wait_blkd_tasks;/* Necessary to wait for blocked tasks to */
				/*  exit RCU read-side critical sections */
				/*  before propagating offline up the */
				/*  rcu_node tree? */
	struct rcu_node *parent;
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to the first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there cannot be any such task. */
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/*  priority boosted, or NULL if no priority */
				/*  boosting is needed for this rcu_node */
				/*  structure.  If there are no tasks */
				/*  queued on this rcu_node structure that */
				/*  are blocking the current grace period, */
				/*  there can be no such task. */
	struct rt_mutex boost_mtx;
				/* Used only for the priority-boosting */
				/*  side effect, not as a lock. */
	unsigned long boost_time;
				/* When to start boosting (jiffies). */
	struct mutex boost_kthread_mutex;
				/* Exclusion for thread spawning and affinity */
				/*  manipulation. */
	struct task_struct *boost_kthread_task;
				/* kthread that takes care of priority */
				/*  boosting for this rcu_node structure. */
	unsigned int boost_kthread_status;
				/* State of boost_kthread_task for tracing. */
	unsigned long n_boosts;	/* Number of boosts for this rcu_node structure. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct swait_queue_head nocb_gp_wq[2];
				/* Place for rcu_nocb_kthread() to wait for GP. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;

	spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
	unsigned long exp_seq_rq;
	wait_queue_head_t exp_wq[4];
	struct rcu_exp_work rew;
	bool exp_need_flush;	/* Need to flush workitem? */
	raw_spinlock_t exp_poll_lock;
				/* Lock and data for polled expedited grace periods. */
	unsigned long exp_seq_poll_rq;
	struct work_struct exp_poll_wq;
} ____cacheline_internodealigned_in_smp;
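
/*
 * Because ->lock is marked __private, it is normally acquired through
 * the raw_spin_lock_rcu_node() family of wrappers (see kernel/rcu/rcu.h),
 * which also supply an smp_mb__after_unlock_lock() for ordering.  A
 * minimal sketch of guarding a node-field update (new_seq hypothetical):
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *	WRITE_ONCE(rnp->gp_seq_needed, new_seq);
 *	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 */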

/*
 * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
 * are indexed relative to this interval rather than the global CPU ID space.
 * This generates the bit for a CPU in node-local masks.
 */
#define leaf_node_cpu_bit(rnp, cpu) (BIT((cpu) - (rnp)->grplo))
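
/*
 * For example, testing whether a CPU still owes the current grace period
 * a quiescent state might look like this (a sketch, not a real helper):
 *
 *	static bool example_cpu_owes_qs(struct rcu_node *rnp, int cpu)
 *	{
 *		return READ_ONCE(rnp->qsmask) & leaf_node_cpu_bit(rnp, cpu);
 *	}
 */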

/*
 * Union to allow "aggregate OR" operation on the need for a quiescent
 * state by the normal and expedited grace periods.
 */
union rcu_noqs {
	struct {
		u8 norm;
		u8 exp;
	} b; /* Bits. */
	u16 s; /* Set of bits, aggregate OR here. */
};
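
/*
 * The union lets the fastpath test both needs with a single load:
 * checking ->s is equivalent to OR-ing ->b.norm and ->b.exp.  A sketch:
 *
 *	if (!rdp->cpu_no_qs.s)
 *		return;		// Neither normal nor expedited QS needed.
 *	if (rdp->cpu_no_qs.b.norm)
 *		...		// Report a normal quiescent state.
 */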

/*
 * Record the snapshot of the core stats at half of the first RCU stall timeout.
 * The member gp_seq is used to ensure that all members are updated only once
 * during the sampling period.  The snapshot is taken only if this gp_seq is not
 * equal to rdp->gp_seq.
 */
struct rcu_snap_record {
	unsigned long	gp_seq;		/* Track rdp->gp_seq counter */
	u64		cputime_irq;	/* Accumulated cputime of hard irqs */
	u64		cputime_softirq;/* Accumulated cputime of soft irqs */
	u64		cputime_system; /* Accumulated cputime of kernel tasks */
	unsigned long	nr_hardirqs;	/* Accumulated number of hard irqs */
	unsigned int	nr_softirqs;	/* Accumulated number of soft irqs */
	unsigned long long nr_csw;	/* Accumulated number of task switches */
	unsigned long	jiffies;	/* Track jiffies value */
};
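
/*
 * The once-per-GP guard thus reduces to a single comparison before
 * sampling.  Sketch (helper name hypothetical):
 *
 *	static void example_snap_stats(struct rcu_data *rdp)
 *	{
 *		struct rcu_snap_record *rsrp = &rdp->snap_record;
 *
 *		if (rsrp->gp_seq == rdp->gp_seq)
 *			return;		// Already sampled during this GP.
 *		// ... record cputime, irq, and context-switch counts ...
 *		rsrp->gp_seq = rdp->gp_seq;
 *		rsrp->jiffies = jiffies;
 *	}
 */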

/* Per-CPU data for read-copy update. */
struct rcu_data {
	/* 1) quiescent-state and grace-period handling : */
	unsigned long	gp_seq;		/* Track rsp->gp_seq counter. */
	unsigned long	gp_seq_needed;	/* Track furthest future GP request. */
	union rcu_noqs	cpu_no_qs;	/* No QSes yet for this CPU. */
	bool		core_needs_qs;	/* Core waits for quiescent state. */
	bool		beenonline;	/* CPU online at least once. */
	bool		gpwrap;		/* Possible ->gp_seq wrap. */
	bool		cpu_started;	/* RCU watching this onlining CPU. */
	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy. */
	unsigned long	grpmask;	/* Mask to apply to leaf qsmask. */
	unsigned long	ticks_this_gp;	/* The number of scheduling-clock */
					/*  ticks this CPU has handled */
					/*  during and after the last grace */
					/*  period it is aware of. */
	struct irq_work defer_qs_iw;	/* Obtain later scheduler attention. */
	bool defer_qs_iw_pending;	/* Scheduler attention pending? */
	struct work_struct strict_work;	/* Schedule readers for strict GPs. */

	/* 2) batch handling */
	struct rcu_segcblist cblist;	/* Segmented callback list, with */
					/*  different callbacks waiting for */
					/*  different grace periods. */
	long		qlen_last_fqs_check;
					/* qlen at last check for QS forcing */
	unsigned long	n_cbs_invoked;	/* # callbacks invoked since boot. */
	unsigned long	n_force_qs_snap;
					/* did other CPU force QS recently? */
	long		blimit;		/* Upper limit on a processed batch */

	/* 3) dynticks interface. */
	int  dynticks_snap;		/* Per-GP tracking for dynticks. */
	bool rcu_need_heavy_qs;		/* GP old, so heavy quiescent state! */
	bool rcu_urgent_qs;		/* GP old, need light quiescent state. */
	bool rcu_forced_tick;		/* Forced tick to provide QS. */
	bool rcu_forced_tick_exp;	/*   ... provide QS to expedited GP. */

	/* 4) rcu_barrier(), OOM callbacks, and expediting. */
	unsigned long barrier_seq_snap;	/* Snap of rcu_state.barrier_sequence. */
	struct rcu_head barrier_head;
	int exp_dynticks_snap;		/* Double-check need for IPI. */

	/* 5) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct swait_queue_head nocb_cb_wq; /* For nocb kthreads to sleep on. */
	struct swait_queue_head nocb_state_wq; /* For offloading state changes */
	struct task_struct *nocb_gp_kthread;
	raw_spinlock_t nocb_lock;	/* Guard following pair of fields. */
	atomic_t nocb_lock_contended;	/* Contention experienced. */
	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */
	struct timer_list nocb_timer;	/* Enforce finite deferral. */
	unsigned long nocb_gp_adv_time;	/* Last call_rcu() CB adv (jiffies). */
	struct mutex nocb_gp_kthread_mutex; /* Exclusion for nocb gp kthread */
					    /*  spawning */

	/* The following fields are used by call_rcu, hence own cacheline. */
	raw_spinlock_t nocb_bypass_lock ____cacheline_internodealigned_in_smp;
	struct rcu_cblist nocb_bypass;	/* Lock-contention-bypass CB list. */
	unsigned long nocb_bypass_first; /* Time (jiffies) of first enqueue. */
	unsigned long nocb_nobypass_last; /* Last ->cblist enqueue (jiffies). */
	int nocb_nobypass_count;	/* # ->cblist enqueues at ^^^ time. */

	/* The following fields are used by GP kthread, hence own cacheline. */
	raw_spinlock_t nocb_gp_lock ____cacheline_internodealigned_in_smp;
	u8 nocb_gp_sleep;		/* Is the nocb GP thread asleep? */
	u8 nocb_gp_bypass;		/* Found a bypass on last scan? */
	u8 nocb_gp_gp;			/* GP to wait for on last scan? */
	unsigned long nocb_gp_seq;	/*  If so, ->gp_seq to wait for. */
	unsigned long nocb_gp_loops;	/* # passes through wait code. */
	struct swait_queue_head nocb_gp_wq; /* For nocb kthreads to sleep on. */
	bool nocb_cb_sleep;		/* Is the nocb CB thread asleep? */
	struct task_struct *nocb_cb_kthread;
	struct list_head nocb_head_rdp; /*
					 * Head of rcu_data list in wakeup chain,
					 * if rdp_gp.
					 */
	struct list_head nocb_entry_rdp; /* rcu_data node in wakeup chain. */
	struct rcu_data *nocb_toggling_rdp; /* rdp queued for (de-)offloading */

	/* The following fields are used by CB kthread, hence new cacheline. */
	struct rcu_data *nocb_gp_rdp ____cacheline_internodealigned_in_smp;
					/* GP rdp takes GP-end wakeups. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

	/* 6) RCU priority boosting. */
	struct task_struct *rcu_cpu_kthread_task;
					/* rcuc per-CPU kthread or NULL. */
	unsigned int rcu_cpu_kthread_status;
	char rcu_cpu_has_work;
	unsigned long rcuc_activity;

	/* 7) Diagnostic data, including RCU CPU stall warnings. */
	unsigned int softirq_snap;	/* Snapshot of softirq activity. */
	/* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
	struct irq_work rcu_iw;		/* Check for non-irq activity. */
	bool rcu_iw_pending;		/* Is ->rcu_iw pending? */
	unsigned long rcu_iw_gp_seq;	/* ->gp_seq associated with ->rcu_iw. */
	unsigned long rcu_ofl_gp_seq;	/* ->gp_seq at last offline. */
	short rcu_ofl_gp_flags;		/* ->gp_flags at last offline. */
	unsigned long rcu_onl_gp_seq;	/* ->gp_seq at last online. */
	short rcu_onl_gp_flags;		/* ->gp_flags at last online. */
	unsigned long last_fqs_resched;	/* Time of last rcu_resched(). */
	unsigned long last_sched_clock;	/* Jiffies of last rcu_sched_clock_irq(). */
	struct rcu_snap_record snap_record; /* Snapshot of core stats at half of */
					    /*  the first RCU stall timeout */

	long lazy_len;			/* Length of buffered lazy callbacks. */
	int cpu;
};
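
/*
 * The matching per-CPU variable lives in tree.c, so code there typically
 * reaches its own rcu_data with this_cpu_ptr() and a remote CPU's with
 * per_cpu_ptr().  A sketch of both accesses:
 *
 *	struct rcu_data *my_rdp = this_cpu_ptr(&rcu_data);
 *	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 *	struct rcu_node *rnp = rdp->mynode;	// That CPU's leaf node.
 */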

/* Values for nocb_defer_wakeup field in struct rcu_data. */
#define RCU_NOCB_WAKE_NOT	0
#define RCU_NOCB_WAKE_BYPASS	1
#define RCU_NOCB_WAKE_LAZY	2
#define RCU_NOCB_WAKE		3
#define RCU_NOCB_WAKE_FORCE	4
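
/*
 * These values are ordered by increasing urgency, so a deferred-wakeup
 * check can use a simple threshold comparison.  Sketch:
 *
 *	if (READ_ONCE(rdp->nocb_defer_wakeup) >= RCU_NOCB_WAKE)
 *		...	// An immediate (non-lazy) wakeup is pending.
 */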

#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
					/* For jiffies_till_first_fqs and */
					/*  jiffies_till_next_fqs. */
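
/*
 * Each true comparison contributes 1, so HZ=100 gives 1 jiffy, HZ=300
 * gives 2, and HZ=1000 gives 3.  Scaling the jiffy count with the tick
 * rate keeps the wall-clock delay between bouts of quiescent-state
 * forcing from shrinking in proportion to HZ.
 */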

#define RCU_JIFFIES_FQS_DIV	256	/* Very large systems need more */
					/*  delay between bouts of */
					/*  quiescent-state forcing. */

#define RCU_STALL_RAT_DELAY	2	/* Allow other CPUs time to take */
					/*  at least one scheduling clock */
					/*  irq before ratting on them. */

#define rcu_wait(cond)							\
do {									\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
		schedule();						\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)
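
/*
 * The set_current_state() before each test of cond closes the race in
 * which the wakeup arrives between testing cond and sleeping.  Typical
 * use pairs it with wake_up_process() on the waker side.  Sketch, with
 * my_flag and waiter_task hypothetical:
 *
 *	// Waiter (e.g., a per-CPU kthread):
 *	rcu_wait(READ_ONCE(my_flag));
 *
 *	// Waker:
 *	WRITE_ONCE(my_flag, 1);
 *	wake_up_process(waiter_task);
 */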

/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
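/*
 * For example, assuming both CONFIG_RCU_FANOUT and CONFIG_RCU_FANOUT_LEAF
 * are 64, a 4096-CPU system gets two levels: ->node[0] is the root and
 * ->node[1] through ->node[64] are its leaves, each covering 64 CPUs,
 * with ->level[0] pointing at ->node[0] and ->level[1] at ->node[1].
 * Walking from a leaf toward the root is then just a matter of following
 * ->parent pointers until one is NULL.
 */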
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[RCU_NUM_LVLS + 1];
						/* Hierarchy levels (+1 to */
						/*  shut bogus gcc warning) */
	int ncpus;				/* # CPUs seen so far. */
	int n_online_cpus;			/* # CPUs online for RCU. */

	/* The following fields are guarded by the root rcu_node's lock. */

	unsigned long gp_seq ____cacheline_internodealigned_in_smp;
						/* Grace-period sequence #. */
	unsigned long gp_max;			/* Maximum GP duration in */
						/*  jiffies. */
	struct task_struct *gp_kthread;		/* Task for grace periods. */
	struct swait_queue_head gp_wq;		/* Where GP task waits. */
	short gp_flags;				/* Commands for GP task. */
	short gp_state;				/* GP kthread sleep state. */
	unsigned long gp_wake_time;		/* Last GP kthread wake. */
	unsigned long gp_wake_seq;		/* ->gp_seq at ^^^. */
	unsigned long gp_seq_polled;		/* GP seq for polled API. */
	unsigned long gp_seq_polled_snap;	/* ->gp_seq_polled at normal GP start. */
	unsigned long gp_seq_polled_exp_snap;	/* ->gp_seq_polled at expedited GP start. */

	/* End of fields guarded by root rcu_node's lock. */

	struct mutex barrier_mutex;		/* Guards barrier fields. */
	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
	struct completion barrier_completion;	/* Wake at barrier end. */
	unsigned long barrier_sequence;		/* ++ at start and end of */
						/*  rcu_barrier(). */
	/* End of fields guarded by barrier_mutex. */

	raw_spinlock_t barrier_lock;		/* Protects ->barrier_seq_snap. */

	struct mutex exp_mutex;			/* Serialize expedited GP. */
	struct mutex exp_wake_mutex;		/* Serialize wakeup. */
	unsigned long expedited_sequence;	/* Take a ticket. */
	atomic_t expedited_need_qs;		/* # CPUs left to check in. */
	struct swait_queue_head expedited_wq;	/* Wait for check-ins. */
	int ncpus_snap;				/* # CPUs seen last time. */
	u8 cbovld;				/* Callback overload now? */
	u8 cbovldnext;				/* ^        ^  next time? */

	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/*  force_quiescent_state(). */
	unsigned long jiffies_kick_kthreads;	/* Time at which to kick */
						/*  kthreads, if configured. */
	unsigned long n_force_qs;		/* Number of calls to */
						/*  force_quiescent_state(). */
	unsigned long gp_start;			/* Time at which GP started, */
						/*  in jiffies. */
	unsigned long gp_end;			/* Time last GP ended, again */
						/*  in jiffies. */
	unsigned long gp_activity;		/* Time of last GP kthread */
						/*  activity in jiffies. */
	unsigned long gp_req_activity;		/* Time of last GP request */
						/*  in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/*  for CPU stalls. */
	int nr_fqs_jiffies_stall;		/* Number of fqs loops after
						 * which read jiffies and set
						 * jiffies_stall. Stall
						 * warnings disabled if !0. */
	unsigned long jiffies_resched;		/* Time at which to resched */
						/*  a reluctant CPU. */
	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
						/*  GP start. */
	const char *name;			/* Name of structure. */
	char abbr;				/* Abbreviated name. */

	arch_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
						/* Synchronize offline with */
						/*  GP pre-initialization. */
	int nocb_is_setup;			/* nocb is set up at boot */
};

/* Values for rcu_state structure's gp_flags field. */
#define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
#define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */
#define RCU_GP_FLAG_OVLD 0x4	/* Experiencing callback overload. */
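
/*
 * The flags are independent bits, so requests to the GP kthread are
 * OR-ed in under the root rcu_node's lock.  Sketch:
 *
 *	WRITE_ONCE(rcu_state.gp_flags,
 *		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
 *	swake_up_one(&rcu_state.gp_wq);	// Nudge the GP kthread.
 */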

/* Values for rcu_state structure's gp_state field. */
#define RCU_GP_IDLE	 0	/* Initial state and no GP in progress. */
#define RCU_GP_WAIT_GPS  1	/* Wait for grace-period start. */
#define RCU_GP_DONE_GPS  2	/* Wait done for grace-period start. */
#define RCU_GP_ONOFF     3	/* Grace-period initialization hotplug. */
#define RCU_GP_INIT      4	/* Grace-period initialization. */
#define RCU_GP_WAIT_FQS  5	/* Wait for force-quiescent-state time. */
#define RCU_GP_DOING_FQS 6	/* Wait done for force-quiescent-state time. */
#define RCU_GP_CLEANUP   7	/* Grace-period cleanup started. */
#define RCU_GP_CLEANED   8	/* Grace-period cleanup complete. */

/*
 * In order to export the rcu_state name to the tracing tools, it
 * needs to be added to the __tracepoint_string section.
 * This requires defining a separate variable tp_<sname>_varname
 * that points to the string being used, which allows the tracing
 * userspace tools to decipher the string address back to the
 * matching string.
 */
#ifdef CONFIG_PREEMPT_RCU
#define RCU_ABBR 'p'
#define RCU_NAME_RAW "rcu_preempt"
#else /* #ifdef CONFIG_PREEMPT_RCU */
#define RCU_ABBR 's'
#define RCU_NAME_RAW "rcu_sched"
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
#ifndef CONFIG_TRACING
#define RCU_NAME RCU_NAME_RAW
#else /* #ifdef CONFIG_TRACING */
static char rcu_name[] = RCU_NAME_RAW;
static const char *tp_rcu_varname __used __tracepoint_string = rcu_name;
#define RCU_NAME rcu_name
#endif /* #else #ifdef CONFIG_TRACING */

/* Forward declarations for tree_plugin.h */
static void rcu_bootup_announce(void);
static void rcu_qs(void);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
static void rcu_flavor_sched_clock_irq(int user);
static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static bool rcu_is_callbacks_kthread(struct rcu_data *rdp);
static void rcu_cpu_kthread_setup(unsigned int cpu);
static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp);
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
static void rcu_init_one_nocb(struct rcu_node *rnp);
static bool wake_nocb_gp(struct rcu_data *rdp, bool force);
static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				  unsigned long j, bool lazy);
static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				bool *was_alldone, unsigned long flags,
				bool lazy);
static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
				 unsigned long flags);
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level);
static bool do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_cpu_nocb_kthread(int cpu);
static void show_rcu_nocb_state(struct rcu_data *rdp);
static void rcu_nocb_lock(struct rcu_data *rdp);
static void rcu_nocb_unlock(struct rcu_data *rdp);
static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
				       unsigned long flags);
static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp);
#ifdef CONFIG_RCU_NOCB_CPU
static void __init rcu_organize_nocb_kthreads(void);

/*
 * Disable IRQs before checking offloaded state so that local
 * locking is safe against concurrent de-offloading.
 */
#define rcu_nocb_lock_irqsave(rdp, flags)			\
do {								\
	local_irq_save(flags);					\
	if (rcu_segcblist_is_offloaded(&(rdp)->cblist))		\
		raw_spin_lock(&(rdp)->nocb_lock);		\
} while (0)
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
#define rcu_nocb_lock_irqsave(rdp, flags) local_irq_save(flags)
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
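
/*
 * Callers pair this with rcu_nocb_unlock_irqrestore() (declared above),
 * which mirrors the offloaded-state check.  Typical usage sketch:
 *
 *	unsigned long flags;
 *
 *	rcu_nocb_lock_irqsave(rdp, flags);
 *	// ... operate on rdp->cblist ...
 *	rcu_nocb_unlock_irqrestore(rdp, flags);
 */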

static void rcu_bind_gp_kthread(void);
static bool rcu_nohz_full_cpu(void);

/* Forward declarations for tree_stall.h */
static void record_gp_stall_check_time(void);
static void rcu_iw_handler(struct irq_work *iwp);
static void check_cpu_stall(struct rcu_data *rdp);
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay);

/* Forward declarations for tree_exp.h. */
static void sync_rcu_do_polled_gp(struct work_struct *wp);