/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *	   Paul E. McKenney <paulmck@linux.ibm.com> Hierarchical algorithm
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For a detailed explanation of the Read-Copy Update mechanism, see
 * Documentation/RCU.
 */

#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H

void rcu_softirq_qs(void);
void rcu_note_context_switch(bool preempt);
int rcu_needs_cpu(void);
void rcu_cpu_stall_reset(void);
void rcu_request_urgent_qs_task(struct task_struct *t);

/*
 * Note a virtualization-based context switch.  This is simply a
 * wrapper around rcu_note_context_switch(), which allows TINY_RCU
 * to save a few bytes.  The caller must have disabled interrupts.
 */
static inline void rcu_virt_note_context_switch(void)
{
	rcu_note_context_switch(false);
}
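
/*
 * Illustrative sketch only: a hypervisor would typically report this
 * quiescent state on its guest-entry path, with interrupts already
 * disabled.  Here enter_guest() stands in for a hypothetical
 * guest-entry helper:
 *
 *	local_irq_disable();
 *	rcu_virt_note_context_switch();
 *	enter_guest(vcpu);
 */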

void synchronize_rcu_expedited(void);
void kvfree_call_rcu(struct rcu_head *head, void *ptr);
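
/*
 * kvfree_call_rcu() is normally reached through the kvfree_rcu() macro
 * in <linux/rcupdate.h> rather than being called directly.  A minimal
 * sketch, with a hypothetical struct foo embedding the needed rcu_head:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	kvfree_rcu(fp, rcu);	frees fp once a grace period has elapsed
 */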

void rcu_barrier(void);
void rcu_momentary_dyntick_idle(void);
void kfree_rcu_scheduler_running(void);
bool rcu_gp_might_be_stalled(void);
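
/*
 * A common use of rcu_barrier() is in module-exit code: once the module
 * has stopped posting new callbacks, rcu_barrier() waits until all of
 * its previously queued call_rcu() callbacks have been invoked, so that
 * none of them can run after the module text is gone.  Sketch only;
 * stop_queueing_callbacks() is a hypothetical helper:
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		stop_queueing_callbacks();
 *		rcu_barrier();
 *	}
 */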

struct rcu_gp_oldstate {
	unsigned long rgos_norm;
	unsigned long rgos_exp;
};

// Maximum number of rcu_gp_oldstate values corresponding to
// not-yet-completed RCU grace periods.
#define NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE 4

/**
 * same_state_synchronize_rcu_full - Are two old-state values identical?
 * @rgosp1: First old-state value.
 * @rgosp2: Second old-state value.
 *
 * The two old-state values must have been obtained from either
 * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
 * or get_completed_synchronize_rcu_full().  Returns @true if the two
 * values are identical and @false otherwise.  This allows structures
 * whose lifetimes are tracked by old-state values to push these values
 * to a list header, allowing those structures to be slightly smaller.
 *
 * Note that equality is judged on a bitwise basis, so that an
 * @rcu_gp_oldstate structure with an already-completed state in one field
 * will compare not-equal to a structure with an already-completed state
 * in the other field.  After all, the @rcu_gp_oldstate structure is
 * opaque, so how did such a situation come to pass in the first place?
 */
static inline bool same_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp1,
						   struct rcu_gp_oldstate *rgosp2)
{
	return rgosp1->rgos_norm == rgosp2->rgos_norm && rgosp1->rgos_exp == rgosp2->rgos_exp;
}
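
/*
 * A sketch of the list-header trick described above, with hypothetical
 * head and entry structures: when a new entry's freshly fetched
 * old-state value matches the one already stored in the list header,
 * the entry need not carry its own copy.
 *
 *	struct rcu_gp_oldstate gp;
 *
 *	get_state_synchronize_rcu_full(&gp);
 *	if (same_state_synchronize_rcu_full(&gp, &head->gp))
 *		list_add(&entry->node, &head->list);
 */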

unsigned long start_poll_synchronize_rcu_expedited(void);
void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp);
void cond_synchronize_rcu_expedited(unsigned long oldstate);
void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp);
unsigned long get_state_synchronize_rcu(void);
void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
unsigned long start_poll_synchronize_rcu(void);
void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
bool poll_state_synchronize_rcu(unsigned long oldstate);
bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
void cond_synchronize_rcu(unsigned long oldstate);
void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
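
/*
 * Typical polled grace-period usage, as a sketch (do_other_work() is
 * hypothetical).  cond_synchronize_rcu() blocks only if the grace
 * period corresponding to the cookie has not yet completed, and
 * poll_state_synchronize_rcu() provides a non-blocking check.  The
 * _full() variants behave the same, but their larger rcu_gp_oldstate
 * cookie avoids the (very unlikely) counter wrap that an unsigned
 * long cookie can suffer:
 *
 *	unsigned long cookie;
 *
 *	cookie = start_poll_synchronize_rcu();
 *	do_other_work();
 *	cond_synchronize_rcu(cookie);
 */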

#ifdef CONFIG_PROVE_RCU
void rcu_irq_exit_check_preempt(void);
#else
static inline void rcu_irq_exit_check_preempt(void) { }
#endif

struct task_struct;
void rcu_preempt_deferred_qs(struct task_struct *t);

void exit_rcu(void);

void rcu_scheduler_starting(void);
extern int rcu_scheduler_active;
void rcu_end_inkernel_boot(void);
bool rcu_inkernel_boot_has_ended(void);
bool rcu_is_watching(void);
#ifndef CONFIG_PREEMPTION
void rcu_all_qs(void);
#endif

/* RCUtree hotplug events */
int rcutree_prepare_cpu(unsigned int cpu);
int rcutree_online_cpu(unsigned int cpu);
void rcutree_report_cpu_starting(unsigned int cpu);

#ifdef CONFIG_HOTPLUG_CPU
int rcutree_dead_cpu(unsigned int cpu);
int rcutree_dying_cpu(unsigned int cpu);
int rcutree_offline_cpu(unsigned int cpu);
#else
#define rcutree_dead_cpu NULL
#define rcutree_dying_cpu NULL
#define rcutree_offline_cpu NULL
#endif

void rcutree_migrate_callbacks(int cpu);

/* Called from hotplug and also arm64 early secondary boot failure */
void rcutree_report_cpu_dead(void);

#endif /* __LINUX_RCUTREE_H */