/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * For a detailed explanation of the Read-Copy Update mechanism, see
 *	Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <asm/param.h> /* for HZ */

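/*
 * Tracking structure for polled grace-period cookies.  Tiny RCU has only
 * normal grace periods (no separate expedited state, unlike the Tree RCU
 * version of this structure), hence the single ->rgos_norm field.
 */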
struct rcu_gp_oldstate {
	unsigned long rgos_norm;
};

// Maximum number of rcu_gp_oldstate values corresponding to
// not-yet-completed RCU grace periods.
#define NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE 2

/*
 * Are the two oldstate values the same?  See the Tree RCU version for
 * docbook header.
 */
static inline bool same_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp1,
						   struct rcu_gp_oldstate *rgosp2)
{
	return rgosp1->rgos_norm == rgosp2->rgos_norm;
}

unsigned long get_state_synchronize_rcu(void);

static inline void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	rgosp->rgos_norm = get_state_synchronize_rcu();
}

unsigned long start_poll_synchronize_rcu(void);

static inline void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	rgosp->rgos_norm = start_poll_synchronize_rcu();
}

bool poll_state_synchronize_rcu(unsigned long oldstate);

static inline bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	return poll_state_synchronize_rcu(rgosp->rgos_norm);
}
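
/*
 * Usage sketch (hypothetical caller, not part of this header): the polled
 * grace-period API replaces a blocking synchronize_rcu() with a cookie
 * that can be checked later, for example:
 *
 *	unsigned long cookie;
 *
 *	cookie = start_poll_synchronize_rcu();	// Start a grace period.
 *	do_other_work();			// Hypothetical helper.
 *	if (poll_state_synchronize_rcu(cookie))	// Did it complete?
 *		kfree(old_p);			// Safe: a full GP elapsed.
 */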

static inline void cond_synchronize_rcu(unsigned long oldstate)
{
	might_sleep();
}

static inline void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	cond_synchronize_rcu(rgosp->rgos_norm);
}
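
/*
 * Usage sketch (hypothetical caller): per the API contract,
 * cond_synchronize_rcu() waits only if no full grace period has elapsed
 * since the cookie was obtained.  This Tiny version need never wait:
 * being able to sleep on the single CPU implies no readers are active.
 * Callers must nevertheless be able to sleep, hence the might_sleep().
 *
 *	unsigned long cookie = get_state_synchronize_rcu();
 *
 *	do_lengthy_work();		// Hypothetical helper.
 *	cond_synchronize_rcu(cookie);	// May block in Tree RCU.
 */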

static inline unsigned long start_poll_synchronize_rcu_expedited(void)
{
	return start_poll_synchronize_rcu();
}

static inline void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
	rgosp->rgos_norm = start_poll_synchronize_rcu_expedited();
}

static inline void cond_synchronize_rcu_expedited(unsigned long oldstate)
{
	cond_synchronize_rcu(oldstate);
}

static inline void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
	cond_synchronize_rcu_expedited(rgosp->rgos_norm);
}

extern void rcu_barrier(void);

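/*
 * With only one CPU there is nothing to expedite: an expedited grace
 * period is no cheaper than a normal one, so this simply maps to
 * synchronize_rcu().
 */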
static inline void synchronize_rcu_expedited(void)
{
	synchronize_rcu();
}

/*
 * Add one more declaration of kvfree() here.  It is not straightforward
 * to simply include <linux/mm.h>, where it is defined, because doing so
 * causes many compile errors.
 */
extern void kvfree(const void *addr);

static inline void __kvfree_call_rcu(struct rcu_head *head, void *ptr)
{
	if (head) {
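		/*
		 * Encode the offset of the rcu_head within the enclosing
		 * object in the callback slot; the Tiny RCU reclaimer
		 * recognizes such small "callback" values as offsets and
		 * kvfree()s the enclosing object instead of calling a
		 * function.
		 */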
		call_rcu(head, (rcu_callback_t) ((void *) head - ptr));
		return;
	}

	// Inline kvfree_rcu(one_arg) path: there is no rcu_head to queue,
	// so wait for a grace period and then free synchronously.
	might_sleep();
	synchronize_rcu();
	kvfree(ptr);
}

#ifdef CONFIG_KASAN_GENERIC
void kvfree_call_rcu(struct rcu_head *head, void *ptr);
#else
static inline void kvfree_call_rcu(struct rcu_head *head, void *ptr)
{
	__kvfree_call_rcu(head, ptr);
}
#endif
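
/*
 * Usage sketch (hypothetical caller): kvfree_call_rcu() is normally
 * reached via the kvfree_rcu() macro, which frees an object containing
 * an embedded rcu_head once a grace period has elapsed:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	kvfree_rcu(p, rh);	// p is a struct foo *; freed after a GP.
 */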

void rcu_qs(void);

static inline void rcu_softirq_qs(void)
{
	rcu_qs();
}

#define rcu_note_context_switch(preempt) \
	do { \
		rcu_qs(); \
		rcu_tasks_qs(current, (preempt)); \
	} while (0)

static inline int rcu_needs_cpu(void)
{
	return 0;
}

static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }

/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(void) { }
static inline void rcu_cpu_stall_reset(void) { }
static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
static inline void rcu_irq_exit_check_preempt(void) { }
static inline void exit_rcu(void) { }
static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return false;
}
static inline void rcu_preempt_deferred_qs(struct task_struct *t) { }
void rcu_scheduler_starting(void);
static inline void rcu_end_inkernel_boot(void) { }
static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
static inline bool rcu_is_watching(void) { return true; }
static inline void rcu_momentary_dyntick_idle(void) { }
static inline void kfree_rcu_scheduler_running(void) { }
static inline bool rcu_gp_might_be_stalled(void) { return false; }

/* Avoid RCU read-side critical sections leaking across. */
static inline void rcu_all_qs(void) { barrier(); }

/* RCUtree hotplug events */
#define rcutree_prepare_cpu NULL
#define rcutree_online_cpu NULL
#define rcutree_offline_cpu NULL
#define rcutree_dead_cpu NULL
#define rcutree_dying_cpu NULL
static inline void rcutree_report_cpu_starting(unsigned int cpu) { }

#endif /* __LINUX_TINY_H */