/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CONTEXT_TRACKING_H
#define _LINUX_CONTEXT_TRACKING_H

#include <linux/sched.h>
#include <linux/vtime.h>
#include <linux/context_tracking_state.h>
#include <linux/instrumentation.h>

#include <asm/ptrace.h>


#ifdef CONFIG_CONTEXT_TRACKING_USER
extern void ct_cpu_track_user(int cpu);

/* Called with interrupts disabled. */
extern void __ct_user_enter(enum ctx_state state);
extern void __ct_user_exit(enum ctx_state state);

extern void ct_user_enter(enum ctx_state state);
extern void ct_user_exit(enum ctx_state state);

extern void user_enter_callable(void);
extern void user_exit_callable(void);

static inline void user_enter(void)
{
	if (context_tracking_enabled())
		ct_user_enter(CONTEXT_USER);
}

static inline void user_exit(void)
{
	if (context_tracking_enabled())
		ct_user_exit(CONTEXT_USER);
}
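
/*
 * Illustrative sketch (not part of this header): user_enter()/user_exit()
 * bracket the points where a task crosses the kernel/user boundary, e.g.
 * from an arch's syscall slow path. The helper names below are hypothetical:
 *
 *	void hypothetical_syscall_slowpath(struct pt_regs *regs)
 *	{
 *		user_exit();			// entered the kernel from userspace
 *		hypothetical_do_syscall(regs);
 *		user_enter();			// about to resume userspace
 *	}
 */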

/* Called with interrupts disabled. */
static __always_inline void user_enter_irqoff(void)
{
	if (context_tracking_enabled())
		__ct_user_enter(CONTEXT_USER);
}

static __always_inline void user_exit_irqoff(void)
{
	if (context_tracking_enabled())
		__ct_user_exit(CONTEXT_USER);
}
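
/*
 * Illustrative sketch: the *_irqoff variants are for entry code that already
 * runs with interrupts disabled and wants to skip the IRQ save/restore done
 * by ct_user_enter()/ct_user_exit(). Roughly, on a hypothetical
 * return-to-user path:
 *
 *	local_irq_disable();
 *	...
 *	user_enter_irqoff();
 *	// arch code returns to userspace with IRQs still disabled
 */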

static inline enum ctx_state exception_enter(void)
{
	enum ctx_state prev_ctx;

	if (IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) ||
	    !context_tracking_enabled())
		return 0;

	prev_ctx = __ct_state();
	if (prev_ctx != CONTEXT_KERNEL)
		ct_user_exit(prev_ctx);

	return prev_ctx;
}

static inline void exception_exit(enum ctx_state prev_ctx)
{
	if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) &&
	    context_tracking_enabled()) {
		if (prev_ctx != CONTEXT_KERNEL)
			ct_user_enter(prev_ctx);
	}
}
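
/*
 * Illustrative sketch: on architectures without
 * CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK, exception handlers pair these
 * so the state saved on entry is restored on exit. The handler names below
 * are hypothetical:
 *
 *	void hypothetical_exception_handler(struct pt_regs *regs)
 *	{
 *		enum ctx_state prev_ctx = exception_enter();
 *
 *		hypothetical_handle_it(regs);
 *		exception_exit(prev_ctx);
 *	}
 */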

static __always_inline bool context_tracking_guest_enter(void)
{
	if (context_tracking_enabled())
		__ct_user_enter(CONTEXT_GUEST);

	return context_tracking_enabled_this_cpu();
}

static __always_inline void context_tracking_guest_exit(void)
{
	if (context_tracking_enabled())
		__ct_user_exit(CONTEXT_GUEST);
}
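
/*
 * Illustrative sketch: a hypervisor's vcpu run loop is expected to bracket
 * the actual guest entry with these helpers, roughly (simplified and
 * hypothetical; the real usage lives in the KVM code):
 *
 *	context_tracking_guest_enter();
 *	hypothetical_run_guest(vcpu);
 *	context_tracking_guest_exit();
 *
 * The enter side returns whether this CPU is actively tracked, which
 * callers can use for vtime accounting decisions.
 */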

#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))
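
/*
 * Typical (illustrative) use: assert the context the caller believes it is
 * in; the check is skipped when context tracking is disabled, e.g.:
 *
 *	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
 */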

#else
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline void user_enter_irqoff(void) { }
static inline void user_exit_irqoff(void) { }
static inline int exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline int ct_state(void) { return -1; }
static inline int __ct_state(void) { return -1; }
static __always_inline bool context_tracking_guest_enter(void) { return false; }
static __always_inline void context_tracking_guest_exit(void) { }
#define CT_WARN_ON(cond) do { } while (0)
#endif /* !CONFIG_CONTEXT_TRACKING_USER */

#ifdef CONFIG_CONTEXT_TRACKING_USER_FORCE
extern void context_tracking_init(void);
#else
static inline void context_tracking_init(void) { }
#endif /* CONFIG_CONTEXT_TRACKING_USER_FORCE */

#ifdef CONFIG_CONTEXT_TRACKING_IDLE
extern void ct_idle_enter(void);
extern void ct_idle_exit(void);

/*
 * Is the current CPU in an extended quiescent state?
 *
 * No ordering, as we are sampling CPU-local information.
 */
static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
{
	return !(raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX);
}

/*
 * Increment the current CPU's context_tracking structure's ->state field
 * with ordering. Return the new value.
 */
static __always_inline unsigned long ct_state_inc(int incby)
{
	return raw_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
}

static __always_inline bool warn_rcu_enter(void)
{
	bool ret = false;

	/*
	 * Horrible hack to shut up recursive "RCU isn't watching" failures,
	 * since much of the actual reporting also relies on RCU.
	 */
	preempt_disable_notrace();
	if (rcu_dynticks_curr_cpu_in_eqs()) {
		ret = true;
		ct_state_inc(RCU_DYNTICKS_IDX);
	}

	return ret;
}

static __always_inline void warn_rcu_exit(bool rcu)
{
	if (rcu)
		ct_state_inc(RCU_DYNTICKS_IDX);
	preempt_enable_notrace();
}
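
/*
 * Illustrative sketch: warn_rcu_enter()/warn_rcu_exit() bracket warning or
 * reporting code that might run while RCU is not watching, e.g.:
 *
 *	bool rcu = warn_rcu_enter();	// make RCU watch if it was not
 *	hypothetical_print_report();	// reporting path that relies on RCU
 *	warn_rcu_exit(rcu);		// restore the previous state
 */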

#else
static inline void ct_idle_enter(void) { }
static inline void ct_idle_exit(void) { }

static __always_inline bool warn_rcu_enter(void) { return false; }
static __always_inline void warn_rcu_exit(bool rcu) { }
#endif /* !CONFIG_CONTEXT_TRACKING_IDLE */

#endif /* _LINUX_CONTEXT_TRACKING_H */