1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #undef TRACE_SYSTEM |
3 | #define TRACE_SYSTEM irq_vectors |
4 | |
5 | #if !defined(_TRACE_IRQ_VECTORS_H) || defined(TRACE_HEADER_MULTI_READ) |
6 | #define _TRACE_IRQ_VECTORS_H |
7 | |
8 | #include <linux/tracepoint.h> |
9 | #include <asm/trace/common.h> |
10 | |
11 | #ifdef CONFIG_X86_LOCAL_APIC |
12 | |
13 | DECLARE_EVENT_CLASS(x86_irq_vector, |
14 | |
15 | TP_PROTO(int vector), |
16 | |
17 | TP_ARGS(vector), |
18 | |
19 | TP_STRUCT__entry( |
20 | __field( int, vector ) |
21 | ), |
22 | |
23 | TP_fast_assign( |
24 | __entry->vector = vector; |
25 | ), |
26 | |
27 | TP_printk("vector=%d" , __entry->vector) ); |
28 | |
/*
 * DEFINE_IRQ_VECTOR_EVENT - define a <name>_entry / <name>_exit tracepoint
 * pair on the x86_irq_vector class, each taking just the vector number.
 * The NULL, NULL arguments are the register/unregister callbacks of
 * DEFINE_EVENT_FN (unused here).
 */
#define DEFINE_IRQ_VECTOR_EVENT(name)		 \
DEFINE_EVENT_FN(x86_irq_vector, name##_entry,	\
	TP_PROTO(int vector),			\
	TP_ARGS(vector), NULL, NULL);		\
DEFINE_EVENT_FN(x86_irq_vector, name##_exit,	\
	TP_PROTO(int vector),			\
	TP_ARGS(vector), NULL, NULL);
36 | |
/*
 * local_timer - called when entering/exiting a local timer interrupt
 * vector handler
 */
DEFINE_IRQ_VECTOR_EVENT(local_timer);

/*
 * spurious_apic - called when entering/exiting a spurious apic vector handler
 */
DEFINE_IRQ_VECTOR_EVENT(spurious_apic);

/*
 * error_apic - called when entering/exiting an error apic vector handler
 */
DEFINE_IRQ_VECTOR_EVENT(error_apic);

/*
 * x86_platform_ipi - called when entering/exiting an x86 platform ipi
 * interrupt vector handler
 */
DEFINE_IRQ_VECTOR_EVENT(x86_platform_ipi);

#ifdef CONFIG_IRQ_WORK
/*
 * irq_work - called when entering/exiting an irq work interrupt
 * vector handler
 */
DEFINE_IRQ_VECTOR_EVENT(irq_work);

/*
 * We must dis-allow sampling irq_work_exit() because perf event sampling
 * itself can cause irq_work, which would lead to an infinite loop;
 *
 * 1) irq_work_exit happens
 * 2) generates perf sample
 * 3) generates irq_work
 * 4) goto 1
 */
TRACE_EVENT_PERF_PERM(irq_work_exit, is_sampling_event(p_event) ? -EPERM : 0);
#endif
77 | |
/*
 * The ifdef is required because that tracepoint macro hell emits tracepoint
 * code in files which include this header even if the tracepoint is not
 * enabled. Brilliant stuff that.
 */
#ifdef CONFIG_SMP
/*
 * reschedule - called when entering/exiting a reschedule vector handler
 */
DEFINE_IRQ_VECTOR_EVENT(reschedule);

/*
 * call_function - called when entering/exiting a call function interrupt
 * vector handler
 */
DEFINE_IRQ_VECTOR_EVENT(call_function);

/*
 * call_function_single - called when entering/exiting a call function
 * single interrupt vector handler
 */
DEFINE_IRQ_VECTOR_EVENT(call_function_single);
#endif

#ifdef CONFIG_X86_MCE_THRESHOLD
/*
 * threshold_apic - called when entering/exiting a threshold apic interrupt
 * vector handler
 */
DEFINE_IRQ_VECTOR_EVENT(threshold_apic);
#endif

#ifdef CONFIG_X86_MCE_AMD
/*
 * deferred_error_apic - called when entering/exiting a deferred error apic
 * interrupt vector handler
 */
DEFINE_IRQ_VECTOR_EVENT(deferred_error_apic);
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
/*
 * thermal_apic - called when entering/exiting a thermal apic interrupt
 * vector handler
 */
DEFINE_IRQ_VECTOR_EVENT(thermal_apic);
#endif
125 | |
126 | TRACE_EVENT(vector_config, |
127 | |
128 | TP_PROTO(unsigned int irq, unsigned int vector, |
129 | unsigned int cpu, unsigned int apicdest), |
130 | |
131 | TP_ARGS(irq, vector, cpu, apicdest), |
132 | |
133 | TP_STRUCT__entry( |
134 | __field( unsigned int, irq ) |
135 | __field( unsigned int, vector ) |
136 | __field( unsigned int, cpu ) |
137 | __field( unsigned int, apicdest ) |
138 | ), |
139 | |
140 | TP_fast_assign( |
141 | __entry->irq = irq; |
142 | __entry->vector = vector; |
143 | __entry->cpu = cpu; |
144 | __entry->apicdest = apicdest; |
145 | ), |
146 | |
147 | TP_printk("irq=%u vector=%u cpu=%u apicdest=0x%08x" , |
148 | __entry->irq, __entry->vector, __entry->cpu, |
149 | __entry->apicdest) |
150 | ); |
151 | |
152 | DECLARE_EVENT_CLASS(vector_mod, |
153 | |
154 | TP_PROTO(unsigned int irq, unsigned int vector, |
155 | unsigned int cpu, unsigned int prev_vector, |
156 | unsigned int prev_cpu), |
157 | |
158 | TP_ARGS(irq, vector, cpu, prev_vector, prev_cpu), |
159 | |
160 | TP_STRUCT__entry( |
161 | __field( unsigned int, irq ) |
162 | __field( unsigned int, vector ) |
163 | __field( unsigned int, cpu ) |
164 | __field( unsigned int, prev_vector ) |
165 | __field( unsigned int, prev_cpu ) |
166 | ), |
167 | |
168 | TP_fast_assign( |
169 | __entry->irq = irq; |
170 | __entry->vector = vector; |
171 | __entry->cpu = cpu; |
172 | __entry->prev_vector = prev_vector; |
173 | __entry->prev_cpu = prev_cpu; |
174 | |
175 | ), |
176 | |
177 | TP_printk("irq=%u vector=%u cpu=%u prev_vector=%u prev_cpu=%u" , |
178 | __entry->irq, __entry->vector, __entry->cpu, |
179 | __entry->prev_vector, __entry->prev_cpu) |
180 | ); |
181 | |
/*
 * DEFINE_IRQ_VECTOR_MOD_EVENT - define a tracepoint on the vector_mod
 * class; used below for vector_update and vector_clear.
 */
#define DEFINE_IRQ_VECTOR_MOD_EVENT(name)				\
DEFINE_EVENT_FN(vector_mod, name,					\
	TP_PROTO(unsigned int irq, unsigned int vector,			\
		 unsigned int cpu, unsigned int prev_vector,		\
		 unsigned int prev_cpu),				\
	TP_ARGS(irq, vector, cpu, prev_vector, prev_cpu), NULL, NULL);	\

DEFINE_IRQ_VECTOR_MOD_EVENT(vector_update);
DEFINE_IRQ_VECTOR_MOD_EVENT(vector_clear);
191 | |
192 | DECLARE_EVENT_CLASS(vector_reserve, |
193 | |
194 | TP_PROTO(unsigned int irq, int ret), |
195 | |
196 | TP_ARGS(irq, ret), |
197 | |
198 | TP_STRUCT__entry( |
199 | __field( unsigned int, irq ) |
200 | __field( int, ret ) |
201 | ), |
202 | |
203 | TP_fast_assign( |
204 | __entry->irq = irq; |
205 | __entry->ret = ret; |
206 | ), |
207 | |
208 | TP_printk("irq=%u ret=%d" , __entry->irq, __entry->ret) |
209 | ); |
210 | |
/*
 * DEFINE_IRQ_VECTOR_RESERVE_EVENT - define a tracepoint on the
 * vector_reserve class; used below for the managed and regular
 * reservation paths.
 */
#define DEFINE_IRQ_VECTOR_RESERVE_EVENT(name)	\
DEFINE_EVENT_FN(vector_reserve, name,	\
	TP_PROTO(unsigned int irq, int ret),	\
	TP_ARGS(irq, ret), NULL, NULL);		\

DEFINE_IRQ_VECTOR_RESERVE_EVENT(vector_reserve_managed);
DEFINE_IRQ_VECTOR_RESERVE_EVENT(vector_reserve);
218 | |
219 | TRACE_EVENT(vector_alloc, |
220 | |
221 | TP_PROTO(unsigned int irq, unsigned int vector, bool reserved, |
222 | int ret), |
223 | |
224 | TP_ARGS(irq, vector, reserved, ret), |
225 | |
226 | TP_STRUCT__entry( |
227 | __field( unsigned int, irq ) |
228 | __field( unsigned int, vector ) |
229 | __field( bool, reserved ) |
230 | __field( int, ret ) |
231 | ), |
232 | |
233 | TP_fast_assign( |
234 | __entry->irq = irq; |
235 | __entry->vector = ret < 0 ? 0 : vector; |
236 | __entry->reserved = reserved; |
237 | __entry->ret = ret > 0 ? 0 : ret; |
238 | ), |
239 | |
240 | TP_printk("irq=%u vector=%u reserved=%d ret=%d" , |
241 | __entry->irq, __entry->vector, |
242 | __entry->reserved, __entry->ret) |
243 | ); |
244 | |
245 | TRACE_EVENT(vector_alloc_managed, |
246 | |
247 | TP_PROTO(unsigned int irq, unsigned int vector, |
248 | int ret), |
249 | |
250 | TP_ARGS(irq, vector, ret), |
251 | |
252 | TP_STRUCT__entry( |
253 | __field( unsigned int, irq ) |
254 | __field( unsigned int, vector ) |
255 | __field( int, ret ) |
256 | ), |
257 | |
258 | TP_fast_assign( |
259 | __entry->irq = irq; |
260 | __entry->vector = ret < 0 ? 0 : vector; |
261 | __entry->ret = ret > 0 ? 0 : ret; |
262 | ), |
263 | |
264 | TP_printk("irq=%u vector=%u ret=%d" , |
265 | __entry->irq, __entry->vector, __entry->ret) |
266 | ); |
267 | |
268 | DECLARE_EVENT_CLASS(vector_activate, |
269 | |
270 | TP_PROTO(unsigned int irq, bool is_managed, bool can_reserve, |
271 | bool reserve), |
272 | |
273 | TP_ARGS(irq, is_managed, can_reserve, reserve), |
274 | |
275 | TP_STRUCT__entry( |
276 | __field( unsigned int, irq ) |
277 | __field( bool, is_managed ) |
278 | __field( bool, can_reserve ) |
279 | __field( bool, reserve ) |
280 | ), |
281 | |
282 | TP_fast_assign( |
283 | __entry->irq = irq; |
284 | __entry->is_managed = is_managed; |
285 | __entry->can_reserve = can_reserve; |
286 | __entry->reserve = reserve; |
287 | ), |
288 | |
289 | TP_printk("irq=%u is_managed=%d can_reserve=%d reserve=%d" , |
290 | __entry->irq, __entry->is_managed, __entry->can_reserve, |
291 | __entry->reserve) |
292 | ); |
293 | |
/*
 * DEFINE_IRQ_VECTOR_ACTIVATE_EVENT - define a tracepoint on the
 * vector_activate class; used below for activation and deactivation.
 */
#define DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(name)	\
DEFINE_EVENT_FN(vector_activate, name,	\
	TP_PROTO(unsigned int irq, bool is_managed,	\
		 bool can_reserve, bool reserve),	\
	TP_ARGS(irq, is_managed, can_reserve, reserve), NULL, NULL);	\

DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(vector_activate);
DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(vector_deactivate);
302 | |
303 | TRACE_EVENT(vector_teardown, |
304 | |
305 | TP_PROTO(unsigned int irq, bool is_managed, bool has_reserved), |
306 | |
307 | TP_ARGS(irq, is_managed, has_reserved), |
308 | |
309 | TP_STRUCT__entry( |
310 | __field( unsigned int, irq ) |
311 | __field( bool, is_managed ) |
312 | __field( bool, has_reserved ) |
313 | ), |
314 | |
315 | TP_fast_assign( |
316 | __entry->irq = irq; |
317 | __entry->is_managed = is_managed; |
318 | __entry->has_reserved = has_reserved; |
319 | ), |
320 | |
321 | TP_printk("irq=%u is_managed=%d has_reserved=%d" , |
322 | __entry->irq, __entry->is_managed, __entry->has_reserved) |
323 | ); |
324 | |
325 | TRACE_EVENT(vector_setup, |
326 | |
327 | TP_PROTO(unsigned int irq, bool is_legacy, int ret), |
328 | |
329 | TP_ARGS(irq, is_legacy, ret), |
330 | |
331 | TP_STRUCT__entry( |
332 | __field( unsigned int, irq ) |
333 | __field( bool, is_legacy ) |
334 | __field( int, ret ) |
335 | ), |
336 | |
337 | TP_fast_assign( |
338 | __entry->irq = irq; |
339 | __entry->is_legacy = is_legacy; |
340 | __entry->ret = ret; |
341 | ), |
342 | |
343 | TP_printk("irq=%u is_legacy=%d ret=%d" , |
344 | __entry->irq, __entry->is_legacy, __entry->ret) |
345 | ); |
346 | |
347 | TRACE_EVENT(vector_free_moved, |
348 | |
349 | TP_PROTO(unsigned int irq, unsigned int cpu, unsigned int vector, |
350 | bool is_managed), |
351 | |
352 | TP_ARGS(irq, cpu, vector, is_managed), |
353 | |
354 | TP_STRUCT__entry( |
355 | __field( unsigned int, irq ) |
356 | __field( unsigned int, cpu ) |
357 | __field( unsigned int, vector ) |
358 | __field( bool, is_managed ) |
359 | ), |
360 | |
361 | TP_fast_assign( |
362 | __entry->irq = irq; |
363 | __entry->cpu = cpu; |
364 | __entry->vector = vector; |
365 | __entry->is_managed = is_managed; |
366 | ), |
367 | |
368 | TP_printk("irq=%u cpu=%u vector=%u is_managed=%d" , |
369 | __entry->irq, __entry->cpu, __entry->vector, |
370 | __entry->is_managed) |
371 | ); |
372 | |
373 | |
374 | #endif /* CONFIG_X86_LOCAL_APIC */ |
375 | |
376 | #undef TRACE_INCLUDE_PATH |
377 | #undef TRACE_INCLUDE_FILE |
378 | #define TRACE_INCLUDE_PATH . |
379 | #define TRACE_INCLUDE_FILE irq_vectors |
380 | #endif /* _TRACE_IRQ_VECTORS_H */ |
381 | |
382 | /* This part must be outside protection */ |
383 | #include <trace/define_trace.h> |
384 | |