// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains the /proc/irq/ handling code.
 */

#include <linux/irq.h>
#include <linux/gfp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>

#include "internals.h"

/*
 * Access rules:
 *
 * procfs protects read/write of /proc/irq/N/ files against a
 * concurrent free of the interrupt descriptor. remove_proc_entry()
 * immediately prevents new read/writes to happen and waits for
 * already running read/write functions to complete.
 *
 * We remove the proc entries first and then delete the interrupt
 * descriptor from the radix tree and free it. So it is guaranteed
 * that irq_to_desc(N) is valid as long as the read/writes are
 * permitted by procfs.
 *
 * The read from /proc/interrupts is a different problem because there
 * is no protection. So the lookup and the access to irqdesc
 * information must be protected by sparse_irq_lock.
 */
static struct proc_dir_entry *root_irq_dir;

#ifdef CONFIG_SMP

enum {
	AFFINITY,
	AFFINITY_LIST,
	EFFECTIVE,
	EFFECTIVE_LIST,
};

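/*
 * Show one of the per-IRQ affinity masks under /proc/irq/<irq>/.
 * @type selects both the mask (user-configured vs. effective) and the
 * output format (hex bitmask vs. CPU list). If an affinity change is
 * still pending delivery, the pending mask is shown instead of the
 * configured one.
 */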
static int show_irq_affinity(int type, struct seq_file *m)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	const struct cpumask *mask;

	switch (type) {
	case AFFINITY:
	case AFFINITY_LIST:
		mask = desc->irq_common_data.affinity;
#ifdef CONFIG_GENERIC_PENDING_IRQ
		if (irqd_is_setaffinity_pending(&desc->irq_data))
			mask = desc->pending_mask;
#endif
		break;
	case EFFECTIVE:
	case EFFECTIVE_LIST:
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
		mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
		break;
#endif
	default:
		return -EINVAL;
	}

	switch (type) {
	case AFFINITY_LIST:
	case EFFECTIVE_LIST:
		seq_printf(m, "%*pbl\n", cpumask_pr_args(mask));
		break;
	case AFFINITY:
	case EFFECTIVE:
		seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
		break;
	}
	return 0;
}

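/*
 * Show the affinity hint which a driver may have handed in via
 * irq_set_affinity_hint(). An all-zero mask is printed when no hint
 * is set.
 */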
static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	unsigned long flags;
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->affinity_hint)
		cpumask_copy(mask, desc->affinity_hint);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
	free_cpumask_var(mask);

	return 0;
}

int no_irq_affinity;
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY, m);
}

static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY_LIST, m);
}

#ifndef CONFIG_AUTO_IRQ_AFFINITY
static inline int irq_select_affinity_usr(unsigned int irq)
{
	/*
	 * If the interrupt is started up already then this fails. The
	 * interrupt is assigned to an online CPU already. There is no
	 * point in moving it around randomly. Tell user space that the
	 * selected mask is bogus.
	 *
	 * If not, then any change to the affinity is pointless because
	 * the startup code invokes irq_setup_affinity() which will
	 * select an online CPU anyway.
	 */
	return -EINVAL;
}
#else
/* ALPHA magic affinity auto selector. Keep it for historical reasons. */
static inline int irq_select_affinity_usr(unsigned int irq)
{
	return irq_select_affinity(irq);
}
#endif

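/*
 * Common write handler for /proc/irq/<irq>/smp_affinity and
 * /proc/irq/<irq>/smp_affinity_list. @type selects the input format:
 * 0 parses a hex bitmask, 1 parses a CPU list such as "0,2-3".
 */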
static ssize_t write_irq_affinity(int type, struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	unsigned int irq = (int)(long)pde_data(file_inode(file));
	cpumask_var_t new_value;
	int err;

	if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
		return -EIO;

	if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	if (type)
		err = cpumask_parselist_user(buffer, count, new_value);
	else
		err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto free_cpumask;

	/*
	 * Do not allow disabling IRQs completely - it's far too easy a
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		/*
		 * Special case for empty set - allow the architecture code
		 * to set default SMP affinity.
		 */
		err = irq_select_affinity_usr(irq) ? -EINVAL : count;
	} else {
		err = irq_set_affinity(irq, new_value);
		if (!err)
			err = count;
	}

free_cpumask:
	free_cpumask_var(new_value);
	return err;
}

178 | |
179 | static ssize_t irq_affinity_proc_write(struct file *file, |
180 | const char __user *buffer, size_t count, loff_t *pos) |
181 | { |
182 | return write_irq_affinity(type: 0, file, buffer, count, pos); |
183 | } |
184 | |
185 | static ssize_t irq_affinity_list_proc_write(struct file *file, |
186 | const char __user *buffer, size_t count, loff_t *pos) |
187 | { |
188 | return write_irq_affinity(type: 1, file, buffer, count, pos); |
189 | } |
190 | |
191 | static int irq_affinity_proc_open(struct inode *inode, struct file *file) |
192 | { |
193 | return single_open(file, irq_affinity_proc_show, pde_data(inode)); |
194 | } |
195 | |
196 | static int irq_affinity_list_proc_open(struct inode *inode, struct file *file) |
197 | { |
198 | return single_open(file, irq_affinity_list_proc_show, pde_data(inode)); |
199 | } |
200 | |
201 | static const struct proc_ops irq_affinity_proc_ops = { |
202 | .proc_open = irq_affinity_proc_open, |
203 | .proc_read = seq_read, |
204 | .proc_lseek = seq_lseek, |
205 | .proc_release = single_release, |
206 | .proc_write = irq_affinity_proc_write, |
207 | }; |
208 | |
209 | static const struct proc_ops irq_affinity_list_proc_ops = { |
210 | .proc_open = irq_affinity_list_proc_open, |
211 | .proc_read = seq_read, |
212 | .proc_lseek = seq_lseek, |
213 | .proc_release = single_release, |
214 | .proc_write = irq_affinity_list_proc_write, |
215 | }; |
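
/*
 * Illustrative usage from user space (the IRQ number is an example):
 *
 *   # echo 0c  > /proc/irq/24/smp_affinity        hex mask: CPUs 2-3
 *   # echo 2-3 > /proc/irq/24/smp_affinity_list   same set, list format
 *
 * Writes that target no online CPU fail unless the architecture
 * provides a default selection, see write_irq_affinity().
 */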

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static int irq_effective_aff_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE, m);
}

static int irq_effective_aff_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE_LIST, m);
}
#endif

static int default_affinity_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%*pb\n", cpumask_pr_args(irq_default_affinity));
	return 0;
}

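/*
 * Write handler for /proc/irq/default_smp_affinity. The parsed mask
 * becomes irq_default_affinity, which affects interrupts started up
 * afterwards; interrupts which are already active keep their current
 * affinity.
 */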
static ssize_t default_affinity_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos)
{
	cpumask_var_t new_value;
	int err;

	if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto out;

	/*
	 * Do not allow disabling IRQs completely - it's far too easy a
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		err = -EINVAL;
		goto out;
	}

	cpumask_copy(irq_default_affinity, new_value);
	err = count;

out:
	free_cpumask_var(new_value);
	return err;
}

265 | |
266 | static int default_affinity_open(struct inode *inode, struct file *file) |
267 | { |
268 | return single_open(file, default_affinity_show, pde_data(inode)); |
269 | } |
270 | |
271 | static const struct proc_ops default_affinity_proc_ops = { |
272 | .proc_open = default_affinity_open, |
273 | .proc_read = seq_read, |
274 | .proc_lseek = seq_lseek, |
275 | .proc_release = single_release, |
276 | .proc_write = default_affinity_write, |
277 | }; |
278 | |
279 | static int irq_node_proc_show(struct seq_file *m, void *v) |
280 | { |
281 | struct irq_desc *desc = irq_to_desc(irq: (long) m->private); |
282 | |
283 | seq_printf(m, fmt: "%d\n" , irq_desc_get_node(desc)); |
284 | return 0; |
285 | } |
286 | #endif |
287 | |
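/*
 * Show the spurious interrupt statistics of this interrupt: how often
 * it fired, how often it was unhandled, and the timestamp of the last
 * unhandled occurrence converted to milliseconds.
 */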
static int irq_spurious_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
		   desc->irq_count, desc->irqs_unhandled,
		   jiffies_to_msecs(desc->last_unhandled));
	return 0;
}

#define MAX_NAMELEN 128

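/*
 * Check whether the name of @new_action collides with another action
 * already installed on @irq. Only actions with a unique name get their
 * own /proc/irq/<irq>/<name> directory.
 */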
static int name_unique(unsigned int irq, struct irqaction *new_action)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;
	int ret = 1;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if ((action != new_action) && action->name &&
				!strcmp(new_action->name, action->name)) {
			ret = 0;
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

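/*
 * Create the per-action directory /proc/irq/<irq>/<action name>. This
 * is skipped when the IRQ directory does not exist yet, the action
 * already has a directory, or the name is not unique for this
 * interrupt.
 */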
void register_handler_proc(unsigned int irq, struct irqaction *action)
{
	char name [MAX_NAMELEN];
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc->dir || action->dir || !action->name ||
			!name_unique(irq, action))
		return;

	snprintf(name, MAX_NAMELEN, "%s", action->name);

	/* create /proc/irq/1234/handler/ */
	action->dir = proc_mkdir(name, desc->dir);
}

#undef MAX_NAMELEN

#define MAX_NAMELEN 10

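/*
 * Create /proc/irq/<irq> and populate it with the affinity, node and
 * spurious entries. May be called more than once for the same
 * interrupt; later callers find desc->dir already set and back out.
 */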
void register_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	static DEFINE_MUTEX(register_lock);
	void __maybe_unused *irqp = (void *)(unsigned long) irq;
	char name [MAX_NAMELEN];

	if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
		return;

	/*
	 * irq directories are registered only when a handler is
	 * added, not when the descriptor is created, so multiple
	 * tasks might try to register at the same time.
	 */
	mutex_lock(&register_lock);

	if (desc->dir)
		goto out_unlock;

	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	desc->dir = proc_mkdir(name, root_irq_dir);
	if (!desc->dir)
		goto out_unlock;

#ifdef CONFIG_SMP
	/* create /proc/irq/<irq>/smp_affinity */
	proc_create_data("smp_affinity", 0644, desc->dir,
			 &irq_affinity_proc_ops, irqp);

	/* create /proc/irq/<irq>/affinity_hint */
	proc_create_single_data("affinity_hint", 0444, desc->dir,
				irq_affinity_hint_proc_show, irqp);

	/* create /proc/irq/<irq>/smp_affinity_list */
	proc_create_data("smp_affinity_list", 0644, desc->dir,
			 &irq_affinity_list_proc_ops, irqp);

	proc_create_single_data("node", 0444, desc->dir, irq_node_proc_show,
				irqp);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	proc_create_single_data("effective_affinity", 0444, desc->dir,
				irq_effective_aff_proc_show, irqp);
	proc_create_single_data("effective_affinity_list", 0444, desc->dir,
				irq_effective_aff_list_proc_show, irqp);
# endif
#endif
	proc_create_single_data("spurious", 0444, desc->dir,
				irq_spurious_proc_show, (void *)(long)irq);

out_unlock:
	mutex_unlock(&register_lock);
}

void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || !desc->dir)
		return;
#ifdef CONFIG_SMP
	remove_proc_entry("smp_affinity", desc->dir);
	remove_proc_entry("affinity_hint", desc->dir);
	remove_proc_entry("smp_affinity_list", desc->dir);
	remove_proc_entry("node", desc->dir);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	remove_proc_entry("effective_affinity", desc->dir);
	remove_proc_entry("effective_affinity_list", desc->dir);
# endif
#endif
	remove_proc_entry("spurious", desc->dir);

	sprintf(name, "%u", irq);
	remove_proc_entry(name, root_irq_dir);
}

#undef MAX_NAMELEN

void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
	proc_remove(action->dir);
}

static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
	proc_create("irq/default_smp_affinity", 0644, NULL,
		    &default_affinity_proc_ops);
#endif
}

void init_irq_proc(void)
{
	unsigned int irq;
	struct irq_desc *desc;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
	if (!root_irq_dir)
		return;

	register_default_affinity_proc();

	/*
	 * Create entries for all existing IRQs.
	 */
	for_each_irq_desc(irq, desc)
		register_irq_proc(irq, desc);
}

#ifdef CONFIG_GENERIC_IRQ_SHOW

int __weak arch_show_interrupts(struct seq_file *p, int prec)
{
	return 0;
}

#ifndef ACTUAL_NR_IRQS
# define ACTUAL_NR_IRQS nr_irqs
#endif

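/*
 * seq_file show callback for /proc/interrupts. Each line consists of
 * the IRQ number, one counter per online CPU, the chip name, the hwirq
 * number, optional trigger type and name, and the action name(s), e.g.
 * (illustrative values):
 *
 *   24:       1234          0   IO-APIC   24   eth0
 *
 * A final invocation with i == ACTUAL_NR_IRQS gives the architecture a
 * chance to append its own rows via arch_show_interrupts().
 */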
int show_interrupts(struct seq_file *p, void *v)
{
	static int prec;

	unsigned long flags, any_count = 0;
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	struct irq_desc *desc;

	if (i > ACTUAL_NR_IRQS)
		return 0;

	if (i == ACTUAL_NR_IRQS)
		return arch_show_interrupts(p, prec);

	/* print header and calculate the width of the first column */
	if (i == 0) {
		for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
			j *= 10;

		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	rcu_read_lock();
	desc = irq_to_desc(i);
	if (!desc || irq_settings_is_hidden(desc))
		goto outsparse;

	if (desc->kstat_irqs) {
		for_each_online_cpu(j)
			any_count |= data_race(*per_cpu_ptr(desc->kstat_irqs, j));
	}

	if ((!desc->action || irq_desc_is_chained(desc)) && !any_count)
		goto outsparse;

	seq_printf(p, "%*d: ", prec, i);
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", desc->kstat_irqs ?
					*per_cpu_ptr(desc->kstat_irqs, j) : 0);

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->irq_data.chip) {
		if (desc->irq_data.chip->irq_print_chip)
			desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
		else if (desc->irq_data.chip->name)
			seq_printf(p, " %8s", desc->irq_data.chip->name);
		else
			seq_printf(p, " %8s", "-");
	} else {
		seq_printf(p, " %8s", "None");
	}
	if (desc->irq_data.domain)
		seq_printf(p, " %*lu", prec, desc->irq_data.hwirq);
	else
		seq_printf(p, " %*s", prec, "");
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
	seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif
	if (desc->name)
		seq_printf(p, "-%-8s", desc->name);

	action = desc->action;
	if (action) {
		seq_printf(p, " %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
	raw_spin_unlock_irqrestore(&desc->lock, flags);
outsparse:
	rcu_read_unlock();
	return 0;
}
#endif