• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * linux/kernel/irq/proc.c
4  *
5  * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
6  *
7  * This file contains the /proc/irq/ handling code.
8  */
9 
10 #include <linux/irq.h>
11 #include <linux/gfp.h>
12 #include <linux/proc_fs.h>
13 #include <linux/seq_file.h>
14 #include <linux/interrupt.h>
15 #include <linux/kernel_stat.h>
16 #include <linux/mutex.h>
17 
18 #include "internals.h"
19 
20 /*
21  * Access rules:
22  *
23  * procfs protects read/write of /proc/irq/N/ files against a
24  * concurrent free of the interrupt descriptor. remove_proc_entry()
25  * immediately prevents new read/writes to happen and waits for
26  * already running read/write functions to complete.
27  *
28  * We remove the proc entries first and then delete the interrupt
29  * descriptor from the radix tree and free it. So it is guaranteed
30  * that irq_to_desc(N) is valid as long as the read/writes are
31  * permitted by procfs.
32  *
33  * The read from /proc/interrupts is a different problem because there
34  * is no protection. So the lookup and the access to irqdesc
35  * information must be protected by sparse_irq_lock.
36  */
37 static struct proc_dir_entry *root_irq_dir;
38 
39 #ifdef CONFIG_SMP
40 
41 enum {
42 	AFFINITY,
43 	AFFINITY_LIST,
44 	EFFECTIVE,
45 	EFFECTIVE_LIST,
46 };
47 
/*
 * Show the affinity (or effective affinity) cpumask of the interrupt
 * whose number is stashed in m->private, in either bitmask or cpu-list
 * format depending on @type.
 *
 * NOTE: when CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK is not enabled the
 * EFFECTIVE/EFFECTIVE_LIST cases deliberately fall through to the
 * default and return -EINVAL - the effective mask is not tracked then.
 */
static int show_irq_affinity(int type, struct seq_file *m)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	const struct cpumask *mask;

	switch (type) {
	case AFFINITY:
	case AFFINITY_LIST:
		mask = desc->irq_common_data.affinity;
#ifdef CONFIG_GENERIC_PENDING_IRQ
		/* An affinity change is queued; show the mask it will get */
		if (irqd_is_setaffinity_pending(&desc->irq_data))
			mask = desc->pending_mask;
#endif
		break;
	case EFFECTIVE:
	case EFFECTIVE_LIST:
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
		mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
		break;
#endif
	default:
		return -EINVAL;
	}

	switch (type) {
	case AFFINITY_LIST:
	case EFFECTIVE_LIST:
		/* Human readable cpu list, e.g. "0-3,5" */
		seq_printf(m, "%*pbl\n", cpumask_pr_args(mask));
		break;
	case AFFINITY:
	case EFFECTIVE:
		/* Hex bitmask format */
		seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
		break;
	}
	return 0;
}
84 
/*
 * Show the affinity hint a driver may have supplied for this interrupt.
 * Prints an empty (all zero) mask when no hint is set.
 */
static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	unsigned long flags;
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Snapshot the hint under the descriptor lock - the hint pointer
	 * can be cleared concurrently, so it must not be dereferenced
	 * outside the locked region.
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->affinity_hint)
		cpumask_copy(mask, desc->affinity_hint);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
	free_cpumask_var(mask);

	return 0;
}
104 
/* Architectures may provide a stricter check; the default accepts any mask */
#ifndef is_affinity_mask_valid
#define is_affinity_mask_valid(val) 1
#endif

/* When non-zero, user space affinity writes are rejected (see write_irq_affinity()) */
int no_irq_affinity;
/* seq_file show for /proc/irq/<irq>/smp_affinity (bitmask format) */
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY, m);
}
114 
/* seq_file show for /proc/irq/<irq>/smp_affinity_list (cpu list format) */
static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY_LIST, m);
}
119 
/*
 * Invoked when user space writes an empty affinity mask: give the
 * architecture a chance to pick a default affinity (see
 * write_irq_affinity()).
 */
#ifndef CONFIG_AUTO_IRQ_AFFINITY
static inline int irq_select_affinity_usr(unsigned int irq)
{
	/*
	 * If the interrupt is started up already then this fails. The
	 * interrupt is assigned to an online CPU already. There is no
	 * point to move it around randomly. Tell user space that the
	 * selected mask is bogus.
	 *
	 * If not then any change to the affinity is pointless because the
	 * startup code invokes irq_setup_affinity() which will select
	 * a online CPU anyway.
	 */
	return -EINVAL;
}
#else
/* ALPHA magic affinity auto selector. Keep it for historical reasons. */
static inline int irq_select_affinity_usr(unsigned int irq)
{
	return irq_select_affinity(irq);
}
#endif
142 
write_irq_affinity(int type,struct file * file,const char __user * buffer,size_t count,loff_t * pos)143 static ssize_t write_irq_affinity(int type, struct file *file,
144 		const char __user *buffer, size_t count, loff_t *pos)
145 {
146 	unsigned int irq = (int)(long)PDE_DATA(file_inode(file));
147 	cpumask_var_t new_value;
148 	int err;
149 
150 	if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
151 		return -EIO;
152 
153 	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
154 		return -ENOMEM;
155 
156 	if (type)
157 		err = cpumask_parselist_user(buffer, count, new_value);
158 	else
159 		err = cpumask_parse_user(buffer, count, new_value);
160 	if (err)
161 		goto free_cpumask;
162 
163 	if (!is_affinity_mask_valid(new_value)) {
164 		err = -EINVAL;
165 		goto free_cpumask;
166 	}
167 
168 	/*
169 	 * Do not allow disabling IRQs completely - it's a too easy
170 	 * way to make the system unusable accidentally :-) At least
171 	 * one online CPU still has to be targeted.
172 	 */
173 	if (!cpumask_intersects(new_value, cpu_online_mask)) {
174 		/*
175 		 * Special case for empty set - allow the architecture code
176 		 * to set default SMP affinity.
177 		 */
178 		err = irq_select_affinity_usr(irq) ? -EINVAL : count;
179 	} else {
180 		irq_set_affinity(irq, new_value);
181 		err = count;
182 	}
183 
184 free_cpumask:
185 	free_cpumask_var(new_value);
186 	return err;
187 }
188 
/* Write handler for /proc/irq/<irq>/smp_affinity (bitmask format) */
static ssize_t irq_affinity_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(0, file, buffer, count, pos);
}
194 
/* Write handler for /proc/irq/<irq>/smp_affinity_list (cpu list format) */
static ssize_t irq_affinity_list_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(1, file, buffer, count, pos);
}
200 
/* Open: single-shot seq_file; PDE data carries the irq number */
static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_proc_show, PDE_DATA(inode));
}
205 
/* Open: single-shot seq_file; PDE data carries the irq number */
static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_list_proc_show, PDE_DATA(inode));
}
210 
/* Open: single-shot seq_file; PDE data carries the irq number */
static int irq_affinity_hint_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_hint_proc_show, PDE_DATA(inode));
}
215 
/* /proc/irq/<irq>/smp_affinity: read/write, hex bitmask format */
static const struct file_operations irq_affinity_proc_fops = {
	.open		= irq_affinity_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= irq_affinity_proc_write,
};

/* /proc/irq/<irq>/affinity_hint: read only */
static const struct file_operations irq_affinity_hint_proc_fops = {
	.open		= irq_affinity_hint_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* /proc/irq/<irq>/smp_affinity_list: read/write, cpu list format */
static const struct file_operations irq_affinity_list_proc_fops = {
	.open		= irq_affinity_list_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= irq_affinity_list_proc_write,
};
238 
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
/* seq_file show for /proc/irq/<irq>/effective_affinity (bitmask format) */
static int irq_effective_aff_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE, m);
}

/* seq_file show for /proc/irq/<irq>/effective_affinity_list (cpu list format) */
static int irq_effective_aff_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE_LIST, m);
}

/* Open: single-shot seq_file; PDE data carries the irq number */
static int irq_effective_aff_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_effective_aff_proc_show, PDE_DATA(inode));
}

/* Open: single-shot seq_file; PDE data carries the irq number */
static int irq_effective_aff_list_proc_open(struct inode *inode,
					    struct file *file)
{
	return single_open(file, irq_effective_aff_list_proc_show,
			   PDE_DATA(inode));
}

/* /proc/irq/<irq>/effective_affinity: read only */
static const struct file_operations irq_effective_aff_proc_fops = {
	.open		= irq_effective_aff_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* /proc/irq/<irq>/effective_affinity_list: read only */
static const struct file_operations irq_effective_aff_list_proc_fops = {
	.open		= irq_effective_aff_list_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif
276 
/* seq_file show for /proc/irq/default_smp_affinity (bitmask format) */
static int default_affinity_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%*pb\n", cpumask_pr_args(irq_default_affinity));
	return 0;
}
282 
/*
 * Handle a write to /proc/irq/default_smp_affinity: parse a hex bitmask
 * and install it as the default affinity for newly set up interrupts.
 * Returns the number of bytes consumed on success or a negative errno.
 */
static ssize_t default_affinity_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos)
{
	cpumask_var_t new_value;
	int err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto out;

	if (!is_affinity_mask_valid(new_value)) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		err = -EINVAL;
		goto out;
	}

	cpumask_copy(irq_default_affinity, new_value);
	err = count;

out:
	free_cpumask_var(new_value);
	return err;
}
318 
/* Open: single-shot seq_file for the global default affinity */
static int default_affinity_open(struct inode *inode, struct file *file)
{
	return single_open(file, default_affinity_show, PDE_DATA(inode));
}
323 
/* /proc/irq/default_smp_affinity: read/write, hex bitmask format */
static const struct file_operations default_affinity_proc_fops = {
	.open		= default_affinity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= default_affinity_write,
};
331 
/* seq_file show for /proc/irq/<irq>/node: NUMA node of the descriptor */
static int irq_node_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "%d\n", irq_desc_get_node(desc));
	return 0;
}
339 
/* Open: single-shot seq_file; PDE data carries the irq number */
static int irq_node_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_node_proc_show, PDE_DATA(inode));
}
344 
/* /proc/irq/<irq>/node: read only */
static const struct file_operations irq_node_proc_fops = {
	.open		= irq_node_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif
352 
/*
 * seq_file show for /proc/irq/<irq>/spurious: total count, unhandled
 * count and the time of the last unhandled occurrence in milliseconds.
 */
static int irq_spurious_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
		   desc->irq_count, desc->irqs_unhandled,
		   jiffies_to_msecs(desc->last_unhandled));
	return 0;
}
362 
/* Open: single-shot seq_file; PDE data carries the irq number */
static int irq_spurious_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_spurious_proc_show, PDE_DATA(inode));
}
367 
/* /proc/irq/<irq>/spurious: read only */
static const struct file_operations irq_spurious_proc_fops = {
	.open		= irq_spurious_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
374 
#define MAX_NAMELEN 128

/*
 * Check whether @new_action's name clashes with any other action already
 * installed on @irq. Returns 1 when the name is unique, 0 otherwise.
 * Used to avoid creating duplicate /proc/irq/<irq>/<name> directories.
 */
static int name_unique(unsigned int irq, struct irqaction *new_action)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;
	int ret = 1;

	/* The action list can change concurrently; walk it under the lock */
	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if ((action != new_action) && action->name &&
				!strcmp(new_action->name, action->name)) {
			ret = 0;
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
395 
/*
 * Create the /proc/irq/<irq>/<name> directory for a newly installed
 * handler. Silently does nothing when the irq directory does not exist
 * yet, the action already has a directory, the action is unnamed, or
 * the name clashes with another action on the same irq.
 */
void register_handler_proc(unsigned int irq, struct irqaction *action)
{
	char name [MAX_NAMELEN];
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc->dir || action->dir || !action->name ||
					!name_unique(irq, action))
		return;

	snprintf(name, MAX_NAMELEN, "%s", action->name);

	/* create /proc/irq/1234/handler/ */
	action->dir = proc_mkdir(name, desc->dir);
}
410 
411 #undef MAX_NAMELEN
412 
413 #define MAX_NAMELEN 10
414 
register_irq_proc(unsigned int irq,struct irq_desc * desc)415 void register_irq_proc(unsigned int irq, struct irq_desc *desc)
416 {
417 	static DEFINE_MUTEX(register_lock);
418 	void __maybe_unused *irqp = (void *)(unsigned long) irq;
419 	char name [MAX_NAMELEN];
420 
421 	if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
422 		return;
423 
424 	/*
425 	 * irq directories are registered only when a handler is
426 	 * added, not when the descriptor is created, so multiple
427 	 * tasks might try to register at the same time.
428 	 */
429 	mutex_lock(&register_lock);
430 
431 	if (desc->dir)
432 		goto out_unlock;
433 
434 	sprintf(name, "%d", irq);
435 
436 	/* create /proc/irq/1234 */
437 	desc->dir = proc_mkdir(name, root_irq_dir);
438 	if (!desc->dir)
439 		goto out_unlock;
440 
441 #ifdef CONFIG_SMP
442 	/* create /proc/irq/<irq>/smp_affinity */
443 	proc_create_data("smp_affinity", 0644, desc->dir,
444 			 &irq_affinity_proc_fops, irqp);
445 
446 	/* create /proc/irq/<irq>/affinity_hint */
447 	proc_create_data("affinity_hint", 0444, desc->dir,
448 			 &irq_affinity_hint_proc_fops, irqp);
449 
450 	/* create /proc/irq/<irq>/smp_affinity_list */
451 	proc_create_data("smp_affinity_list", 0644, desc->dir,
452 			 &irq_affinity_list_proc_fops, irqp);
453 
454 	proc_create_data("node", 0444, desc->dir,
455 			 &irq_node_proc_fops, irqp);
456 # ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
457 	proc_create_data("effective_affinity", 0444, desc->dir,
458 			 &irq_effective_aff_proc_fops, irqp);
459 	proc_create_data("effective_affinity_list", 0444, desc->dir,
460 			 &irq_effective_aff_list_proc_fops, irqp);
461 # endif
462 #endif
463 	proc_create_data("spurious", 0444, desc->dir,
464 			 &irq_spurious_proc_fops, (void *)(long)irq);
465 
466 out_unlock:
467 	mutex_unlock(&register_lock);
468 }
469 
unregister_irq_proc(unsigned int irq,struct irq_desc * desc)470 void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
471 {
472 	char name [MAX_NAMELEN];
473 
474 	if (!root_irq_dir || !desc->dir)
475 		return;
476 #ifdef CONFIG_SMP
477 	remove_proc_entry("smp_affinity", desc->dir);
478 	remove_proc_entry("affinity_hint", desc->dir);
479 	remove_proc_entry("smp_affinity_list", desc->dir);
480 	remove_proc_entry("node", desc->dir);
481 # ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
482 	remove_proc_entry("effective_affinity", desc->dir);
483 	remove_proc_entry("effective_affinity_list", desc->dir);
484 # endif
485 #endif
486 	remove_proc_entry("spurious", desc->dir);
487 
488 	sprintf(name, "%u", irq);
489 	remove_proc_entry(name, root_irq_dir);
490 }
491 
492 #undef MAX_NAMELEN
493 
/* Remove the /proc/irq/<irq>/<name> directory of a departing handler */
void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
	proc_remove(action->dir);
}
498 
/* Create /proc/irq/default_smp_affinity (SMP only) */
static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
	proc_create("irq/default_smp_affinity", 0644, NULL,
		    &default_affinity_proc_fops);
#endif
}
506 
/*
 * Boot-time setup: create /proc/irq, the default affinity entry and a
 * per-irq directory for every descriptor that already exists.
 */
void init_irq_proc(void)
{
	unsigned int irq;
	struct irq_desc *desc;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
	if (!root_irq_dir)
		return;

	register_default_affinity_proc();

	/*
	 * Create entries for all existing IRQs.
	 */
	for_each_irq_desc(irq, desc)
		register_irq_proc(irq, desc);
}
525 
526 #ifdef CONFIG_GENERIC_IRQ_SHOW
527 
/*
 * Weak default for the architecture specific tail of /proc/interrupts
 * (NMI counters etc.); architectures override this to add their lines.
 */
int __weak arch_show_interrupts(struct seq_file *p, int prec)
{
	return 0;
}

/* Architectures may cap the number of shown irq lines below nr_irqs */
#ifndef ACTUAL_NR_IRQS
# define ACTUAL_NR_IRQS nr_irqs
#endif
536 
/*
 * seq_file show function for /proc/interrupts. @v is the iterator
 * position: one irq per step, plus one extra slot past ACTUAL_NR_IRQS
 * for the architecture specific summary lines.
 */
int show_interrupts(struct seq_file *p, void *v)
{
	/* First-column width, computed once on the header line (i == 0) */
	static int prec;

	unsigned long flags, any_count = 0;
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	struct irq_desc *desc;

	if (i > ACTUAL_NR_IRQS)
		return 0;

	/* The slot after the last irq carries the arch specific counters */
	if (i == ACTUAL_NR_IRQS)
		return arch_show_interrupts(p, prec);

	/* print header and calculate the width of the first column */
	if (i == 0) {
		for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
			j *= 10;

		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	/* Protect the descriptor lookup against concurrent removal
	   (see the access rules at the top of this file) */
	irq_lock_sparse();
	desc = irq_to_desc(i);
	if (!desc)
		goto outsparse;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_online_cpu(j)
		any_count |= kstat_irqs_cpu(i, j);
	action = desc->action;
	/* Skip interrupts that never fired and have no handler installed */
	if ((!action || irq_desc_is_chained(desc)) && !any_count)
		goto out;

	seq_printf(p, "%*d: ", prec, i);
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));

	/* Chip column: custom printer, chip name, or a placeholder */
	if (desc->irq_data.chip) {
		if (desc->irq_data.chip->irq_print_chip)
			desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
		else if (desc->irq_data.chip->name)
			seq_printf(p, " %8s", desc->irq_data.chip->name);
		else
			seq_printf(p, " %8s", "-");
	} else {
		seq_printf(p, " %8s", "None");
	}
	/* Hardware irq number, when an irq domain is in use */
	if (desc->irq_data.domain)
		seq_printf(p, " %*d", prec, (int) desc->irq_data.hwirq);
	else
		seq_printf(p, " %*s", prec, "");
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
	seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif
	if (desc->name)
		seq_printf(p, "-%-8s", desc->name);

	/* Comma separated list of all handler names sharing this irq */
	if (action) {
		seq_printf(p, "  %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
out:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
outsparse:
	irq_unlock_sparse();
	return 0;
}
612 #endif
613