• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
4  *
5  * This file contains the /proc/irq/ handling code.
6  */
7 
8 #include <linux/irq.h>
9 #include <linux/gfp.h>
10 #include <linux/proc_fs.h>
11 #include <linux/seq_file.h>
12 #include <linux/interrupt.h>
13 #include <linux/kernel_stat.h>
14 #include <linux/mutex.h>
15 
16 #include "internals.h"
17 
18 /*
19  * Access rules:
20  *
21  * procfs protects read/write of /proc/irq/N/ files against a
22  * concurrent free of the interrupt descriptor. remove_proc_entry()
23  * immediately prevents new read/writes to happen and waits for
24  * already running read/write functions to complete.
25  *
26  * We remove the proc entries first and then delete the interrupt
27  * descriptor from the radix tree and free it. So it is guaranteed
28  * that irq_to_desc(N) is valid as long as the read/writes are
29  * permitted by procfs.
30  *
31  * The read from /proc/interrupts is a different problem because there
32  * is no protection. So the lookup and the access to irqdesc
33  * information must be protected by sparse_irq_lock.
34  */
/* Root of the /proc/irq hierarchy; created once by init_irq_proc(). */
static struct proc_dir_entry *root_irq_dir;
36 
37 #ifdef CONFIG_SMP
38 
/*
 * Selectors for show_irq_affinity(): which mask to print (configured vs.
 * effective affinity) and in which format (bitmask vs. CPU list).
 */
enum {
	AFFINITY,
	AFFINITY_LIST,
	EFFECTIVE,
	EFFECTIVE_LIST,
};
45 
/*
 * Print the affinity mask of the interrupt stored in m->private, in the
 * representation selected by @type. Returns 0 on success or -EINVAL for
 * an unknown/unsupported selector.
 */
static int show_irq_affinity(int type, struct seq_file *m)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	const struct cpumask *mask;

	switch (type) {
	case AFFINITY:
	case AFFINITY_LIST:
		mask = desc->irq_common_data.affinity;
#ifdef CONFIG_GENERIC_PENDING_IRQ
		/* A pending migration target supersedes the current mask. */
		if (irqd_is_setaffinity_pending(&desc->irq_data))
			mask = desc->pending_mask;
#endif
		break;
	case EFFECTIVE:
	case EFFECTIVE_LIST:
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
		mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
		break;
#endif
		/*
		 * Without GENERIC_IRQ_EFFECTIVE_AFF_MASK the EFFECTIVE*
		 * selectors fall through to the error return below.
		 */
	default:
		return -EINVAL;
	}

	/* %*pbl prints a CPU list, %*pb a bitmask. */
	switch (type) {
	case AFFINITY_LIST:
	case EFFECTIVE_LIST:
		seq_printf(m, "%*pbl\n", cpumask_pr_args(mask));
		break;
	case AFFINITY:
	case EFFECTIVE:
		seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
		break;
	}
	return 0;
}
82 
irq_affinity_hint_proc_show(struct seq_file * m,void * v)83 static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
84 {
85 	struct irq_desc *desc = irq_to_desc((long)m->private);
86 	unsigned long flags;
87 	cpumask_var_t mask;
88 
89 	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
90 		return -ENOMEM;
91 
92 	raw_spin_lock_irqsave(&desc->lock, flags);
93 	if (desc->affinity_hint)
94 		cpumask_copy(mask, desc->affinity_hint);
95 	raw_spin_unlock_irqrestore(&desc->lock, flags);
96 
97 	seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
98 	free_cpumask_var(mask);
99 
100 	return 0;
101 }
102 
/* Non-zero disables user-space affinity writes (see write_irq_affinity()). */
int no_irq_affinity;

/* /proc/irq/<irq>/smp_affinity: show the affinity as a bitmask. */
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY, m);
}
108 
/* /proc/irq/<irq>/smp_affinity_list: show the affinity as a CPU list. */
static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY_LIST, m);
}
113 
/*
 * Fallback used by write_irq_affinity() when the user writes an empty
 * intersection with the online CPUs. Only CONFIG_AUTO_IRQ_AFFINITY
 * architectures provide a real implementation.
 */
#ifndef CONFIG_AUTO_IRQ_AFFINITY
static inline int irq_select_affinity_usr(unsigned int irq)
{
	/*
	 * If the interrupt is started up already then this fails. The
	 * interrupt is assigned to an online CPU already. There is no
	 * point to move it around randomly. Tell user space that the
	 * selected mask is bogus.
	 *
	 * If not then any change to the affinity is pointless because the
	 * startup code invokes irq_setup_affinity() which will select
	 * a online CPU anyway.
	 */
	return -EINVAL;
}
#else
/* ALPHA magic affinity auto selector. Keep it for historical reasons. */
static inline int irq_select_affinity_usr(unsigned int irq)
{
	return irq_select_affinity(irq);
}
#endif
136 
/*
 * Common write handler for smp_affinity (@type == 0, bitmask input) and
 * smp_affinity_list (@type != 0, CPU-list input).
 * Returns @count on success or a negative errno.
 */
static ssize_t write_irq_affinity(int type, struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	unsigned int irq = (int)(long)PDE_DATA(file_inode(file));
	cpumask_var_t new_value;
	int err;

	if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
		return -EIO;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	if (type)
		err = cpumask_parselist_user(buffer, count, new_value);
	else
		err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto free_cpumask;

#ifdef CONFIG_CPU_ISOLATION_OPT
	/* Reject masks consisting solely of isolated CPUs. */
	if (cpumask_subset(new_value, cpu_isolated_mask)) {
		err = -EINVAL;
		goto free_cpumask;
	}
#endif
	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		/*
		 * Special case for empty set - allow the architecture code
		 * to set default SMP affinity.
		 */
		err = irq_select_affinity_usr(irq) ? -EINVAL : count;
	} else {
		err = irq_set_affinity(irq, new_value);
		if (!err)
			err = count;
	}

free_cpumask:
	free_cpumask_var(new_value);
	return err;
}
184 
/* Write handler for /proc/irq/<irq>/smp_affinity (bitmask format). */
static ssize_t irq_affinity_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(0, file, buffer, count, pos);
}
190 
/* Write handler for /proc/irq/<irq>/smp_affinity_list (CPU-list format). */
static ssize_t irq_affinity_list_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(1, file, buffer, count, pos);
}
196 
/* Open handler for smp_affinity; irq number is the proc entry's data. */
static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_proc_show, PDE_DATA(inode));
}
201 
/* Open handler for smp_affinity_list; irq number is the proc entry's data. */
static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_list_proc_show, PDE_DATA(inode));
}
206 
/* File operations for /proc/irq/<irq>/smp_affinity. */
static const struct proc_ops irq_affinity_proc_ops = {
	.proc_open	= irq_affinity_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= irq_affinity_proc_write,
};
214 
/* File operations for /proc/irq/<irq>/smp_affinity_list. */
static const struct proc_ops irq_affinity_list_proc_ops = {
	.proc_open	= irq_affinity_list_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= irq_affinity_list_proc_write,
};
222 
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
/* /proc/irq/<irq>/effective_affinity: bitmask format. */
static int irq_effective_aff_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE, m);
}

/* /proc/irq/<irq>/effective_affinity_list: CPU-list format. */
static int irq_effective_aff_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE_LIST, m);
}
#endif
234 
/* /proc/irq/default_smp_affinity: show the default affinity bitmask. */
static int default_affinity_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%*pb\n", cpumask_pr_args(irq_default_affinity));
	return 0;
}
240 
/*
 * Write handler for /proc/irq/default_smp_affinity: parse a user supplied
 * bitmask and install it as irq_default_affinity. Returns @count on
 * success, negative errno on failure.
 */
static ssize_t default_affinity_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos)
{
	cpumask_var_t new_value;
	int err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto out;

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		err = -EINVAL;
		goto out;
	}

	cpumask_copy(irq_default_affinity, new_value);
	err = count;

out:
	free_cpumask_var(new_value);
	return err;
}
271 
/* Open handler for /proc/irq/default_smp_affinity. */
static int default_affinity_open(struct inode *inode, struct file *file)
{
	return single_open(file, default_affinity_show, PDE_DATA(inode));
}
276 
/* File operations for /proc/irq/default_smp_affinity. */
static const struct proc_ops default_affinity_proc_ops = {
	.proc_open	= default_affinity_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= default_affinity_write,
};
284 
irq_node_proc_show(struct seq_file * m,void * v)285 static int irq_node_proc_show(struct seq_file *m, void *v)
286 {
287 	struct irq_desc *desc = irq_to_desc((long) m->private);
288 
289 	seq_printf(m, "%d\n", irq_desc_get_node(desc));
290 	return 0;
291 }
292 #endif
293 
irq_spurious_proc_show(struct seq_file * m,void * v)294 static int irq_spurious_proc_show(struct seq_file *m, void *v)
295 {
296 	struct irq_desc *desc = irq_to_desc((long) m->private);
297 
298 	seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
299 		   desc->irq_count, desc->irqs_unhandled,
300 		   jiffies_to_msecs(desc->last_unhandled));
301 	return 0;
302 }
303 
304 #define MAX_NAMELEN 128
305 
name_unique(unsigned int irq,struct irqaction * new_action)306 static int name_unique(unsigned int irq, struct irqaction *new_action)
307 {
308 	struct irq_desc *desc = irq_to_desc(irq);
309 	struct irqaction *action;
310 	unsigned long flags;
311 	int ret = 1;
312 
313 	raw_spin_lock_irqsave(&desc->lock, flags);
314 	for_each_action_of_desc(desc, action) {
315 		if ((action != new_action) && action->name &&
316 				!strcmp(new_action->name, action->name)) {
317 			ret = 0;
318 			break;
319 		}
320 	}
321 	raw_spin_unlock_irqrestore(&desc->lock, flags);
322 	return ret;
323 }
324 
/*
 * register_handler_proc - create /proc/irq/<irq>/<action-name>/
 *
 * Does nothing when the irq directory does not exist yet, the action
 * already has a directory, the action is nameless, or the name clashes
 * with another action on the same interrupt.
 */
void register_handler_proc(unsigned int irq, struct irqaction *action)
{
	char name [MAX_NAMELEN];
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc->dir || action->dir || !action->name ||
					!name_unique(irq, action))
		return;

	snprintf(name, MAX_NAMELEN, "%s", action->name);

	/* create /proc/irq/1234/handler/ */
	action->dir = proc_mkdir(name, desc->dir);
}
339 
340 #undef MAX_NAMELEN
341 
342 #define MAX_NAMELEN 10
343 
register_irq_proc(unsigned int irq,struct irq_desc * desc)344 void register_irq_proc(unsigned int irq, struct irq_desc *desc)
345 {
346 	static DEFINE_MUTEX(register_lock);
347 	void __maybe_unused *irqp = (void *)(unsigned long) irq;
348 	char name [MAX_NAMELEN];
349 
350 	if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
351 		return;
352 
353 	/*
354 	 * irq directories are registered only when a handler is
355 	 * added, not when the descriptor is created, so multiple
356 	 * tasks might try to register at the same time.
357 	 */
358 	mutex_lock(&register_lock);
359 
360 	if (desc->dir)
361 		goto out_unlock;
362 
363 	sprintf(name, "%d", irq);
364 
365 	/* create /proc/irq/1234 */
366 	desc->dir = proc_mkdir(name, root_irq_dir);
367 	if (!desc->dir)
368 		goto out_unlock;
369 
370 #ifdef CONFIG_SMP
371 	/* create /proc/irq/<irq>/smp_affinity */
372 	proc_create_data("smp_affinity", 0644, desc->dir,
373 			 &irq_affinity_proc_ops, irqp);
374 
375 	/* create /proc/irq/<irq>/affinity_hint */
376 	proc_create_single_data("affinity_hint", 0444, desc->dir,
377 			irq_affinity_hint_proc_show, irqp);
378 
379 	/* create /proc/irq/<irq>/smp_affinity_list */
380 	proc_create_data("smp_affinity_list", 0644, desc->dir,
381 			 &irq_affinity_list_proc_ops, irqp);
382 
383 	proc_create_single_data("node", 0444, desc->dir, irq_node_proc_show,
384 			irqp);
385 # ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
386 	proc_create_single_data("effective_affinity", 0444, desc->dir,
387 			irq_effective_aff_proc_show, irqp);
388 	proc_create_single_data("effective_affinity_list", 0444, desc->dir,
389 			irq_effective_aff_list_proc_show, irqp);
390 # endif
391 #endif
392 	proc_create_single_data("spurious", 0444, desc->dir,
393 			irq_spurious_proc_show, (void *)(long)irq);
394 
395 out_unlock:
396 	mutex_unlock(&register_lock);
397 }
398 
/*
 * unregister_irq_proc - tear down the /proc/irq/<irq>/ directory tree
 *
 * Counterpart to register_irq_proc(): removes each per-irq entry and
 * finally the numbered directory itself.
 */
void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || !desc->dir)
		return;
#ifdef CONFIG_SMP
	remove_proc_entry("smp_affinity", desc->dir);
	remove_proc_entry("affinity_hint", desc->dir);
	remove_proc_entry("smp_affinity_list", desc->dir);
	remove_proc_entry("node", desc->dir);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	remove_proc_entry("effective_affinity", desc->dir);
	remove_proc_entry("effective_affinity_list", desc->dir);
# endif
#endif
	remove_proc_entry("spurious", desc->dir);

	sprintf(name, "%u", irq);
	remove_proc_entry(name, root_irq_dir);
}
420 
421 #undef MAX_NAMELEN
422 
/* Remove the per-action /proc directory created by register_handler_proc(). */
void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
	proc_remove(action->dir);
}
427 
/* Create /proc/irq/default_smp_affinity (SMP builds only). */
static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
	proc_create("irq/default_smp_affinity", 0644, NULL,
		    &default_affinity_proc_ops);
#endif
}
435 
init_irq_proc(void)436 void init_irq_proc(void)
437 {
438 	unsigned int irq;
439 	struct irq_desc *desc;
440 
441 	/* create /proc/irq */
442 	root_irq_dir = proc_mkdir("irq", NULL);
443 	if (!root_irq_dir)
444 		return;
445 
446 	register_default_affinity_proc();
447 
448 	/*
449 	 * Create entries for all existing IRQs.
450 	 */
451 	for_each_irq_desc(irq, desc)
452 		register_irq_proc(irq, desc);
453 }
454 
455 #ifdef CONFIG_GENERIC_IRQ_SHOW
456 
/*
 * Weak default for the arch-specific tail of /proc/interrupts;
 * architectures can override it to append their own lines.
 */
int __weak arch_show_interrupts(struct seq_file *p, int prec)
{
	return 0;
}
461 
462 #ifndef ACTUAL_NR_IRQS
463 # define ACTUAL_NR_IRQS nr_irqs
464 #endif
465 
/*
 * Seq handler for /proc/interrupts: one row per interrupt number (*v),
 * with i == ACTUAL_NR_IRQS emitting the arch-specific trailer.
 */
int show_interrupts(struct seq_file *p, void *v)
{
	/* Width of the irq-number column; computed when row 0 is printed. */
	static int prec;

	unsigned long flags, any_count = 0;
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	struct irq_desc *desc;

	if (i > ACTUAL_NR_IRQS)
		return 0;

	if (i == ACTUAL_NR_IRQS)
		return arch_show_interrupts(p, prec);

	/* print header and calculate the width of the first column */
	if (i == 0) {
		for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
			j *= 10;

		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	/* RCU protects the descriptor lookup against concurrent teardown. */
	rcu_read_lock();
	desc = irq_to_desc(i);
	if (!desc || irq_settings_is_hidden(desc))
		goto outsparse;

	/* any_count != 0 when any online CPU has seen this interrupt. */
	if (desc->kstat_irqs)
		for_each_online_cpu(j)
			any_count |= *per_cpu_ptr(desc->kstat_irqs, j);

	/* Skip rows for inactive interrupts that never fired. */
	if ((!desc->action || irq_desc_is_chained(desc)) && !any_count)
		goto outsparse;

	seq_printf(p, "%*d: ", prec, i);
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", desc->kstat_irqs ?
					*per_cpu_ptr(desc->kstat_irqs, j) : 0);

	/* Stabilize chip/action data while formatting the rest of the row. */
	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->irq_data.chip) {
		if (desc->irq_data.chip->irq_print_chip)
			desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
		else if (desc->irq_data.chip->name)
			seq_printf(p, " %8s", desc->irq_data.chip->name);
		else
			seq_printf(p, " %8s", "-");
	} else {
		seq_printf(p, " %8s", "None");
	}
	if (desc->irq_data.domain)
		seq_printf(p, " %*d", prec, (int) desc->irq_data.hwirq);
	else
		seq_printf(p, " %*s", prec, "");
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
	seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif
	if (desc->name)
		seq_printf(p, "-%-8s", desc->name);

	/* List every action sharing this interrupt line. */
	action = desc->action;
	if (action) {
		seq_printf(p, "  %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
	raw_spin_unlock_irqrestore(&desc->lock, flags);
outsparse:
	rcu_read_unlock();
	return 0;
}
543 #endif
544