/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/sections.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour.  When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

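/*
 * Illustrative sketch (not part of this header): a hypothetical driver
 * requesting a falling-edge triggered line; the handler and name below
 * are assumptions for illustration only.
 *
 *	err = request_irq(irq, foo_handler, IRQF_TRIGGER_FALLING,
 *			  "foo", foo);
 *
 * Passing no IRQF_TRIGGER_* bits keeps whatever trigger the firmware
 * or platform code already configured.
 */
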
/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend.  Does not guarantee
 *                   that this interrupt will wake the system from a suspended
 *                   state.  See Documentation/power/suspend-and-interrupts.txt
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                resume time.
 * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
 *                interrupt handler after suspending interrupts. For system
 *                wakeup devices, users need to implement wakeup detection in
 *                their interrupt handlers.
 */
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000
#define IRQF_EARLY_RESUME	0x00020000
#define IRQF_COND_SUSPEND	0x00040000

#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)

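/*
 * Illustrative sketch: a typical flag combination.  A level-triggered
 * line serviced by a threaded handler must stay masked until the thread
 * has run, and a line shared by several devices needs a unique dev_id:
 *
 *	unsigned long flags = IRQF_SHARED | IRQF_ONESHOT;
 */
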
/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @flags:	flags (see IRQF_* above)
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @secondary:	pointer to secondary irqaction (force threading)
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t		handler;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	struct irqaction	*secondary;
	unsigned int		irq;
	unsigned int		flags;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;

extern irqreturn_t no_action(int cpl, void *dev_id);

/*
 * If a (PCI) device interrupt is not connected we set dev->irq to
 * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
 * can distinguish that case from other error returns.
 *
 * 0x80000000 is guaranteed to be outside the available range of interrupts
 * and easy to distinguish from other possible incorrect values.
 */
#define IRQ_NOTCONNECTED	(1U << 31)

extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}

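/*
 * Illustrative sketch (hypothetical driver, not part of this header):
 * a primary handler that only checks whether its device raised the
 * shared line and defers the real work to the threaded handler.
 *
 *	static irqreturn_t foo_quick_check(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))
 *			return IRQ_NONE;
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		foo_process_events(foo);	(may sleep here)
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(irq, foo_quick_check, foo_thread_fn,
 *				   IRQF_SHARED | IRQF_ONESHOT, "foo", foo);
 *	...
 *	free_irq(irq, foo);
 */
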
extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id);

extern void free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}

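/*
 * Illustrative sketch: the devm_ variant ties the irq's lifetime to the
 * struct device, so a hypothetical probe() needs no explicit free_irq()
 * on its error and remove paths:
 *
 *	err = devm_request_irq(&pdev->dev, irq, foo_handler, 0,
 *			       dev_name(&pdev->dev), foo);
 *	if (err)
 *		return err;
 */
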
extern int __must_check
devm_request_any_context_irq(struct device *dev, unsigned int irq,
		 irq_handler_t handler, unsigned long irqflags,
		 const char *devname, void *dev_id);

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);

/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern bool disable_hardirq(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern bool irq_percpu_is_enabled(unsigned int irq);
extern void irq_wake_thread(unsigned int irq, void *dev_id);

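/*
 * Illustrative sketch: disable_irq() waits for running handlers to
 * finish, so a hypothetical driver can quiesce its line around a
 * reconfiguration step:
 *
 *	disable_irq(irq);
 *	foo_reprogram(foo);
 *	enable_irq(irq);
 *
 * disable_irq_nosync() returns without waiting and is the variant that
 * is safe to call from within the interrupt handler itself.
 */
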
/* The following two functions are for the core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);

/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:		Interrupt to which notification applies
 * @kref:		Reference count, for internal use
 * @work:		Work item, for internal use
 * @notify:		Function to be called on change.  This will be
 *			called in process context.
 * @release:		Function to be called on release.  This will be
 *			called in process context.  Once registered, the
 *			structure must only be freed when this function is
 *			called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

#if defined(CONFIG_SMP)

extern cpumask_var_t irq_default_affinity;

/* Internal implementation. Use the helpers below */
extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
			      bool force);

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Fails if cpumask does not contain an online CPU
 */
static inline int
irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, false);
}

/**
 * irq_force_affinity - Force the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Same as irq_set_affinity, but without checking the mask against
 * online cpus.
 *
 * Solely for low level cpu hotplug code, where we need to make per
 * cpu interrupts affine before the cpu becomes online.
 */
static inline int
irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, true);
}

extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

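/*
 * Illustrative sketch: registering for affinity-change notifications.
 * The callbacks are hypothetical; notify() runs in process context and
 * release() gates when the structure may be freed.
 *
 *	foo->affinity_notify.notify = foo_affinity_changed;
 *	foo->affinity_notify.release = foo_affinity_release;
 *	err = irq_set_affinity_notifier(irq, &foo->affinity_notify);
 *	...
 *	irq_set_affinity_notifier(irq, NULL);	(unregister)
 */
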
struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity, int nvec);
int irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return 0;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}

static inline int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	return 0;
}

static inline struct cpumask *
irq_create_affinity_masks(const struct cpumask *affinity, int nvec)
{
	return NULL;
}

static inline int
irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec)
{
	return maxvec;
}

#endif /* CONFIG_SMP */

/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs where it is known
 * that a particular irq context is disabled and is the only
 * irq-context user of a lock, so that it's safe to take the lock
 * in the irq-disabled section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}

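/*
 * Illustrative sketch: arming an interrupt as a system wakeup source
 * across suspend in a hypothetical driver's PM callbacks:
 *
 *	suspend:	enable_irq_wake(foo->irq);
 *	resume:		disable_irq_wake(foo->irq);
 *
 * The calls nest, so every enable_irq_wake() needs a matching
 * disable_irq_wake().
 */
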
/*
 * irq_get_irqchip_state/irq_set_irqchip_state specific flags
 */
enum irqchip_irq_state {
	IRQCHIP_STATE_PENDING,		/* Is interrupt pending? */
	IRQCHIP_STATE_ACTIVE,		/* Is interrupt in progress? */
	IRQCHIP_STATE_MASKED,		/* Is interrupt masked? */
	IRQCHIP_STATE_LINE_LEVEL,	/* Is IRQ line high? */
};

extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool *state);
extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool state);

#ifdef CONFIG_IRQ_FORCED_THREADING
extern bool force_irqthreads;
#else
#define force_irqthreads	(0)
#endif

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while (0)
#endif

/* PLEASE avoid allocating new softirqs unless you really need
   _really_ high frequency threaded job scheduling. For almost all
   purposes tasklets are more than enough. E.g. all serial device BHs
   et al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	IRQ_POLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
			    numbering. Sigh! */
	RCU_SOFTIRQ,    /* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};

#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))

/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern const char * const softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);

#ifdef __ARCH_HAS_DO_SOFTIRQ
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
{
	__do_softirq();
}
#endif

extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);

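/*
 * Illustrative sketch: how a softirq is wired up (adding new ones is
 * discouraged, see the comment above the enum).  An action is
 * registered once at init time and raised whenever there is work:
 *
 *	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
 *	...
 *	raise_softirq(TASKLET_SOFTIRQ);
 */
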
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}

/* Tasklets --- multithreaded analogue of BHs.

   Main feature distinguishing them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   Main feature distinguishing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution is still not
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not
     wrt other tasklets. If a client needs some inter-task synchronization,
     it must provide it with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }


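/*
 * Illustrative sketch: a statically declared tasklet scheduled from a
 * hardirq handler.  Function and device names are hypothetical.
 *
 *	static void foo_do_tasklet(unsigned long data)
 *	{
 *		struct foo_device *foo = (struct foo_device *)data;
 *
 *		foo_process_fifo(foo);
 *	}
 *	static DECLARE_TASKLET(foo_tasklet, foo_do_tasklet,
 *			       (unsigned long)&foo_dev);
 *
 *	From the interrupt handler:
 *	tasklet_schedule(&foo_tasklet);
 */
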
enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);

/*
 * This version avoids touching any other tasklets. Needed for kmemcheck
 * in order not to take any page faults while enqueueing this tasklet;
 * consider VERY carefully whether you really need this or
 * tasklet_hi_schedule()...
 */
static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule_first(t);
}


static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);

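/*
 * Illustrative sketch: per-device tasklets are initialized at runtime,
 * e.g. from a hypothetical probe(), and must be killed before the
 * device goes away:
 *
 *	tasklet_init(&foo->tasklet, foo_do_tasklet, (unsigned long)foo);
 *	...
 *	tasklet_kill(&foo->tasklet);
 */
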
struct tasklet_hrtimer {
	struct hrtimer		timer;
	struct tasklet_struct	tasklet;
	enum hrtimer_restart	(*function)(struct hrtimer *);
};

extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);

static inline
void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			   const enum hrtimer_mode mode)
{
	hrtimer_start(&ttimer->timer, time, mode);
}

static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}

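/*
 * Illustrative sketch: arming a tasklet_hrtimer, whose callback then
 * runs in tasklet (softirq) context once the timer has fired in hardirq
 * context.  The callback name is hypothetical.
 *
 *	tasklet_hrtimer_init(&foo->ttimer, foo_timer_fn,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&foo->ttimer, ms_to_ktime(10),
 *			      HRTIMER_MODE_REL);
 *	...
 *	tasklet_hrtimer_cancel(&foo->ttimer);
 */
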
/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irq's.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

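/*
 * Illustrative sketch of the recipe above; the device-specific helpers
 * are hypothetical:
 *
 *	foo_mask_irq(foo);
 *	irqs = probe_irq_on();
 *	foo_unmask_irq(foo);
 *	foo_trigger_test_irq(foo);
 *	udelay(100);
 *	irq = probe_irq_off(irqs);
 *	foo_ack_irq(foo);
 *	if (irq > 0)
 *		foo->irq = irq;
 */
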
#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

/*
 * We want to know which function is an entrypoint of a hardirq or a softirq.
 */
#define __irq_entry		 __attribute__((__section__(".irqentry.text")))
#define __softirq_entry  \
	__attribute__((__section__(".softirqentry.text")))

#endif