#ifndef _LINUX_IRQ_WORK_H
#define _LINUX_IRQ_WORK_H

#include <linux/llist.h>

/*
 * An entry can be in one of four states:
 *
 * free	     NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 */

/*
 * Flag bits stored in irq_work::flags; the PENDING|BUSY pair encodes the
 * four-state machine documented at the top of this file.
 */
#define IRQ_WORK_PENDING	1UL	/* queued, callback not yet run */
#define IRQ_WORK_BUSY		2UL	/* callback in progress */
#define IRQ_WORK_FLAGS		3UL	/* IRQ_WORK_PENDING | IRQ_WORK_BUSY */
#define IRQ_WORK_LAZY		4UL	/* Doesn't want IPI, wait for tick */

20 struct irq_work {
21 unsigned long flags;
22 struct llist_node llnode;
23 void (*func)(struct irq_work *);
24 };
25
26 static inline
init_irq_work(struct irq_work * work,void (* func)(struct irq_work *))27 void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
28 {
29 work->flags = 0;
30 work->func = func;
31 }
32
/* Static definition of an irq_work item; flags are implicitly zeroed (free). */
#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), }

35 bool irq_work_queue(struct irq_work *work);
36
37 #ifdef CONFIG_SMP
38 bool irq_work_queue_on(struct irq_work *work, int cpu);
39 #endif
40
41 void irq_work_tick(void);
42 void irq_work_sync(struct irq_work *work);
43
#ifdef CONFIG_IRQ_WORK
#include <asm/irq_work.h>

/* Run all pending irq_work items on this CPU. */
void irq_work_run(void);
/* Does this CPU have pending irq_work that needs the tick to stay active? */
bool irq_work_needs_cpu(void);
#else
/* !CONFIG_IRQ_WORK: no work can ever be pending, so these are no-ops. */
static inline bool irq_work_needs_cpu(void) { return false; }
static inline void irq_work_run(void) { }
#endif

#endif /* _LINUX_IRQ_WORK_H */