#ifndef INTERNAL_IO_WQ_H
#define INTERNAL_IO_WQ_H

#ifdef __GENKSYMS__
/*
 * ANDROID ABI HACK
 *
 * See the big comment in the linux/io_uring.h file for details. This
 * include is not needed for any real functionality, but must be here to
 * preserve the CRC of a number of variables and functions.
 */
#include <linux/io_uring.h>
#endif

#include <linux/refcount.h>

struct io_wq;

enum {
	IO_WQ_WORK_CANCEL	= 1,
	IO_WQ_WORK_HASHED	= 2,
	IO_WQ_WORK_UNBOUND	= 4,
	IO_WQ_WORK_CONCURRENT	= 16,

	IO_WQ_HASH_SHIFT	= 24,	/* upper 8 bits are used for hash key */
};

enum io_wq_cancel {
	IO_WQ_CANCEL_OK,	/* cancelled before started */
	IO_WQ_CANCEL_RUNNING,	/* found and running, cancellation attempted */
	IO_WQ_CANCEL_NOTFOUND,	/* work not found */
};

struct io_wq_work_node {
	struct io_wq_work_node *next;
};

struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};

static inline void wq_list_add_after(struct io_wq_work_node *node,
				     struct io_wq_work_node *pos,
				     struct io_wq_work_list *list)
{
	struct io_wq_work_node *next = pos->next;

	pos->next = node;
	node->next = next;
	if (!next)
		list->last = node;
}

static inline void wq_list_add_tail(struct io_wq_work_node *node,
				    struct io_wq_work_list *list)
{
	node->next = NULL;
	if (!list->first) {
		list->last = node;
		WRITE_ONCE(list->first, node);
	} else {
		list->last->next = node;
		list->last = node;
	}
}

static inline void wq_list_cut(struct io_wq_work_list *list,
			       struct io_wq_work_node *last,
			       struct io_wq_work_node *prev)
{
	/* first in the list, if prev==NULL */
	if (!prev)
		WRITE_ONCE(list->first, last->next);
	else
		prev->next = last->next;

	if (last == list->last)
		list->last = prev;
	last->next = NULL;
}
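
/*
 * Usage sketch for wq_list_cut() (illustrative caller-side code, not part
 * of this header): with prev == NULL it detaches the prefix of the list up
 * to and including last, leaving the cut nodes chained from the old first
 * node.
 *
 *	struct io_wq_work_node *first = list->first;
 *
 *	wq_list_cut(list, last, NULL);
 *	// first..last is now a private NULL-terminated chain
 */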

static inline void wq_list_del(struct io_wq_work_list *list,
			       struct io_wq_work_node *node,
			       struct io_wq_work_node *prev)
{
	wq_list_cut(list, node, prev);
}

#define wq_list_for_each(pos, prv, head) \
	for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)

#define wq_list_empty(list)	(READ_ONCE((list)->first) == NULL)
#define INIT_WQ_LIST(list)	do {		\
	(list)->first = NULL;			\
	(list)->last = NULL;			\
} while (0)
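
/*
 * A minimal usage sketch (assumed caller-side code, not part of this
 * header; req is a hypothetical request): the list is singly linked with
 * a tail pointer, so appends are O(1) and walks visit nodes in queue
 * order.
 *
 *	struct io_wq_work_list list;
 *	struct io_wq_work_node *pos, *prev;
 *
 *	INIT_WQ_LIST(&list);
 *	wq_list_add_tail(&req->work.list, &list);
 *	wq_list_for_each(pos, prev, &list) {
 *		struct io_wq_work *work;
 *
 *		work = container_of(pos, struct io_wq_work, list);
 *		if (work->flags & IO_WQ_WORK_CANCEL) {
 *			wq_list_del(&list, pos, prev);
 *			break;	// the del clears pos->next, so stop the walk
 *		}
 *	}
 */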

struct io_wq_work {
	struct io_wq_work_node list;
	unsigned flags;
};

static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
{
	if (!work->list.next)
		return NULL;

	return container_of(work->list.next, struct io_wq_work, list);
}

typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *);
typedef void (io_wq_work_fn)(struct io_wq_work *);

struct io_wq_hash {
	refcount_t refs;
	unsigned long map;
	struct wait_queue_head wait;
};

static inline void io_wq_put_hash(struct io_wq_hash *hash)
{
	if (refcount_dec_and_test(&hash->refs))
		kfree(hash);
}
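
/*
 * Lifetime sketch (assumed caller-side code): the hash may be shared by
 * several io_wq instances, so each user takes its own reference and drops
 * it with io_wq_put_hash() when done.
 *
 *	refcount_inc(&hash->refs);
 *	data.hash = hash;
 *	...
 *	io_wq_put_hash(hash);
 */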

struct io_wq_data {
	struct io_wq_hash *hash;
	struct task_struct *task;
	io_wq_work_fn *do_work;
	free_work_fn *free_work;
};

struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
void io_wq_exit_start(struct io_wq *wq);
void io_wq_put_and_exit(struct io_wq *wq);

void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_hash_work(struct io_wq_work *work, void *val);
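
/*
 * Setup sketch (illustrative; my_do_work/my_free_work and req are
 * hypothetical, error handling elided):
 *
 *	struct io_wq_data data = {
 *		.hash		= hash,		// refcounted io_wq_hash
 *		.task		= current,
 *		.do_work	= my_do_work,	// an io_wq_work_fn
 *		.free_work	= my_free_work,	// a free_work_fn
 *	};
 *	struct io_wq *wq = io_wq_create(8, &data);	// 8 bounded workers
 *
 *	if (!IS_ERR(wq))
 *		io_wq_enqueue(wq, &req->work);
 */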

int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask);
int io_wq_max_workers(struct io_wq *wq, int *new_count);
bool io_wq_worker_stopped(void);

static inline bool io_wq_is_hashed(struct io_wq_work *work)
{
	return work->flags & IO_WQ_WORK_HASHED;
}
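
/*
 * Sketch (assumed caller-side code; req and its file are hypothetical):
 * hashed work against the same key is serialised, which io_uring uses
 * for example to order buffered writes to one inode.
 *
 *	io_wq_hash_work(&req->work, file_inode(req->file));
 *	WARN_ON_ONCE(!io_wq_is_hashed(&req->work));
 *	io_wq_enqueue(wq, &req->work);
 */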

typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
					void *data, bool cancel_all);
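
/*
 * Cancellation sketch (match_tag, struct my_req and its void *tag field
 * are hypothetical): the callback is invoked for pending and running
 * work; with cancel_all set, every match is cancelled, not just the
 * first.
 *
 *	static bool match_tag(struct io_wq_work *work, void *data)
 *	{
 *		return container_of(work, struct my_req, work)->tag == data;
 *	}
 *
 *	enum io_wq_cancel ret = io_wq_cancel_cb(wq, match_tag, tag, true);
 */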

#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif

static inline bool io_wq_current_is_worker(void)
{
	return in_task() && (current->flags & PF_IO_WORKER) &&
		current->pf_io_worker;
}
#endif /* INTERNAL_IO_WQ_H */