#ifndef _LINUX_CLOSURE_H
#define _LINUX_CLOSURE_H

#include <linux/llist.h>
#include <linux/sched.h>
#include <linux/workqueue.h>

/*
 * Closure is perhaps the most overused and abused term in computer science, but
 * since I've been unable to come up with anything better you're stuck with it
 * again.
 *
 * What are closures?
 *
 * They embed a refcount. The basic idea is they count "things that are in
 * progress" - in flight bios, some other thread that's doing something else -
 * anything you might want to wait on.
 *
 * The refcount may be manipulated with closure_get() and closure_put().
 * closure_put() is where many of the interesting things happen, when it causes
 * the refcount to go to 0.
 *
 * Closures can be used to wait on things both synchronously and asynchronously,
 * and synchronous and asynchronous use can be mixed without restriction. To
 * wait synchronously, use closure_sync() - you will sleep until your closure's
 * refcount hits 1.
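 *
 * A minimal synchronous sketch (foo_endio() is illustrative, and is assumed to
 * drop its ref with closure_put()):
 *
 *	struct closure cl;
 *
 *	closure_init_stack(&cl);
 *
 *	closure_get(&cl);
 *	bio->bi_endio	= foo_endio;
 *	bio_submit(bio);
 *
 *	closure_sync(&cl);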
 *
 * To wait asynchronously, use
 *   continue_at(cl, next_function, workqueue);
 *
 * passing it, as you might expect, the function to run when nothing is pending
 * and the workqueue to run that function out of.
 *
 * continue_at() also, critically, is a macro that returns from the calling
 * function. There's good reason for this.
 *
 * To safely use closures asynchronously, they must always have a refcount
 * while they are running, owned by the thread that is running them. Otherwise,
 * suppose you submit some bios and wish to have a function run when they all
 * complete:
 *
 * foo_endio(struct bio *bio, int error)
 * {
 *	closure_put(cl);
 * }
 *
 * closure_init(cl);
 *
 * do_stuff();
 * closure_get(cl);
 * bio1->bi_endio	= foo_endio;
 * bio_submit(bio1);
 *
 * do_more_stuff();
 * closure_get(cl);
 * bio2->bi_endio	= foo_endio;
 * bio_submit(bio2);
 *
 * continue_at(cl, complete_some_read, system_wq);
 *
 * If the closure's refcount started at 0, complete_some_read() could run
 * before the second bio was submitted - which is almost always not what you
 * want! More importantly, it wouldn't be possible to say whether the original
 * thread or complete_some_read()'s thread owned the closure - and whatever
 * state it was associated with!
 *
 * So, closure_init() initializes a closure's refcount to 1 - and when a
 * closure_fn is run, the refcount will be reset to 1 first.
 *
 * Then, the rule is - if you got the refcount with closure_get(), release it
 * with closure_put() (i.e., in a bio->bi_endio function). If you have a
 * refcount on a closure because you called closure_init() or you were run out
 * of a closure - _always_ use continue_at(). Doing so consistently will help
 * eliminate an entire class of particularly pernicious races.
 *
 * Lastly, you might have a wait list dedicated to a specific event, and have no
 * need for specifying the condition - you just want to wait until someone runs
 * closure_wake_up() on the appropriate wait list. In that case, just use
 * closure_wait(). It will return either true or false, depending on whether the
 * closure was already on a wait list or not - a closure can only be on one wait
 * list at a time.
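 *
 * For example (a sketch - the waitlist and retry function names are made up):
 *
 *	static struct closure_waitlist need_buckets;
 *
 *	if (no_space) {
 *		closure_wait(&need_buckets, cl);
 *		continue_at(cl, try_again, wq);
 *	}
 *
 * and, from whatever context makes space available:
 *
 *	closure_wake_up(&need_buckets);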
 *
 * Parents:
 *
 * closure_init() takes two arguments - the closure to initialize, and a
 * (possibly NULL) parent.
 *
 * If parent is non-null, the new closure will hold a refcount on it for its
 * lifetime; a closure is considered to be "finished" when its refcount hits 0
 * and the function to run is null. Hence
 *
 * continue_at(cl, NULL, NULL);
 *
 * returns up the (spaghetti) stack of closures, precisely like normal return
 * returns up the C stack. continue_at() with a non-null fn is better thought
 * of as doing a tail call.
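 *
 * A sketch of returning to a parent (the names here are illustrative):
 *
 *	closure_init(&child->cl, &parent->cl);	// child takes a ref on parent
 *	continue_at(&child->cl, child_work, wq);
 *
 *	static void child_work(struct closure *cl)
 *	{
 *		...
 *		closure_return(cl);	// finishes cl; its ref on the parent
 *	}				// is then dropped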
 *
 * All this implies that a closure should typically be embedded in a particular
 * struct (which its refcount will normally control the lifetime of), and that
 * struct can very much be thought of as a stack frame.
 */

struct closure;
typedef void (closure_fn) (struct closure *);

struct closure_waitlist {
	struct llist_head	list;
};

enum closure_state {
	/*
	 * CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by
	 * the thread that owns the closure, and cleared by the thread that's
	 * waking up the closure.
	 *
	 * CLOSURE_SLEEPING: Must be set before a thread uses a closure to sleep
	 * - indicates that cl->task is valid and closure_put() may wake it up.
	 * Only set or cleared by the thread that owns the closure.
	 *
	 * The rest are for debugging and don't affect behaviour:
	 *
	 * CLOSURE_RUNNING: Set when a closure is running (i.e. by
	 * closure_init() and when closure_put() runs the next function), and
	 * must be cleared before remaining hits 0. Primarily to help guard
	 * against incorrect usage and accidentally transferring references.
	 * continue_at() and closure_return() clear it for you; if you're doing
	 * something unusual you can use closure_set_dead(), which also helps
	 * annotate where references are being transferred.
	 *
	 * CLOSURE_STACK: Sanity check - remaining should never hit 0 on a
	 * closure with this flag set.
	 */

	CLOSURE_BITS_START	= (1 << 23),
	CLOSURE_DESTRUCTOR	= (1 << 23),
	CLOSURE_WAITING		= (1 << 25),
	CLOSURE_SLEEPING	= (1 << 27),
	CLOSURE_RUNNING		= (1 << 29),
	CLOSURE_STACK		= (1 << 31),
};

#define CLOSURE_GUARD_MASK					\
	((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_SLEEPING|	\
	  CLOSURE_RUNNING|CLOSURE_STACK) << 1)

#define CLOSURE_REMAINING_MASK		(CLOSURE_BITS_START - 1)
#define CLOSURE_REMAINING_INITIALIZER	(1|CLOSURE_RUNNING)
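
/*
 * "remaining" packs the refcount and the flags above into one atomic_t: the
 * low 23 bits (CLOSURE_REMAINING_MASK) are the refcount proper, the flags sit
 * in alternating high bits, and the unused bit above each flag
 * (CLOSURE_GUARD_MASK) acts as a guard, so an overflow or underflow of one
 * field can be caught rather than silently corrupting its neighbour.
 */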

struct closure {
	union {
		struct {
			struct workqueue_struct *wq;
			struct task_struct	*task;
			struct llist_node	list;
			closure_fn		*fn;
		};
		struct work_struct	work;
	};

	struct closure		*parent;

	atomic_t		remaining;

#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
#define CLOSURE_MAGIC_DEAD	0xc054dead
#define CLOSURE_MAGIC_ALIVE	0xc054a11e

	unsigned		magic;
	struct list_head	all;
	unsigned long		ip;
	unsigned long		waiting_on;
#endif
};

void closure_sub(struct closure *cl, int v);
void closure_put(struct closure *cl);
void __closure_wake_up(struct closure_waitlist *list);
bool closure_wait(struct closure_waitlist *list, struct closure *cl);
void closure_sync(struct closure *cl);

#ifdef CONFIG_BCACHE_CLOSURES_DEBUG

void closure_debug_init(void);
void closure_debug_create(struct closure *cl);
void closure_debug_destroy(struct closure *cl);

#else

static inline void closure_debug_init(void) {}
static inline void closure_debug_create(struct closure *cl) {}
static inline void closure_debug_destroy(struct closure *cl) {}

#endif

static inline void closure_set_ip(struct closure *cl)
{
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
	cl->ip = _THIS_IP_;
#endif
}

static inline void closure_set_ret_ip(struct closure *cl)
{
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
	cl->ip = _RET_IP_;
#endif
}

static inline void closure_set_waiting(struct closure *cl, unsigned long f)
{
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
	cl->waiting_on = f;
#endif
}

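/*
 * Undo __closure_start_sleep(): mark the task runnable again and clear
 * CLOSURE_SLEEPING so closure_put() no longer tries to wake cl->task.
 */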
static inline void __closure_end_sleep(struct closure *cl)
{
	__set_current_state(TASK_RUNNING);

	if (atomic_read(&cl->remaining) & CLOSURE_SLEEPING)
		atomic_sub(CLOSURE_SLEEPING, &cl->remaining);
}

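/*
 * Prepare to sleep on @cl: record the current task and set CLOSURE_SLEEPING so
 * that closure_put() knows cl->task is valid and may wake it.
 */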
static inline void __closure_start_sleep(struct closure *cl)
{
	closure_set_ip(cl);
	cl->task = current;
	set_current_state(TASK_UNINTERRUPTIBLE);

	if (!(atomic_read(&cl->remaining) & CLOSURE_SLEEPING))
		atomic_add(CLOSURE_SLEEPING, &cl->remaining);
}

static inline void closure_set_stopped(struct closure *cl)
{
	atomic_sub(CLOSURE_RUNNING, &cl->remaining);
}

static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
				  struct workqueue_struct *wq)
{
	BUG_ON(object_is_on_stack(cl));
	closure_set_ip(cl);
	cl->fn = fn;
	cl->wq = wq;
	/* order the stores above before the atomic_dec() in closure_put() */
	smp_mb__before_atomic();
}

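/*
 * Run the closure's fn: queue cl->work on cl->wq if a workqueue was given,
 * otherwise call the fn directly. INIT_WORK(&cl->work, cl->work.func) relies
 * on the union in struct closure - cl->fn and cl->work.func alias each other.
 */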
static inline void closure_queue(struct closure *cl)
{
	struct workqueue_struct *wq = cl->wq;

	if (wq) {
		INIT_WORK(&cl->work, cl->work.func);
		BUG_ON(!queue_work(wq, &cl->work));
	} else
		cl->fn(cl);
}

/**
 * closure_get - increment a closure's refcount
 * @cl: closure to get a ref on
 */
static inline void closure_get(struct closure *cl)
{
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
	BUG_ON((atomic_inc_return(&cl->remaining) &
		CLOSURE_REMAINING_MASK) <= 1);
#else
	atomic_inc(&cl->remaining);
#endif
}

/**
 * closure_init - Initialize a closure, setting the refcount to 1
 * @cl: closure to initialize
 * @parent: parent of the new closure. cl will take a refcount on it for its
 *	    lifetime; may be NULL.
 */
static inline void closure_init(struct closure *cl, struct closure *parent)
{
	memset(cl, 0, sizeof(struct closure));
	cl->parent = parent;
	if (parent)
		closure_get(parent);

	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);

	closure_debug_create(cl);
	closure_set_ip(cl);
}

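/**
 * closure_init_stack - Initialize a closure on the stack
 * @cl: closure to initialize
 *
 * CLOSURE_STACK makes the debug checks complain if the refcount ever hits 0:
 * an on-stack closure is meant to be waited on with closure_sync() rather than
 * finished with continue_at()/closure_return().
 */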
static inline void closure_init_stack(struct closure *cl)
{
	memset(cl, 0, sizeof(struct closure));
	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|CLOSURE_STACK);
}

/**
 * closure_wake_up - wake up all closures on a wait list.
 * @list: wait list to wake up
 */
static inline void closure_wake_up(struct closure_waitlist *list)
{
	smp_mb();
	__closure_wake_up(list);
}

/**
 * continue_at - jump to another function with barrier
 *
 * After @cl is no longer waiting on anything (i.e. all outstanding refs have
 * been dropped with closure_put()), it will resume execution at @fn running out
 * of @wq (or, if @wq is NULL, @fn will be called by closure_put() directly).
 *
 * NOTE: This macro expands to a return in the calling function!
 *
 * This is because after calling continue_at() you no longer have a ref on @cl,
 * and whatever @cl owns may be freed out from under you - a running closure fn
 * has a ref on its own closure which continue_at() drops.
 */
#define continue_at(_cl, _fn, _wq)					\
do {									\
	set_closure_fn(_cl, _fn, _wq);					\
	closure_sub(_cl, CLOSURE_RUNNING + 1);				\
	return;								\
} while (0)
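
/*
 * Example (a sketch - start_read(), do_io() and read_done() are illustrative):
 *
 *	static void read_done(struct closure *cl)
 *	{
 *		// all refs taken by do_io() have been put; finish up
 *		closure_return(cl);
 *	}
 *
 *	static void start_read(struct closure *cl)
 *	{
 *		do_io(cl);	// closure_get()s for each outstanding io,
 *				// closure_put()s on each completion
 *		continue_at(cl, read_done, system_wq);
 *	}
 */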

/**
 * closure_return - finish execution of a closure
 *
 * This is used to indicate that @cl is finished: when all outstanding refs on
 * @cl have been dropped @cl's ref on its parent closure (as passed to
 * closure_init()) will be dropped, if one was specified - thus this can be
 * thought of as returning to the parent closure.
 */
#define closure_return(_cl)	continue_at((_cl), NULL, NULL)

/**
 * continue_at_nobarrier - jump to another function without barrier
 *
 * Causes @fn to be executed out of @cl, in @wq context (or called directly if
 * @wq is NULL).
 *
 * NOTE: like continue_at(), this macro expands to a return in the caller!
 *
 * The ref the caller of continue_at_nobarrier() had on @cl is now owned by @fn,
 * thus it's not safe to touch anything protected by @cl after a
 * continue_at_nobarrier().
 */
#define continue_at_nobarrier(_cl, _fn, _wq)				\
do {									\
	set_closure_fn(_cl, _fn, _wq);					\
	closure_queue(_cl);						\
	return;								\
} while (0)

/**
 * closure_return_with_destructor - finish execution of a closure, with
 *				    destructor
 *
 * Works like closure_return(), except @destructor will be called when all
 * outstanding refs on @cl have been dropped; @destructor may be used to safely
 * free the memory occupied by @cl, and it is called with the ref on the parent
 * closure still held - so @destructor could safely return an item to a
 * freelist protected by @cl's parent.
 */
#define closure_return_with_destructor(_cl, _destructor)		\
do {									\
	set_closure_fn(_cl, _destructor, NULL);				\
	closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1);	\
	return;								\
} while (0)
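
/*
 * Note the single closure_sub(): subtracting CLOSURE_RUNNING -
 * CLOSURE_DESTRUCTOR + 1 clears CLOSURE_RUNNING, sets CLOSURE_DESTRUCTOR and
 * drops the running ref in one atomic operation.
 */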

/**
 * closure_call - execute @fn out of a new, uninitialized closure
 *
 * Typically used when running out of one closure, and we want to run @fn
 * asynchronously out of a new closure - @parent will then wait for @cl to
 * finish.
 */
static inline void closure_call(struct closure *cl, closure_fn fn,
				struct workqueue_struct *wq,
				struct closure *parent)
{
	closure_init(cl, parent);
	continue_at_nobarrier(cl, fn, wq);
}

#endif /* _LINUX_CLOSURE_H */