/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 * Copyright (C) 2014 Fujitsu. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
#include "ctree.h"

#define WORK_DONE_BIT 0
#define WORK_ORDER_DONE_BIT 1
#define WORK_HIGH_PRIO_BIT 2

#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)

struct __btrfs_workqueue {
	struct workqueue_struct *normal_wq;
	/* List head pointing to the ordered work list */
	struct list_head ordered_list;

	/* Spinlock for ordered_list */
	spinlock_t list_lock;

	/* Thresholding related variables */
	atomic_t pending;

	/* Upper limit of concurrently running workers */
	int limit_active;

	/* Current number of concurrently running workers */
	int current_active;

	/* Threshold to change current_active */
	int thresh;
	unsigned int count;
	spinlock_t thres_lock;
};

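/*
 * A btrfs_workqueue is a pair of internal queues: @normal is always
 * present, while @high is only allocated for workqueues created with
 * WQ_HIGHPRI and serves work items marked by
 * btrfs_set_work_high_priority().
 */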
struct btrfs_workqueue {
	struct __btrfs_workqueue *normal;
	struct __btrfs_workqueue *high;
};

static void normal_work_helper(struct btrfs_work *work);

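/*
 * Generate one exported wrapper per work type.  Each wrapper has its own
 * function address, which keeps the different work types distinguishable
 * (e.g. in stack traces), while they all share normal_work_helper() as
 * the common body.
 */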
#define BTRFS_WORK_HELPER(name)					\
void btrfs_##name(struct work_struct *arg)			\
{								\
	struct btrfs_work *work = container_of(arg, struct btrfs_work,	\
					       normal_work);	\
	normal_work_helper(work);				\
}

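/*
 * Return true if the normal queue has accumulated more than twice its
 * threshold of pending work.  Callers can use this as a hint to back
 * off instead of flooding an already congested queue.
 */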
bool btrfs_workqueue_normal_congested(struct btrfs_workqueue *wq)
{
	/*
	 * We could compare wq->normal->pending with num_online_cpus()
	 * to support the "thresh == NO_THRESHOLD" case, but it requires
	 * moving up atomic_inc/dec in thresh_queue/exec_hook. Let's
	 * postpone it until someone needs the support of that case.
	 */
	if (wq->normal->thresh == NO_THRESHOLD)
		return false;

	return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
}

BTRFS_WORK_HELPER(worker_helper);
BTRFS_WORK_HELPER(delalloc_helper);
BTRFS_WORK_HELPER(flush_delalloc_helper);
BTRFS_WORK_HELPER(cache_helper);
BTRFS_WORK_HELPER(submit_helper);
BTRFS_WORK_HELPER(fixup_helper);
BTRFS_WORK_HELPER(endio_helper);
BTRFS_WORK_HELPER(endio_meta_helper);
BTRFS_WORK_HELPER(endio_meta_write_helper);
BTRFS_WORK_HELPER(endio_raid56_helper);
BTRFS_WORK_HELPER(endio_repair_helper);
BTRFS_WORK_HELPER(rmw_helper);
BTRFS_WORK_HELPER(endio_write_helper);
BTRFS_WORK_HELPER(freespace_write_helper);
BTRFS_WORK_HELPER(delayed_meta_helper);
BTRFS_WORK_HELPER(readahead_helper);
BTRFS_WORK_HELPER(qgroup_rescan_helper);
BTRFS_WORK_HELPER(extent_refs_helper);
BTRFS_WORK_HELPER(scrub_helper);
BTRFS_WORK_HELPER(scrubwrc_helper);
BTRFS_WORK_HELPER(scrubnc_helper);
BTRFS_WORK_HELPER(scrubparity_helper);

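/*
 * Allocate one internal workqueue.
 *
 * @name:	 name suffix for the kernel workqueue
 * @flags:	 flags passed on to alloc_workqueue()
 * @limit_active: upper bound on the number of active workers
 * @thresh:	 0 selects DFT_THRESHOLD; any value below DFT_THRESHOLD
 *		 disables thresholding, otherwise the queue starts with a
 *		 single active worker and grows on demand up to
 *		 @limit_active.
 */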
static struct __btrfs_workqueue *
__btrfs_alloc_workqueue(const char *name, unsigned int flags, int limit_active,
			int thresh)
{
	struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);

	if (!ret)
		return NULL;

	ret->limit_active = limit_active;
	atomic_set(&ret->pending, 0);
	if (thresh == 0)
		thresh = DFT_THRESHOLD;
	/* For low threshold, disabling threshold is a better choice */
	if (thresh < DFT_THRESHOLD) {
		ret->current_active = limit_active;
		ret->thresh = NO_THRESHOLD;
	} else {
		/*
		 * For threshold-able wq, let its concurrency grow on demand.
		 * Use minimal max_active at alloc time to reduce resource
		 * usage.
		 */
		ret->current_active = 1;
		ret->thresh = thresh;
	}

	if (flags & WQ_HIGHPRI)
		ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
						 ret->current_active, "btrfs",
						 name);
	else
		ret->normal_wq = alloc_workqueue("%s-%s", flags,
						 ret->current_active, "btrfs",
						 name);
	if (!ret->normal_wq) {
		kfree(ret);
		return NULL;
	}

	INIT_LIST_HEAD(&ret->ordered_list);
	spin_lock_init(&ret->list_lock);
	spin_lock_init(&ret->thres_lock);
	trace_btrfs_workqueue_alloc(ret, name, flags & WQ_HIGHPRI);
	return ret;
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);

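/*
 * Allocate the externally visible workqueue pair.  The normal queue is
 * always created; when @flags contains WQ_HIGHPRI a second, high
 * priority queue is created as well.
 */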
struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
					      unsigned int flags,
					      int limit_active,
					      int thresh)
{
	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);

	if (!ret)
		return NULL;

	ret->normal = __btrfs_alloc_workqueue(name, flags & ~WQ_HIGHPRI,
					      limit_active, thresh);
	if (!ret->normal) {
		kfree(ret);
		return NULL;
	}

	if (flags & WQ_HIGHPRI) {
		ret->high = __btrfs_alloc_workqueue(name, flags, limit_active,
						    thresh);
		if (!ret->high) {
			__btrfs_destroy_workqueue(ret->normal);
			kfree(ret);
			return NULL;
		}
	}
	return ret;
}

/*
 * Hook for the thresholding mechanism, called from btrfs_queue_work().
 * This hook WILL be called in IRQ handler context, so
 * workqueue_set_max_active() MUST NOT be called from here.
 */
static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
{
	if (wq->thresh == NO_THRESHOLD)
		return;
	atomic_inc(&wq->pending);
}

/*
 * Hook for the thresholding mechanism, called before executing the work.
 * This hook is called in kthread context, so it is safe to call
 * workqueue_set_max_active() here.
 */
static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
{
	int new_current_active;
	long pending;
	int need_change = 0;

	if (wq->thresh == NO_THRESHOLD)
		return;

	atomic_dec(&wq->pending);
	spin_lock(&wq->thres_lock);
	/*
	 * Use wq->count to limit the calling frequency of
	 * workqueue_set_max_active.
	 */
	wq->count++;
	wq->count %= (wq->thresh / 4);
	if (!wq->count)
		goto out;
	new_current_active = wq->current_active;

	/*
	 * pending may change later, but an approximate value is good
	 * enough to calculate new_current_active.
	 */
	pending = atomic_read(&wq->pending);
	if (pending > wq->thresh)
		new_current_active++;
	if (pending < wq->thresh / 2)
		new_current_active--;
	new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
	if (new_current_active != wq->current_active) {
		need_change = 1;
		wq->current_active = new_current_active;
	}
out:
	spin_unlock(&wq->thres_lock);

	if (need_change)
		workqueue_set_max_active(wq->normal_wq, wq->current_active);
}

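/*
 * Run ordered_func for the items at the head of the ordered list whose
 * normal work has already completed (WORK_DONE_BIT is set), strictly in
 * queue order.  Each item stays on the list while its ordered_func runs
 * so that later items cannot have theirs called first.
 */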
static void run_ordered_work(struct __btrfs_workqueue *wq)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
		work = list_entry(list->next, struct btrfs_work,
				  ordered_list);
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;
		/*
		 * Orders all subsequent loads after reading WORK_DONE_BIT,
		 * paired with the smp_mb__before_atomic in normal_work_helper;
		 * this guarantees that the ordered function will see all
		 * updates from the ordinary work function.
		 */
		smp_rmb();

		/*
		 * We are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns.
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		trace_btrfs_ordered_sched(work);
		spin_unlock_irqrestore(lock, flags);
		work->ordered_func(work);

		/* Now take the lock again and drop our item from the list. */
		spin_lock_irqsave(lock, flags);
		list_del(&work->ordered_list);
		spin_unlock_irqrestore(lock, flags);

		/*
		 * We don't want to call the ordered free functions with
		 * the lock held.
		 */
		work->ordered_free(work);
		trace_btrfs_all_work_done(work);
	}
	spin_unlock_irqrestore(lock, flags);
}

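/* Common body shared by all the btrfs_*_helper() wrappers above. */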
static void normal_work_helper(struct btrfs_work *work)
{
	struct __btrfs_workqueue *wq;
	int need_order = 0;

	/*
	 * We should not touch things inside work in the following cases:
	 * 1) after work->func() if it has no ordered_free
	 *    Since the struct is freed in work->func().
	 * 2) after setting WORK_DONE_BIT
	 *    The work may be freed in other threads almost instantly.
	 * So we save the needed things here.
	 */
	if (work->ordered_func)
		need_order = 1;
	wq = work->wq;

	trace_btrfs_work_sched(work);
	thresh_exec_hook(wq);
	work->func(work);
	if (need_order) {
		/*
		 * Ensures all memory accesses done in the work function are
		 * ordered before setting the WORK_DONE_BIT, ensuring that the
		 * thread which executes the ordered work sees them.
		 * Pairs with the smp_rmb in run_ordered_work.
		 */
		smp_mb__before_atomic();
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq);
	}
	if (!need_order)
		trace_btrfs_all_work_done(work);
}

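/*
 * Initialize a work item.  @uniq_func must be the generated
 * btrfs_*_helper() wrapper matching the work type; @ordered_func and
 * @ordered_free may be NULL when ordered execution is not needed.
 */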
void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
		     btrfs_func_t func,
		     btrfs_func_t ordered_func,
		     btrfs_func_t ordered_free)
{
	work->func = func;
	work->ordered_func = ordered_func;
	work->ordered_free = ordered_free;
	INIT_WORK(&work->normal_work, uniq_func);
	INIT_LIST_HEAD(&work->ordered_list);
	work->flags = 0;
}

static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
				      struct btrfs_work *work)
{
	unsigned long flags;

	work->wq = wq;
	thresh_queue_hook(wq);
	if (work->ordered_func) {
		spin_lock_irqsave(&wq->list_lock, flags);
		list_add_tail(&work->ordered_list, &wq->ordered_list);
		spin_unlock_irqrestore(&wq->list_lock, flags);
	}
	trace_btrfs_work_queued(work);
	queue_work(wq->normal_wq, &work->normal_work);
}

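/*
 * Queue a work item, routing it to the high priority queue when the work
 * was marked by btrfs_set_work_high_priority() and such a queue exists,
 * and to the normal queue otherwise.
 */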
void btrfs_queue_work(struct btrfs_workqueue *wq,
		      struct btrfs_work *work)
{
	struct __btrfs_workqueue *dest_wq;

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
		dest_wq = wq->high;
	else
		dest_wq = wq->normal;
	__btrfs_queue_work(dest_wq, work);
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
{
	destroy_workqueue(wq->normal_wq);
	trace_btrfs_workqueue_destroy(wq);
	kfree(wq);
}

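/* Destroy both internal queues.  A NULL @wq is tolerated. */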
void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
	if (!wq)
		return;
	if (wq->high)
		__btrfs_destroy_workqueue(wq->high);
	__btrfs_destroy_workqueue(wq->normal);
	kfree(wq);
}

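/*
 * Update only the upper limit of active workers; the current level is
 * adjusted lazily by thresh_exec_hook() as work gets executed.
 */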
void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
{
	if (!wq)
		return;
	wq->normal->limit_active = limit_active;
	if (wq->high)
		wq->high->limit_active = limit_active;
}

void btrfs_set_work_high_priority(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}

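/* Wait until all work currently queued on both queues has finished. */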
void btrfs_flush_workqueue(struct btrfs_workqueue *wq)
{
	if (wq->high)
		flush_workqueue(wq->high->normal_wq);

	flush_workqueue(wq->normal->normal_wq);
}