Lines Matching +full:high +full:- +full:threshold

1 // SPDX-License-Identifier: GPL-2.0
12 #include "async-thread.h"
21 #define NO_THRESHOLD (-1)
45 /* Threshold to change current_active */
53 struct __btrfs_workqueue *high; member
58 return wq->fs_info; in btrfs_workqueue_owner()
63 return work->wq->fs_info; in btrfs_work_owner()
69 * We could compare wq->normal->pending with num_online_cpus() in btrfs_workqueue_normal_congested()
74 if (wq->normal->thresh == NO_THRESHOLD) in btrfs_workqueue_normal_congested()
77 return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2; in btrfs_workqueue_normal_congested()
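The excerpts above are from btrfs's async-thread.c, which layers thresholded, optionally dual-priority workqueues on top of the kernel's generic workqueue API. The congestion test is deliberately coarse: a queue with thresholding disabled is never reported congested, and a thresholded one is congested only once its backlog exceeds twice the threshold. A minimal user-space model of that check (struct wq_model, its field set, and the C11 atomics are illustrative stand-ins for the kernel types, sized to carry the later sketches as well):

#include <stdatomic.h>
#include <stdbool.h>

#define NO_THRESHOLD (-1)

/* Illustrative stand-in for struct __btrfs_workqueue. */
struct wq_model {
        atomic_int pending;  /* works queued but not yet executed */
        int thresh;          /* NO_THRESHOLD disables the machinery */
        int count;           /* executions since the last counter wrap */
        int current_active;  /* concurrency currently granted */
        int limit_active;    /* hard ceiling for current_active */
};

/* Model of btrfs_workqueue_normal_congested(): congested once the
 * backlog exceeds twice the threshold. */
static bool wq_congested(struct wq_model *wq)
{
        if (wq->thresh == NO_THRESHOLD)
                return false;
        return atomic_load(&wq->pending) > wq->thresh * 2;
}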
89 ret->fs_info = fs_info; in __btrfs_alloc_workqueue()
90 ret->limit_active = limit_active; in __btrfs_alloc_workqueue()
91 atomic_set(&ret->pending, 0); in __btrfs_alloc_workqueue()
94 /* For low threshold, disabling threshold is a better choice */ in __btrfs_alloc_workqueue()
96 ret->current_active = limit_active; in __btrfs_alloc_workqueue()
97 ret->thresh = NO_THRESHOLD; in __btrfs_alloc_workqueue()
100 * For threshold-able wq, let its concurrency grow on demand. in __btrfs_alloc_workqueue()
104 ret->current_active = 1; in __btrfs_alloc_workqueue()
105 ret->thresh = thresh; in __btrfs_alloc_workqueue()
109 ret->normal_wq = alloc_workqueue("btrfs-%s-high", flags, in __btrfs_alloc_workqueue()
110 ret->current_active, name); in __btrfs_alloc_workqueue()
112 ret->normal_wq = alloc_workqueue("btrfs-%s", flags, in __btrfs_alloc_workqueue()
113 ret->current_active, name); in __btrfs_alloc_workqueue()
114 if (!ret->normal_wq) { in __btrfs_alloc_workqueue()
119 INIT_LIST_HEAD(&ret->ordered_list); in __btrfs_alloc_workqueue()
120 spin_lock_init(&ret->list_lock); in __btrfs_alloc_workqueue()
121 spin_lock_init(&ret->thres_lock); in __btrfs_alloc_workqueue()
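At allocation time the code decides whether adaptive thresholding is worth running at all: below some cutoff it grants full concurrency up front and disables the threshold, otherwise it starts at a single active worker and lets the execution hook grow it on demand. Continuing the model above (LOW_THRESH_CUTOFF stands in for the cutoff the elided comparison uses; the branch is visible in the excerpt, but the constant's name and value here are assumptions):

#define LOW_THRESH_CUTOFF 32  /* assumed cutoff, not the kernel's constant */

static void wq_init_thresh(struct wq_model *wq, int limit_active, int thresh)
{
        wq->limit_active = limit_active;
        wq->count = 0;
        atomic_store(&wq->pending, 0);
        if (thresh < LOW_THRESH_CUTOFF) {
                /* Low threshold: adaptive scaling would only thrash,
                 * so run at the full limit from the start. */
                wq->current_active = limit_active;
                wq->thresh = NO_THRESHOLD;
        } else {
                /* Start minimal; exec_hook() grows concurrency on
                 * demand as the backlog builds. */
                wq->current_active = 1;
                wq->thresh = thresh;
        }
}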
140 ret->normal = __btrfs_alloc_workqueue(fs_info, name, in btrfs_alloc_workqueue()
143 if (!ret->normal) { in btrfs_alloc_workqueue()
149 ret->high = __btrfs_alloc_workqueue(fs_info, name, flags, in btrfs_alloc_workqueue()
151 if (!ret->high) { in btrfs_alloc_workqueue()
152 __btrfs_destroy_workqueue(ret->normal); in btrfs_alloc_workqueue()
161 * Hook for threshold which will be called in btrfs_queue_work.
167 if (wq->thresh == NO_THRESHOLD) in thresh_queue_hook()
169 atomic_inc(&wq->pending); in thresh_queue_hook()
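The queue-side hook is pure bookkeeping; all the adaptive behavior lives on the execution side. In the model:

/* Model of thresh_queue_hook(): just account the new backlog entry. */
static void queue_hook(struct wq_model *wq)
{
        if (wq->thresh == NO_THRESHOLD)
                return;
        atomic_fetch_add(&wq->pending, 1);
}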
173 * Hook for threshold which will be called before executing the work,
183 if (wq->thresh == NO_THRESHOLD) in thresh_exec_hook()
186 atomic_dec(&wq->pending); in thresh_exec_hook()
187 spin_lock(&wq->thres_lock); in thresh_exec_hook()
189 * Use wq->count to limit the calling frequency of in thresh_exec_hook()
192 wq->count++; in thresh_exec_hook()
193 wq->count %= (wq->thresh / 4); in thresh_exec_hook()
194 if (!wq->count) in thresh_exec_hook()
196 new_current_active = wq->current_active; in thresh_exec_hook()
202 pending = atomic_read(&wq->pending); in thresh_exec_hook()
203 if (pending > wq->thresh) in thresh_exec_hook()
205 if (pending < wq->thresh / 2) in thresh_exec_hook()
206 new_current_active--; in thresh_exec_hook()
207 new_current_active = clamp_val(new_current_active, 1, wq->limit_active); in thresh_exec_hook()
208 if (new_current_active != wq->current_active) { in thresh_exec_hook()
210 wq->current_active = new_current_active; in thresh_exec_hook()
213 spin_unlock(&wq->thres_lock); in thresh_exec_hook()
216 workqueue_set_max_active(wq->normal_wq, wq->current_active); in thresh_exec_hook()
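The execution-side hook is where concurrency actually adapts: each completed work decrements the backlog, a wrapping counter rate-limits how often an adjustment is attempted, and when one runs, a backlog above thresh grows current_active by one while a backlog below thresh/2 shrinks it, clamped to [1, limit_active]; workqueue_set_max_active() is then called only when the value really changed. Continuing the model (the spinlock that protects count and current_active in the kernel is omitted here, and clamp_int stands in for the kernel's clamp_val):

static int clamp_int(int v, int lo, int hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

/* Model of thresh_exec_hook(). */
static void exec_hook(struct wq_model *wq)
{
        int pending, active;

        if (wq->thresh == NO_THRESHOLD)
                return;
        atomic_fetch_sub(&wq->pending, 1);

        /* Rate limiter: the counter wraps every thresh/4 executions,
         * and the wrap itself (count == 0) skips the adjustment, as
         * in the excerpt above. */
        wq->count = (wq->count + 1) % (wq->thresh / 4);
        if (!wq->count)
                return;

        active = wq->current_active;
        pending = atomic_load(&wq->pending);  /* a stale read is fine here */
        if (pending > wq->thresh)             /* backlog piling up: grow */
                active++;
        if (pending < wq->thresh / 2)         /* backlog draining: shrink */
                active--;
        active = clamp_int(active, 1, wq->limit_active);
        if (active != wq->current_active) {
                wq->current_active = active;
                /* kernel: workqueue_set_max_active(wq->normal_wq, active); */
        }
}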
223 struct list_head *list = &wq->ordered_list; in run_ordered_work()
225 spinlock_t *lock = &wq->list_lock; in run_ordered_work()
233 work = list_entry(list->next, struct btrfs_work, in run_ordered_work()
235 if (!test_bit(WORK_DONE_BIT, &work->flags)) in run_ordered_work()
251 if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags)) in run_ordered_work()
255 work->ordered_func(work); in run_ordered_work()
259 list_del(&work->ordered_list); in run_ordered_work()
267 * The kernel workqueue code guarantees non-reentrancy in run_ordered_work()
290 work->ordered_free(work); in run_ordered_work()
292 trace_btrfs_all_work_done(wq->fs_info, work); in run_ordered_work()
298 self->ordered_free(self); in run_ordered_work()
300 trace_btrfs_all_work_done(wq->fs_info, self); in run_ordered_work()
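run_ordered_work() is what reconciles concurrency with ordering: main functions complete in any order on the workqueue, but ordered callbacks must run in queue order, so after each completion the FIFO is drained from the front, stopping at the first entry whose main function has not finished. A deliberately single-threaded model of that drain (the kernel's locking, memory barriers, and the deferred self-free that the non-reentrancy comment above concerns are all elided; entries are appended to the list at queue time, as __btrfs_queue_work() below shows):

#include <stdbool.h>
#include <stddef.h>

/* Illustrative stand-in for a btrfs_work on the ordered list. */
struct owork {
        struct owork *next;
        bool done;        /* main function finished (WORK_DONE_BIT) */
        bool order_done;  /* ordered callback ran (WORK_ORDER_DONE_BIT) */
        void (*ordered_fn)(struct owork *);
        void (*ordered_free)(struct owork *);
};

/* Model of run_ordered_work(): drain the finished prefix of the
 * FIFO, running ordered callbacks strictly in queue order. */
static void run_ordered(struct owork **head)
{
        while (*head && (*head)->done) {
                struct owork *w = *head;

                /* Models the kernel's test_and_set_bit: a concurrent
                 * drain got here first, so leave it to that drain. */
                if (w->order_done)
                        break;
                w->order_done = true;
                w->ordered_fn(w);
                *head = w->next;  /* drop from the list, then free */
                w->ordered_free(w);
        }
}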
313 * 1) after work->func() if it has no ordered_free in btrfs_work_helper()
314 * Since the struct is freed in work->func(). in btrfs_work_helper()
319 if (work->ordered_func) in btrfs_work_helper()
321 wq = work->wq; in btrfs_work_helper()
325 work->func(work); in btrfs_work_helper()
334 set_bit(WORK_DONE_BIT, &work->flags); in btrfs_work_helper()
338 trace_btrfs_all_work_done(wq->fs_info, work); in btrfs_work_helper()
345 work->func = func; in btrfs_init_work()
346 work->ordered_func = ordered_func; in btrfs_init_work()
347 work->ordered_free = ordered_free; in btrfs_init_work()
348 INIT_WORK(&work->normal_work, btrfs_work_helper); in btrfs_init_work()
349 INIT_LIST_HEAD(&work->ordered_list); in btrfs_init_work()
350 work->flags = 0; in btrfs_init_work()
358 work->wq = wq; in __btrfs_queue_work()
360 if (work->ordered_func) { in __btrfs_queue_work()
361 spin_lock_irqsave(&wq->list_lock, flags); in __btrfs_queue_work()
362 list_add_tail(&work->ordered_list, &wq->ordered_list); in __btrfs_queue_work()
363 spin_unlock_irqrestore(&wq->list_lock, flags); in __btrfs_queue_work()
366 queue_work(wq->normal_wq, &work->normal_work); in __btrfs_queue_work()
374 if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high) in btrfs_queue_work()
375 dest_wq = wq->high; in btrfs_queue_work()
377 dest_wq = wq->normal; in btrfs_queue_work()
384 destroy_workqueue(wq->normal_wq); in __btrfs_destroy_workqueue()
393 if (wq->high) in btrfs_destroy_workqueue()
394 __btrfs_destroy_workqueue(wq->high); in btrfs_destroy_workqueue()
395 __btrfs_destroy_workqueue(wq->normal); in btrfs_destroy_workqueue()
403 wq->normal->limit_active = limit_active; in btrfs_workqueue_set_max()
404 if (wq->high) in btrfs_workqueue_set_max()
405 wq->high->limit_active = limit_active; in btrfs_workqueue_set_max()
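Note that btrfs_workqueue_set_max() above only moves limit_active, the clamp ceiling used by thresh_exec_hook(); nothing is resized immediately, so on a thresholded queue the new limit takes effect the next time the execution hook pushes an adjustment through. In the model:

/* Model of btrfs_workqueue_set_max() for one sub-queue: the new
 * limit is applied lazily by a later exec_hook() adjustment. */
static void wq_set_max(struct wq_model *wq, int limit_active)
{
        wq->limit_active = limit_active;
}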
410 set_bit(WORK_HIGH_PRIO_BIT, &work->flags); in btrfs_set_work_high_priority()
415 if (wq->high) in btrfs_flush_workqueue()
416 flush_workqueue(wq->high->normal_wq); in btrfs_flush_workqueue()
418 flush_workqueue(wq->normal->normal_wq); in btrfs_flush_workqueue()
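Taken together, the caller-visible lifecycle is: allocate a workqueue (with a high-priority sub-queue only if requested), initialize each work item with a main function and optional ordered callbacks, flag it high priority before queueing if needed, then flush and destroy on shutdown, high sub-queue first. A hedged kernel-style sketch of that flow; the my_* names and the embedding struct are hypothetical, and the btrfs_* signatures are inferred from the excerpts above, so treat this as an approximation rather than the exact API:

/* Hypothetical context embedding a work item. */
struct my_ctx {
        struct btrfs_work work;
        /* ... payload ... */
};

static void my_work_fn(struct btrfs_work *work)
{
        /* Main function: runs concurrently, possibly out of order. */
}

static void my_ordered_fn(struct btrfs_work *work)
{
        /* Runs strictly in queue order once my_work_fn() is done. */
}

static void my_ordered_free(struct btrfs_work *work)
{
        /* Last callback for this item: release the embedding my_ctx,
         * e.g. kfree(container_of(work, struct my_ctx, work)). */
}

/* Hypothetical setup-and-submit sequence (a real caller would
 * allocate once and submit many times; error handling elided): */
static void my_example(struct btrfs_fs_info *fs_info, struct my_ctx *ctx,
                       bool urgent)
{
        struct btrfs_workqueue *wq;

        /* Assumption: the WQ_HIGHPRI flag is what requests the high
         * sub-queue; the condition itself is elided in the excerpt. */
        wq = btrfs_alloc_workqueue(fs_info, "my-wq", WQ_HIGHPRI, 8, 32);
        if (!wq)
                return;

        btrfs_init_work(&ctx->work, my_work_fn, my_ordered_fn,
                        my_ordered_free);
        if (urgent)
                btrfs_set_work_high_priority(&ctx->work); /* before queueing */
        btrfs_queue_work(wq, &ctx->work);

        /* Shutdown: both flush and destroy take the high sub-queue
         * first, then the normal one. */
        btrfs_flush_workqueue(wq);
        btrfs_destroy_workqueue(wq);
}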