/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include "async-thread.h"

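/*
 * Flag bits used in struct btrfs_work->flags: WORK_QUEUED_BIT is set
 * while a work item sits on a pending list and guards against queueing
 * the same item twice; WORK_DONE_BIT records that work->func has run
 * (used by ordered queues); WORK_ORDER_DONE_BIT records that the
 * ordered_func for this item has already been called.
 */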
#define WORK_QUEUED_BIT 0
#define WORK_DONE_BIT 1
#define WORK_ORDER_DONE_BIT 2

/*
 * container for the kthread task pointer and the list of pending work
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* pool we belong to */
	struct btrfs_workers *workers;

	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;

	unsigned long sequence;

	/* protects the pending list. */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;

	/* are we currently idle */
	int idle;
};

/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
	if (!worker->idle && atomic_read(&worker->num_pending) <
	    worker->workers->idle_thresh / 2) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 1;
		list_move(&worker->worker_list, &worker->workers->idle_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

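/*
 * Run the ordered completion hooks for an ordered workqueue.  Work items
 * may finish in any order, but ordered_func() and ordered_free() are
 * called in submission order: we walk order_list from the head and stop
 * at the first item whose main function has not completed yet.
 */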
static noinline int run_ordered_completions(struct btrfs_workers *workers,
					    struct btrfs_work *work)
{
	unsigned long flags;

	if (!workers->ordered)
		return 0;

	set_bit(WORK_DONE_BIT, &work->flags);

	spin_lock_irqsave(&workers->lock, flags);

	while (!list_empty(&workers->order_list)) {
		work = list_entry(workers->order_list.next,
				  struct btrfs_work, order_list);

		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/* we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;

		spin_unlock_irqrestore(&workers->lock, flags);

		work->ordered_func(work);

		/* now take the lock again and call the freeing code */
		spin_lock_irqsave(&workers->lock, flags);
		list_del(&work->order_list);
		work->ordered_free(work);
	}

	spin_unlock_irqrestore(&workers->lock, flags);
	return 0;
}

/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head *cur;
	struct btrfs_work *work;
	do {
		spin_lock_irq(&worker->lock);
again_locked:
		while (!list_empty(&worker->pending)) {
			cur = worker->pending.next;
			work = list_entry(cur, struct btrfs_work, list);
			list_del(&work->list);
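			/*
			 * clear the queued bit so the handler (or
			 * btrfs_requeue_work) is free to queue this work
			 * item again
			 */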
			clear_bit(WORK_QUEUED_BIT, &work->flags);

			work->worker = worker;
			spin_unlock_irq(&worker->lock);

			work->func(work);

			atomic_dec(&worker->num_pending);
			/*
			 * unless this is an ordered work queue,
			 * 'work' was probably freed by func above.
			 */
			run_ordered_completions(worker->workers, work);

			spin_lock_irq(&worker->lock);
			check_idle_worker(worker);

		}
		if (freezing(current)) {
			worker->working = 0;
			spin_unlock_irq(&worker->lock);
			refrigerator();
		} else {
			spin_unlock_irq(&worker->lock);
			if (!kthread_should_stop()) {
				cpu_relax();
				/*
				 * we've dropped the lock, did someone else
				 * jump in?
				 */
				smp_mb();
				if (!list_empty(&worker->pending))
					continue;

				/*
				 * this short schedule allows more work to
				 * come in without the queue functions
				 * needing to go through wake_up_process()
				 *
				 * worker->working is still 1, so nobody
				 * is going to try and wake us up
				 */
				schedule_timeout(1);
				smp_mb();
				if (!list_empty(&worker->pending))
					continue;

				/* still no more work? sleep for real */
				spin_lock_irq(&worker->lock);
				set_current_state(TASK_INTERRUPTIBLE);
				if (!list_empty(&worker->pending))
					goto again_locked;

				/*
				 * this makes sure we get a wakeup when someone
				 * adds something new to the queue
				 */
				worker->working = 0;
				spin_unlock_irq(&worker->lock);

				schedule();
			}
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

/*
 * this will wait for all the worker threads to shut down
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;

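	/* pull the idle workers onto the main list so every thread gets stopped */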
	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);
		kthread_stop(worker->task);
		list_del(&worker->worker_list);
		kfree(worker);
	}
	return 0;
}

/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
{
	workers->num_workers = 0;
	INIT_LIST_HEAD(&workers->worker_list);
	INIT_LIST_HEAD(&workers->idle_list);
	INIT_LIST_HEAD(&workers->order_list);
	spin_lock_init(&workers->lock);
	workers->max_workers = max;
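	/*
	 * idle_thresh controls when a thread moves between the busy and
	 * idle lists: check_busy_worker() marks it busy once it has
	 * idle_thresh pending items, and check_idle_worker() marks it
	 * idle again once it drops below idle_thresh / 2.  next_worker()
	 * also uses it as the batch size when rotating busy threads.
	 */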
	workers->idle_thresh = 32;
	workers->name = name;
	workers->ordered = 0;
}

/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;
	int i;

	for (i = 0; i < num_workers; i++) {
		worker = kzalloc(sizeof(*worker), GFP_NOFS);
		if (!worker) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&worker->pending);
		INIT_LIST_HEAD(&worker->worker_list);
		spin_lock_init(&worker->lock);
		atomic_set(&worker->num_pending, 0);
		worker->task = kthread_run(worker_loop, worker,
					   "btrfs-%s-%d", workers->name,
					   workers->num_workers + i);
		worker->workers = workers;
		if (IS_ERR(worker->task)) {
			ret = PTR_ERR(worker->task);
			kfree(worker);
			goto fail;
		}

		spin_lock_irq(&workers->lock);
		list_add_tail(&worker->worker_list, &workers->idle_list);
		worker->idle = 1;
		workers->num_workers++;
		spin_unlock_irq(&workers->lock);
	}
	return 0;
fail:
	btrfs_stop_workers(workers);
	return ret;
}

/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return NULL if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	int enforce_min = workers->num_workers < workers->max_workers;

	/*
	 * if we find an idle thread, don't move it to the end of the
	 * idle list.  This improves the chance that the next submission
	 * will reuse the same thread, and maybe catch it while it is still
	 * working
	 */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		return worker;
	}
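	/*
	 * while we are still below max_workers, prefer returning NULL so
	 * find_worker() starts a new thread instead of piling more work
	 * onto one that is already busy
	 */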
	if (enforce_min || list_empty(&workers->worker_list))
		return NULL;

	/*
	 * if we pick a busy task, move the task to the end of the list.
	 * hopefully this will keep things somewhat evenly balanced.
	 * Do the move in batches based on the sequence number.  This groups
	 * requests submitted at roughly the same time onto the same worker.
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	atomic_inc(&worker->num_pending);
	worker->sequence++;

	if (worker->sequence % workers->idle_thresh == 0)
		list_move_tail(next, &workers->worker_list);
	return worker;
}

/*
 * selects a worker thread to take the next job.  This will either find
 * an idle worker, start a new worker up to the max count, or just return
 * one of the existing busy workers.
 */
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;

again:
	spin_lock_irqsave(&workers->lock, flags);
	worker = next_worker(workers);
	spin_unlock_irqrestore(&workers->lock, flags);

	if (!worker) {
		spin_lock_irqsave(&workers->lock, flags);
		if (workers->num_workers >= workers->max_workers) {
			struct list_head *fallback = NULL;
			/*
			 * we have failed to find any workers, so just
			 * fall back to the first one we can find
			 */
			if (!list_empty(&workers->worker_list))
				fallback = workers->worker_list.next;
			if (!list_empty(&workers->idle_list))
				fallback = workers->idle_list.next;
			BUG_ON(!fallback);
			worker = list_entry(fallback,
				  struct btrfs_worker_thread, worker_list);
			spin_unlock_irqrestore(&workers->lock, flags);
		} else {
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			btrfs_start_workers(workers, 1);
			goto again;
		}
	}
	return worker;
}

/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;
	int wake = 0;

	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);

	/* by definition we're busy, take ourselves off the idle
	 * list.  irqs are already off from the outer irqsave, so don't
	 * reuse 'flags' here
	 */
	if (worker->idle) {
		spin_lock(&worker->workers->lock);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock(&worker->workers->lock);
	}
	if (!worker->working) {
		wake = 1;
		worker->working = 1;
	}

	spin_unlock_irqrestore(&worker->lock, flags);
	if (wake)
		wake_up_process(worker->task);
out:
	return 0;
}

/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	worker = find_worker(workers);
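	/*
	 * ordered queues also track the item on workers->order_list so
	 * run_ordered_completions() can call ordered_func() and
	 * ordered_free() in submission order
	 */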
	if (workers->ordered) {
		spin_lock_irqsave(&workers->lock, flags);
		list_add_tail(&work->order_list, &workers->order_list);
		spin_unlock_irqrestore(&workers->lock, flags);
	} else {
		INIT_LIST_HEAD(&work->order_list);
	}

	spin_lock_irqsave(&worker->lock, flags);

	list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);
	check_busy_worker(worker);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	spin_unlock_irqrestore(&worker->lock, flags);

	if (wake)
		wake_up_process(worker->task);
out:
	return 0;
}
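
/*
 * Usage sketch (illustrative only; the "csum" pool name, my_work and
 * my_work_func are hypothetical and not part of this file):
 *
 *	struct btrfs_workers csum_workers;
 *
 *	btrfs_init_workers(&csum_workers, "csum", 8);
 *	btrfs_start_workers(&csum_workers, 1);
 *
 *	my_work->func = my_work_func;
 *	btrfs_queue_worker(&csum_workers, my_work);
 *	...
 *	btrfs_stop_workers(&csum_workers);
 */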