/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce the kernel boot time, by
running various independent hardware delay and discovery operations
decoupled from each other rather than strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously and out of order, while the externally visible parts of
these operations still happen sequentially and in order (not unlike how
out-of-order CPUs retire their instructions in order).

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core assigns each scheduled event such a sequence cookie and
passes it to the called function.

Before performing a globally visible operation, such as registering
device numbers, the asynchronously called function should call
async_synchronize_cookie() and pass in its own cookie. The
async_synchronize_cookie() function makes sure that all asynchronous
operations that were scheduled prior to the operation corresponding
with the cookie have completed.

Subsystem/driver initialization code that schedules asynchronous probe
functions, but shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before
returning from its init function. This maintains strict ordering
between the asynchronous and synchronous parts of the kernel.

*/
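/*
 * A minimal usage sketch (illustrative only): my_probe(),
 * probe_hardware(), register_device_numbers(), my_dev and my_init()
 * are hypothetical names; the async_* calls are the interface
 * implemented in this file.
 *
 *	static void my_probe(void *data, async_cookie_t cookie)
 *	{
 *		struct my_dev *dev = data;
 *
 *		probe_hardware(dev);	slow work; may run out of order
 *
 *		async_synchronize_cookie(cookie);
 *		register_device_numbers(dev);	globally visible, in order
 *	}
 *
 *	static int __init my_init(void)
 *	{
 *		async_schedule(my_probe, &my_dev);
 *
 *		async_synchronize_full();	sync with non-async code
 *		return 0;
 *	}
 */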

#include <linux/async.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <asm/atomic.h>

static async_cookie_t next_cookie = 1;

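/*
 * Upper bounds: maximum size of the worker thread pool, and maximum
 * number of queued entries before __async_schedule() falls back to
 * executing the function synchronously.
 */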
#define MAX_THREADS	256
#define MAX_WORK	32768

static LIST_HEAD(async_pending);
static LIST_HEAD(async_running);
static DEFINE_SPINLOCK(async_lock);

static int async_enabled = 0;

struct async_entry {
	struct list_head list;
	async_cookie_t   cookie;
	async_func_ptr	 *func;
	void             *data;
	struct list_head *running;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);
static DECLARE_WAIT_QUEUE_HEAD(async_new);

static atomic_t entry_count;
static atomic_t thread_count;

extern int initcall_debug;


/*
 * MUST be called with the lock held!
 */
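/*
 * Entries are placed on the pending list, and subsequently moved to a
 * running list, in cookie order; the head of either list therefore
 * carries the lowest cookie that is still outstanding.
 */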
static async_cookie_t __lowest_in_progress(struct list_head *running)
{
	struct async_entry *entry;
	if (!list_empty(running)) {
		entry = list_first_entry(running,
			struct async_entry, list);
		return entry->cookie;
	} else if (!list_empty(&async_pending)) {
		entry = list_first_entry(&async_pending,
			struct async_entry, list);
		return entry->cookie;
	} else {
		/* nothing in progress... next_cookie is "infinity" */
		return next_cookie;
	}
}

static async_cookie_t lowest_in_progress(struct list_head *running)
{
	unsigned long flags;
	async_cookie_t ret;

	spin_lock_irqsave(&async_lock, flags);
	ret = __lowest_in_progress(running);
	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}

/*
 * pick the first pending entry and run it
 */
static void run_one_entry(void)
{
	unsigned long flags;
	struct async_entry *entry;
	ktime_t calltime, delta, rettime;

	/* 1) pick one task from the pending queue */

	spin_lock_irqsave(&async_lock, flags);
	if (list_empty(&async_pending))
		goto out;
	entry = list_first_entry(&async_pending, struct async_entry, list);

	/* 2) move it to the running queue */
	list_move_tail(&entry->list, entry->running);
	spin_unlock_irqrestore(&async_lock, flags);

	/* 3) run it (and print duration) */
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk("calling  %lli_%pF @ %i\n", (long long)entry->cookie,
			entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %lli_%pF returned 0 after %lld usecs\n",
			(long long)entry->cookie,
			entry->func,
			(long long)ktime_to_ns(delta) >> 10);
	}

	/* 4) remove it from the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_del(&entry->list);

	/* 5) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 6) wake up any waiters. */
	wake_up(&async_done);
	return;

out:
	spin_unlock_irqrestore(&async_lock, flags);
}


static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct list_head *running)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!async_enabled || !entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* can't run asynchronously.. run synchronously instead */
		ptr(data, newcookie);
		return newcookie;
	}
	entry->func = ptr;
	entry->data = data;
	entry->running = running;

	spin_lock_irqsave(&async_lock, flags);
	newcookie = entry->cookie = next_cookie++;
	list_add_tail(&entry->list, &async_pending);
	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);
	wake_up(&async_new);
	return newcookie;
}

/**
 * async_schedule - schedule a function for asynchronous execution
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
{
	return __async_schedule(ptr, data, &async_running);
}
EXPORT_SYMBOL_GPL(async_schedule);

/**
 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @running: running list for the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * A synchronization domain is specified via the running queue @running;
 * it may later be passed to the async_synchronize_*_domain() functions
 * to wait within that domain rather than globally.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
				     struct list_head *running)
{
	return __async_schedule(ptr, data, running);
}
EXPORT_SYMBOL_GPL(async_schedule_domain);
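/*
 * A sketch of domain usage (illustrative only; my_domain, my_probe and
 * my_dev are hypothetical): a driver can keep its async work on a
 * private running list and later wait for just that list.
 *
 *	static LIST_HEAD(my_domain);
 *
 *	async_schedule_domain(my_probe, &my_dev, &my_domain);
 *
 *	async_synchronize_full_domain(&my_domain);
 */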

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
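	/*
	 * Re-check in a loop: while we wait, freshly scheduled work can
	 * advance next_cookie past the value we synchronized against.
	 */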
	do {
		async_synchronize_cookie(next_cookie);
	} while (!list_empty(&async_running) || !list_empty(&async_pending));
}
EXPORT_SYMBOL_GPL(async_synchronize_full);

/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @list: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by the running list @list have been done.
 */
void async_synchronize_full_domain(struct list_head *list)
{
	async_synchronize_cookie_domain(next_cookie, list);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @running: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by the running list @running submitted
 * prior to @cookie have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie,
				     struct list_head *running)
{
	ktime_t starttime, delta, endtime;

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk("async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, lowest_in_progress(running) >= cookie);

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		printk("async_continuing @ %i after %lli usec\n",
			task_pid_nr(current),
			(long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_running);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);


static int async_thread(void *unused)
{
	DECLARE_WAITQUEUE(wq, current);
	add_wait_queue(&async_new, &wq);

	while (!kthread_should_stop()) {
		int ret = HZ;
		set_current_state(TASK_INTERRUPTIBLE);
		/*
		 * check the list head without the lock; false positives
		 * are dealt with inside run_one_entry() while holding
		 * the lock.
		 */
		rmb();
		if (!list_empty(&async_pending))
			run_one_entry();
		else
			ret = schedule_timeout(HZ);

		if (ret == 0) {
			/*
			 * We timed out, which means this thread is
			 * redundant. We sign off and die, but to avoid
			 * any races there is a last-straw check to see
			 * if work snuck in.
			 */
			atomic_dec(&thread_count);
			wmb(); /* manager must see our departure first */
			if (list_empty(&async_pending))
				break;
			/*
			 * Whoops, work came in between us timing out and
			 * signing off; we need to stay alive and keep
			 * working.
			 */
			atomic_inc(&thread_count);
		}
	}
	remove_wait_queue(&async_new, &wq);

	return 0;
}

static int async_manager_thread(void *unused)
{
	DECLARE_WAITQUEUE(wq, current);
	add_wait_queue(&async_new, &wq);

	while (!kthread_should_stop()) {
		int tc, ec;

		set_current_state(TASK_INTERRUPTIBLE);

		tc = atomic_read(&thread_count);
		rmb();
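		/*
		 * The rmb() orders the read of thread_count above against
		 * the read of entry_count below, so a worker's departure
		 * is observed before we size the pool against the work.
		 */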
		ec = atomic_read(&entry_count);

		while (tc < ec && tc < MAX_THREADS) {
			if (IS_ERR(kthread_run(async_thread, NULL, "async/%i",
					       tc))) {
				msleep(100);
				continue;
			}
			atomic_inc(&thread_count);
			tc++;
		}

		schedule();
	}
	remove_wait_queue(&async_new, &wq);

	return 0;
}

static int __init async_init(void)
{
	if (async_enabled)
		if (IS_ERR(kthread_run(async_manager_thread, NULL,
				       "async/mgr")))
			async_enabled = 0;
	return 0;
}

static int __init setup_async(char *str)
{
	async_enabled = 1;
	return 1;
}

__setup("fastboot", setup_async);


core_initcall(async_init);