/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce kernel boot time by
running independent hardware delays and discovery operations
concurrently instead of strictly serializing them.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously and out of order, while the externally visible parts
of these operations still happen sequentially and in order
(not unlike how out-of-order CPUs retire their instructions in order).

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core assigns each scheduled event such a sequence cookie and
passes it to the called function.

Before doing a globally visible operation, such as registering device
numbers, an asynchronously called function should call the
async_synchronize_cookie() function and pass in its own cookie. The
async_synchronize_cookie() function makes sure that all asynchronous
operations that were scheduled prior to the operation corresponding
with the cookie have completed.

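For example, an asynchronously called probe function might look like
this (the my_* names are illustrative, not part of this file):

	static void my_async_probe(void *data, async_cookie_t cookie)
	{
		struct my_dev *dev = data;

		my_dev_reset(dev);

		async_synchronize_cookie(cookie);
		my_dev_register(dev);
	}

The slow reset may run out of order with respect to other async work;
the async_synchronize_cookie() call then makes sure the globally
visible registration happens in submission order.
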
Subsystem/driver initialization code that schedules asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This maintains strict ordering between the
asynchronous and synchronous parts of the kernel.
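
A minimal sketch of such an init function (again with illustrative
my_* names):

	static int __init my_driver_init(void)
	{
		async_schedule(my_async_probe, &my_device);

		async_synchronize_full();
		return 0;
	}

The async_synchronize_full() call keeps later, non-async init code
from racing with the still-running probe.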

*/

#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "workqueue_internal.h"

static async_cookie_t next_cookie = 1;

#define MAX_WORK		32768
#define ASYNC_COOKIE_MAX	ULLONG_MAX	/* infinity cookie */

static LIST_HEAD(async_global_pending);	/* pending from all registered doms */
static ASYNC_DOMAIN(async_dfl_domain);
static DEFINE_SPINLOCK(async_lock);

struct async_entry {
	struct list_head	domain_list;
	struct list_head	global_list;
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_t		func;
	void			*data;
	struct async_domain	*domain;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;

/*
 * Pending entries are queued in ascending cookie order, so the first
 * entry on a pending list carries the lowest cookie still in flight.
 * Note that entries on the global list are linked via ->global_list,
 * not ->domain_list, so the right member must be used for each list.
 */
static async_cookie_t lowest_in_progress(struct async_domain *domain)
{
	struct async_entry *first = NULL;
	async_cookie_t ret = ASYNC_COOKIE_MAX;
	unsigned long flags;

	spin_lock_irqsave(&async_lock, flags);

	if (domain) {
		if (!list_empty(&domain->pending))
			first = list_first_entry(&domain->pending,
					struct async_entry, domain_list);
	} else {
		if (!list_empty(&async_global_pending))
			first = list_first_entry(&async_global_pending,
					struct async_entry, global_list);
	}

	if (first)
		ret = first->cookie;

	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}

/*
 * pick the first pending entry and run it
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	unsigned long flags;
	ktime_t uninitialized_var(calltime), delta, rettime;

	/* 1) run (and print duration) */
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "calling %lli_%pF @ %i\n",
			(long long)entry->cookie,
			entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n",
			(long long)entry->cookie,
			entry->func,
			(long long)ktime_to_ns(delta) >> 10);
	}

	/* 2) remove self from the pending queues */
	spin_lock_irqsave(&async_lock, flags);
	list_del_init(&entry->domain_list);
	list_del_init(&entry->global_list);

	/* 3) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 4) wake up any waiters */
	wake_up(&async_done);
}

static async_cookie_t __async_schedule(async_func_t func, void *data, struct async_domain *domain)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		func(data, newcookie);
		return newcookie;
	}
	INIT_LIST_HEAD(&entry->domain_list);
	INIT_LIST_HEAD(&entry->global_list);
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = func;
	entry->data = data;
	entry->domain = domain;

	spin_lock_irqsave(&async_lock, flags);

	/* allocate cookie and queue */
	newcookie = entry->cookie = next_cookie++;

	list_add_tail(&entry->domain_list, &domain->pending);
	if (domain->registered)
		list_add_tail(&entry->global_list, &async_global_pending);

	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* mark that this task has queued an async job, used by module init */
	current->flags |= PF_USED_ASYNC;

	/* schedule for execution */
	queue_work(system_unbound_wq, &entry->work);

	return newcookie;
}

/**
 * async_schedule - schedule a function for asynchronous execution
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
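 *
 * A minimal usage sketch (my_async_probe and my_dev are illustrative
 * names, not part of this API):
 *
 *	async_cookie_t cookie = async_schedule(my_async_probe, my_dev);
 *
 *	async_synchronize_full();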
 */
async_cookie_t async_schedule(async_func_t func, void *data)
{
	return __async_schedule(func, data, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_schedule);

/**
 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @domain: the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @domain may be used in the async_synchronize_*_domain() functions to
 * wait within a certain synchronization domain rather than globally.
 * Note: This function may be called from atomic or non-atomic contexts.
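 *
 * A minimal sketch of domain usage (my_domain and my_init_fn are
 * illustrative names):
 *
 *	static ASYNC_DOMAIN(my_domain);
 *
 *	async_schedule_domain(my_init_fn, NULL, &my_domain);
 *	async_synchronize_full_domain(&my_domain);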
 */
async_cookie_t async_schedule_domain(async_func_t func, void *data,
				     struct async_domain *domain)
{
	return __async_schedule(func, data, domain);
}
EXPORT_SYMBOL_GPL(async_schedule_domain);

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	async_synchronize_full_domain(NULL);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);

/**
 * async_unregister_domain - ensure no more anonymous waiters on this domain
 * @domain: idle domain to flush out of any async_synchronize_full instances
 *
 * async_synchronize_{cookie|full}_domain() are not flushed since callers
 * of these routines should know the lifetime of @domain.
 *
 * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing.
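 *
 * A sketch of the preferred pattern (my_domain is an illustrative name):
 *
 *	static ASYNC_DOMAIN_EXCLUSIVE(my_domain);
 *
 * Entries in an exclusive (unregistered) domain are never added to the
 * global pending list, so async_synchronize_full() does not wait on
 * them and the domain needs no unregistration.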
 */
void async_unregister_domain(struct async_domain *domain)
{
	spin_lock_irq(&async_lock);
	WARN_ON(!domain->registered || !list_empty(&domain->pending));
	domain->registered = 0;
	spin_unlock_irq(&async_lock);
}
EXPORT_SYMBOL_GPL(async_unregister_domain);

/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @domain: the domain to synchronize (%NULL for all registered domains)
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain submitted prior to @cookie
 * have been done.
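 *
 * A minimal checkpointing sketch (my_fn and my_domain are illustrative
 * names):
 *
 *	async_cookie_t cookie;
 *
 *	cookie = async_schedule_domain(my_fn, NULL, &my_domain);
 *	async_synchronize_cookie_domain(cookie, &my_domain);
 *
 * This waits only for work in my_domain that was scheduled before
 * @cookie was issued.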
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
{
	ktime_t uninitialized_var(starttime), delta, endtime;

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, lowest_in_progress(domain) >= cookie);

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n",
			task_pid_nr(current),
			(long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);

/**
 * current_is_async - is %current an async worker task?
 *
 * Returns %true if %current is an async worker task.
 */
bool current_is_async(void)
{
	struct worker *worker = current_wq_worker();

	return worker && worker->current_func == async_run_entry_fn;
}