/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce the kernel boot time, by
running various independent hardware delay and discovery operations
decoupled from each other rather than strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in-order.
(not unlike how out-of-order CPUs retire their instructions in order)

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.

Before doing a globally visible operation, such as registering device
numbers, the asynchronously called function should call
async_synchronize_cookie() and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding to the
cookie have completed.

Subsystem/driver initialization code that has scheduled asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.

*/
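
/*
 * The protocol above is easiest to see in a small sketch.  This is purely
 * illustrative: the foo_* names are hypothetical and do not belong to any
 * real driver; only the async_* calls are the actual API.
 *
 *	static void foo_probe_async(void *data, async_cookie_t cookie)
 *	{
 *		struct foo_device *dev = data;
 *
 *		foo_reset_and_detect(dev);	// slow, order-independent work
 *
 *		// wait for every async job scheduled before this one, so the
 *		// externally visible registration still happens in order
 *		async_synchronize_cookie(cookie);
 *		foo_register(dev);
 *	}
 *
 *	static int __init foo_init(void)
 *	{
 *		async_schedule(foo_probe_async, &foo_dev);
 *
 *		// foo shares global resources with synchronous code, so flush
 *		// all async work before returning from the initcall
 *		async_synchronize_full();
 *		return 0;
 *	}
 */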

#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "workqueue_internal.h"

static async_cookie_t next_cookie = 1;

#define MAX_WORK		32768
#define ASYNC_COOKIE_MAX	ULLONG_MAX	/* infinity cookie */

static LIST_HEAD(async_global_pending);	/* pending from all registered doms */
static ASYNC_DOMAIN(async_dfl_domain);
static DEFINE_SPINLOCK(async_lock);

struct async_entry {
	struct list_head	domain_list;
	struct list_head	global_list;
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_t		func;
	void			*data;
	struct async_domain	*domain;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;
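
/*
 * Return the cookie of the oldest entry still pending in @domain (or, for a
 * NULL @domain, in any registered domain).  Entries are queued in cookie
 * order, so the head of each pending list always carries the lowest cookie;
 * if nothing is pending, "infinity" is returned so that every waiter is
 * released.
 */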
static async_cookie_t lowest_in_progress(struct async_domain *domain)
{
	struct async_entry *first = NULL;
	async_cookie_t ret = ASYNC_COOKIE_MAX;
	unsigned long flags;

	spin_lock_irqsave(&async_lock, flags);

	if (domain) {
		if (!list_empty(&domain->pending))
			first = list_first_entry(&domain->pending,
					struct async_entry, domain_list);
	} else {
		if (!list_empty(&async_global_pending))
			first = list_first_entry(&async_global_pending,
					struct async_entry, global_list);
	}

	if (first)
		ret = first->cookie;

	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}

/*
 * Run the function attached to this work item, then remove the entry
 * from the pending lists and wake up any waiters.
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	unsigned long flags;
	ktime_t uninitialized_var(calltime), delta, rettime;

	/* 1) run (and print duration) */
	if (initcall_debug && system_state < SYSTEM_RUNNING) {
		pr_debug("calling  %lli_%pF @ %i\n",
			 (long long)entry->cookie,
			 entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state < SYSTEM_RUNNING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_debug("initcall %lli_%pF returned 0 after %lld usecs\n",
			 (long long)entry->cookie,
			 entry->func,
			 (long long)ktime_to_ns(delta) >> 10);
	}

	/* 2) remove self from the pending queues */
	spin_lock_irqsave(&async_lock, flags);
	list_del_init(&entry->domain_list);
	list_del_init(&entry->global_list);

	/* 3) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 4) wake up any waiters */
	wake_up(&async_done);
}

/**
 * async_schedule_node_domain - NUMA specific version of async_schedule_domain
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 * @domain: the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @domain may be used in the async_synchronize_*_domain() functions to
 * wait within a certain synchronization domain rather than globally.
 *
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
					  int node, struct async_domain *domain)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		func(data, newcookie);
		return newcookie;
	}
	INIT_LIST_HEAD(&entry->domain_list);
	INIT_LIST_HEAD(&entry->global_list);
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = func;
	entry->data = data;
	entry->domain = domain;

	spin_lock_irqsave(&async_lock, flags);

	/* allocate cookie and queue */
	newcookie = entry->cookie = next_cookie++;

	list_add_tail(&entry->domain_list, &domain->pending);
	if (domain->registered)
		list_add_tail(&entry->global_list, &async_global_pending);

	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* mark that this task has queued an async job, used by module init */
	current->flags |= PF_USED_ASYNC;

	/* schedule for execution */
	queue_work_node(node, system_unbound_wq, &entry->work);

	return newcookie;
}
EXPORT_SYMBOL_GPL(async_schedule_node_domain);
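
/*
 * A sketch of how a subsystem might use a private domain (hypothetical
 * bar_* names, illustrative only): probes are scheduled into the domain and
 * the subsystem later waits for that domain alone, without being serialized
 * against unrelated async work elsewhere in the kernel.
 *
 *	static ASYNC_DOMAIN(bar_domain);
 *
 *	static int __init bar_init(void)
 *	{
 *		int i;
 *
 *		for (i = 0; i < bar_ndevs; i++)
 *			async_schedule_node_domain(bar_probe_async,
 *						   &bar_devs[i],
 *						   bar_devs[i].numa_node,
 *						   &bar_domain);
 *
 *		async_synchronize_full_domain(&bar_domain);
 *		return 0;
 *	}
 */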

/**
 * async_schedule_node - NUMA specific version of async_schedule
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node(async_func_t func, void *data, int node)
{
	return async_schedule_node_domain(func, data, node, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_schedule_node);
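
/*
 * Callers that care about memory locality can pass the NUMA node of the
 * device being probed.  Sketch only, assuming a struct device *dev and a
 * hypothetical baz_probe_async() callback:
 *
 *	cookie = async_schedule_node(baz_probe_async, dev, dev_to_node(dev));
 *
 * (At the time of writing, <linux/async.h> also provides async_schedule()
 * and async_schedule_dev() convenience wrappers built on this function.)
 */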

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	async_synchronize_full_domain(NULL);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);

/**
 * async_unregister_domain - ensure no more anonymous waiters on this domain
 * @domain: idle domain to flush out of any async_synchronize_full instances
 *
 * async_synchronize_{cookie|full}_domain() are not flushed since callers
 * of these routines should know the lifetime of @domain
 *
 * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing
 */
void async_unregister_domain(struct async_domain *domain)
{
	spin_lock_irq(&async_lock);
	WARN_ON(!domain->registered || !list_empty(&domain->pending));
	domain->registered = 0;
	spin_unlock_irq(&async_lock);
}
EXPORT_SYMBOL_GPL(async_unregister_domain);
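
/*
 * The note above in practice: a domain declared with ASYNC_DOMAIN_EXCLUSIVE()
 * is never entered into the global pending list, so async_synchronize_full()
 * callers never wait on it and there is nothing to unregister.  Sketch only,
 * with a hypothetical qux_* user:
 *
 *	static ASYNC_DOMAIN_EXCLUSIVE(qux_domain);
 *
 *	...
 *	async_schedule_domain(qux_work, qux, &qux_domain);
 *	...
 *	async_synchronize_full_domain(&qux_domain);
 */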

/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @domain: the domain to synchronize (%NULL for all registered domains)
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain submitted prior to @cookie
 * have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
{
	ktime_t uninitialized_var(starttime), delta, endtime;

	if (initcall_debug && system_state < SYSTEM_RUNNING) {
		pr_debug("async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, lowest_in_progress(domain) >= cookie);

	if (initcall_debug && system_state < SYSTEM_RUNNING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		pr_debug("async_continuing @ %i after %lli usec\n",
			 task_pid_nr(current),
			 (long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);

/**
 * current_is_async - is %current an async worker task?
 *
 * Returns %true if %current is an async worker task.
 */
bool current_is_async(void)
{
	struct worker *worker = current_wq_worker();

	return worker && worker->current_func == async_run_entry_fn;
}
EXPORT_SYMBOL_GPL(current_is_async);
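
/*
 * current_is_async() lets shared code adapt its behaviour to the calling
 * context.  A hypothetical sketch (quux_* names are illustrative): a path
 * that must not stall the async workers behind a long blocking wait can
 * check where it is running and choose a non-blocking fallback.
 *
 *	static int quux_load_config(struct quux_dev *q)
 *	{
 *		if (current_is_async())
 *			return quux_load_config_nowait(q);
 *
 *		return quux_load_config_blocking(q);
 *	}
 */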