1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* |
3 | * workqueue.h --- work queue handling for Linux. |
4 | */ |
5 | |
6 | #ifndef _LINUX_WORKQUEUE_H |
7 | #define _LINUX_WORKQUEUE_H |
8 | |
9 | #include <linux/timer.h> |
10 | #include <linux/linkage.h> |
11 | #include <linux/bitops.h> |
12 | #include <linux/lockdep.h> |
13 | #include <linux/threads.h> |
14 | #include <linux/atomic.h> |
15 | #include <linux/cpumask.h> |
16 | #include <linux/rcupdate.h> |
17 | |
18 | struct workqueue_struct; |
19 | |
20 | struct work_struct; |
21 | typedef void (*work_func_t)(struct work_struct *work); |
22 | void delayed_work_timer_fn(struct timer_list *t); |
23 | |
24 | /* |
25 | * The first word is the work queue pointer and the flags rolled into |
26 | * one |
27 | */ |
28 | #define work_data_bits(work) ((unsigned long *)(&(work)->data)) |
29 | |
30 | enum { |
31 | WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ |
	WORK_STRUCT_INACTIVE_BIT = 1,	/* work item is inactive */
33 | WORK_STRUCT_PWQ_BIT = 2, /* data points to pwq */ |
34 | WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */ |
35 | #ifdef CONFIG_DEBUG_OBJECTS_WORK |
36 | WORK_STRUCT_STATIC_BIT = 4, /* static initializer (debugobjects) */ |
37 | WORK_STRUCT_COLOR_SHIFT = 5, /* color for workqueue flushing */ |
38 | #else |
39 | WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */ |
40 | #endif |
41 | |
42 | WORK_STRUCT_COLOR_BITS = 4, |
43 | |
44 | WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, |
45 | WORK_STRUCT_INACTIVE = 1 << WORK_STRUCT_INACTIVE_BIT, |
46 | WORK_STRUCT_PWQ = 1 << WORK_STRUCT_PWQ_BIT, |
47 | WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT, |
48 | #ifdef CONFIG_DEBUG_OBJECTS_WORK |
49 | WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT, |
50 | #else |
51 | WORK_STRUCT_STATIC = 0, |
52 | #endif |
53 | |
54 | WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS), |
55 | |
56 | /* not bound to any CPU, prefer the local CPU */ |
57 | WORK_CPU_UNBOUND = NR_CPUS, |
58 | |
59 | /* |
60 | * Reserve 8 bits off of pwq pointer w/ debugobjects turned off. |
61 | * This makes pwqs aligned to 256 bytes and allows 16 workqueue |
62 | * flush colors. |
63 | */ |
64 | WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT + |
65 | WORK_STRUCT_COLOR_BITS, |
66 | |
67 | /* data contains off-queue information when !WORK_STRUCT_PWQ */ |
68 | WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT, |
69 | |
70 | __WORK_OFFQ_CANCELING = WORK_OFFQ_FLAG_BASE, |
71 | |
72 | /* |
73 | * When a work item is off queue, its high bits point to the last |
74 | * pool it was on. Cap at 31 bits and use the highest number to |
75 | * indicate that no pool is associated. |
76 | */ |
77 | WORK_OFFQ_FLAG_BITS = 1, |
78 | WORK_OFFQ_POOL_SHIFT = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS, |
79 | WORK_OFFQ_LEFT = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT, |
80 | WORK_OFFQ_POOL_BITS = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31, |
81 | |
82 | /* bit mask for work_busy() return values */ |
83 | WORK_BUSY_PENDING = 1 << 0, |
84 | WORK_BUSY_RUNNING = 1 << 1, |
85 | |
86 | /* maximum string length for set_worker_desc() */ |
87 | WORKER_DESC_LEN = 24, |
88 | }; |
89 | |
90 | /* Convenience constants - of type 'unsigned long', not 'enum'! */ |
91 | #define WORK_OFFQ_CANCELING (1ul << __WORK_OFFQ_CANCELING) |
92 | #define WORK_OFFQ_POOL_NONE ((1ul << WORK_OFFQ_POOL_BITS) - 1) |
93 | #define WORK_STRUCT_NO_POOL (WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT) |
94 | |
95 | #define WORK_STRUCT_FLAG_MASK ((1ul << WORK_STRUCT_FLAG_BITS) - 1) |
96 | #define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK) |
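
/*
 * A minimal decoding sketch for the masks above (illustrative only; the
 * kernel proper goes through internal helpers such as get_work_pwq() in
 * kernel/workqueue.c):
 *
 *	unsigned long data = atomic_long_read(&work->data);
 *
 *	if (data & WORK_STRUCT_PWQ)
 *		pwq = (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 *	else
 *		pool_id = data >> WORK_OFFQ_POOL_SHIFT;
 */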
97 | |
98 | struct work_struct { |
99 | atomic_long_t data; |
100 | struct list_head entry; |
101 | work_func_t func; |
102 | #ifdef CONFIG_LOCKDEP |
103 | struct lockdep_map lockdep_map; |
104 | #endif |
105 | }; |
106 | |
107 | #define WORK_DATA_INIT() ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL) |
108 | #define WORK_DATA_STATIC_INIT() \ |
109 | ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)) |
110 | |
111 | struct delayed_work { |
112 | struct work_struct work; |
113 | struct timer_list timer; |
114 | |
115 | /* target workqueue and CPU ->timer uses to queue ->work */ |
116 | struct workqueue_struct *wq; |
117 | int cpu; |
118 | }; |
119 | |
120 | struct rcu_work { |
121 | struct work_struct work; |
122 | struct rcu_head rcu; |
123 | |
124 | /* target workqueue ->rcu uses to queue ->work */ |
125 | struct workqueue_struct *wq; |
126 | }; |
127 | |
128 | enum wq_affn_scope { |
129 | WQ_AFFN_DFL, /* use system default */ |
130 | WQ_AFFN_CPU, /* one pod per CPU */ |
	WQ_AFFN_SMT,			/* one pod per SMT */
132 | WQ_AFFN_CACHE, /* one pod per LLC */ |
133 | WQ_AFFN_NUMA, /* one pod per NUMA node */ |
134 | WQ_AFFN_SYSTEM, /* one pod across the whole system */ |
135 | |
136 | WQ_AFFN_NR_TYPES, |
137 | }; |
138 | |
139 | /** |
140 | * struct workqueue_attrs - A struct for workqueue attributes. |
141 | * |
142 | * This can be used to change attributes of an unbound workqueue. |
143 | */ |
144 | struct workqueue_attrs { |
145 | /** |
146 | * @nice: nice level |
147 | */ |
148 | int nice; |
149 | |
150 | /** |
151 | * @cpumask: allowed CPUs |
152 | * |
153 | * Work items in this workqueue are affine to these CPUs and not allowed |
154 | * to execute on other CPUs. A pool serving a workqueue must have the |
155 | * same @cpumask. |
156 | */ |
157 | cpumask_var_t cpumask; |
158 | |
159 | /** |
160 | * @__pod_cpumask: internal attribute used to create per-pod pools |
161 | * |
162 | * Internal use only. |
163 | * |
164 | * Per-pod unbound worker pools are used to improve locality. Always a |
165 | * subset of ->cpumask. A workqueue can be associated with multiple |
166 | * worker pools with disjoint @__pod_cpumask's. Whether the enforcement |
167 | * of a pool's @__pod_cpumask is strict depends on @affn_strict. |
168 | */ |
169 | cpumask_var_t __pod_cpumask; |
170 | |
171 | /** |
172 | * @affn_strict: affinity scope is strict |
173 | * |
174 | * If clear, workqueue will make a best-effort attempt at starting the |
175 | * worker inside @__pod_cpumask but the scheduler is free to migrate it |
176 | * outside. |
177 | * |
178 | * If set, workers are only allowed to run inside @__pod_cpumask. |
179 | */ |
180 | bool affn_strict; |
181 | |
182 | /* |
183 | * Below fields aren't properties of a worker_pool. They only modify how |
184 | * :c:func:`apply_workqueue_attrs` select pools and thus don't |
185 | * participate in pool hash calculations or equality comparisons. |
186 | */ |
187 | |
188 | /** |
189 | * @affn_scope: unbound CPU affinity scope |
190 | * |
191 | * CPU pods are used to improve execution locality of unbound work |
192 | * items. There are multiple pod types, one for each wq_affn_scope, and |
193 | * every CPU in the system belongs to one pod in every pod type. CPUs |
194 | * that belong to the same pod share the worker pool. For example, |
195 | * selecting %WQ_AFFN_NUMA makes the workqueue use a separate worker |
196 | * pool for each NUMA node. |
197 | */ |
198 | enum wq_affn_scope affn_scope; |
199 | |
200 | /** |
201 | * @ordered: work items must be executed one by one in queueing order |
202 | */ |
203 | bool ordered; |
204 | }; |
205 | |
206 | static inline struct delayed_work *to_delayed_work(struct work_struct *work) |
207 | { |
208 | return container_of(work, struct delayed_work, work); |
209 | } |
210 | |
211 | static inline struct rcu_work *to_rcu_work(struct work_struct *work) |
212 | { |
213 | return container_of(work, struct rcu_work, work); |
214 | } |
215 | |
216 | struct execute_work { |
217 | struct work_struct work; |
218 | }; |
219 | |
220 | #ifdef CONFIG_LOCKDEP |
221 | /* |
222 | * NB: because we have to copy the lockdep_map, setting _key |
223 | * here is required, otherwise it could get initialised to the |
224 | * copy of the lockdep_map! |
225 | */ |
226 | #define __WORK_INIT_LOCKDEP_MAP(n, k) \ |
227 | .lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k), |
228 | #else |
229 | #define __WORK_INIT_LOCKDEP_MAP(n, k) |
230 | #endif |
231 | |
232 | #define __WORK_INITIALIZER(n, f) { \ |
233 | .data = WORK_DATA_STATIC_INIT(), \ |
234 | .entry = { &(n).entry, &(n).entry }, \ |
235 | .func = (f), \ |
236 | __WORK_INIT_LOCKDEP_MAP(#n, &(n)) \ |
237 | } |
238 | |
239 | #define __DELAYED_WORK_INITIALIZER(n, f, tflags) { \ |
240 | .work = __WORK_INITIALIZER((n).work, (f)), \ |
241 | .timer = __TIMER_INITIALIZER(delayed_work_timer_fn,\ |
242 | (tflags) | TIMER_IRQSAFE), \ |
243 | } |
244 | |
245 | #define DECLARE_WORK(n, f) \ |
246 | struct work_struct n = __WORK_INITIALIZER(n, f) |
247 | |
248 | #define DECLARE_DELAYED_WORK(n, f) \ |
249 | struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0) |
250 | |
251 | #define DECLARE_DEFERRABLE_WORK(n, f) \ |
252 | struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE) |
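
/*
 * A minimal declaration sketch (my_work and my_work_fn are hypothetical):
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("my_work ran in process context\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 * and later, e.g. from an interrupt handler:
 *
 *	schedule_work(&my_work);
 */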
253 | |
254 | #ifdef CONFIG_DEBUG_OBJECTS_WORK |
255 | extern void __init_work(struct work_struct *work, int onstack); |
256 | extern void destroy_work_on_stack(struct work_struct *work); |
257 | extern void destroy_delayed_work_on_stack(struct delayed_work *work); |
258 | static inline unsigned int work_static(struct work_struct *work) |
259 | { |
260 | return *work_data_bits(work) & WORK_STRUCT_STATIC; |
261 | } |
262 | #else |
263 | static inline void __init_work(struct work_struct *work, int onstack) { } |
264 | static inline void destroy_work_on_stack(struct work_struct *work) { } |
265 | static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { } |
266 | static inline unsigned int work_static(struct work_struct *work) { return 0; } |
267 | #endif |
268 | |
269 | /* |
270 | * initialize all of a work item in one go |
271 | * |
272 | * NOTE! No point in using "atomic_long_set()": using a direct |
273 | * assignment of the work data initializer allows the compiler |
274 | * to generate better code. |
275 | */ |
276 | #ifdef CONFIG_LOCKDEP |
277 | #define __INIT_WORK_KEY(_work, _func, _onstack, _key) \ |
278 | do { \ |
279 | __init_work((_work), _onstack); \ |
280 | (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \ |
281 | lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, (_key), 0); \ |
282 | INIT_LIST_HEAD(&(_work)->entry); \ |
283 | (_work)->func = (_func); \ |
284 | } while (0) |
285 | #else |
286 | #define __INIT_WORK_KEY(_work, _func, _onstack, _key) \ |
287 | do { \ |
288 | __init_work((_work), _onstack); \ |
289 | (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \ |
290 | INIT_LIST_HEAD(&(_work)->entry); \ |
291 | (_work)->func = (_func); \ |
292 | } while (0) |
293 | #endif |
294 | |
295 | #define __INIT_WORK(_work, _func, _onstack) \ |
296 | do { \ |
297 | static __maybe_unused struct lock_class_key __key; \ |
298 | \ |
299 | __INIT_WORK_KEY(_work, _func, _onstack, &__key); \ |
300 | } while (0) |
301 | |
302 | #define INIT_WORK(_work, _func) \ |
303 | __INIT_WORK((_work), (_func), 0) |
304 | |
305 | #define INIT_WORK_ONSTACK(_work, _func) \ |
306 | __INIT_WORK((_work), (_func), 1) |
307 | |
308 | #define INIT_WORK_ONSTACK_KEY(_work, _func, _key) \ |
309 | __INIT_WORK_KEY((_work), (_func), 1, _key) |
310 | |
311 | #define __INIT_DELAYED_WORK(_work, _func, _tflags) \ |
312 | do { \ |
313 | INIT_WORK(&(_work)->work, (_func)); \ |
314 | __init_timer(&(_work)->timer, \ |
315 | delayed_work_timer_fn, \ |
316 | (_tflags) | TIMER_IRQSAFE); \ |
317 | } while (0) |
318 | |
319 | #define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags) \ |
320 | do { \ |
321 | INIT_WORK_ONSTACK(&(_work)->work, (_func)); \ |
322 | __init_timer_on_stack(&(_work)->timer, \ |
323 | delayed_work_timer_fn, \ |
324 | (_tflags) | TIMER_IRQSAFE); \ |
325 | } while (0) |
326 | |
327 | #define INIT_DELAYED_WORK(_work, _func) \ |
328 | __INIT_DELAYED_WORK(_work, _func, 0) |
329 | |
330 | #define INIT_DELAYED_WORK_ONSTACK(_work, _func) \ |
331 | __INIT_DELAYED_WORK_ONSTACK(_work, _func, 0) |
332 | |
333 | #define INIT_DEFERRABLE_WORK(_work, _func) \ |
334 | __INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE) |
335 | |
336 | #define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func) \ |
337 | __INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE) |
338 | |
339 | #define INIT_RCU_WORK(_work, _func) \ |
340 | INIT_WORK(&(_work)->work, (_func)) |
341 | |
342 | #define INIT_RCU_WORK_ONSTACK(_work, _func) \ |
343 | INIT_WORK_ONSTACK(&(_work)->work, (_func)) |
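
/*
 * A sketch of the usual dynamic-initialization pattern: embed the
 * work_struct in a containing object and recover it with container_of()
 * in the handler (struct my_ctx and my_handler are hypothetical):
 *
 *	struct my_ctx {
 *		struct work_struct work;
 *		int payload;
 *	};
 *
 *	static void my_handler(struct work_struct *work)
 *	{
 *		struct my_ctx *ctx = container_of(work, struct my_ctx, work);
 *
 *		pr_info("payload=%d\n", ctx->payload);
 *	}
 *
 *	INIT_WORK(&ctx->work, my_handler);
 */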
344 | |
345 | /** |
346 | * work_pending - Find out whether a work item is currently pending |
347 | * @work: The work item in question |
348 | */ |
349 | #define work_pending(work) \ |
350 | test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) |
351 | |
352 | /** |
353 | * delayed_work_pending - Find out whether a delayable work item is currently |
354 | * pending |
355 | * @w: The work item in question |
356 | */ |
357 | #define delayed_work_pending(w) \ |
358 | work_pending(&(w)->work) |
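
/*
 * E.g. an advisory check before rearming (my_dwork is hypothetical; the
 * queueing functions already test and set PENDING atomically, so this
 * only avoids a redundant call):
 *
 *	if (!delayed_work_pending(&my_dwork))
 *		schedule_delayed_work(&my_dwork, HZ);
 */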
359 | |
360 | /* |
361 | * Workqueue flags and constants. For details, please refer to |
362 | * Documentation/core-api/workqueue.rst. |
363 | */ |
364 | enum { |
365 | WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ |
366 | WQ_FREEZABLE = 1 << 2, /* freeze during suspend */ |
367 | WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */ |
368 | WQ_HIGHPRI = 1 << 4, /* high priority */ |
369 | WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */ |
370 | WQ_SYSFS = 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */ |
371 | |
372 | /* |
373 | * Per-cpu workqueues are generally preferred because they tend to |
374 | * show better performance thanks to cache locality. Per-cpu |
375 | * workqueues exclude the scheduler from choosing the CPU to |
376 | * execute the worker threads, which has an unfortunate side effect |
377 | * of increasing power consumption. |
378 | * |
379 | * The scheduler considers a CPU idle if it doesn't have any task |
380 | * to execute and tries to keep idle cores idle to conserve power; |
381 | * however, for example, a per-cpu work item scheduled from an |
382 | * interrupt handler on an idle CPU will force the scheduler to |
383 | * execute the work item on that CPU breaking the idleness, which in |
384 | * turn may lead to more scheduling choices which are sub-optimal |
385 | * in terms of power consumption. |
386 | * |
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel param
	 * is specified. Per-cpu workqueues which are found to contribute
	 * significantly to power consumption are marked with this flag, and
	 * enabling the power_efficient mode leads to noticeable power saving
	 * at the cost of a small performance disadvantage.
394 | * |
395 | * http://thread.gmane.org/gmane.linux.kernel/1480396 |
396 | */ |
397 | WQ_POWER_EFFICIENT = 1 << 7, |
398 | |
399 | __WQ_DESTROYING = 1 << 15, /* internal: workqueue is destroying */ |
400 | __WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */ |
401 | __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */ |
402 | __WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */ |
403 | __WQ_ORDERED_EXPLICIT = 1 << 19, /* internal: alloc_ordered_workqueue() */ |
404 | |
405 | WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ |
406 | WQ_UNBOUND_MAX_ACTIVE = WQ_MAX_ACTIVE, |
407 | WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2, |
408 | }; |
409 | |
410 | /* |
411 | * System-wide workqueues which are always present. |
412 | * |
413 | * system_wq is the one used by schedule[_delayed]_work[_on](). |
414 | * Multi-CPU multi-threaded. There are users which expect relatively |
415 | * short queue flush time. Don't queue works which can run for too |
416 | * long. |
417 | * |
418 | * system_highpri_wq is similar to system_wq but for work items which |
419 | * require WQ_HIGHPRI. |
420 | * |
421 | * system_long_wq is similar to system_wq but may host long running |
422 | * works. Queue flushing might take relatively long. |
423 | * |
 * system_unbound_wq is an unbound workqueue. Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached
 * and resources are available.
428 | * |
429 | * system_freezable_wq is equivalent to system_wq except that it's |
430 | * freezable. |
431 | * |
432 | * *_power_efficient_wq are inclined towards saving power and converted |
433 | * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise, |
 * they are the same as their non-power-efficient counterparts - e.g.
435 | * system_power_efficient_wq is identical to system_wq if |
436 | * 'wq_power_efficient' is disabled. See WQ_POWER_EFFICIENT for more info. |
437 | */ |
438 | extern struct workqueue_struct *system_wq; |
439 | extern struct workqueue_struct *system_highpri_wq; |
440 | extern struct workqueue_struct *system_long_wq; |
441 | extern struct workqueue_struct *system_unbound_wq; |
442 | extern struct workqueue_struct *system_freezable_wq; |
443 | extern struct workqueue_struct *system_power_efficient_wq; |
444 | extern struct workqueue_struct *system_freezable_power_efficient_wq; |
445 | |
446 | /** |
447 | * alloc_workqueue - allocate a workqueue |
448 | * @fmt: printf format for the name of the workqueue |
449 | * @flags: WQ_* flags |
450 | * @max_active: max in-flight work items per CPU, 0 for default |
451 | * remaining args: args for @fmt |
452 | * |
453 | * Allocate a workqueue with the specified parameters. For detailed |
454 | * information on WQ_* flags, please refer to |
455 | * Documentation/core-api/workqueue.rst. |
456 | * |
457 | * RETURNS: |
458 | * Pointer to the allocated workqueue on success, %NULL on failure. |
459 | */ |
460 | __printf(1, 4) struct workqueue_struct * |
461 | alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...); |
462 | |
463 | /** |
464 | * alloc_ordered_workqueue - allocate an ordered workqueue |
465 | * @fmt: printf format for the name of the workqueue |
466 | * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful) |
467 | * @args: args for @fmt |
468 | * |
469 | * Allocate an ordered workqueue. An ordered workqueue executes at |
470 | * most one work item at any given time in the queued order. They are |
471 | * implemented as unbound workqueues with @max_active of one. |
472 | * |
473 | * RETURNS: |
474 | * Pointer to the allocated workqueue on success, %NULL on failure. |
475 | */ |
476 | #define alloc_ordered_workqueue(fmt, flags, args...) \ |
477 | alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | \ |
478 | __WQ_ORDERED_EXPLICIT | (flags), 1, ##args) |
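
/*
 * A hedged allocation/teardown sketch (the name "my_wq" and the flags
 * are illustrative assumptions):
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
 *	if (!wq)
 *		return -ENOMEM;
 *	queue_work(wq, &my_work);
 *	...
 *	destroy_workqueue(wq);
 */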
479 | |
480 | #define create_workqueue(name) \ |
481 | alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name)) |
482 | #define create_freezable_workqueue(name) \ |
483 | alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND | \ |
484 | WQ_MEM_RECLAIM, 1, (name)) |
485 | #define create_singlethread_workqueue(name) \ |
486 | alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name) |
487 | |
488 | extern void destroy_workqueue(struct workqueue_struct *wq); |
489 | |
490 | struct workqueue_attrs *alloc_workqueue_attrs(void); |
491 | void free_workqueue_attrs(struct workqueue_attrs *attrs); |
492 | int apply_workqueue_attrs(struct workqueue_struct *wq, |
493 | const struct workqueue_attrs *attrs); |
494 | int workqueue_set_unbound_cpumask(cpumask_var_t cpumask); |
495 | |
496 | extern bool queue_work_on(int cpu, struct workqueue_struct *wq, |
497 | struct work_struct *work); |
498 | extern bool queue_work_node(int node, struct workqueue_struct *wq, |
499 | struct work_struct *work); |
500 | extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, |
501 | struct delayed_work *work, unsigned long delay); |
502 | extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, |
503 | struct delayed_work *dwork, unsigned long delay); |
504 | extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork); |
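
/*
 * A hedged rcu_work sketch: the handler runs on the given workqueue only
 * after an RCU grace period has elapsed (struct my_obj and my_free_fn
 * are hypothetical):
 *
 *	static void my_free_fn(struct work_struct *work)
 *	{
 *		struct rcu_work *rwork = to_rcu_work(work);
 *		struct my_obj *obj = container_of(rwork, struct my_obj, rwork);
 *
 *		kfree(obj);
 *	}
 *
 *	INIT_RCU_WORK(&obj->rwork, my_free_fn);
 *	queue_rcu_work(system_wq, &obj->rwork);
 */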
505 | |
506 | extern void __flush_workqueue(struct workqueue_struct *wq); |
507 | extern void drain_workqueue(struct workqueue_struct *wq); |
508 | |
509 | extern int schedule_on_each_cpu(work_func_t func); |
510 | |
511 | int execute_in_process_context(work_func_t fn, struct execute_work *); |
512 | |
513 | extern bool flush_work(struct work_struct *work); |
514 | extern bool cancel_work(struct work_struct *work); |
515 | extern bool cancel_work_sync(struct work_struct *work); |
516 | |
517 | extern bool flush_delayed_work(struct delayed_work *dwork); |
518 | extern bool cancel_delayed_work(struct delayed_work *dwork); |
519 | extern bool cancel_delayed_work_sync(struct delayed_work *dwork); |
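
/*
 * A common teardown ordering sketch (ctx, dwork and wq are
 * hypothetical): cancel synchronously before destroying the workqueue
 * or freeing the containing object.
 *
 *	cancel_delayed_work_sync(&ctx->dwork);
 *	destroy_workqueue(ctx->wq);
 *	kfree(ctx);
 */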
520 | |
521 | extern bool flush_rcu_work(struct rcu_work *rwork); |
522 | |
523 | extern void workqueue_set_max_active(struct workqueue_struct *wq, |
524 | int max_active); |
525 | extern struct work_struct *current_work(void); |
526 | extern bool current_is_workqueue_rescuer(void); |
527 | extern bool workqueue_congested(int cpu, struct workqueue_struct *wq); |
528 | extern unsigned int work_busy(struct work_struct *work); |
529 | extern __printf(1, 2) void set_worker_desc(const char *fmt, ...); |
530 | extern void print_worker_info(const char *log_lvl, struct task_struct *task); |
531 | extern void show_all_workqueues(void); |
532 | extern void show_freezable_workqueues(void); |
533 | extern void show_one_workqueue(struct workqueue_struct *wq); |
534 | extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task); |
535 | |
536 | /** |
537 | * queue_work - queue work on a workqueue |
538 | * @wq: workqueue to use |
539 | * @work: work to queue |
540 | * |
541 | * Returns %false if @work was already on a queue, %true otherwise. |
542 | * |
543 | * We queue the work to the CPU on which it was submitted, but if the CPU dies |
544 | * it can be processed by another CPU. |
545 | * |
546 | * Memory-ordering properties: If it returns %true, guarantees that all stores |
547 | * preceding the call to queue_work() in the program order will be visible from |
548 | * the CPU which will execute @work by the time such work executes, e.g., |
549 | * |
550 | * { x is initially 0 } |
551 | * |
552 | * CPU0 CPU1 |
553 | * |
554 | * WRITE_ONCE(x, 1); [ @work is being executed ] |
555 | * r0 = queue_work(wq, work); r1 = READ_ONCE(x); |
556 | * |
557 | * Forbids: r0 == true && r1 == 0 |
558 | */ |
559 | static inline bool queue_work(struct workqueue_struct *wq, |
560 | struct work_struct *work) |
561 | { |
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
563 | } |
564 | |
565 | /** |
566 | * queue_delayed_work - queue work on a workqueue after delay |
567 | * @wq: workqueue to use |
568 | * @dwork: delayable work to queue |
569 | * @delay: number of jiffies to wait before queueing |
570 | * |
571 | * Equivalent to queue_delayed_work_on() but tries to use the local CPU. |
572 | */ |
573 | static inline bool queue_delayed_work(struct workqueue_struct *wq, |
574 | struct delayed_work *dwork, |
575 | unsigned long delay) |
576 | { |
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
578 | } |
579 | |
580 | /** |
581 | * mod_delayed_work - modify delay of or queue a delayed work |
582 | * @wq: workqueue to use |
583 | * @dwork: work to queue |
584 | * @delay: number of jiffies to wait before queueing |
585 | * |
586 | * mod_delayed_work_on() on local CPU. |
587 | */ |
588 | static inline bool mod_delayed_work(struct workqueue_struct *wq, |
589 | struct delayed_work *dwork, |
590 | unsigned long delay) |
591 | { |
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
593 | } |
594 | |
595 | /** |
596 | * schedule_work_on - put work task on a specific cpu |
597 | * @cpu: cpu to put the work task on |
598 | * @work: job to be done |
599 | * |
 * This puts a job on a specific cpu.
601 | */ |
602 | static inline bool schedule_work_on(int cpu, struct work_struct *work) |
603 | { |
	return queue_work_on(cpu, system_wq, work);
605 | } |
606 | |
607 | /** |
608 | * schedule_work - put work task in global workqueue |
609 | * @work: job to be done |
610 | * |
611 | * Returns %false if @work was already on the kernel-global workqueue and |
612 | * %true otherwise. |
613 | * |
614 | * This puts a job in the kernel-global workqueue if it was not already |
615 | * queued and leaves it in the same position on the kernel-global |
616 | * workqueue otherwise. |
617 | * |
 * Shares the same memory-ordering properties as queue_work(); cf. the
 * DocBook header of queue_work().
620 | */ |
621 | static inline bool schedule_work(struct work_struct *work) |
622 | { |
	return queue_work(system_wq, work);
624 | } |
625 | |
626 | /* |
 * Detect attempts to flush system-wide workqueues at compile time when
 * possible. Warn about attempts to flush system-wide workqueues at runtime.
629 | * |
630 | * See https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp |
631 | * for reasons and steps for converting system-wide workqueues into local workqueues. |
632 | */ |
633 | extern void __warn_flushing_systemwide_wq(void) |
	__compiletime_warning("Please avoid flushing system-wide workqueues.");
635 | |
/* Please stop using this function; it will be removed in the near future. */
637 | #define flush_scheduled_work() \ |
638 | ({ \ |
639 | __warn_flushing_systemwide_wq(); \ |
640 | __flush_workqueue(system_wq); \ |
641 | }) |
642 | |
643 | #define flush_workqueue(wq) \ |
644 | ({ \ |
645 | struct workqueue_struct *_wq = (wq); \ |
646 | \ |
647 | if ((__builtin_constant_p(_wq == system_wq) && \ |
648 | _wq == system_wq) || \ |
649 | (__builtin_constant_p(_wq == system_highpri_wq) && \ |
650 | _wq == system_highpri_wq) || \ |
651 | (__builtin_constant_p(_wq == system_long_wq) && \ |
652 | _wq == system_long_wq) || \ |
653 | (__builtin_constant_p(_wq == system_unbound_wq) && \ |
654 | _wq == system_unbound_wq) || \ |
655 | (__builtin_constant_p(_wq == system_freezable_wq) && \ |
656 | _wq == system_freezable_wq) || \ |
657 | (__builtin_constant_p(_wq == system_power_efficient_wq) && \ |
658 | _wq == system_power_efficient_wq) || \ |
659 | (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \ |
660 | _wq == system_freezable_power_efficient_wq)) \ |
661 | __warn_flushing_systemwide_wq(); \ |
662 | __flush_workqueue(_wq); \ |
663 | }) |
664 | |
665 | /** |
666 | * schedule_delayed_work_on - queue work in global workqueue on CPU after delay |
667 | * @cpu: cpu to use |
668 | * @dwork: job to be done |
669 | * @delay: number of jiffies to wait |
670 | * |
671 | * After waiting for a given time this puts a job in the kernel-global |
672 | * workqueue on the specified CPU. |
673 | */ |
674 | static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork, |
675 | unsigned long delay) |
676 | { |
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
678 | } |
679 | |
680 | /** |
681 | * schedule_delayed_work - put work task in global workqueue after delay |
682 | * @dwork: job to be done |
683 | * @delay: number of jiffies to wait or 0 for immediate execution |
684 | * |
685 | * After waiting for a given time this puts a job in the kernel-global |
686 | * workqueue. |
687 | */ |
688 | static inline bool schedule_delayed_work(struct delayed_work *dwork, |
689 | unsigned long delay) |
690 | { |
	return queue_delayed_work(system_wq, dwork, delay);
692 | } |
693 | |
694 | #ifndef CONFIG_SMP |
695 | static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg) |
696 | { |
697 | return fn(arg); |
698 | } |
699 | static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg) |
700 | { |
701 | return fn(arg); |
702 | } |
703 | #else |
704 | long work_on_cpu_key(int cpu, long (*fn)(void *), |
705 | void *arg, struct lock_class_key *key); |
706 | /* |
707 | * A new key is defined for each caller to make sure the work |
708 | * associated with the function doesn't share its locking class. |
709 | */ |
710 | #define work_on_cpu(_cpu, _fn, _arg) \ |
711 | ({ \ |
712 | static struct lock_class_key __key; \ |
713 | \ |
714 | work_on_cpu_key(_cpu, _fn, _arg, &__key); \ |
715 | }) |
716 | |
717 | long work_on_cpu_safe_key(int cpu, long (*fn)(void *), |
718 | void *arg, struct lock_class_key *key); |
719 | |
720 | /* |
721 | * A new key is defined for each caller to make sure the work |
722 | * associated with the function doesn't share its locking class. |
723 | */ |
724 | #define work_on_cpu_safe(_cpu, _fn, _arg) \ |
725 | ({ \ |
726 | static struct lock_class_key __key; \ |
727 | \ |
728 | work_on_cpu_safe_key(_cpu, _fn, _arg, &__key); \ |
729 | }) |
730 | #endif /* CONFIG_SMP */ |
731 | |
732 | #ifdef CONFIG_FREEZER |
733 | extern void freeze_workqueues_begin(void); |
734 | extern bool freeze_workqueues_busy(void); |
735 | extern void thaw_workqueues(void); |
736 | #endif /* CONFIG_FREEZER */ |
737 | |
738 | #ifdef CONFIG_SYSFS |
739 | int workqueue_sysfs_register(struct workqueue_struct *wq); |
740 | #else /* CONFIG_SYSFS */ |
741 | static inline int workqueue_sysfs_register(struct workqueue_struct *wq) |
742 | { return 0; } |
743 | #endif /* CONFIG_SYSFS */ |
744 | |
745 | #ifdef CONFIG_WQ_WATCHDOG |
746 | void wq_watchdog_touch(int cpu); |
747 | #else /* CONFIG_WQ_WATCHDOG */ |
748 | static inline void wq_watchdog_touch(int cpu) { } |
749 | #endif /* CONFIG_WQ_WATCHDOG */ |
750 | |
751 | #ifdef CONFIG_SMP |
752 | int workqueue_prepare_cpu(unsigned int cpu); |
753 | int workqueue_online_cpu(unsigned int cpu); |
754 | int workqueue_offline_cpu(unsigned int cpu); |
755 | #endif |
756 | |
757 | void __init workqueue_init_early(void); |
758 | void __init workqueue_init(void); |
759 | void __init workqueue_init_topology(void); |
760 | |
761 | #endif |
762 | |