/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/rcupdate.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(struct timer_list *t);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one.
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_INACTIVE_BIT = 1,	/* work item is inactive */
	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_INACTIVE	= 1 << WORK_STRUCT_INACTIVE_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS),

	/* not bound to any CPU, prefer the local CPU */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/*
	 * Reserve 8 bits off of pwq pointer w/ debugobjects turned off.
	 * This makes pwqs aligned to 256 bytes and allows 16 workqueue
	 * flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	/* data contains off-queue information when !WORK_STRUCT_PWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,

	__WORK_OFFQ_CANCELING	= WORK_OFFQ_FLAG_BASE,
	WORK_OFFQ_CANCELING	= (1 << __WORK_OFFQ_CANCELING),

	/*
	 * When a work item is off queue, its high bits point to the last
	 * pool it was on.  Cap at 31 bits and use the highest number to
	 * indicate that no pool is associated.
	 */
	WORK_OFFQ_FLAG_BITS	= 1,
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
	WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,

	/* convenience constants */
	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_POOL	= (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 24,
};
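
/*
 * Illustration (not part of the API): with debugobjects off, the low
 * 8 bits of ->data are flags and the remaining bits hold either a pwq
 * pointer (when WORK_STRUCT_PWQ is set) or off-queue information.  A
 * hedged sketch of inspecting the pending bit via work_data_bits():
 *
 *	if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&my_work)))
 *		pr_debug("my_work is already queued\n");
 *
 * "my_work" is a hypothetical struct work_struct; real users should
 * prefer the work_pending() helper defined below rather than poking
 * at the bits directly.
 */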

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;

	/* target workqueue ->rcu uses to queue ->work */
	struct workqueue_struct *wq;
};

/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 *
 * This can be used to change attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @nice: nice level
	 */
	int nice;

	/**
	 * @cpumask: allowed CPUs
	 */
	cpumask_var_t cpumask;

	/**
	 * @no_numa: disable NUMA affinity
	 *
	 * Unlike other fields, ``no_numa`` isn't a property of a worker_pool.  It
	 * only modifies how :c:func:`apply_workqueue_attrs` selects pools and thus
	 * doesn't participate in pool hash calculations or equality comparisons.
	 */
	bool no_numa;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

static inline struct rcu_work *to_rcu_work(struct work_struct *work)
{
	return container_of(work, struct rcu_work, work);
}
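
/*
 * Illustration: a work handler only receives the embedded
 * struct work_struct, so handlers for delayed work recover the
 * containing object with to_delayed_work() and container_of().
 * A hedged sketch with a hypothetical "struct my_ctx" that embeds
 * a delayed_work:
 *
 *	struct my_ctx {
 *		struct delayed_work dwork;
 *		int value;
 *	};
 *
 *	static void my_ctx_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *		struct my_ctx *ctx = container_of(dwork, struct my_ctx, dwork);
 *
 *		pr_info("value=%d\n", ctx->value);
 *	}
 */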

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
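
/*
 * Illustration: DECLARE_WORK() defines a fully initialized, statically
 * allocated work item.  A hedged sketch using a hypothetical handler:
 *
 *	static void my_handler(struct work_struct *work)
 *	{
 *		pr_info("running\n");
 *	}
 *	static DECLARE_WORK(my_work, my_handler);
 *
 *	...
 *	schedule_work(&my_work);
 *
 * Statically declared items must not also be passed to INIT_WORK();
 * the initializer has already set them up (including the debugobjects
 * static marker when CONFIG_DEBUG_OBJECTS_WORK is enabled).
 */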

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__init_timer(&(_work)->timer,				\
			     delayed_work_timer_fn,			\
			     (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__init_timer_on_stack(&(_work)->timer,			\
				      delayed_work_timer_fn,		\
				      (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

#define INIT_RCU_WORK(_work, _func)					\
	INIT_WORK(&(_work)->work, (_func))

#define INIT_RCU_WORK_ONSTACK(_work, _func)				\
	INIT_WORK_ONSTACK(&(_work)->work, (_func))
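
/*
 * Illustration: dynamically embedded work items are initialized with
 * INIT_WORK()/INIT_DELAYED_WORK() before first use.  A hedged sketch
 * with a hypothetical device structure and handlers:
 *
 *	struct my_dev {
 *		struct work_struct irq_work;
 *		struct delayed_work poll_work;
 *	};
 *
 *	static int my_dev_setup(struct my_dev *dev)
 *	{
 *		INIT_WORK(&dev->irq_work, my_irq_fn);
 *		INIT_DELAYED_WORK(&dev->poll_work, my_poll_fn);
 *		return 0;
 *	}
 *
 * On-stack items must use the _ONSTACK variants and be torn down with
 * destroy_work_on_stack()/destroy_delayed_work_on_stack() so that
 * debugobjects tracking stays consistent.
 */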

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)
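
/*
 * Note that both tests are inherently racy: the pending bit can change
 * the moment it has been read.  A hedged sketch of the common pattern,
 * using a hypothetical "dev" with a delayed_work member:
 *
 *	if (!delayed_work_pending(&dev->poll_work))
 *		queue_delayed_work(system_wq, &dev->poll_work, HZ);
 *
 * The check is only an optimization; queue_delayed_work() itself
 * returns false when the item was already pending.
 */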

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum {
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU breaking the idleness, which in
	 * turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel param
	 * is specified.  Per-cpu workqueues which are identified as
	 * contributing significantly to power consumption are marked with
	 * this flag; enabling the power_efficient mode then leads to
	 * noticeable power saving at the cost of a small performance
	 * disadvantage.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1480396
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */
	__WQ_ORDERED_EXPLICIT	= 1 << 19, /* internal: alloc_ordered_workqueue() */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue work items which can run for
 * too long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * work items.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued work items
 * are executed immediately as long as the max_active limit is not
 * reached and resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are the same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * remaining args: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
__printf(1, 4) struct workqueue_struct *
alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
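
/*
 * Illustration: a hedged sketch of allocating a dedicated workqueue
 * for a hypothetical driver "myd", unbound and freezable, with the
 * default max_active:
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("myd/%d", WQ_UNBOUND | WQ_FREEZABLE, 0, id);
 *	if (!wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(wq);
 *
 * WQ_MEM_RECLAIM must be added if the workqueue can sit in a memory
 * reclaim path, so that it always has a rescuer thread available.
 */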

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |		\
			__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)
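
/*
 * Illustration: ordered workqueues are a drop-in replacement for a
 * single kthread that processes items strictly in submission order.
 * A hedged sketch with a hypothetical name:
 *
 *	struct workqueue_struct *ordered_wq;
 *
 *	ordered_wq = alloc_ordered_workqueue("myd-ordered", 0);
 *	if (!ordered_wq)
 *		return -ENOMEM;
 *
 * Work items queued on "ordered_wq" never run concurrently with each
 * other, regardless of how many CPUs are available.
 */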

#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND | \
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			  struct work_struct *work);
extern bool queue_work_node(int node, struct workqueue_struct *wq,
			    struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				  struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
				struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
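
/*
 * Illustration: queue_rcu_work() runs the work item after an RCU grace
 * period has elapsed, which is handy for freeing RCU-protected objects
 * from process context.  A hedged sketch with a hypothetical object:
 *
 *	struct my_obj {
 *		struct rcu_work rwork;
 *	};
 *
 *	static void my_obj_free_fn(struct work_struct *work)
 *	{
 *		struct my_obj *obj =
 *			container_of(to_rcu_work(work), struct my_obj, rwork);
 *
 *		kfree(obj);
 *	}
 *
 *	INIT_RCU_WORK(&obj->rwork, my_obj_free_fn);
 *	queue_rcu_work(system_wq, &obj->rwork);
 */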

extern void __flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
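
/*
 * Illustration: the usual teardown ordering is to stop new submissions
 * first, then cancel synchronously so the handler cannot run past this
 * point.  A hedged sketch for a hypothetical device remove path:
 *
 *	static void my_dev_remove(struct my_dev *dev)
 *	{
 *		cancel_delayed_work_sync(&dev->poll_work);
 *		cancel_work_sync(&dev->irq_work);
 *		destroy_workqueue(dev->wq);
 *	}
 *
 * The _sync variants must not be called while holding a lock that the
 * work handler itself takes, or the cancel can deadlock.
 */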

extern bool flush_rcu_work(struct rcu_work *rwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_all_workqueues(void);
extern void show_one_workqueue(struct workqueue_struct *wq);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 *
 * Memory-ordering properties:  If it returns %true, guarantees that all stores
 * preceding the call to queue_work() in the program order will be visible from
 * the CPU which will execute @work by the time such work executes, e.g.,
 *
 * { x is initially 0 }
 *
 *   CPU0				CPU1
 *
 *   WRITE_ONCE(x, 1);			[ @work is being executed ]
 *   r0 = queue_work(wq, work);		  r1 = READ_ONCE(x);
 *
 * Forbids: r0 == true && r1 == 0
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
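
/*
 * Illustration: mod_delayed_work() re-arms the timer whether or not
 * @dwork was already pending, which makes it the natural primitive for
 * debouncing bursts of events.  A hedged sketch; each event pushes the
 * deadline back, so the handler runs once, 100ms after the last event:
 *
 *	mod_delayed_work(system_wq, &dev->debounce_work,
 *			 msecs_to_jiffies(100));
 */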

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 *
 * Shares the same memory-ordering properties of queue_work(), cf. the
 * DocBook header of queue_work().
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}
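
/*
 * Illustration: schedule_work() is safe from hard interrupt context,
 * which makes it the standard way to defer non-trivial processing out
 * of an interrupt handler.  A hedged sketch with hypothetical names:
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		schedule_work(&dev->irq_work);
 *		return IRQ_HANDLED;
 *	}
 *
 * For anything latency-sensitive or long-running, prefer a dedicated
 * workqueue over system_wq.
 */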

/*
 * Detect attempt to flush system-wide workqueues at compile time when possible.
 *
 * See https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
 * for reasons and steps for converting system-wide workqueues into local workqueues.
 */
extern void __warn_flushing_systemwide_wq(void)
	__compiletime_warning("Please avoid flushing system-wide workqueues.");

/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * It's very easy to get into trouble if you don't take great care.
 * Either of the following situations will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 *
 * Please stop calling this function! A conversion to stop flushing system-wide
 * workqueues is in progress.  This function will be removed once all in-tree
 * users have been converted.
 */
/*
 * The background of commit 771c035372a036f8 ("deprecate the
 * '__deprecated' attribute warnings entirely and for good") is that,
 * since Linus builds all modules between every single pull he does,
 * the standard kernel build needs to be _clean_ in order to be able to
 * notice when new problems happen.  Therefore, don't emit a warning
 * while there are in-tree users.
 */
#define flush_scheduled_work()						\
({									\
	if (0)								\
		__warn_flushing_systemwide_wq();			\
	__flush_workqueue(system_wq);					\
})

/*
 * Although there are no longer any in-tree callers, for now just emit a
 * warning in order to give out-of-tree callers time to update.
 */
#define flush_workqueue(wq)						\
({									\
	struct workqueue_struct *_wq = (wq);				\
									\
	if ((__builtin_constant_p(_wq == system_wq) &&			\
	     _wq == system_wq) ||					\
	    (__builtin_constant_p(_wq == system_highpri_wq) &&		\
	     _wq == system_highpri_wq) ||				\
	    (__builtin_constant_p(_wq == system_long_wq) &&		\
	     _wq == system_long_wq) ||					\
	    (__builtin_constant_p(_wq == system_unbound_wq) &&		\
	     _wq == system_unbound_wq) ||				\
	    (__builtin_constant_p(_wq == system_freezable_wq) &&	\
	     _wq == system_freezable_wq) ||				\
	    (__builtin_constant_p(_wq == system_power_efficient_wq) &&	\
	     _wq == system_power_efficient_wq) ||			\
	    (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \
	     _wq == system_freezable_power_efficient_wq))		\
		__warn_flushing_systemwide_wq();			\
	__flush_workqueue(_wq);						\
})

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
#endif	/* CONFIG_SMP */
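
/*
 * Illustration: work_on_cpu() runs @fn synchronously on @cpu via a
 * bound work item and returns its result; handy when a function must
 * execute on a particular CPU.  A hedged sketch with a hypothetical
 * callback:
 *
 *	static long my_query_fn(void *arg)
 *	{
 *		return raw_smp_processor_id();
 *	}
 *
 *	long cpu_seen = work_on_cpu(2, my_query_fn, NULL);
 *
 * work_on_cpu_safe() additionally holds the CPU hotplug read lock so
 * the target CPU cannot go away while @fn runs.  On !SMP builds both
 * helpers degrade to a direct call of @fn.
 */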

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif	/* CONFIG_FREEZER */

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else	/* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif	/* CONFIG_SYSFS */

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else	/* CONFIG_WQ_WATCHDOG */
static inline void wq_watchdog_touch(int cpu) { }
#endif	/* CONFIG_WQ_WATCHDOG */

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

void __init workqueue_init_early(void);
void __init workqueue_init(void);

#endif	/* _LINUX_WORKQUEUE_H */