/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/rcupdate.h>
#include <linux/workqueue_types.h>

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum work_bits {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_INACTIVE_BIT,	/* work item is inactive */
	WORK_STRUCT_PWQ_BIT,		/* data points to pwq */
	WORK_STRUCT_LINKED_BIT,		/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT,		/* static initializer (debugobjects) */
#endif
	WORK_STRUCT_FLAG_BITS,

	/* color for workqueue flushing */
	WORK_STRUCT_COLOR_SHIFT	= WORK_STRUCT_FLAG_BITS,
	WORK_STRUCT_COLOR_BITS	= 4,

	/*
	 * When WORK_STRUCT_PWQ is set, reserve 8 bits off of pwq pointer w/
	 * debugobjects turned off. This makes pwqs aligned to 256 bytes (512
	 * bytes w/ DEBUG_OBJECTS_WORK) and allows 16 workqueue flush colors.
	 *
	 * MSB
	 * [ pwq pointer ] [ flush color ] [ STRUCT flags ]
	 *                     4 bits        4 or 5 bits
	 */
	WORK_STRUCT_PWQ_SHIFT	= WORK_STRUCT_COLOR_SHIFT + WORK_STRUCT_COLOR_BITS,

	/*
	 * data contains off-queue information when !WORK_STRUCT_PWQ.
	 *
	 * MSB
	 * [ pool ID ] [ OFFQ flags ] [ STRUCT flags ]
	 *                 1 bit        4 or 5 bits
	 */
	WORK_OFFQ_FLAG_SHIFT	= WORK_STRUCT_FLAG_BITS,
	WORK_OFFQ_CANCELING_BIT = WORK_OFFQ_FLAG_SHIFT,
	WORK_OFFQ_FLAG_END,
	WORK_OFFQ_FLAG_BITS	= WORK_OFFQ_FLAG_END - WORK_OFFQ_FLAG_SHIFT,

	/*
	 * When a work item is off queue, the high bits encode off-queue flags
	 * and the last pool it was on. Cap pool ID to 31 bits and use the
	 * highest number to indicate that no pool is associated.
	 */
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_SHIFT + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
};

enum work_flags {
	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_INACTIVE	= 1 << WORK_STRUCT_INACTIVE_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif
};

enum wq_misc_consts {
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS),

	/* not bound to any CPU, prefer the local CPU */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 24,
};

/* Convenience constants - of type 'unsigned long', not 'enum'! */
#define WORK_OFFQ_CANCELING	(1ul << WORK_OFFQ_CANCELING_BIT)
#define WORK_OFFQ_POOL_NONE	((1ul << WORK_OFFQ_POOL_BITS) - 1)
#define WORK_STRUCT_NO_POOL	(WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT)
#define WORK_STRUCT_PWQ_MASK	(~((1ul << WORK_STRUCT_PWQ_SHIFT) - 1))

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;

	/* target workqueue ->rcu uses to queue ->work */
	struct workqueue_struct *wq;
};

enum wq_affn_scope {
	WQ_AFFN_DFL,			/* use system default */
	WQ_AFFN_CPU,			/* one pod per CPU */
	WQ_AFFN_SMT,			/* one pod per SMT */
	WQ_AFFN_CACHE,			/* one pod per LLC */
	WQ_AFFN_NUMA,			/* one pod per NUMA node */
	WQ_AFFN_SYSTEM,			/* one pod across the whole system */

	WQ_AFFN_NR_TYPES,
};

/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 *
 * This can be used to change attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @nice: nice level
	 */
	int nice;

	/**
	 * @cpumask: allowed CPUs
	 *
	 * Work items in this workqueue are affine to these CPUs and not allowed
	 * to execute on other CPUs. A pool serving a workqueue must have the
	 * same @cpumask.
	 */
	cpumask_var_t cpumask;

	/**
	 * @__pod_cpumask: internal attribute used to create per-pod pools
	 *
	 * Internal use only.
	 *
	 * Per-pod unbound worker pools are used to improve locality. Always a
	 * subset of ->cpumask. A workqueue can be associated with multiple
	 * worker pools with disjoint @__pod_cpumask's. Whether the enforcement
	 * of a pool's @__pod_cpumask is strict depends on @affn_strict.
	 */
	cpumask_var_t __pod_cpumask;

	/**
	 * @affn_strict: affinity scope is strict
	 *
	 * If clear, workqueue will make a best-effort attempt at starting the
	 * worker inside @__pod_cpumask but the scheduler is free to migrate it
	 * outside.
	 *
	 * If set, workers are only allowed to run inside @__pod_cpumask.
	 */
	bool affn_strict;

	/*
	 * The fields below aren't properties of a worker_pool. They only
	 * modify how :c:func:`apply_workqueue_attrs` selects pools and thus
	 * don't participate in pool hash calculations or equality comparisons.
	 */

	/**
	 * @affn_scope: unbound CPU affinity scope
	 *
	 * CPU pods are used to improve execution locality of unbound work
	 * items. There are multiple pod types, one for each wq_affn_scope, and
	 * every CPU in the system belongs to one pod in every pod type. CPUs
	 * that belong to the same pod share the worker pool. For example,
	 * selecting %WQ_AFFN_NUMA makes the workqueue use a separate worker
	 * pool for each NUMA node.
	 */
	enum wq_affn_scope affn_scope;

	/**
	 * @ordered: work items must be executed one by one in queueing order
	 */
	bool ordered;
};
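
/*
 * Example: a minimal sketch of tweaking the attributes of an unbound
 * workqueue (illustrative only; "foo_wq" and the chosen nice level are
 * hypothetical, and error handling is elided). The helpers used below are
 * declared further down in this header:
 *
 *	struct workqueue_attrs *attrs = alloc_workqueue_attrs();
 *
 *	if (attrs) {
 *		attrs->nice = -5;
 *		cpumask_copy(attrs->cpumask, cpumask_of(0));
 *		apply_workqueue_attrs(foo_wq, attrs);
 *		free_workqueue_attrs(attrs);
 *	}
 */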

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

static inline struct rcu_work *to_rcu_work(struct work_struct *work)
{
	return container_of(work, struct rcu_work, work);
}

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
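
/*
 * Example: a minimal sketch of a statically initialized work item
 * (illustrative only; "frob_fn" and "frob_work" are hypothetical names):
 *
 *	static void frob_fn(struct work_struct *work)
 *	{
 *		pr_info("frob work ran\n");
 *	}
 *
 *	static DECLARE_WORK(frob_work, frob_fn);
 *
 * and later, from any context that is allowed to queue work:
 *
 *	schedule_work(&frob_work);
 */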

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK_KEY(_work, _func, _onstack, _key)			\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, (_key), 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK_KEY(_work, _func, _onstack, _key)			\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static __maybe_unused struct lock_class_key __key;	\
									\
		__INIT_WORK_KEY(_work, _func, _onstack, &__key);	\
	} while (0)

#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

#define INIT_WORK_ONSTACK_KEY(_work, _func, _key)			\
	__INIT_WORK_KEY((_work), (_func), 1, _key)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__init_timer(&(_work)->timer,				\
			     delayed_work_timer_fn,			\
			     (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__init_timer_on_stack(&(_work)->timer,			\
				      delayed_work_timer_fn,		\
				      (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

#define INIT_RCU_WORK(_work, _func)					\
	INIT_WORK(&(_work)->work, (_func))

#define INIT_RCU_WORK_ONSTACK(_work, _func)				\
	INIT_WORK_ONSTACK(&(_work)->work, (_func))
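
/*
 * Example: a minimal sketch of run-time initialization for work items
 * embedded in a driver-private structure (illustrative only; "struct foo"
 * and the handler names are hypothetical):
 *
 *	struct foo {
 *		struct work_struct io_work;
 *		struct delayed_work poll_work;
 *	};
 *
 *	static void foo_setup(struct foo *foo)
 *	{
 *		INIT_WORK(&foo->io_work, foo_io_fn);
 *		INIT_DELAYED_WORK(&foo->poll_work, foo_poll_fn);
 *	}
 *
 * Handlers typically recover the containing object with container_of()
 * or the from_work() helper defined later in this header.
 */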

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum wq_flags {
	WQ_BH			= 1 << 0, /* execute in bottom half (softirq) context */
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU breaking the idleness, which in
	 * turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if workqueue.power_efficient kernel param is
	 * specified.  Per-cpu workqueues identified as contributing
	 * significantly to power consumption are marked with this flag,
	 * and enabling the power_efficient mode leads to noticeable power
	 * saving at the cost of a small performance penalty.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1480396
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DESTROYING		= 1 << 15, /* internal: workqueue is destroying */
	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */

	/* BH wq only allows the following flags */
	__WQ_BH_ALLOWS		= WQ_BH | WQ_HIGHPRI,
};

enum wq_consts {
	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_UNBOUND_MAX_ACTIVE	= WQ_MAX_ACTIVE,
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,

	/*
	 * Per-node default cap on min_active. Unless explicitly set, min_active
	 * is set to min(max_active, WQ_DFL_MIN_ACTIVE). For more details, see
	 * workqueue_struct->min_active definition.
	 */
	WQ_DFL_MIN_ACTIVE	= 8,
};

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached
 * and resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are the same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 *
 * system_bh[_highpri]_wq are convenience interfaces to softirq. BH work
 * items are executed in the queueing CPU's BH context in queueing order.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;
extern struct workqueue_struct *system_bh_wq;
extern struct workqueue_struct *system_bh_highpri_wq;

void workqueue_softirq_action(bool highpri);
void workqueue_softirq_dead(unsigned int cpu);

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * remaining args: args for @fmt
 *
 * For a per-cpu workqueue, @max_active limits the number of in-flight work
 * items for each CPU. e.g. @max_active of 1 indicates that each CPU can be
 * executing at most one work item for the workqueue.
 *
 * For unbound workqueues, @max_active limits the number of in-flight work items
 * for the whole system. e.g. @max_active of 16 indicates that there can be
 * at most 16 work items executing for the workqueue in the whole system.
 *
 * As sharing the same active counter for an unbound workqueue across multiple
 * NUMA nodes can be expensive, @max_active is distributed to each NUMA node
 * according to the proportion of the number of online CPUs and enforced
 * independently.
 *
 * Depending on online CPU distribution, a node may end up with per-node
 * max_active which is significantly lower than @max_active, which can lead to
 * deadlocks if the per-node concurrency limit is lower than the maximum number
 * of interdependent work items for the workqueue.
 *
 * To guarantee forward progress regardless of online CPU distribution, the
 * concurrency limit on every node is guaranteed to be equal to or greater than
 * min_active which is set to min(@max_active, %WQ_DFL_MIN_ACTIVE). This means
 * that the sum of per-node max_active's may be larger than @max_active.
 *
 * For detailed information on %WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
__printf(1, 4) struct workqueue_struct *
alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
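
/*
 * Example: a minimal allocation/teardown sketch (illustrative only; the
 * queue name "foo" and the flag choice are hypothetical):
 *
 *	foo_wq = alloc_workqueue("foo", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
 *	if (!foo_wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(foo_wq);
 */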

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
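
/*
 * Example: an ordered queue for strictly serialized state updates
 * (illustrative only; "foo_ordered_wq" is a hypothetical name):
 *
 *	foo_ordered_wq = alloc_ordered_workqueue("foo_ordered", WQ_MEM_RECLAIM);
 *	if (!foo_ordered_wq)
 *		return -ENOMEM;
 */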

#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |	\
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)

#define from_work(var, callback_work, work_fieldname)	\
	container_of(callback_work, typeof(*var), work_fieldname)
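
/*
 * Example: recovering the containing object inside a work handler with
 * from_work() (illustrative only; "struct foo" and foo_do_io() are
 * hypothetical):
 *
 *	static void foo_io_fn(struct work_struct *work)
 *	{
 *		struct foo *foo = from_work(foo, work, io_work);
 *
 *		foo_do_io(foo);
 *	}
 */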

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
extern int workqueue_unbound_exclude_cpumask(cpumask_var_t cpumask);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern bool queue_work_node(int node, struct workqueue_struct *wq,
			    struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);

extern void __flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern bool flush_rcu_work(struct rcu_work *rwork);
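
/*
 * Example: a typical teardown ordering sketch (illustrative only; assumes
 * the hypothetical "struct foo" with embedded work items shown earlier):
 *
 *	cancel_delayed_work_sync(&foo->poll_work);
 *	cancel_work_sync(&foo->io_work);
 *	kfree(foo);
 *
 * The _sync variants return only after the callbacks have finished, so it
 * is safe to free the containing object afterwards.
 */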

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern void workqueue_set_min_active(struct workqueue_struct *wq,
				     int min_active);
extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_all_workqueues(void);
extern void show_freezable_workqueues(void);
extern void show_one_workqueue(struct workqueue_struct *wq);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 *
 * Memory-ordering properties:  If it returns %true, guarantees that all stores
 * preceding the call to queue_work() in the program order will be visible from
 * the CPU which will execute @work by the time such work executes, e.g.,
 *
 * { x is initially 0 }
 *
 *   CPU0				CPU1
 *
 *   WRITE_ONCE(x, 1);			[ @work is being executed ]
 *   r0 = queue_work(wq, work);		  r1 = READ_ONCE(x);
 *
 * Forbids: r0 == true && r1 == 0
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}
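
/*
 * Example: the memory-ordering guarantee above in practice (illustrative
 * only; "foo", "foo_wq" and FOO_RESET are hypothetical). Stores made
 * before a successful queue_work() are visible to the handler:
 *
 *	foo->request = FOO_RESET;
 *	queue_work(foo_wq, &foo->io_work);
 *
 * If queue_work() returned %true, the handler is guaranteed to observe
 * foo->request == FOO_RESET.
 */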

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
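
/*
 * Example: a debounce pattern using mod_delayed_work() (illustrative only;
 * "foo_wq" and "foo" are hypothetical). Every call pushes execution out to
 * @delay from now, whether or not the work was already pending:
 *
 *	mod_delayed_work(foo_wq, &foo->flush_work, msecs_to_jiffies(100));
 */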

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 *
 * Shares the same memory-ordering properties as queue_work(), cf. the
 * DocBook header of queue_work().
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

/*
 * Detect attempts to flush system-wide workqueues at compile time when
 * possible. Warn about attempts to flush system-wide workqueues at runtime.
 *
 * See https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
 * for reasons and steps for converting system-wide workqueues into local workqueues.
 */
extern void __warn_flushing_systemwide_wq(void)
	__compiletime_warning("Please avoid flushing system-wide workqueues.");
/* Please stop using this function; it will be removed in the near future. */
#define flush_scheduled_work()						\
({									\
	__warn_flushing_systemwide_wq();				\
	__flush_workqueue(system_wq);					\
})

#define flush_workqueue(wq)						\
({									\
	struct workqueue_struct *_wq = (wq);				\
									\
	if ((__builtin_constant_p(_wq == system_wq) &&			\
	     _wq == system_wq) ||					\
	    (__builtin_constant_p(_wq == system_highpri_wq) &&		\
	     _wq == system_highpri_wq) ||				\
	    (__builtin_constant_p(_wq == system_long_wq) &&		\
	     _wq == system_long_wq) ||					\
	    (__builtin_constant_p(_wq == system_unbound_wq) &&		\
	     _wq == system_unbound_wq) ||				\
	    (__builtin_constant_p(_wq == system_freezable_wq) &&	\
	     _wq == system_freezable_wq) ||				\
	    (__builtin_constant_p(_wq == system_power_efficient_wq) &&	\
	     _wq == system_power_efficient_wq) ||			\
	    (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \
	     _wq == system_freezable_power_efficient_wq))		\
		__warn_flushing_systemwide_wq();			\
	__flush_workqueue(_wq);						\
})

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu_key(int cpu, long (*fn)(void *),
		     void *arg, struct lock_class_key *key);
/*
 * A new key is defined for each caller to make sure the work
 * associated with the function doesn't share its locking class.
 */
#define work_on_cpu(_cpu, _fn, _arg)			\
({							\
	static struct lock_class_key __key;		\
							\
	work_on_cpu_key(_cpu, _fn, _arg, &__key);	\
})

long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
			  void *arg, struct lock_class_key *key);

/*
 * A new key is defined for each caller to make sure the work
 * associated with the function doesn't share its locking class.
 */
#define work_on_cpu_safe(_cpu, _fn, _arg)		\
({							\
	static struct lock_class_key __key;		\
							\
	work_on_cpu_safe_key(_cpu, _fn, _arg, &__key);	\
})
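
/*
 * Example: running a function synchronously on a specific CPU
 * (illustrative only; foo_read_fn() and foo_read_local_state() are
 * hypothetical). work_on_cpu() sleeps until the callback completes, so it
 * must not be used from atomic context:
 *
 *	static long foo_read_fn(void *arg)
 *	{
 *		struct foo *foo = arg;
 *
 *		return foo_read_local_state(foo);
 *	}
 *
 *	ret = work_on_cpu(2, foo_read_fn, foo);
 */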
#endif /* CONFIG_SMP */

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else	/* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif	/* CONFIG_SYSFS */

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else	/* CONFIG_WQ_WATCHDOG */
static inline void wq_watchdog_touch(int cpu) { }
#endif	/* CONFIG_WQ_WATCHDOG */

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

void __init workqueue_init_early(void);
void __init workqueue_init(void);
void __init workqueue_init_topology(void);

#endif