/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(unsigned long __data);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one.
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	/*
	 * The last color is no color, used for work items which don't
	 * participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* special cpu IDs */
	WORK_CPU_UNBOUND	= NR_CPUS,
	WORK_CPU_END		= NR_CPUS + 1,

	/*
	 * Reserve 8 bits off of pwq pointer w/ debugobjects turned off.
	 * This makes pwqs aligned to 256 bytes and allows 15 workqueue
	 * flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	/* data contains off-queue information when !WORK_STRUCT_PWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,

	WORK_OFFQ_CANCELING	= (1 << WORK_OFFQ_FLAG_BASE),

	/*
	 * When a work item is off queue, its high bits point to the last
	 * pool it was on.  Cap at 31 bits and use the highest number to
	 * indicate that no pool is associated.
	 */
	WORK_OFFQ_FLAG_BITS	= 1,
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
	WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,

	/* convenience constants */
	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_POOL	= (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 24,
};
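
/*
 * Illustrative sketch (not part of the kernel API): while a work item
 * is on a queue (WORK_STRUCT_PWQ set), the data word holds the pwq
 * pointer in the high bits and the flags in the low
 * WORK_STRUCT_FLAG_BITS, so both can be recovered with the masks
 * defined above:
 *
 *	unsigned long data = atomic_long_read(&work->data);
 *	unsigned long flags = data & WORK_STRUCT_FLAG_MASK;
 *	struct pool_workqueue *pwq =
 *		(void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 */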

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

/*
 * A struct for workqueue attributes.  This can be used to change
 * attributes of an unbound workqueue.
 *
 * Unlike other fields, ->no_numa isn't a property of a worker_pool.  It
 * only modifies how apply_workqueue_attrs() selects pools and thus doesn't
 * participate in pool hash calculations or equality comparisons.
 */
struct workqueue_attrs {
	int			nice;		/* nice level */
	cpumask_var_t		cpumask;	/* allowed CPUs */
	bool			no_numa;	/* disable NUMA affinity */
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}
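
/*
 * Illustrative sketch (hypothetical handler names): a delayed work
 * callback only receives the embedded work_struct, so it uses
 * to_delayed_work() to recover the containing delayed_work, e.g. to
 * re-arm itself:
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *
 *		my_do_poll();
 *		schedule_delayed_work(dwork, HZ);
 *	}
 */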

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     0, (unsigned long)&(n),		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
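
/*
 * Illustrative sketch (hypothetical names): the DECLARE_*() macros
 * define fully initialized work items at compile time, so no runtime
 * INIT_*() call is needed before queueing:
 *
 *	static void my_work_fn(struct work_struct *work);
 *	static void my_dwork_fn(struct work_struct *work);
 *
 *	static DECLARE_WORK(my_work, my_work_fn);
 *	static DECLARE_DELAYED_WORK(my_dwork, my_dwork_fn);
 */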

/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func)					\
	do {								\
		(_work)->func = (_func);				\
	} while (0)

#define PREPARE_DELAYED_WORK(_work, _func)				\
	PREPARE_WORK(&(_work)->work, (_func))

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)						\
	do {								\
		__INIT_WORK((_work), (_func), 0);			\
	} while (0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	do {								\
		__INIT_WORK((_work), (_func), 1);			\
	} while (0)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__setup_timer(&(_work)->timer, delayed_work_timer_fn,	\
			      (unsigned long)(_work),			\
			      (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__setup_timer_on_stack(&(_work)->timer,			\
				       delayed_work_timer_fn,		\
				       (unsigned long)(_work),		\
				       (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)
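
/*
 * Illustrative sketch (hypothetical driver structure): work items are
 * usually embedded in a larger object, initialized at runtime with
 * INIT_WORK(), and the handler recovers the container with
 * container_of():
 *
 *	struct my_dev {
 *		struct work_struct reset_work;
 *	};
 *
 *	static void my_reset_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev,
 *						  reset_work);
 *		...
 *	}
 *
 *	INIT_WORK(&dev->reset_work, my_reset_fn);
 */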

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work)						\
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w)						\
	work_pending(&(w)->work)

/**
 * work_clear_pending - for internal use only, mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work)					\
	clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/workqueue.txt.
 */
enum {
	/*
	 * All wqs are now non-reentrant, making the following flag
	 * meaningless.  Will be removed.
	 */
	WQ_NON_REENTRANT	= 1 << 0, /* DEPRECATED */

	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see wq_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU, breaking the idleness, which
	 * in turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel param
	 * is specified.  Per-cpu workqueues which contribute significantly
	 * to power consumption are marked with this flag, and enabling the
	 * power_efficient mode leads to noticeable power saving at the
	 * cost of a small performance disadvantage.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1480396
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached
 * and resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are the same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;
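
/*
 * Illustrative sketch: choosing among the system workqueues by the
 * expected behavior of the work item (the work items themselves are
 * hypothetical):
 *
 *	queue_work(system_wq, &short_work);	    short-lived items
 *	queue_work(system_long_wq, &slow_work);	    items that may run long
 *	queue_work(system_unbound_wq, &heavy_work); any CPU, not
 *						    concurrency managed
 */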

static inline struct workqueue_struct * __deprecated __system_nrt_wq(void)
{
	return system_wq;
}

static inline struct workqueue_struct * __deprecated __system_nrt_freezable_wq(void)
{
	return system_freezable_wq;
}

/* equivalent to system_wq and system_freezable_wq, deprecated */
#define system_nrt_wq			__system_nrt_wq()
#define system_nrt_freezable_wq		__system_nrt_freezable_wq()

extern struct workqueue_struct *
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
	struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @args: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to Documentation/workqueue.txt.
 *
 * The __lock_name macro dance is to guarantee that a single lock_class_key
 * doesn't end up with different names, which isn't allowed by lockdep.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(fmt, flags, max_active, args...)		\
({									\
	static struct lock_class_key __key;				\
	const char *__lock_name;					\
									\
	__lock_name = #fmt#args;					\
									\
	__alloc_workqueue_key((fmt), (flags), (max_active),		\
			      &__key, __lock_name, ##args);		\
})
#else
#define alloc_workqueue(fmt, flags, max_active, args...)		\
	__alloc_workqueue_key((fmt), (flags), (max_active),		\
			      NULL, NULL, ##args)
#endif
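
/*
 * Illustrative sketch (hypothetical names): allocating a dedicated
 * workqueue, queueing to it and tearing it down; WQ_MEM_RECLAIM
 * guarantees forward progress under memory pressure and @max_active
 * of 0 picks the default:
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
 *	if (!my_wq)
 *		return -ENOMEM;
 *
 *	queue_work(my_wq, &my_work);
 *	...
 *	destroy_workqueue(my_wq);
 */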

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
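
/*
 * Illustrative sketch (hypothetical work items): an ordered workqueue
 * runs its items strictly one at a time in queueing order:
 *
 *	struct workqueue_struct *ordered_wq;
 *
 *	ordered_wq = alloc_ordered_workqueue("my_ordered", 0);
 *	queue_work(ordered_wq, &first_work);
 *	queue_work(ordered_wq, &second_work);	runs after first_work
 */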

#define create_workqueue(name)						\
	alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \
			1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, (name))

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
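
/*
 * Illustrative sketch (hypothetical event path): mod_delayed_work()
 * is handy for debouncing, as each call pushes the execution time
 * back so the handler runs once, 100ms after the last event:
 *
 *	static void my_event(struct my_dev *dev)
 *	{
 *		mod_delayed_work(system_wq, &dev->debounce_work,
 *				 msecs_to_jiffies(100));
 *	}
 */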

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}
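
/*
 * Illustrative sketch (hypothetical interrupt handler): queueing is
 * safe from atomic context, so an irq handler can defer non-atomic
 * processing to the kernel-global workqueue and return immediately:
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		schedule_work(&dev->irq_work);
 *		return IRQ_HANDLED;
 *	}
 */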

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}

/**
 * keventd_up - is workqueue initialized yet?
 */
static inline bool keventd_up(void)
{
	return system_wq != NULL;
}

/*
 * Like cancel_delayed_work(), but uses del_timer() instead of
 * del_timer_sync().  This means, if it returns 0, the timer function
 * may still be running and the queueing is in progress.
 */
static inline bool __deprecated __cancel_delayed_work(struct delayed_work *work)
{
	bool ret;

	ret = del_timer(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}

/* used to be different but now identical to flush_work(), deprecated */
static inline bool __deprecated flush_work_sync(struct work_struct *work)
{
	return flush_work(work);
}

/* used to be different but now identical to flush_delayed_work(), deprecated */
static inline bool __deprecated flush_delayed_work_sync(struct delayed_work *dwork)
{
	return flush_delayed_work(dwork);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
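
/*
 * Illustrative sketch (hypothetical callback): work_on_cpu() runs the
 * callback synchronously on the given CPU and returns its result; on
 * !CONFIG_SMP it simply calls the function in place:
 *
 *	static long my_percpu_read(void *arg)
 *	{
 *		return my_read_counter(arg);
 *	}
 *
 *	long val = work_on_cpu(2, my_percpu_read, &req);
 */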

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else	/* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif	/* CONFIG_SYSFS */

#endif	/* _LINUX_WORKQUEUE_H */