/*
 * workqueue.h --- work queue handling for Linux
 * (excerpted lines matching "work")
 */
typedef void (*work_func_t)(struct work_struct *work);
/*
 * The first word is the work queue pointer and the flags rolled into
 * one.
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))
	WORK_STRUCT_PENDING_BIT  = 0,	/* work item is pending execution */
	WORK_STRUCT_INACTIVE_BIT = 1,	/* work item is inactive */
	WORK_STRUCT_LINKED_BIT   = 3,	/* next work is linked to this one */
/*
 * When a work item is off queue, its high bits point to the last
 * pool it was on.
 */
struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};
struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;

	/* target workqueue ->rcu uses to queue ->work */
	struct workqueue_struct *wq;
};
/*
 * Work items in this workqueue are affine to these CPUs and not allowed
 * to execute on other CPUs.
 */
/*
 * CPU pods are used to improve execution locality of unbound work
 * items.
 */
/* @ordered: work items must be executed one by one in queueing order */
static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

static inline struct rcu_work *to_rcu_work(struct work_struct *work)
{
	return container_of(work, struct rcu_work, work);
}
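/*
 * Example (not part of the header): a minimal sketch of how a delayed-work
 * handler typically recovers its containing object. The handler only gets a
 * bare work_struct pointer; to_delayed_work() plus a second container_of()
 * walk back out to the surrounding context. struct my_dev and my_poll_fn
 * are hypothetical names used purely for illustration.
 */
struct my_dev {
	struct delayed_work poll_work;
	unsigned long stats;
};

static void my_poll_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct my_dev *dev = container_of(dwork, struct my_dev, poll_work);

	dev->stats++;	/* the actual deferred work goes here */
}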
struct execute_work {
	struct work_struct work;
};
	.work = __WORK_INITIALIZER((n).work, (f)),			\
#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif
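/*
 * Example (not part of the header): a hedged sketch of the on-stack pattern
 * these hooks support. A work item living on the stack must be initialized
 * with INIT_WORK_ONSTACK(), waited for before the frame goes away, and torn
 * down with destroy_work_on_stack() so the debug-objects code stops tracking
 * it. my_sync_fn and run_once_and_wait are hypothetical names.
 */
static void my_sync_fn(struct work_struct *work) { /* ... */ }

static void run_once_and_wait(void)
{
	struct work_struct w;

	INIT_WORK_ONSTACK(&w, my_sync_fn);
	schedule_work(&w);
	flush_work(&w);		/* must finish before &w goes out of scope */
	destroy_work_on_stack(&w);
}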
/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
		INIT_WORK(&(_work)->work, (_func));			\

		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\

#define INIT_RCU_WORK(_work, _func)					\
	INIT_WORK(&(_work)->work, (_func))

#define INIT_RCU_WORK_ONSTACK(_work, _func)				\
	INIT_WORK_ONSTACK(&(_work)->work, (_func))
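/*
 * Example (not part of the header): a minimal sketch of initializing and
 * arming a delayed work item, reusing the hypothetical struct my_dev and
 * my_poll_fn from the earlier example.
 */
static void my_dev_start_polling(struct my_dev *dev)
{
	INIT_DELAYED_WORK(&dev->poll_work, my_poll_fn);
	/* run my_poll_fn() on the system workqueue about one second from now */
	schedule_delayed_work(&dev->poll_work, HZ);
}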
/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)
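/*
 * Example (not part of the header): a sketch of using the pending tests.
 * Note they are racy snapshots, suited to opportunistic decisions or
 * statistics rather than synchronization. Reuses the hypothetical
 * struct my_dev from the earlier example.
 */
static void my_dev_kick(struct my_dev *dev)
{
	/* don't reset the timer if a poll is already scheduled */
	if (!delayed_work_pending(&dev->poll_work))
		schedule_delayed_work(&dev->poll_work, HZ);
}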
/*
 * however, for example, a per-cpu work item scheduled from an
 * interrupt handler on an idle CPU will force the scheduler to
 * execute the work item on that CPU breaking the idleness, which in
 * turn may lead to more scheduling choices which are sub-optimal
 * in terms of power consumption.
 */
/*
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 */
/* @max_active: max in-flight work items per CPU, 0 for default */
/*
 * An ordered workqueue executes at most one work item at any given time in
 * the queued order. They are implemented as unbound workqueues with
 * @max_active of one.
 */
extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			  struct work_struct *work);
extern bool queue_work_node(int node, struct workqueue_struct *wq,
			    struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				  struct delayed_work *work, unsigned long delay);
extern bool flush_work(struct work_struct *work);
extern bool cancel_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern unsigned int work_busy(struct work_struct *work);
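/*
 * Example (not part of the header): a sketch of the usual teardown ordering.
 * The _sync cancel variants both cancel a pending item and wait for a running
 * instance to finish, so the handler can no longer touch the object once they
 * return. my_dev_stop is a hypothetical helper for the struct my_dev above.
 */
static void my_dev_stop(struct my_dev *dev)
{
	cancel_delayed_work_sync(&dev->poll_work);
	/* from here on, my_poll_fn() is neither queued nor running */
}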
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 *
 * Memory-ordering properties: if it returns %true, all stores preceding the
 * call are guaranteed visible to the CPU which will execute @work by the time
 * such work executes, e.g.,
 *
 *   CPU0				CPU1
 *
 *   WRITE_ONCE(x, 1);			[ @work is being executed ]
 *   r0 = queue_work(wq, work);		  r1 = READ_ONCE(x);
 *
 * Forbids: r0 == true && r1 == 0
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}
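/*
 * Example (not part of the header): because queue_work() returns %false when
 * the item is already pending, hot paths can fire it freely and concurrent
 * requests coalesce into a single execution. struct my_flusher and
 * my_request_flush are hypothetical illustration names.
 */
struct my_flusher {
	struct work_struct flush_work;
};

static void my_request_flush(struct workqueue_struct *wq, struct my_flusher *f)
{
	/* returns false (and does nothing) if a flush is already queued */
	if (!queue_work(wq, &f->flush_work))
		pr_debug("flush already pending, coalesced\n");
}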
/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
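/*
 * Example (not part of the header): mod_delayed_work() is the usual tool for
 * watchdog/debounce timers because it re-arms the delay whether or not the
 * item was already queued. my_watchdog_pet is a hypothetical sketch.
 */
static void my_watchdog_pet(struct workqueue_struct *wq,
			    struct delayed_work *watchdog)
{
	/* push the expiry out to now + 5s, queueing the item if necessary */
	mod_delayed_work(wq, watchdog, 5 * HZ);
}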
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}
/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}
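/*
 * Example (not part of the header): the classic bottom-half pattern.
 * schedule_work() may be called from atomic context, so an interrupt handler
 * can defer sleeping operations to process context. my_irq_handler is a
 * hypothetical name; f->flush_work is assumed initialized elsewhere with
 * INIT_WORK().
 */
static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_flusher *f = data;

	schedule_work(&f->flush_work);	/* heavy lifting happens later */
	return IRQ_HANDLED;
}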
/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 */

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 */
 * A new key is defined for each caller to make sure the work