
Searched refs:work (Results 1 – 25 of 43) sorted by relevance


/kernel/
task_work.c
27 task_work_add(struct task_struct *task, struct callback_head *work, bool notify) in task_work_add() argument
35 work->next = head; in task_work_add()
36 } while (cmpxchg(&task->task_works, head, work) != head); in task_work_add()
58 struct callback_head *work; in task_work_cancel() local
67 while ((work = ACCESS_ONCE(*pprev))) { in task_work_cancel()
69 if (work->func != func) in task_work_cancel()
70 pprev = &work->next; in task_work_cancel()
71 else if (cmpxchg(pprev, work, work->next) == work) in task_work_cancel()
76 return work; in task_work_cancel()
90 struct callback_head *work, *head, *next; in task_work_run() local
[all …]
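The task_work.c hits above are the lockless list at the heart of task_work: task_work_add() pushes a callback onto task->task_works with a cmpxchg() retry loop, and task_work_cancel() walks the list unlinking by function pointer. A minimal caller sketch, assuming this tree's bool-notify signature; the my_-prefixed names are hypothetical:

    #include <linux/task_work.h>
    #include <linux/sched.h>

    /* Hypothetical callback: runs in the target task's own context,
     * typically on its way back to user space. */
    static void my_twork_fn(struct callback_head *head)
    {
            /* safe to take locks and sleep here */
    }

    static struct callback_head my_twork;

    static int my_queue_on_current(void)
    {
            init_task_work(&my_twork, my_twork_fn);
            /* true: signal the task so the callback runs soon */
            return task_work_add(current, &my_twork, true);
    }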
irq_work.c
29 static bool irq_work_claim(struct irq_work *work) in irq_work_claim() argument
37 flags = work->flags & ~IRQ_WORK_PENDING; in irq_work_claim()
40 oflags = cmpxchg(&work->flags, flags, nflags); in irq_work_claim()
66 bool irq_work_queue_on(struct irq_work *work, int cpu) in irq_work_queue_on() argument
75 if (!irq_work_claim(work)) in irq_work_queue_on()
78 if (llist_add(&work->llnode, &per_cpu(raised_list, cpu))) in irq_work_queue_on()
87 bool irq_work_queue(struct irq_work *work) in irq_work_queue() argument
90 if (!irq_work_claim(work)) in irq_work_queue()
97 if (work->flags & IRQ_WORK_LAZY) { in irq_work_queue()
98 if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) && in irq_work_queue()
[all …]
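irq_work_claim() uses cmpxchg() on work->flags to mark the entry pending exactly once; the queue functions then push it onto a per-CPU llist (raised_list, or lazy_list for IRQ_WORK_LAZY) and raise the self-IPI. A hedged usage sketch, names hypothetical:

    #include <linux/irq_work.h>
    #include <linux/printk.h>

    /* Runs in hard-IRQ context when the raised list is processed. */
    static void my_irq_work_fn(struct irq_work *work)
    {
            /* keep it short: no sleeping here */
    }

    static struct irq_work my_iw;

    static void my_iw_setup_and_poke(void)
    {
            init_irq_work(&my_iw, my_irq_work_fn);
            /* returns false if this entry was already pending */
            if (!irq_work_queue(&my_iw))
                    pr_debug("irq_work already queued\n");
    }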
kthread.c
581 struct kthread_work *work; in kthread_worker_fn() local
596 work = NULL; in kthread_worker_fn()
599 work = list_first_entry(&worker->work_list, in kthread_worker_fn()
601 list_del_init(&work->node); in kthread_worker_fn()
603 worker->current_work = work; in kthread_worker_fn()
606 if (work) { in kthread_worker_fn()
608 work->func(work); in kthread_worker_fn()
623 struct kthread_work *work) in queuing_blocked() argument
627 return !list_empty(&work->node) || work->canceling; in queuing_blocked()
632 struct kthread_work *work, in insert_kthread_work() argument
[all …]
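kthread_worker_fn() is the loop a dedicated kthread runs: pop the first kthread_work off worker->work_list, record it in worker->current_work, call work->func(work). A sketch using the older API names this tree uses (init_kthread_worker()/queue_kthread_work(), renamed to kthread_* in later kernels); everything prefixed my_ is hypothetical:

    #include <linux/kthread.h>
    #include <linux/err.h>

    static struct kthread_worker my_worker;
    static struct kthread_work my_kwork;

    static void my_kwork_fn(struct kthread_work *work)
    {
            /* runs in the worker thread's context, may sleep */
    }

    static int my_worker_start(void)
    {
            struct task_struct *t;

            init_kthread_worker(&my_worker);
            t = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
            if (IS_ERR(t))
                    return PTR_ERR(t);

            init_kthread_work(&my_kwork, my_kwork_fn);
            queue_kthread_work(&my_worker, &my_kwork);
            return 0;
    }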
workqueue.c
426 struct work_struct *work = addr; in work_fixup_init() local
430 cancel_work_sync(work); in work_fixup_init()
431 debug_object_init(work, &work_debug_descr); in work_fixup_init()
445 struct work_struct *work = addr; in work_fixup_activate() local
455 if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) { in work_fixup_activate()
456 debug_object_init(work, &work_debug_descr); in work_fixup_activate()
457 debug_object_activate(work, &work_debug_descr); in work_fixup_activate()
477 struct work_struct *work = addr; in work_fixup_free() local
481 cancel_work_sync(work); in work_fixup_free()
482 debug_object_free(work, &work_debug_descr); in work_fixup_free()
[all …]
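These workqueue.c hunks are the CONFIG_DEBUG_OBJECTS_WORK fixups: if debugobjects sees a work item re-initialized, activated, or freed while still live, the fixup calls cancel_work_sync() and resets the tracking state. The lifecycle they police, in generic form (struct and names hypothetical):

    #include <linux/workqueue.h>
    #include <linux/slab.h>

    struct my_dev {
            struct work_struct work;
    };

    static void my_dev_workfn(struct work_struct *work)
    {
            struct my_dev *dev = container_of(work, struct my_dev, work);
            /* ... */
    }

    static struct my_dev *my_dev_alloc(void)
    {
            struct my_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

            if (dev)
                    INIT_WORK(&dev->work, my_dev_workfn);
            return dev;
    }

    static void my_dev_free(struct my_dev *dev)
    {
            /* freeing while still queued is exactly what
             * work_fixup_free() above would have to repair */
            cancel_work_sync(&dev->work);
            kfree(dev);
    }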
stop_machine.c
77 struct cpu_stop_work *work) in __cpu_stop_queue_work() argument
79 list_add_tail(&work->list, &stopper->works); in __cpu_stop_queue_work()
84 static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) in cpu_stop_queue_work() argument
91 __cpu_stop_queue_work(stopper, work); in cpu_stop_queue_work()
93 cpu_stop_signal_done(work->done, false); in cpu_stop_queue_work()
124 struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done }; in stop_one_cpu() local
127 cpu_stop_queue_work(cpu, &work); in stop_one_cpu()
320 struct cpu_stop_work *work; in queue_stop_cpus_work() local
330 work = &per_cpu(cpu_stopper.stop_work, cpu); in queue_stop_cpus_work()
331 work->fn = fn; in queue_stop_cpus_work()
[all …]
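cpu_stop_queue_work() appends a cpu_stop_work to the per-CPU stopper thread's list; stop_one_cpu() wraps it with an on-stack work item plus a completion and blocks until the function has run. From the caller's side (function names hypothetical):

    #include <linux/stop_machine.h>

    /* Runs on the target CPU from its high-priority stopper thread,
     * with that CPU doing nothing else meanwhile. */
    static int my_stop_fn(void *arg)
    {
            return 0;
    }

    static int my_run_on_cpu(unsigned int cpu)
    {
            /* blocks until my_stop_fn() has completed on @cpu */
            return stop_one_cpu(cpu, my_stop_fn, NULL);
    }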
async.c
74 struct work_struct work; member
113 static void async_run_entry_fn(struct work_struct *work) in async_run_entry_fn() argument
116 container_of(work, struct async_entry, work); in async_run_entry_fn()
177 INIT_WORK(&entry->work, async_run_entry_fn); in __async_schedule()
198 queue_work(system_unbound_wq, &entry->work); in __async_schedule()
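async.c is a thin layer over the workqueue: each async_entry embeds a work_struct, __async_schedule() points it at async_run_entry_fn() and queues it on system_unbound_wq, and the entry function recovers its context with container_of(). Callers only see async_schedule(); a hedged sketch with hypothetical names:

    #include <linux/async.h>

    /* Hypothetical long-running init step, run in process context. */
    static void my_init_async(void *data, async_cookie_t cookie)
    {
    }

    static void my_kickoff(void *dev)
    {
            async_schedule(my_init_async, dev);
            /* later, e.g. before unload: wait for all async work */
            async_synchronize_full();
    }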
jump_label.c
96 unsigned long rate_limit, struct delayed_work *work) in __static_key_slow_dec() argument
113 schedule_delayed_work(work, rate_limit); in __static_key_slow_dec()
120 static void jump_label_update_timeout(struct work_struct *work) in jump_label_update_timeout() argument
123 container_of(work, struct static_key_deferred, work.work); in jump_label_update_timeout()
137 __static_key_slow_dec(&key->key, key->timeout, &key->work); in static_key_slow_dec_deferred()
144 flush_delayed_work(&key->work); in static_key_deferred_flush()
153 INIT_DELAYED_WORK(&key->work, jump_label_update_timeout); in jump_label_rate_limit()
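static_key_deferred batches expensive static-branch disables: static_key_slow_dec_deferred() routes the final decrement through schedule_delayed_work() so rapid inc/dec churn doesn't repeatedly patch kernel text. A usage sketch, key name hypothetical:

    #include <linux/jump_label_ratelimit.h>

    static struct static_key_deferred my_key;

    static void my_key_setup(void)
    {
            /* coalesce disables over at least one second */
            jump_label_rate_limit(&my_key, HZ);
    }

    static void my_key_get(void)
    {
            static_key_slow_inc(&my_key.key);
    }

    static void my_key_put(void)
    {
            static_key_slow_dec_deferred(&my_key);
    }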
acct.c
89 struct work_struct work; member
172 schedule_work(&acct->work); in acct_pin_kill()
180 static void close_work(struct work_struct *work) in close_work() argument
182 struct bsd_acct_struct *acct = container_of(work, struct bsd_acct_struct, work); in close_work()
243 INIT_WORK(&acct->work, close_work); in acct_on()
kmod.c
330 static void call_usermodehelper_exec_work(struct work_struct *work) in call_usermodehelper_exec_work() argument
333 container_of(work, struct subprocess_info, work); in call_usermodehelper_exec_work()
539 INIT_WORK(&sub_info->work, call_usermodehelper_exec_work); in call_usermodehelper_setup()
586 queue_work(system_unbound_wq, &sub_info->work); in call_usermodehelper_exec()
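kmod.c repeats the embed-and-defer shape: subprocess_info carries a work_struct, setup points it at call_usermodehelper_exec_work(), and exec queues it on system_unbound_wq so the helper is spawned from clean process context. Most callers use the one-shot wrapper; a sketch with a hypothetical helper path:

    #include <linux/kmod.h>

    static int my_run_helper(void)
    {
            char *argv[] = { "/sbin/my-helper", "arg1", NULL };
            char *envp[] = { "HOME=/",
                             "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

            /* UMH_WAIT_PROC: block until the helper process exits */
            return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
    }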
padata.c
72 struct padata_parallel_queue, work); in padata_parallel_worker()
142 queue_work_on(target_cpu, pinst->wq, &queue->work); in padata_do_parallel()
251 queue_work_on(cb_cpu, pinst->wq, &squeue->work); in padata_reorder()
271 static void invoke_padata_reorder(struct work_struct *work) in invoke_padata_reorder() argument
276 pd = container_of(work, struct parallel_data, reorder_work); in invoke_padata_reorder()
289 squeue = container_of(serial_work, struct padata_serial_queue, work); in padata_serial_worker()
378 INIT_WORK(&squeue->work, padata_serial_worker); in padata_init_squeues()
402 INIT_WORK(&pqueue->work, padata_parallel_worker); in padata_init_pqueues()
reboot.c
450 static void poweroff_work_func(struct work_struct *work) in poweroff_work_func() argument
472 static void reboot_work_func(struct work_struct *work) in reboot_work_func() argument
cpuset.c
295 static void cpuset_hotplug_workfn(struct work_struct *work);
998 struct work_struct work; member
1004 static void cpuset_migrate_mm_workfn(struct work_struct *work) in cpuset_migrate_mm_workfn() argument
1007 container_of(work, struct cpuset_migrate_mm_work, work); in cpuset_migrate_mm_workfn()
1025 INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn); in cpuset_migrate_mm()
1026 queue_work(cpuset_migrate_mm_wq, &mwork->work); in cpuset_migrate_mm()
2320 static void cpuset_hotplug_workfn(struct work_struct *work) in cpuset_hotplug_workfn() argument
pid_namespace.c
73 static void proc_cleanup_work(struct work_struct *work) in proc_cleanup_work() argument
75 struct pid_namespace *ns = container_of(work, struct pid_namespace, proc_work); in proc_cleanup_work()
Kconfig.hz
40 NTSC frame rates for video and multimedia work.
/kernel/rcu/
srcu.c
108 INIT_DELAYED_WORK(&sp->work, process_srcu); in init_srcu_struct_fields()
398 queue_delayed_work(system_power_efficient_wq, &sp->work, 0); in call_srcu()
658 &sp->work, SRCU_INTERVAL); in srcu_reschedule()
664 void process_srcu(struct work_struct *work) in process_srcu() argument
668 sp = container_of(work, struct srcu_struct, work.work); in process_srcu()
/kernel/power/
qos.c
424 static void pm_qos_work_fn(struct work_struct *work) in pm_qos_work_fn() argument
426 struct pm_qos_request *req = container_of(to_delayed_work(work), in pm_qos_work_fn()
428 work); in pm_qos_work_fn()
457 INIT_DELAYED_WORK(&req->work, pm_qos_work_fn); in pm_qos_add_request()
485 cancel_delayed_work_sync(&req->work); in pm_qos_update_request()
507 cancel_delayed_work_sync(&req->work); in pm_qos_update_request_timeout()
516 schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us)); in pm_qos_update_request_timeout()
538 cancel_delayed_work_sync(&req->work); in pm_qos_remove_request()
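pm_qos shows the full delayed-work lifecycle: INIT_DELAYED_WORK() when the request is added, cancel_delayed_work_sync() before every update or removal so a stale timeout cannot fire mid-change, and schedule_delayed_work() with usecs_to_jiffies() to arm the timeout. The same shape in isolation (names hypothetical):

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static void my_timeout_fn(struct work_struct *work)
    {
            struct delayed_work *dw = to_delayed_work(work);
            /* container_of(dw, ...) to reach the owning object */
    }

    static DECLARE_DELAYED_WORK(my_dwork, my_timeout_fn);

    static void my_rearm(unsigned long usecs)
    {
            cancel_delayed_work_sync(&my_dwork);  /* kill pending timeout */
            schedule_delayed_work(&my_dwork, usecs_to_jiffies(usecs));
    }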
wakelock.c
83 static void __wakelocks_gc(struct work_struct *work);
98 static void __wakelocks_gc(struct work_struct *work) in __wakelocks_gc() argument
autosleep.c
26 static void try_to_suspend(struct work_struct *work) in try_to_suspend() argument
/kernel/events/
ring_buffer.c
250 static void rb_irq_work(struct irq_work *work);
621 static void rb_irq_work(struct irq_work *work) in rb_irq_work() argument
623 struct ring_buffer *rb = container_of(work, struct ring_buffer, irq_work); in rb_irq_work()
749 static void rb_free_work(struct work_struct *work) in rb_free_work() argument
755 rb = container_of(work, struct ring_buffer, work); in rb_free_work()
769 schedule_work(&rb->work); in rb_free()
785 INIT_WORK(&rb->work, rb_free_work); in rb_alloc()
internal.h
16 struct work_struct work; member
/kernel/sched/
cpufreq_schedutil.c
54 struct kthread_work work; member
405 static void sugov_work(struct kthread_work *work) in sugov_work() argument
407 struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work); in sugov_work()
436 queue_kthread_work(&sg_policy->worker, &sg_policy->work); in sugov_irq_work()
562 init_kthread_work(&sg_policy->work, sugov_work); in sugov_kthread_create()
775 kthread_cancel_work_sync(&sg_policy->work); in sugov_stop()
/kernel/trace/
ring_buffer.c
28 static void update_pages_handler(struct work_struct *work);
398 struct irq_work work; member
511 static void rb_wake_up_waiters(struct irq_work *work) in rb_wake_up_waiters() argument
513 struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work); in rb_wake_up_waiters()
536 struct rb_irq_work *work; in ring_buffer_wait() local
545 work = &buffer->irq_work; in ring_buffer_wait()
552 work = &cpu_buffer->irq_work; in ring_buffer_wait()
558 prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE); in ring_buffer_wait()
560 prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE); in ring_buffer_wait()
583 work->full_waiters_pending = true; in ring_buffer_wait()
[all …]
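ring_buffer_wait() pairs a waitqueue with an irq_work: writers in tracing context cannot wake waiters directly, so they queue rb_wake_up_waiters() as irq_work, while readers sleep via the standard prepare_to_wait()/schedule()/finish_wait() sequence on work->waiters or work->full_waiters. The sleeping side reduced to its skeleton (queue and condition hypothetical):

    #include <linux/wait.h>
    #include <linux/sched.h>

    static DECLARE_WAIT_QUEUE_HEAD(my_waiters);

    static int my_wait_for_data(bool (*data_ready)(void))
    {
            DEFINE_WAIT(wait);
            int ret = 0;

            for (;;) {
                    prepare_to_wait(&my_waiters, &wait, TASK_INTERRUPTIBLE);
                    if (data_ready())
                            break;
                    if (signal_pending(current)) {
                            ret = -EINTR;
                            break;
                    }
                    schedule();
            }
            finish_wait(&my_waiters, &wait);
            return ret;
    }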
/kernel/bpf/
syscall.c
76 static void bpf_map_free_deferred(struct work_struct *work) in bpf_map_free_deferred() argument
78 struct bpf_map *map = container_of(work, struct bpf_map, work); in bpf_map_free_deferred()
99 INIT_WORK(&map->work, bpf_map_free_deferred); in bpf_map_put()
100 schedule_work(&map->work); in bpf_map_put()
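bpf_map_put() may be reached from contexts that cannot block, so the final teardown is bounced to process context: INIT_WORK() plus schedule_work() on the last reference, container_of() in the work function. The same deferred-free idiom in generic form (object hypothetical):

    #include <linux/workqueue.h>
    #include <linux/atomic.h>
    #include <linux/slab.h>

    struct my_obj {
            atomic_t refcnt;
            struct work_struct work;
    };

    static void my_obj_free_deferred(struct work_struct *work)
    {
            struct my_obj *obj = container_of(work, struct my_obj, work);

            kfree(obj);  /* now safely in process context */
    }

    static void my_obj_put(struct my_obj *obj)
    {
            if (atomic_dec_and_test(&obj->refcnt)) {
                    INIT_WORK(&obj->work, my_obj_free_deferred);
                    schedule_work(&obj->work);
            }
    }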
core.c
810 static void bpf_prog_free_deferred(struct work_struct *work) in bpf_prog_free_deferred() argument
814 aux = container_of(work, struct bpf_prog_aux, work); in bpf_prog_free_deferred()
823 INIT_WORK(&aux->work, bpf_prog_free_deferred); in bpf_prog_free()
824 schedule_work(&aux->work); in bpf_prog_free()
/kernel/irq/
manage.c
223 if (!schedule_work(&desc->affinity_notify->work)) { in irq_set_affinity_locked()
265 static void irq_affinity_notify(struct work_struct *work) in irq_affinity_notify() argument
268 container_of(work, struct irq_affinity_notify, work); in irq_affinity_notify()
318 INIT_WORK(&notify->work, irq_affinity_notify); in irq_set_affinity_notifier()
327 if (cancel_work_sync(&old_notify->work)) { in irq_set_affinity_notifier()
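irq_set_affinity_notifier() installs a notifier whose work item is kicked from irq_set_affinity_locked(); the previous notifier is drained with cancel_work_sync() before its kref is dropped. Registering one looks roughly like this (callbacks hypothetical):

    #include <linux/interrupt.h>

    static void my_affinity_notify(struct irq_affinity_notify *notify,
                                   const cpumask_t *mask)
    {
            /* runs from the notifier's work item, process context */
    }

    static void my_affinity_release(struct kref *ref)
    {
            /* last reference dropped: free notifier resources */
    }

    static struct irq_affinity_notify my_notify = {
            .notify  = my_affinity_notify,
            .release = my_affinity_release,
    };

    static int my_watch_irq(unsigned int irq)
    {
            return irq_set_affinity_notifier(irq, &my_notify);
    }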
