
Searched refs:work (Results 1 – 25 of 66) sorted by relevance


/kernel/
irq_work.c:30 static bool irq_work_claim(struct irq_work *work) in irq_work_claim() argument
34 oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags); in irq_work_claim()
53 static void __irq_work_queue_local(struct irq_work *work) in __irq_work_queue_local() argument
56 if (atomic_read(&work->node.a_flags) & IRQ_WORK_LAZY) { in __irq_work_queue_local()
57 if (llist_add(&work->node.llist, this_cpu_ptr(&lazy_list)) && in __irq_work_queue_local()
61 if (llist_add(&work->node.llist, this_cpu_ptr(&raised_list))) in __irq_work_queue_local()
67 bool irq_work_queue(struct irq_work *work) in irq_work_queue() argument
70 if (!irq_work_claim(work)) in irq_work_queue()
75 __irq_work_queue_local(work); in irq_work_queue()
88 bool irq_work_queue_on(struct irq_work *work, int cpu) in irq_work_queue_on() argument
[all …]
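
The irq_work.c hits above cover the claim-and-queue path (irq_work_claim(), __irq_work_queue_local(), irq_work_queue()). As a rough, minimal sketch of how a caller typically drives this API; the names below are hypothetical and not taken from the results:

#include <linux/init.h>
#include <linux/irq_work.h>
#include <linux/printk.h>

/* Hypothetical callback; runs later in IRQ-work context on the queueing CPU. */
static void example_irq_work_fn(struct irq_work *work)
{
	pr_info("irq_work executed\n");
}

static struct irq_work example_work;

static int __init example_init(void)
{
	init_irq_work(&example_work, example_irq_work_fn);
	return 0;
}

/* Later, from a context that cannot sleep (hard IRQ, NMI):
 * irq_work_queue(&example_work);  -- returns false if the work is already claimed/pending
 */
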
kthread.c:728 struct kthread_work *work; in kthread_worker_fn() local
751 work = NULL; in kthread_worker_fn()
754 work = list_first_entry(&worker->work_list, in kthread_worker_fn()
756 list_del_init(&work->node); in kthread_worker_fn()
758 worker->current_work = work; in kthread_worker_fn()
761 if (work) { in kthread_worker_fn()
762 kthread_work_func_t func = work->func; in kthread_worker_fn()
764 trace_sched_kthread_work_execute_start(work); in kthread_worker_fn()
765 work->func(work); in kthread_worker_fn()
770 trace_sched_kthread_work_execute_end(work, func); in kthread_worker_fn()
[all …]
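
kthread_worker_fn() above is the loop that a dedicated worker thread runs; callers normally drive it through the kthread_worker helpers. A minimal sketch, with hypothetical names:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/printk.h>

/* Hypothetical work function, executed by the dedicated worker thread. */
static void example_kwork_fn(struct kthread_work *work)
{
	pr_info("ran in the kthread worker\n");
}

static struct kthread_worker *example_worker;
static struct kthread_work example_kwork;

static int example_setup(void)
{
	example_worker = kthread_create_worker(0, "example_worker");
	if (IS_ERR(example_worker))
		return PTR_ERR(example_worker);

	kthread_init_work(&example_kwork, example_kwork_fn);
	kthread_queue_work(example_worker, &example_kwork);	/* picked up by kthread_worker_fn() */
	return 0;
}
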
task_work.c:32 int task_work_add(struct task_struct *task, struct callback_head *work, in task_work_add() argument
38 kasan_record_aux_stack(work); in task_work_add()
44 work->next = head; in task_work_add()
45 } while (cmpxchg(&task->task_works, head, work) != head); in task_work_add()
78 struct callback_head *work; in task_work_cancel_match() local
90 while ((work = READ_ONCE(*pprev))) { in task_work_cancel_match()
91 if (!match(work, data)) in task_work_cancel_match()
92 pprev = &work->next; in task_work_cancel_match()
93 else if (cmpxchg(pprev, work, work->next) == work) in task_work_cancel_match()
98 return work; in task_work_cancel_match()
[all …]
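
task_work_add() above pushes a callback_head onto a task's task_works list; the callback runs in that task's context, typically on its next return to user space. A minimal sketch with hypothetical names:

#include <linux/sched.h>
#include <linux/task_work.h>

/* Hypothetical callback; executes in the context of the target task. */
static void example_twork_fn(struct callback_head *head)
{
}

static struct callback_head example_twork;

static int example_queue_on(struct task_struct *task)
{
	init_task_work(&example_twork, example_twork_fn);
	/* TWA_RESUME: run on the next return to user space; fails (-ESRCH) if the task is exiting. */
	return task_work_add(task, &example_twork, TWA_RESUME);
}
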
workqueue.c:465 struct work_struct *work = addr; in work_is_static_object() local
467 return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work)); in work_is_static_object()
476 struct work_struct *work = addr; in work_fixup_init() local
480 cancel_work_sync(work); in work_fixup_init()
481 debug_object_init(work, &work_debug_descr); in work_fixup_init()
494 struct work_struct *work = addr; in work_fixup_free() local
498 cancel_work_sync(work); in work_fixup_free()
499 debug_object_free(work, &work_debug_descr); in work_fixup_free()
514 static inline void debug_work_activate(struct work_struct *work) in debug_work_activate() argument
516 debug_object_activate(work, &work_debug_descr); in debug_work_activate()
[all …]
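
The workqueue.c hits are the debugobjects fixups that police a work_struct's lifetime (note how work_fixup_init() and work_fixup_free() call cancel_work_sync() on a still-active object). For reference, a minimal sketch of the work_struct pattern those fixups guard, with hypothetical names:

#include <linux/printk.h>
#include <linux/workqueue.h>

struct example_ctx {			/* hypothetical container */
	int value;
	struct work_struct work;
};

static void example_work_fn(struct work_struct *work)
{
	struct example_ctx *ctx = container_of(work, struct example_ctx, work);

	pr_info("value=%d\n", ctx->value);
}

static void example_use(struct example_ctx *ctx)
{
	INIT_WORK(&ctx->work, example_work_fn);	/* tracked by debugobjects when CONFIG_DEBUG_OBJECTS_WORK=y */
	schedule_work(&ctx->work);
	/* ... */
	cancel_work_sync(&ctx->work);		/* before freeing ctx */
}
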
stop_machine.c:85 struct cpu_stop_work *work, in __cpu_stop_queue_work() argument
88 list_add_tail(&work->list, &stopper->works); in __cpu_stop_queue_work()
93 static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) in cpu_stop_queue_work() argument
104 __cpu_stop_queue_work(stopper, work, &wakeq); in cpu_stop_queue_work()
105 else if (work->done) in cpu_stop_queue_work()
106 cpu_stop_signal_done(work->done); in cpu_stop_queue_work()
142 struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done, .caller = _RET_IP_ }; in stop_one_cpu() local
145 if (!cpu_stop_queue_work(cpu, &work)) in stop_one_cpu()
397 struct cpu_stop_work *work; in queue_stop_cpus_work() local
410 work = &per_cpu(cpu_stopper.stop_work, cpu); in queue_stop_cpus_work()
[all …]
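
The stop_machine.c hits show how a cpu_stop_work is queued to a CPU's stopper thread. From the caller's side, stop_one_cpu() wraps that, as in this hypothetical sketch:

#include <linux/stop_machine.h>

/* Hypothetical function; runs on the stopper thread of the target CPU. */
static int example_stop_fn(void *arg)
{
	return 0;
}

static int example_run(unsigned int cpu)
{
	/* Builds an on-stack cpu_stop_work (as in stop_one_cpu() above) and waits for completion. */
	return stop_one_cpu(cpu, example_stop_fn, NULL);
}
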
async.c:70 struct work_struct work; member
115 static void async_run_entry_fn(struct work_struct *work) in async_run_entry_fn() argument
118 container_of(work, struct async_entry, work); in async_run_entry_fn()
158 INIT_WORK(&entry->work, async_run_entry_fn); in __async_schedule_node_domain()
176 queue_work_node(node, system_unbound_wq, &entry->work); in __async_schedule_node_domain()
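
async.c embeds a work_struct in each async_entry and queues it on system_unbound_wq. Callers use the async_schedule() wrappers; a minimal hypothetical sketch:

#include <linux/async.h>

/* Hypothetical function, run asynchronously via async_run_entry_fn(). */
static void example_async_fn(void *data, async_cookie_t cookie)
{
}

static void example_kick(void)
{
	async_schedule(example_async_fn, NULL);
	async_synchronize_full();	/* wait for all outstanding async work to finish */
}
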
jump_label.c:259 void jump_label_update_timeout(struct work_struct *work) in jump_label_update_timeout() argument
262 container_of(work, struct static_key_deferred, work.work); in jump_label_update_timeout()
281 struct delayed_work *work, in __static_key_slow_dec_deferred() argument
289 schedule_delayed_work(work, timeout); in __static_key_slow_dec_deferred()
293 void __static_key_deferred_flush(void *key, struct delayed_work *work) in __static_key_deferred_flush() argument
296 flush_delayed_work(work); in __static_key_deferred_flush()
305 INIT_DELAYED_WORK(&key->work, jump_label_update_timeout); in jump_label_rate_limit()
umh.c:160 static void call_usermodehelper_exec_work(struct work_struct *work) in call_usermodehelper_exec_work() argument
163 container_of(work, struct subprocess_info, work); in call_usermodehelper_exec_work()
369 INIT_WORK(&sub_info->work, call_usermodehelper_exec_work); in call_usermodehelper_setup()
435 queue_work(system_unbound_wq, &sub_info->work); in call_usermodehelper_exec()
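
umh.c likewise wraps each request in a work_struct (call_usermodehelper_exec_work()) queued on system_unbound_wq. The usual entry point is call_usermodehelper(); the helper path below is hypothetical:

#include <linux/umh.h>

static int example_spawn_helper(void)
{
	char *argv[] = { "/sbin/example-helper", "arg1", NULL };	/* hypothetical path */
	char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };

	/* Sets up sub_info->work and waits until the helper has been exec'd. */
	return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
}
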
acct.c:92 struct work_struct work; member
175 schedule_work(&acct->work); in acct_pin_kill()
183 static void close_work(struct work_struct *work) in close_work() argument
185 struct bsd_acct_struct *acct = container_of(work, struct bsd_acct_struct, work); in close_work()
246 INIT_WORK(&acct->work, close_work); in acct_on()
smp.c:1198 struct work_struct work; member
1206 static void smp_call_on_cpu_callback(struct work_struct *work) in smp_call_on_cpu_callback() argument
1210 sscs = container_of(work, struct smp_call_on_cpu_struct, work); in smp_call_on_cpu_callback()
1229 INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback); in smp_call_on_cpu()
1234 queue_work_on(cpu, system_wq, &sscs.work); in smp_call_on_cpu()
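
smp.c's smp_call_on_cpu() queues an on-stack work item (the INIT_WORK_ONSTACK() above) on the target CPU's system_wq and waits for it. A hypothetical caller:

#include <linux/smp.h>

/* Hypothetical function; runs in process context on the chosen CPU. */
static int example_on_cpu_fn(void *arg)
{
	return 0;
}

static int example_call(unsigned int cpu)
{
	/* phys=false: no need to pin the underlying physical CPU (only relevant under a hypervisor). */
	return smp_call_on_cpu(cpu, example_on_cpu_fn, NULL, false);
}
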
padata.c:48 static void __init padata_mt_helper(struct work_struct *work);
315 queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work); in padata_reorder()
335 static void invoke_padata_reorder(struct work_struct *work) in invoke_padata_reorder() argument
340 pd = container_of(work, struct parallel_data, reorder_work); in invoke_padata_reorder()
353 squeue = container_of(serial_work, struct padata_serial_queue, work); in padata_serial_worker()
541 INIT_WORK(&squeue->work, padata_serial_worker); in padata_init_squeues()
reboot.c:484 static void poweroff_work_func(struct work_struct *work) in poweroff_work_func() argument
506 static void reboot_work_func(struct work_struct *work) in reboot_work_func() argument
532 static void hw_failure_emergency_poweroff_func(struct work_struct *work) in hw_failure_emergency_poweroff_func() argument
/kernel/locking/
test-ww_mutex.c:20 struct work_struct work; member
31 static void test_mutex_work(struct work_struct *work) in test_mutex_work() argument
33 struct test_mutex *mtx = container_of(work, typeof(*mtx), work); in test_mutex_work()
58 INIT_WORK_ONSTACK(&mtx.work, test_mutex_work); in __test_mutex()
64 schedule_work(&mtx.work); in __test_mutex()
92 flush_work(&mtx.work); in __test_mutex()
93 destroy_work_on_stack(&mtx.work); in __test_mutex()
148 struct work_struct work; member
157 static void test_abba_work(struct work_struct *work) in test_abba_work() argument
159 struct test_abba *abba = container_of(work, typeof(*abba), work); in test_abba_work()
[all …]
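
test-ww_mutex.c demonstrates the on-stack work pattern: INIT_WORK_ONSTACK(), flush_work() before the stack frame goes away, then destroy_work_on_stack(). Condensed into a hypothetical sketch:

#include <linux/workqueue.h>

struct example_onstack {		/* hypothetical, mirrors struct test_mutex above */
	struct work_struct work;
	int result;
};

static void example_onstack_fn(struct work_struct *work)
{
	struct example_onstack *s = container_of(work, typeof(*s), work);

	s->result = 1;
}

static int example_run_onstack(void)
{
	struct example_onstack s = { .result = 0 };

	INIT_WORK_ONSTACK(&s.work, example_onstack_fn);
	schedule_work(&s.work);
	flush_work(&s.work);			/* must finish before the stack frame is reused */
	destroy_work_on_stack(&s.work);
	return s.result;
}
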
/kernel/entry/
common.c:45 unsigned long work) in syscall_trace_enter() argument
54 if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) { in syscall_trace_enter()
60 if (work & (SYSCALL_WORK_SYSCALL_TRACE | SYSCALL_WORK_SYSCALL_EMU)) { in syscall_trace_enter()
62 if (ret || (work & SYSCALL_WORK_SYSCALL_EMU)) in syscall_trace_enter()
67 if (work & SYSCALL_WORK_SECCOMP) { in syscall_trace_enter()
76 if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT)) in syscall_trace_enter()
87 unsigned long work = READ_ONCE(current_thread_info()->syscall_work); in __syscall_enter_from_user_work() local
89 if (work & SYSCALL_WORK_ENTER) in __syscall_enter_from_user_work()
90 syscall = syscall_trace_enter(regs, syscall, work); in __syscall_enter_from_user_work()
224 static inline bool report_single_step(unsigned long work) in report_single_step() argument
[all …]
/kernel/rcu/
srcutree.c:45 static void srcu_invoke_callbacks(struct work_struct *work);
47 static void process_srcu(struct work_struct *work);
150 INIT_WORK(&sdp->work, srcu_invoke_callbacks); in init_srcu_struct_nodes()
172 INIT_DELAYED_WORK(&ssp->work, process_srcu); in init_srcu_struct_fields()
375 flush_delayed_work(&ssp->work); in cleanup_srcu_struct()
380 flush_work(&sdp->work); in cleanup_srcu_struct()
459 queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work); in srcu_delay_timer()
466 queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work); in srcu_queue_delayed_work_on()
682 queue_delayed_work(rcu_gp_wq, &ssp->work, in srcu_funnel_gp_start()
684 else if (list_empty(&ssp->work.work.entry)) in srcu_funnel_gp_start()
[all …]
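
srcutree.c mixes plain work (sdp->work) with delayed work (ssp->work is a struct delayed_work, hence the ssp->work.work.entry test above, and flush_delayed_work() in cleanup_srcu_struct()). The generic delayed_work pattern, as a hypothetical sketch:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

/* Hypothetical handler; re-arms itself every 100 ms. */
static void example_delayed_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);

	queue_delayed_work(system_wq, dwork, msecs_to_jiffies(100));
}

static struct delayed_work example_dwork;

static void example_arm(void)
{
	INIT_DELAYED_WORK(&example_dwork, example_delayed_fn);
	queue_delayed_work(system_wq, &example_dwork, msecs_to_jiffies(100));
}

static void example_teardown(void)
{
	cancel_delayed_work_sync(&example_dwork);	/* stop a self-rearming delayed work */
}
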
/kernel/irq/
irq_sim.c:14 struct irq_work work; member
81 irq_work_queue(&irq_ctx->work_ctx->work); in irq_sim_set_irqchip_state()
100 static void irq_sim_handle_irq(struct irq_work *work) in irq_sim_handle_irq() argument
106 work_ctx = container_of(work, struct irq_sim_work_ctx, work); in irq_sim_handle_irq()
184 init_irq_work(&work_ctx->work, irq_sim_handle_irq); in irq_domain_create_sim()
207 irq_work_sync(&work_ctx->work); in irq_domain_remove_sim()
/kernel/bpf/
stackmap.c:42 struct stack_map_irq_work *work; in do_up_read() local
47 work = container_of(entry, struct stack_map_irq_work, irq_work); in do_up_read()
48 mmap_read_unlock_non_owner(work->mm); in do_up_read()
153 struct stack_map_irq_work *work = NULL; in stack_map_get_build_id_offset() local
157 work = this_cpu_ptr(&up_read_work); in stack_map_get_build_id_offset()
158 if (irq_work_is_busy(&work->irq_work)) { in stack_map_get_build_id_offset()
206 if (!work) { in stack_map_get_build_id_offset()
209 work->mm = current->mm; in stack_map_get_build_id_offset()
216 irq_work_queue(&work->irq_work); in stack_map_get_build_id_offset()
727 struct stack_map_irq_work *work; in stack_map_init() local
[all …]
ringbuf.c:35 struct irq_work work; member
122 static void bpf_ringbuf_notify(struct irq_work *work) in bpf_ringbuf_notify() argument
124 struct bpf_ringbuf *rb = container_of(work, struct bpf_ringbuf, work); in bpf_ringbuf_notify()
139 init_irq_work(&rb->work, bpf_ringbuf_notify); in bpf_ringbuf_alloc()
393 irq_work_queue(&rb->work); in bpf_ringbuf_commit()
395 irq_work_queue(&rb->work); in bpf_ringbuf_commit()
trampoline.c:200 static void __bpf_tramp_image_put_deferred(struct work_struct *work) in __bpf_tramp_image_put_deferred() argument
204 im = container_of(work, struct bpf_tramp_image, work); in __bpf_tramp_image_put_deferred()
218 INIT_WORK(&im->work, __bpf_tramp_image_put_deferred); in __bpf_tramp_image_put_rcu()
219 schedule_work(&im->work); in __bpf_tramp_image_put_rcu()
/kernel/sched/
cpufreq_schedutil.c:38 struct kthread_work work; member
477 static void sugov_work(struct kthread_work *work) in sugov_work() argument
479 struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work); in sugov_work()
509 kthread_queue_work(&sg_policy->worker, &sg_policy->work); in sugov_irq_work()
615 kthread_init_work(&sg_policy->work, sugov_work); in sugov_kthread_create()
818 kthread_cancel_work_sync(&sg_policy->work); in sugov_stop()
/kernel/trace/
bpf_trace.c:776 struct send_signal_irq_work *work; in do_bpf_send_signal() local
778 work = container_of(entry, struct send_signal_irq_work, irq_work); in do_bpf_send_signal()
779 group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type); in do_bpf_send_signal()
780 put_task_struct(work->task); in do_bpf_send_signal()
785 struct send_signal_irq_work *work = NULL; in bpf_send_signal_common() local
809 work = this_cpu_ptr(&send_signal_work); in bpf_send_signal_common()
810 if (irq_work_is_busy(&work->irq_work)) in bpf_send_signal_common()
817 work->task = get_task_struct(current); in bpf_send_signal_common()
818 work->sig = sig; in bpf_send_signal_common()
819 work->type = type; in bpf_send_signal_common()
[all …]
ring_buffer.c:33 static void update_pages_handler(struct work_struct *work);
368 struct irq_work work; member
842 static void rb_wake_up_waiters(struct irq_work *work) in rb_wake_up_waiters() argument
844 struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work); in rb_wake_up_waiters()
894 irq_work_queue(&rbwork->work); in ring_buffer_wake_waiters()
911 struct rb_irq_work *work; in ring_buffer_wait() local
921 work = &buffer->irq_work; in ring_buffer_wait()
928 work = &cpu_buffer->irq_work; in ring_buffer_wait()
931 wait_index = READ_ONCE(work->wait_index); in ring_buffer_wait()
935 prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE); in ring_buffer_wait()
[all …]
/kernel/events/
ring_buffer.c:885 static void rb_free_work(struct work_struct *work) in rb_free_work() argument
891 rb = container_of(work, struct perf_buffer, work); in rb_free_work()
905 schedule_work(&rb->work); in rb_free()
923 INIT_WORK(&rb->work, rb_free_work); in rb_alloc()
/kernel/time/
posix-cpu-timers.c:1158 static void posix_cpu_timers_work(struct callback_head *work) in posix_cpu_timers_work() argument
1160 struct posix_cputimers_work *cw = container_of(work, typeof(*cw), work); in posix_cpu_timers_work()
1214 memset(&p->posix_cputimers_work.work, 0, in clear_posix_cputimers_work()
1215 sizeof(p->posix_cputimers_work.work)); in clear_posix_cputimers_work()
1216 init_task_work(&p->posix_cputimers_work.work, in clear_posix_cputimers_work()
1249 task_work_add(tsk, &tsk->posix_cputimers_work.work, TWA_RESUME); in __run_posix_cpu_timers()
/kernel/power/
wakelock.c:85 static void __wakelocks_gc(struct work_struct *work);
100 static void __wakelocks_gc(struct work_struct *work) in __wakelocks_gc() argument
