/kernel/

irq_work.c
      55  static bool irq_work_claim(struct irq_work *work)  in irq_work_claim()  [argument]
      59  oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags);  in irq_work_claim()
      78  static void __irq_work_queue_local(struct irq_work *work)  in __irq_work_queue_local()  [argument]
      85  work_flags = atomic_read(&work->node.a_flags);  in __irq_work_queue_local()
      97  if (!llist_add(&work->node.llist, list))  in __irq_work_queue_local()
     106  bool irq_work_queue(struct irq_work *work)  in irq_work_queue()  [argument]
     109  if (!irq_work_claim(work))  in irq_work_queue()
     114  __irq_work_queue_local(work);  in irq_work_queue()
     127  bool irq_work_queue_on(struct irq_work *work, int cpu)  in irq_work_queue_on()  [argument]
     130  return irq_work_queue(work);  in irq_work_queue_on()
     [all …]

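The lines above are the claim/queue fast path of the irq_work API: irq_work_claim() atomically marks an item pending, and irq_work_queue()/irq_work_queue_on() push it onto a per-CPU llist and raise an IPI. As a hedged illustration of the caller side (the callback and trigger names below are invented, not taken from this file):

    #include <linux/irq_work.h>
    #include <linux/printk.h>

    /* Runs from the irq_work IPI, i.e. in hard interrupt context. */
    static void my_irq_work_fn(struct irq_work *work)
    {
            pr_info("irq_work callback ran\n");
    }

    static struct irq_work my_work = IRQ_WORK_INIT(my_irq_work_fn);

    /* Safe to call from NMI or IRQ context; irq_work_claim() makes
     * duplicate calls a no-op until the callback starts executing. */
    static void kick(void)
    {
            irq_work_queue(&my_work);
    }
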
task_work.c
      42  int task_work_add(struct task_struct *task, struct callback_head *work,  in task_work_add()  [argument]
      48  kasan_record_aux_stack(work);  in task_work_add()
      54  work->next = head;  in task_work_add()
      55  } while (!try_cmpxchg(&task->task_works, &head, work));  in task_work_add()
      91  struct callback_head *work;  in task_work_cancel_match()  [local]
     103  work = READ_ONCE(*pprev);  in task_work_cancel_match()
     104  while (work) {  in task_work_cancel_match()
     105  if (!match(work, data)) {  in task_work_cancel_match()
     106  pprev = &work->next;  in task_work_cancel_match()
     107  work = READ_ONCE(*pprev);  in task_work_cancel_match()
     [all …]

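task_work_add() above pushes a callback_head onto the target task's task_works list with a cmpxchg loop. A minimal caller sketch, assuming you already hold a reference to the target task (callback and function names are illustrative):

    #include <linux/task_work.h>
    #include <linux/sched.h>
    #include <linux/printk.h>

    /* Executes in the context of the target task, typically just before
     * it returns to user space (TWA_RESUME). */
    static void my_task_work_fn(struct callback_head *head)
    {
            pr_info("task_work ran in %s\n", current->comm);
    }

    static struct callback_head my_twork;

    static int queue_on(struct task_struct *task)
    {
            init_task_work(&my_twork, my_task_work_fn);
            /* Returns -ESRCH if the task has already passed exit_task_work(). */
            return task_work_add(task, &my_twork, TWA_RESUME);
    }
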
kthread.c
     798  struct kthread_work *work;  in kthread_worker_fn()  [local]
     821  work = NULL;  in kthread_worker_fn()
     824  work = list_first_entry(&worker->work_list,  in kthread_worker_fn()
     826  list_del_init(&work->node);  in kthread_worker_fn()
     828  worker->current_work = work;  in kthread_worker_fn()
     831  if (work) {  in kthread_worker_fn()
     832  kthread_work_func_t func = work->func;  in kthread_worker_fn()
     834  trace_sched_kthread_work_execute_start(work);  in kthread_worker_fn()
     835  work->func(work);  in kthread_worker_fn()
     840  trace_sched_kthread_work_execute_end(work, func);  in kthread_worker_fn()
     [all …]

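kthread_worker_fn() is the loop that pops kthread_work items off a worker's list and runs them. The producer side, sketched under the assumption of a dedicated worker created on demand (all names invented):

    #include <linux/kthread.h>
    #include <linux/err.h>
    #include <linux/printk.h>

    static void my_kwork_fn(struct kthread_work *work)
    {
            pr_info("kthread_work executed\n");
    }

    static int kthread_worker_example(void)
    {
            struct kthread_worker *worker;
            struct kthread_work kwork;

            worker = kthread_create_worker(0, "my_worker");
            if (IS_ERR(worker))
                    return PTR_ERR(worker);

            kthread_init_work(&kwork, my_kwork_fn);
            kthread_queue_work(worker, &kwork);  /* list_add + wake kthread_worker_fn() */
            kthread_flush_work(&kwork);          /* wait for my_kwork_fn() to return */

            kthread_destroy_worker(worker);
            return 0;
    }
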
workqueue.c
     466  struct work_struct *work = addr;  in work_is_static_object()  [local]
     468  return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));  in work_is_static_object()
     477  struct work_struct *work = addr;  in work_fixup_init()  [local]
     481  cancel_work_sync(work);  in work_fixup_init()
     482  debug_object_init(work, &work_debug_descr);  in work_fixup_init()
     495  struct work_struct *work = addr;  in work_fixup_free()  [local]
     499  cancel_work_sync(work);  in work_fixup_free()
     500  debug_object_free(work, &work_debug_descr);  in work_fixup_free()
     515  static inline void debug_work_activate(struct work_struct *work)  in debug_work_activate()  [argument]
     517  debug_object_activate(work, &work_debug_descr);  in debug_work_activate()
     [all …]

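These workqueue.c matches are the CONFIG_DEBUG_OBJECTS_WORK fixup hooks that validate work_struct lifetime. For reference, the basic work_struct pattern those hooks police, as a sketch with an invented work item:

    #include <linux/workqueue.h>
    #include <linux/printk.h>

    static void my_work_fn(struct work_struct *work)
    {
            pr_info("work item ran on a kworker\n");
    }

    /* With CONFIG_DEBUG_OBJECTS_WORK, static initialization also sets
     * WORK_STRUCT_STATIC_BIT, which work_is_static_object() above checks. */
    static DECLARE_WORK(my_work, my_work_fn);

    static void workqueue_example(void)
    {
            schedule_work(&my_work);     /* queue on system_wq */
            flush_work(&my_work);        /* wait for completion */
            cancel_work_sync(&my_work);  /* cancel and wait, as the fixup paths do */
    }
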
stop_machine.c
      85  struct cpu_stop_work *work,  in __cpu_stop_queue_work()  [argument]
      88  list_add_tail(&work->list, &stopper->works);  in __cpu_stop_queue_work()
      93  static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)  in cpu_stop_queue_work()  [argument]
     104  __cpu_stop_queue_work(stopper, work, &wakeq);  in cpu_stop_queue_work()
     105  else if (work->done)  in cpu_stop_queue_work()
     106  cpu_stop_signal_done(work->done);  in cpu_stop_queue_work()
     142  struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done, .caller = _RET_IP_ };  in stop_one_cpu()  [local]
     145  if (!cpu_stop_queue_work(cpu, &work))  in stop_one_cpu()
     397  struct cpu_stop_work *work;  in queue_stop_cpus_work()  [local]
     410  work = &per_cpu(cpu_stopper.stop_work, cpu);  in queue_stop_cpus_work()
     [all …]

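stop_one_cpu() builds an on-stack cpu_stop_work and queues it to the per-CPU stopper thread, then waits on the completion. Caller-side usage, as a hedged sketch (the callback is invented):

    #include <linux/stop_machine.h>
    #include <linux/printk.h>

    /* Runs on the stopper thread of the target CPU, preempting everything
     * else running there. Must not sleep. */
    static int my_stop_fn(void *arg)
    {
            pr_info("running with cpu %u monopolized\n", *(unsigned int *)arg);
            return 0;
    }

    static int stop_machine_example(unsigned int cpu)
    {
            unsigned int target = cpu;

            /* Blocks until my_stop_fn() has completed on @cpu. */
            return stop_one_cpu(cpu, my_stop_fn, &target);
    }
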
async.c
      70  struct work_struct work;  [member]
     115  static void async_run_entry_fn(struct work_struct *work)  in async_run_entry_fn()  [argument]
     118  container_of(work, struct async_entry, work);  in async_run_entry_fn()
     158  INIT_WORK(&entry->work, async_run_entry_fn);  in __async_schedule_node_domain()
     176  queue_work_node(node, system_unbound_wq, &entry->work);  in __async_schedule_node_domain()

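async.c wraps each async_entry in a work_struct and queues it on system_unbound_wq. The public-facing pattern that feeds this, sketched with invented names (typical of parallelized boot-time probing):

    #include <linux/async.h>
    #include <linux/printk.h>

    static void my_async_probe(void *data, async_cookie_t cookie)
    {
            pr_info("async probe for %s\n", (const char *)data);
    }

    static void async_example(void)
    {
            async_schedule(my_async_probe, "device0");
            /* ... other initialization proceeds concurrently ... */
            async_synchronize_full();  /* wait for all outstanding async work */
    }
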
jump_label.c
     259  void jump_label_update_timeout(struct work_struct *work)  in jump_label_update_timeout()  [argument]
     262  container_of(work, struct static_key_deferred, work.work);  in jump_label_update_timeout()
     281  struct delayed_work *work,  in __static_key_slow_dec_deferred()  [argument]
     289  schedule_delayed_work(work, timeout);  in __static_key_slow_dec_deferred()
     293  void __static_key_deferred_flush(void *key, struct delayed_work *work)  in __static_key_deferred_flush()  [argument]
     296  flush_delayed_work(work);  in __static_key_deferred_flush()
     305  INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);  in jump_label_rate_limit()

umh.c
     161  static void call_usermodehelper_exec_work(struct work_struct *work)  in call_usermodehelper_exec_work()  [argument]
     164  container_of(work, struct subprocess_info, work);  in call_usermodehelper_exec_work()
     370  INIT_WORK(&sub_info->work, call_usermodehelper_exec_work);  in call_usermodehelper_setup()
     437  queue_work(system_unbound_wq, &sub_info->work);  in call_usermodehelper_exec()

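Here the subprocess_info is queued as a work item on system_unbound_wq, and call_usermodehelper_exec_work() forks the helper from that kworker. The usual caller shape, as a sketch (the helper path and environment are examples only):

    #include <linux/umh.h>

    static int umh_example(void)
    {
            char *argv[] = { "/sbin/my-helper", "--oneshot", NULL };  /* example binary */
            char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };

            /* UMH_WAIT_PROC: sleep until the helper process has exited. */
            return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
    }
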
acct.c
     111  struct work_struct work;  [member]
     194  schedule_work(&acct->work);  in acct_pin_kill()
     202  static void close_work(struct work_struct *work)  in close_work()  [argument]
     204  struct bsd_acct_struct *acct = container_of(work, struct bsd_acct_struct, work);  in close_work()
     265  INIT_WORK(&acct->work, close_work);  in acct_on()

smp.c
    1227  struct work_struct work;  [member]
    1235  static void smp_call_on_cpu_callback(struct work_struct *work)  in smp_call_on_cpu_callback()  [argument]
    1239  sscs = container_of(work, struct smp_call_on_cpu_struct, work);  in smp_call_on_cpu_callback()
    1258  INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);  in smp_call_on_cpu()
    1263  queue_work_on(cpu, system_wq, &sscs.work);  in smp_call_on_cpu()

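smp_call_on_cpu() is implemented with an on-stack work item queued on the target CPU plus a completion. From the caller's side it looks like this (hedged sketch; the callback is invented):

    #include <linux/smp.h>
    #include <linux/printk.h>

    /* Runs in process context on the requested CPU, inside a kworker. */
    static int probe_local_state(void *unused)
    {
            pr_info("running on cpu %d\n", raw_smp_processor_id());
            return 0;
    }

    static int smp_call_example(void)
    {
            /* Blocks until probe_local_state() has run on CPU 1; returns its value.
             * The last argument (phys) is false: no need to pin the physical CPU. */
            return smp_call_on_cpu(1, probe_local_state, NULL, false);
    }
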
padata.c
      48  static void __init padata_mt_helper(struct work_struct *work);
     315  queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);  in padata_reorder()
     335  static void invoke_padata_reorder(struct work_struct *work)  in invoke_padata_reorder()  [argument]
     340  pd = container_of(work, struct parallel_data, reorder_work);  in invoke_padata_reorder()
     353  squeue = container_of(serial_work, struct padata_serial_queue, work);  in padata_serial_worker()
     541  INIT_WORK(&squeue->work, padata_serial_worker);  in padata_init_squeues()

/kernel/entry/

common.c
      49  unsigned long work)  in syscall_trace_enter()  [argument]
      58  if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {  in syscall_trace_enter()
      64  if (work & (SYSCALL_WORK_SYSCALL_TRACE | SYSCALL_WORK_SYSCALL_EMU)) {  in syscall_trace_enter()
      66  if (ret || (work & SYSCALL_WORK_SYSCALL_EMU))  in syscall_trace_enter()
      71  if (work & SYSCALL_WORK_SECCOMP) {  in syscall_trace_enter()
      80  if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT))  in syscall_trace_enter()
      91  unsigned long work = READ_ONCE(current_thread_info()->syscall_work);  in __syscall_enter_from_user_work()  [local]
      93  if (work & SYSCALL_WORK_ENTER)  in __syscall_enter_from_user_work()
      94  syscall = syscall_trace_enter(regs, syscall, work);  in __syscall_enter_from_user_work()
     220  static inline bool report_single_step(unsigned long work)  in report_single_step()  [argument]
     [all …]

/kernel/locking/

test-ww_mutex.c
      29  struct work_struct work;  [member]
      40  static void test_mutex_work(struct work_struct *work)  in test_mutex_work()  [argument]
      42  struct test_mutex *mtx = container_of(work, typeof(*mtx), work);  in test_mutex_work()
      67  INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);  in __test_mutex()
      73  schedule_work(&mtx.work);  in __test_mutex()
     101  flush_work(&mtx.work);  in __test_mutex()
     102  destroy_work_on_stack(&mtx.work);  in __test_mutex()
     177  struct work_struct work;  [member]
     186  static void test_abba_work(struct work_struct *work)  in test_abba_work()  [argument]
     188  struct test_abba *abba = container_of(work, typeof(*abba), work);  in test_abba_work()
     [all …]

/kernel/bpf/

mmap_unlock_work.h
      26  struct mmap_unlock_irq_work *work = NULL;  in bpf_mmap_unlock_get_irq_work()  [local]
      31  work = this_cpu_ptr(&mmap_unlock_work);  in bpf_mmap_unlock_get_irq_work()
      32  if (irq_work_is_busy(&work->irq_work)) {  in bpf_mmap_unlock_get_irq_work()
      45  *work_ptr = work;  in bpf_mmap_unlock_get_irq_work()
      49  static inline void bpf_mmap_unlock_mm(struct mmap_unlock_irq_work *work, struct mm_struct *mm)  in bpf_mmap_unlock_mm()  [argument]
      51  if (!work) {  in bpf_mmap_unlock_mm()
      54  work->mm = mm;  in bpf_mmap_unlock_mm()
      61  irq_work_queue(&work->irq_work);  in bpf_mmap_unlock_mm()

ringbuf.c
      36  struct irq_work work;  [member]
     156  static void bpf_ringbuf_notify(struct irq_work *work)  in bpf_ringbuf_notify()  [argument]
     158  struct bpf_ringbuf *rb = container_of(work, struct bpf_ringbuf, work);  in bpf_ringbuf_notify()
     174  init_irq_work(&rb->work, bpf_ringbuf_notify);  in bpf_ringbuf_alloc()
     479  irq_work_queue(&rb->work);  in bpf_ringbuf_commit()
     481  irq_work_queue(&rb->work);  in bpf_ringbuf_commit()
     763  irq_work_queue(&rb->work);  in BPF_CALL_4()
     765  irq_work_queue(&rb->work);  in BPF_CALL_4()

memalloc.c
     290  static void bpf_mem_refill(struct irq_work *work)  in bpf_mem_refill()  [argument]
     292  struct bpf_mem_cache *c = container_of(work, struct bpf_mem_cache, refill_work);  in bpf_mem_refill()
     453  static void free_mem_alloc_deferred(struct work_struct *work)  in free_mem_alloc_deferred()  [argument]
     455  struct bpf_mem_alloc *ma = container_of(work, struct bpf_mem_alloc, work);  in free_mem_alloc_deferred()
     485  INIT_WORK(&copy->work, free_mem_alloc_deferred);  in destroy_mem_alloc()
     486  queue_work(system_unbound_wq, &copy->work);  in destroy_mem_alloc()

task_iter.c
     783  struct mmap_unlock_irq_work *work = NULL;  in BPF_CALL_5()  [local]
     799  irq_work_busy = bpf_mmap_unlock_get_irq_work(&work);  in BPF_CALL_5()
     811  bpf_mmap_unlock_mm(work, mm);  in BPF_CALL_5()
     830  struct mmap_unlock_irq_work *work;  in do_mmap_read_unlock()  [local]
     835  work = container_of(entry, struct mmap_unlock_irq_work, irq_work);  in do_mmap_read_unlock()
     836  mmap_read_unlock_non_owner(work->mm);  in do_mmap_read_unlock()
     841  struct mmap_unlock_irq_work *work;  in task_iter_init()  [local]
     845  work = per_cpu_ptr(&mmap_unlock_work, cpu);  in task_iter_init()
     846  init_irq_work(&work->irq_work, do_mmap_read_unlock);  in task_iter_init()

/kernel/irq/

irq_sim.c
      14  struct irq_work work;  [member]
      81  irq_work_queue(&irq_ctx->work_ctx->work);  in irq_sim_set_irqchip_state()
     100  static void irq_sim_handle_irq(struct irq_work *work)  in irq_sim_handle_irq()  [argument]
     106  work_ctx = container_of(work, struct irq_sim_work_ctx, work);  in irq_sim_handle_irq()
     184  work_ctx->work = IRQ_WORK_INIT_HARD(irq_sim_handle_irq);  in irq_domain_create_sim()
     207  irq_work_sync(&work_ctx->work);  in irq_domain_remove_sim()

/kernel/rcu/

srcutree.c
      75  static void srcu_invoke_callbacks(struct work_struct *work);
      77  static void process_srcu(struct work_struct *work);
     142  INIT_WORK(&sdp->work, srcu_invoke_callbacks);  in init_srcu_struct_data()
     248  INIT_DELAYED_WORK(&ssp->work, process_srcu);  in init_srcu_struct_fields()
     602  flush_delayed_work(&ssp->work);  in cleanup_srcu_struct()
     607  flush_work(&sdp->work);  in cleanup_srcu_struct()
     690  queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);  in srcu_delay_timer()
     697  queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);  in srcu_queue_delayed_work_on()
     951  queue_delayed_work(rcu_gp_wq, &ssp->work,  in srcu_funnel_gp_start()
     953  else if (list_empty(&ssp->work.work.entry))  in srcu_funnel_gp_start()
     [all …]

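These srcutree.c hits are all grace-period machinery: process_srcu() advances the SRCU state machine and srcu_invoke_callbacks() runs per-CPU callbacks, both driven as (delayed) work on rcu_gp_wq. For orientation, the reader-facing API this machinery serves, as a rough sketch (the protected structure is invented):

    #include <linux/srcu.h>
    #include <linux/slab.h>

    struct foo {
            int val;
    };

    DEFINE_STATIC_SRCU(my_srcu);
    static struct foo __rcu *global_foo;

    static int reader(void)
    {
            struct foo *p;
            int idx, val = -1;

            idx = srcu_read_lock(&my_srcu);          /* SRCU readers may sleep */
            p = srcu_dereference(global_foo, &my_srcu);
            if (p)
                    val = p->val;
            srcu_read_unlock(&my_srcu, idx);
            return val;
    }

    static void writer(struct foo *newp)
    {
            struct foo *old = rcu_replace_pointer(global_foo, newp, true);

            /* Waits for pre-existing readers; the grace period is advanced by
             * the work items listed above. */
            synchronize_srcu(&my_srcu);
            kfree(old);
    }
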
/kernel/sched/

cpufreq_schedutil.c
      32  struct kthread_work work;  [member]
     470  static void sugov_work(struct kthread_work *work)  in sugov_work()  [argument]
     472  struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);  in sugov_work()
     502  kthread_queue_work(&sg_policy->worker, &sg_policy->work);  in sugov_irq_work()
     608  kthread_init_work(&sg_policy->work, sugov_work);  in sugov_kthread_create()
     811  kthread_cancel_work_sync(&sg_policy->work);  in sugov_stop()

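schedutil chains two of the mechanisms above: an irq_work raised from the scheduler fast path (sugov_irq_work) hands off to a kthread_work so the actual frequency change can sleep. A condensed sketch of that handoff pattern, with invented names rather than the driver's real fields:

    #include <linux/irq_work.h>
    #include <linux/kthread.h>

    struct fast_to_slow {
            struct irq_work irq_work;         /* raised from atomic context */
            struct kthread_work work;         /* runs in a dedicated kthread worker */
            struct kthread_worker worker;
    };

    static void slow_path(struct kthread_work *work)
    {
            /* may sleep here, e.g. talk to firmware to change the frequency */
    }

    static void fast_path(struct irq_work *irq_work)
    {
            struct fast_to_slow *s = container_of(irq_work, struct fast_to_slow, irq_work);

            kthread_queue_work(&s->worker, &s->work);  /* mirrors sugov_irq_work() */
    }

    static void setup(struct fast_to_slow *s)
    {
            kthread_init_worker(&s->worker);  /* worker thread creation not shown */
            kthread_init_work(&s->work, slow_path);
            init_irq_work(&s->irq_work, fast_path);
    }
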
/kernel/trace/

ring_buffer.c
      41  static void update_pages_handler(struct work_struct *work);
     376  struct irq_work work;  [member]
     876  static void rb_wake_up_waiters(struct irq_work *work)  in rb_wake_up_waiters()  [argument]
     878  struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);  in rb_wake_up_waiters()
     928  irq_work_queue(&rbwork->work);  in ring_buffer_wake_waiters()
     945  struct rb_irq_work *work;  in ring_buffer_wait()  [local]
     955  work = &buffer->irq_work;  in ring_buffer_wait()
     962  work = &cpu_buffer->irq_work;  in ring_buffer_wait()
     965  wait_index = READ_ONCE(work->wait_index);  in ring_buffer_wait()
     969  prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);  in ring_buffer_wait()
     [all …]

bpf_trace.c
     836  struct send_signal_irq_work *work;  in do_bpf_send_signal()  [local]
     838  work = container_of(entry, struct send_signal_irq_work, irq_work);  in do_bpf_send_signal()
     839  group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);  in do_bpf_send_signal()
     840  put_task_struct(work->task);  in do_bpf_send_signal()
     845  struct send_signal_irq_work *work = NULL;  in bpf_send_signal_common()  [local]
     867  work = this_cpu_ptr(&send_signal_work);  in bpf_send_signal_common()
     868  if (irq_work_is_busy(&work->irq_work))  in bpf_send_signal_common()
     875  work->task = get_task_struct(current);  in bpf_send_signal_common()
     876  work->sig = sig;  in bpf_send_signal_common()
     877  work->type = type;  in bpf_send_signal_common()
     [all …]

/kernel/events/

ring_buffer.c
     888  static void rb_free_work(struct work_struct *work)  in rb_free_work()  [argument]
     894  rb = container_of(work, struct perf_buffer, work);  in rb_free_work()
     908  schedule_work(&rb->work);  in rb_free()
     926  INIT_WORK(&rb->work, rb_free_work);  in rb_alloc()

/kernel/time/

posix-cpu-timers.c
    1165  static void posix_cpu_timers_work(struct callback_head *work)  in posix_cpu_timers_work()  [argument]
    1167  struct posix_cputimers_work *cw = container_of(work, typeof(*cw), work);  in posix_cpu_timers_work()
    1221  memset(&p->posix_cputimers_work.work, 0,  in clear_posix_cputimers_work()
    1222  sizeof(p->posix_cputimers_work.work));  in clear_posix_cputimers_work()
    1223  init_task_work(&p->posix_cputimers_work.work,  in clear_posix_cputimers_work()
    1256  task_work_add(tsk, &tsk->posix_cputimers_work.work, TWA_RESUME);  in __run_posix_cpu_timers()

/kernel/power/

wakelock.c
      85  static void __wakelocks_gc(struct work_struct *work);
     100  static void __wakelocks_gc(struct work_struct *work)  in __wakelocks_gc()  [argument]