/kernel/

D | stop_machine.c |
     45  static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)  in cpu_stop_init_done() argument
     47  memset(done, 0, sizeof(*done));  in cpu_stop_init_done()
     48  atomic_set(&done->nr_todo, nr_todo);  in cpu_stop_init_done()
     49  init_completion(&done->completion);  in cpu_stop_init_done()
     53  static void cpu_stop_signal_done(struct cpu_stop_done *done)  in cpu_stop_signal_done() argument
     55  if (atomic_dec_and_test(&done->nr_todo))  in cpu_stop_signal_done()
     56  complete(&done->completion);  in cpu_stop_signal_done()
     80  else if (work->done)  in cpu_stop_queue_work()
     81  cpu_stop_signal_done(work->done);  in cpu_stop_queue_work()
    116  struct cpu_stop_done done;  in stop_one_cpu() local
    [all …]

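The stop_machine.c hits above follow a counted-completion pattern: the waiter seeds an atomic nr_todo with the number of pieces of work it queued, and whichever finisher drops the count to zero fires the completion. A minimal sketch of that pattern is below; the struct and function names are illustrative, not the kernel's actual cpu_stop code.

    /* Illustrative only: a "last one out signals the waiter" helper. */
    #include <linux/atomic.h>
    #include <linux/completion.h>

    struct multi_done {                      /* hypothetical type */
            atomic_t          nr_todo;       /* outstanding pieces of work */
            struct completion completion;    /* signalled by the last finisher */
    };

    static void multi_done_init(struct multi_done *d, unsigned int nr_todo)
    {
            atomic_set(&d->nr_todo, nr_todo);
            init_completion(&d->completion);
    }

    static void multi_done_signal(struct multi_done *d)
    {
            /* only the caller that takes the count to zero wakes the waiter */
            if (atomic_dec_and_test(&d->nr_todo))
                    complete(&d->completion);
    }
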
D | kthread.c |
     47  struct completion *done;  member
    271  struct completion *done;  in kthread() local
    279  done = xchg(&create->done, NULL);  in kthread()
    280  if (!done) {  in kthread()
    287  complete(done);  in kthread()
    305  complete(done);  in kthread()
    339  struct completion *done = xchg(&create->done, NULL);  in create_kthread() local
    341  if (!done) {  in create_kthread()
    346  complete(done);  in create_kthread()
    356  DECLARE_COMPLETION_ONSTACK(done);  in __kthread_create_on_node()
    [all …]

D | umh.c |
    404  DECLARE_COMPLETION_ONSTACK(done);  in call_usermodehelper_exec()
    430  sub_info->complete = (wait == UMH_NO_WAIT) ? NULL : &done;  in call_usermodehelper_exec()
    438  retval = wait_for_completion_killable(&done);  in call_usermodehelper_exec()
    448  wait_for_completion(&done);  in call_usermodehelper_exec()

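call_usermodehelper_exec() keeps its completion on the caller's stack and, for killable waits, gives up only on a fatal signal. The sketch below shows that shape with hypothetical names; note that real code (umh.c included) must also make sure the asynchronous side can no longer touch the on-stack completion once an interrupted waiter has returned.

    #include <linux/completion.h>

    /* queue_fn() is a stand-in for whatever hands &done to asynchronous work
     * that will eventually call complete(&done). */
    static int wait_for_helper_example(void (*queue_fn)(struct completion *))
    {
            DECLARE_COMPLETION_ONSTACK(done);   /* valid only for this call */

            queue_fn(&done);

            /* returns 0 when completed, -ERESTARTSYS on a fatal signal */
            return wait_for_completion_killable(&done);
    }
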
D | smp.c |
   1000  struct completion done;  member
   1018  complete(&sscs->done);  in smp_call_on_cpu_callback()
   1024  .done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),  in smp_call_on_cpu()
   1036  wait_for_completion(&sscs.done);  in smp_call_on_cpu()

D | acct.c |
     93  struct completion done;  member
    176  wait_for_completion(&acct->done);  in acct_pin_kill()
    190  complete(&acct->done);  in close_work()
    247  init_completion(&acct->done);  in acct_on()

D | watchdog.c |
    444  struct completion *done = this_cpu_ptr(&softlockup_completion);  in watchdog_enable() local
    448  init_completion(done);  in watchdog_enable()
    449  complete(done);  in watchdog_enable()

D | static_call.c |
    132  goto done;  in __static_call_update()
    143  goto done;  in __static_call_update()
    203  done:  in __static_call_update()

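Several of the files in this listing (static_call.c here, and pelt.c, fair.c, bpf_iter.c, transition.c and cpuset.c below) use done not as a data field but as the conventional single-exit label, so every early return funnels through one cleanup path. A generic sketch of that idiom, with illustrative names only:

    #include <linux/errno.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(example_lock);      /* illustrative lock, not from static_call.c */

    static int example_update(int key)
    {
            int ret = 0;

            mutex_lock(&example_lock);

            if (key < 0) {
                    ret = -EINVAL;
                    goto done;              /* every early exit still unlocks */
            }

            /* ... perform the update under the lock ... */

    done:
            mutex_unlock(&example_lock);
            return ret;
    }
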
D | watch_queue.c |
    106  bool done = false;  in post_one_notification() local
    145  done = true;  in post_one_notification()
    149  if (done)  in post_one_notification()
    151  return done;  in post_one_notification()

D | workqueue.c |
    234  struct completion done;  /* flush completion */  member
   1220  complete(&pwq->wq->first_flusher->done);  in pwq_dec_nr_in_flight()
   2646  struct completion done;  member
   2653  complete(&barr->done);  in wq_barrier_func()
   2696  init_completion_map(&barr->done, &target->lockdep_map);  in insert_wq_barrier()
   2786  complete(&wq->first_flusher->done);  in flush_workqueue_prep_pwqs()
   2803  .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),  in flush_workqueue()
   2862  wait_for_completion(&this_flusher.done);  in flush_workqueue()
   2892  complete(&next->done);  in flush_workqueue()
   3074  wait_for_completion(&barr.done);  in __flush_work()

/kernel/sched/

D | completion.c |
     34  if (x->done != UINT_MAX)  in complete()
     35  x->done++;  in complete()
     64  x->done = UINT_MAX;  in complete_all()
     74  if (!x->done) {  in do_wait_for_common()
     87  } while (!x->done && timeout);  in do_wait_for_common()
     89  if (!x->done)  in do_wait_for_common()
     92  if (x->done != UINT_MAX)  in do_wait_for_common()
     93  x->done--;  in do_wait_for_common()
    293  if (!READ_ONCE(x->done))  in try_wait_for_completion()
    297  if (!x->done)  in try_wait_for_completion()
    [all …]

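completion.c is the implementation behind most of the done fields in this listing: x->done is a count that complete() bumps, a waiter consumes, and complete_all() pins at UINT_MAX so the completion stays signalled. As a minimal, hypothetical usage sketch of that API from a waiter and a worker thread (names are made up for illustration):

    #include <linux/completion.h>
    #include <linux/err.h>
    #include <linux/kthread.h>

    static struct completion setup_done;    /* hypothetical example state */

    static int example_worker(void *unused)
    {
            /* ... one-time setup ... */
            complete(&setup_done);          /* bumps ->done and wakes one waiter */
            return 0;
    }

    static int example_start_and_wait(void)
    {
            struct task_struct *t;

            init_completion(&setup_done);
            t = kthread_run(example_worker, NULL, "example-worker");
            if (IS_ERR(t))
                    return PTR_ERR(t);

            wait_for_completion(&setup_done);   /* sleeps until ->done is non-zero */
            return 0;
    }
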
D | idle.c |
    344  int done;  member
    351  WRITE_ONCE(it->done, 1);  in idle_inject_timer_fn()
    376  it.done = 0;  in play_idle_precise()
    382  while (!READ_ONCE(it.done))  in play_idle_precise()

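Here done is just a polled flag: the timer callback stores 1 with WRITE_ONCE() and play_idle_precise() loops on READ_ONCE() so the compiler re-reads the flag on every pass. A stripped-down sketch with hypothetical names:

    #include <linux/compiler.h>     /* READ_ONCE / WRITE_ONCE */

    static int example_done;        /* hypothetical flag, like idle_inject's ->done */

    /* Signalling side, e.g. a timer callback: one annotated store. */
    static void example_signal(void)
    {
            WRITE_ONCE(example_done, 1);
    }

    /* Polling side: do one unit of work per pass until the flag is seen. */
    static void example_poll(void (*do_unit_of_work)(void))
    {
            while (!READ_ONCE(example_done))
                    do_unit_of_work();      /* play_idle_precise() does do_idle() here */
    }
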
D | pelt.c |
    555  goto done;  in sched_pelt_multiplier()
    569  goto done;  in sched_pelt_multiplier()
    576  done:  in sched_pelt_multiplier()

D | fair.c |
   4027  goto done;  in util_est_update()
   4039  goto done;  in util_est_update()
   4071  done:  in util_est_update()
   4672  goto done;  in pick_next_entity()
   4714  done:  in pick_next_entity()
   7435  goto done;  in pick_next_task_fair()
   7445  goto done;  in pick_next_task_fair()
   7456  done: __maybe_unused;  in pick_next_task_fair()
   8239  static bool __update_blocked_others(struct rq *rq, bool *done)  in __update_blocked_others() argument
   8260  *done = false;  in __update_blocked_others()
   [all …]

/kernel/power/

D | swap.c |
    591  wait_queue_head_t done;  /* crc update done */  member
    611  wake_up(&d->done);  in crc32_threadfn()
    620  wake_up(&d->done);  in crc32_threadfn()
    633  wait_queue_head_t done;  /* compression done */  member
    655  wake_up(&d->done);  in lzo_compress_threadfn()
    664  wake_up(&d->done);  in lzo_compress_threadfn()
    730  init_waitqueue_head(&data[thr].done);  in save_image_lzo()
    747  init_waitqueue_head(&crc->done);  in save_image_lzo()
    813  wait_event(data[thr].done,  in save_image_lzo()
    853  wait_event(crc->done, atomic_read_acquire(&crc->stop));  in save_image_lzo()
    [all …]

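Unlike the completion users above, swap.c pairs a wait_queue_head_t named done with an explicit condition (for example crc->stop): workers set the condition and wake_up() the queue, and the consumer sleeps in wait_event() until the condition reads true. A loose sketch of that pairing, with hypothetical names:

    #include <linux/atomic.h>
    #include <linux/wait.h>

    struct stage {                          /* hypothetical, loosely modelled on swap.c's thread data */
            wait_queue_head_t done;         /* "stage finished" wait queue */
            atomic_t          stop;         /* the condition the waiter checks */
    };

    static void stage_init(struct stage *s)
    {
            init_waitqueue_head(&s->done);
            atomic_set(&s->stop, 0);
    }

    /* Worker: publish the condition, then wake anyone sleeping on the queue. */
    static void stage_finish(struct stage *s)
    {
            atomic_set_release(&s->stop, 1);
            wake_up(&s->done);
    }

    /* Consumer: sleeps until the condition is observed true. */
    static void stage_wait(struct stage *s)
    {
            wait_event(s->done, atomic_read_acquire(&s->stop));
    }
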
/kernel/trace/

D | preemptirq_delay_test.c |
     32  static struct completion done;  variable
    120  complete(&done);  in preemptirq_delay_run()
    138  init_completion(&done);  in preemptirq_run_test()
    145  wait_for_completion(&done);  in preemptirq_run_test()

D | trace_uprobe.c |
   1229  bool done;  in trace_uprobe_filter_remove() local
   1234  done = filter->nr_systemwide ||  in trace_uprobe_filter_remove()
   1239  done = filter->nr_systemwide;  in trace_uprobe_filter_remove()
   1243  return done;  in trace_uprobe_filter_remove()
   1250  bool done;  in trace_uprobe_filter_add() local
   1262  done = filter->nr_systemwide ||  in trace_uprobe_filter_add()
   1267  done = filter->nr_systemwide;  in trace_uprobe_filter_add()
   1272  return done;  in trace_uprobe_filter_add()

/kernel/bpf/

D | bpf_iter.c |
     95  goto done;  in bpf_seq_read()
    104  goto done;  in bpf_seq_read()
    109  goto done;  in bpf_seq_read()
    120  goto done;  in bpf_seq_read()
    135  goto done;  in bpf_seq_read()
    164  goto done;  in bpf_seq_read()
    179  goto done;  in bpf_seq_read()
    195  goto done;  in bpf_seq_read()
    204  goto done;  in bpf_seq_read()
    209  done:  in bpf_seq_read()

D | inode.c |
    175  bool done;  member
    222  if (map_iter(m)->done)  in map_seq_next()
    232  map_iter(m)->done = true;  in map_seq_next()
    241  if (map_iter(m)->done)  in map_seq_start()

/kernel/locking/

D | test-ww_mutex.c |
     22  struct completion ready, go, done;  member
     44  complete(&mtx->done);  in test_mutex_work()
     61  init_completion(&mtx.done);  in __test_mutex()
     74  if (completion_done(&mtx.done)) {  in __test_mutex()
     81  ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);  in __test_mutex()

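test-ww_mutex.c adds two variants to the plain wait: completion_done() peeks at the completion without blocking, and wait_for_completion_timeout() bounds the wait and returns 0 when the timeout elapses. A hypothetical helper combining the two (the one-second timeout is arbitrary):

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    /* Hypothetical: return 0 if the worker signalled within one second. */
    static int example_check_done(struct completion *done)
    {
            unsigned long left;

            if (completion_done(done))      /* non-blocking: already signalled? */
                    return 0;

            left = wait_for_completion_timeout(done, msecs_to_jiffies(1000));
            return left ? 0 : -ETIMEDOUT;   /* 0 remaining jiffies means it timed out */
    }
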
/kernel/debug/kdb/

D | kdb_bp.c |
    392  int done = 0;  in kdb_bc() local
    448  done++;  in kdb_bc()
    489  return (!done) ? KDB_BPTNOTFOUND : 0;  in kdb_bc()

/kernel/irq/

D | affinity.c |
    255  unsigned int i, n, nodes, cpus_per_vec, extra_vecs, done = 0;  in __irq_build_affinity_masks() local
    328  done += nv->nvectors;  in __irq_build_affinity_masks()
    331  return done;  in __irq_build_affinity_masks()

/kernel/livepatch/

D | transition.c |
    313  goto done;  in klp_try_switch_task()
    318  goto done;  in klp_try_switch_task()
    325  done:  in klp_try_switch_task()

/kernel/rcu/

D | rcuscale.c |
    364  bool started = false, done = false, alldone = false;  in rcu_scale_writer() local
    426  if (!done && i >= MIN_MEAS) {  in rcu_scale_writer()
    427  done = true;  in rcu_scale_writer()
    450  if (done && !alldone &&  in rcu_scale_writer()

D | tree_exp.h |
     75  bool done;  in sync_exp_reset_tree_hotplug() local
    111  done = false;  in sync_exp_reset_tree_hotplug()
    115  done = true;  in sync_exp_reset_tree_hotplug()
    118  if (done)  in sync_exp_reset_tree_hotplug()

/kernel/cgroup/

D | cpuset.c |
    807  goto done;  in generate_sched_domains()
    817  goto done;  in generate_sched_domains()
    822  goto done;  in generate_sched_domains()
    895  goto done;  in generate_sched_domains()
    946  done:  in generate_sched_domains()
   1886  goto done;  in update_nodemask()
   1900  goto done;  in update_nodemask()
   1905  goto done;  in update_nodemask()
   1911  goto done;  in update_nodemask()
   1915  goto done;  in update_nodemask()
   [all …]