/kernel/
D | stop_machine.c |
    70 static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo) in cpu_stop_init_done() argument
    72 memset(done, 0, sizeof(*done)); in cpu_stop_init_done()
    73 atomic_set(&done->nr_todo, nr_todo); in cpu_stop_init_done()
    74 init_completion(&done->completion); in cpu_stop_init_done()
    78 static void cpu_stop_signal_done(struct cpu_stop_done *done) in cpu_stop_signal_done() argument
    80 if (atomic_dec_and_test(&done->nr_todo)) in cpu_stop_signal_done()
    81 complete(&done->completion); in cpu_stop_signal_done()
    105 else if (work->done) in cpu_stop_queue_work()
    106 cpu_stop_signal_done(work->done); in cpu_stop_queue_work()
    141 struct cpu_stop_done done; in stop_one_cpu() local
    [all …]
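The stop_machine.c hits all revolve around one pattern: a single cpu_stop_done shared by several queued works, where nr_todo counts outstanding works and the last worker to finish fires the completion. A minimal sketch of that shape, with hypothetical names rather than the actual cpu_stop structures:

#include <linux/atomic.h>
#include <linux/completion.h>

/* Hypothetical illustration only, not the real cpu_stop_done code. */
struct multi_done {
	atomic_t		nr_todo;	/* works still outstanding */
	struct completion	completion;	/* fired by the last worker */
};

static void multi_done_init(struct multi_done *done, unsigned int nr_todo)
{
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

static void multi_done_signal(struct multi_done *done)
{
	/* Only the worker that drops nr_todo to zero wakes the waiter. */
	if (atomic_dec_and_test(&done->nr_todo))
		complete(&done->completion);
}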
|
D | kthread.c |
    47 struct completion *done; member
    347 struct completion *done; in kthread() local
    354 done = xchg(&create->done, NULL); in kthread()
    355 if (!done) { in kthread()
    378 complete(done); in kthread()
    412 struct completion *done = xchg(&create->done, NULL); in create_kthread() local
    414 if (!done) { in create_kthread()
    419 complete(done); in create_kthread()
    429 DECLARE_COMPLETION_ONSTACK(done); in __kthread_create_on_node()
    439 create->done = &done; in __kthread_create_on_node()
    [all …]
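kthread.c shows a hand-off idiom: the creator publishes a pointer to an on-stack completion, and both sides use xchg(&create->done, NULL) so that exactly one of them claims it; if the creator was killed and already took the pointer back, the new thread must not touch the dead stack. A rough sketch of that claim step, using hypothetical names:

#include <linux/atomic.h>
#include <linux/completion.h>

struct create_request {
	struct completion *done;	/* points at the creator's on-stack completion */
};

static void created_thread_side(struct create_request *create)
{
	/* Atomically take ownership of the completion pointer. */
	struct completion *done = xchg(&create->done, NULL);

	if (!done)
		return;		/* creator gave up; its stack may be gone */

	complete(done);		/* still owned by a live waiter, safe to signal */
}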
|
D | umh.c |
    408 DECLARE_COMPLETION_ONSTACK(done); in call_usermodehelper_exec()
    434 sub_info->complete = (wait == UMH_NO_WAIT) ? NULL : &done; in call_usermodehelper_exec()
    445 retval = wait_for_completion_state(&done, state | TASK_KILLABLE); in call_usermodehelper_exec()
    461 wait_for_completion_state(&done, state); in call_usermodehelper_exec()
|
D | acct.c |
    112 struct completion done; member
    195 wait_for_completion(&acct->done); in acct_pin_kill()
    209 complete(&acct->done); in close_work()
    266 init_completion(&acct->done); in acct_on()
|
D | static_call_inline.c |
    132 goto done; in __static_call_update()
    143 goto done; in __static_call_update()
    203 done: in __static_call_update()
|
D | watch_queue.c |
    106 bool done = false; in post_one_notification() local
    142 done = true; in post_one_notification()
    146 if (done) in post_one_notification()
    148 return done; in post_one_notification()
|
D | watchdog.c |
    463 struct completion *done = this_cpu_ptr(&softlockup_completion); in watchdog_enable() local
    467 init_completion(done); in watchdog_enable()
    468 complete(done); in watchdog_enable()
|
D | smp.c |
    1228 struct completion done; member
    1246 complete(&sscs->done); in smp_call_on_cpu_callback()
    1252 .done = COMPLETION_INITIALIZER_ONSTACK(sscs.done), in smp_call_on_cpu()
    1264 wait_for_completion(&sscs.done); in smp_call_on_cpu()
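smp.c initialises the completion directly in the on-stack struct with COMPLETION_INITIALIZER_ONSTACK() and then blocks until the callback running on the target CPU completes it. A simplified sketch of the same shape (the queueing of the remote work is elided):

#include <linux/completion.h>

struct remote_call {
	int			ret;
	struct completion	done;
};

static void remote_side(struct remote_call *rc)
{
	rc->ret = 0;			/* the real work would happen here */
	complete(&rc->done);		/* wake the waiting CPU */
}

static int caller_side(void)
{
	struct remote_call rc = {
		.done = COMPLETION_INITIALIZER_ONSTACK(rc.done),
	};

	/* ...hand &rc to the other CPU here (elided)... */
	wait_for_completion(&rc.done);
	return rc.ret;
}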
|
D | workqueue.c |
    252 struct completion done; /* flush completion */ member
    1233 complete(&pwq->wq->first_flusher->done); in pwq_dec_nr_in_flight()
    2647 struct completion done; member
    2654 complete(&barr->done); in wq_barrier_func()
    2698 init_completion_map(&barr->done, &target->lockdep_map); in insert_wq_barrier()
    2795 complete(&wq->first_flusher->done); in flush_workqueue_prep_pwqs()
    2812 .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map), in __flush_workqueue()
    2871 wait_for_completion(&this_flusher.done); in __flush_workqueue()
    2901 complete(&next->done); in __flush_workqueue()
    3083 wait_for_completion(&barr.done); in __flush_work()
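workqueue.c uses completions in two roles: each flusher waits on its own done, and __flush_work() inserts a barrier work whose only job is to complete barr.done once everything ahead of it has run. The real code places the barrier with insert_wq_barrier() and ties the completion to the workqueue's lockdep map; the sketch below only shows the completion side, with hypothetical names:

#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void my_barrier_func(struct work_struct *work)
{
	struct my_barrier *barr = container_of(work, struct my_barrier, work);

	complete(&barr->done);	/* the barrier itself has now executed */
}

static void wait_for_barrier(struct workqueue_struct *wq)
{
	struct my_barrier barr;

	INIT_WORK_ONSTACK(&barr.work, my_barrier_func);
	init_completion(&barr.done);

	queue_work(wq, &barr.work);
	wait_for_completion(&barr.done);	/* sleeps until the barrier ran */
	destroy_work_on_stack(&barr.work);
}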
|
/kernel/sched/ |
D | completion.c |
    34 if (x->done != UINT_MAX) in complete()
    35 x->done++; in complete()
    64 x->done = UINT_MAX; in complete_all()
    74 if (!x->done) { in do_wait_for_common()
    87 } while (!x->done && timeout); in do_wait_for_common()
    89 if (!x->done) in do_wait_for_common()
    92 if (x->done != UINT_MAX) in do_wait_for_common()
    93 x->done--; in do_wait_for_common()
    305 if (!READ_ONCE(x->done)) in try_wait_for_completion()
    309 if (!x->done) in try_wait_for_completion()
    [all …]
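The completion.c hits show the core mechanism itself: ->done is a counter protected by the wait-queue lock, complete() bumps it (saturating at UINT_MAX), complete_all() sets it to UINT_MAX so it is never decremented again, and waiters decrement it on the way out. From the API side that gives the usual producer/consumer shape sketched below (hypothetical names):

#include <linux/completion.h>

static DECLARE_COMPLETION(setup_done);

static void producer(void)
{
	complete(&setup_done);		/* ->done++, wake one waiter */

	/* complete_all(&setup_done) would instead set ->done to UINT_MAX,
	 * releasing every present and future waiter.                    */
}

static void consumer(void)
{
	/* Non-blocking probe first; falls back to sleeping. */
	if (try_wait_for_completion(&setup_done))
		return;

	wait_for_completion(&setup_done);	/* sleeps until ->done != 0 */
}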
|
D | idle.c |
    345 int done; member
    352 WRITE_ONCE(it->done, 1); in idle_inject_timer_fn()
    378 it.done = 0; in play_idle_precise()
    384 while (!READ_ONCE(it.done)) in play_idle_precise()
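Here done is not a completion at all but a plain int flag set from a timer callback and polled by the idle-injection loop; WRITE_ONCE()/READ_ONCE() keep the compiler from tearing or caching the shared access. A minimal sketch of that flag hand-off (hypothetical names):

#include <linux/compiler.h>

struct inject_state {
	int done;	/* set by the timer, polled by the loop */
};

static void timer_side(struct inject_state *it)
{
	WRITE_ONCE(it->done, 1);	/* single flag write, no lock needed */
}

static void loop_side(struct inject_state *it)
{
	it->done = 0;
	while (!READ_ONCE(it->done))
		;	/* keep injecting idle time until the timer fires */
}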
|
D | pelt.c |
    497 goto done; in sched_pelt_multiplier()
    511 goto done; in sched_pelt_multiplier()
    518 done: in sched_pelt_multiplier()
|
D | fair.c |
    4390 goto done; in util_est_update()
    4402 goto done; in util_est_update()
    4434 done: in util_est_update()
    4448 bool fits, uclamp_max_fits, done = false; in util_fits_cpu() local
    4450 trace_android_rvh_util_fits_cpu(util, uclamp_min, uclamp_max, cpu, &fits, &done); in util_fits_cpu()
    4452 if (done) in util_fits_cpu()
    5047 goto done; in pick_next_entity()
    5089 done: in pick_next_entity()
    5403 goto done; in throttle_cfs_rq()
    5424 goto done; in throttle_cfs_rq()
    [all …]
|
/kernel/power/ |
D | swap.c |
    601 wait_queue_head_t done; /* crc update done */ member
    621 wake_up(&d->done); in crc32_threadfn()
    630 wake_up(&d->done); in crc32_threadfn()
    643 wait_queue_head_t done; /* compression done */ member
    665 wake_up(&d->done); in lzo_compress_threadfn()
    674 wake_up(&d->done); in lzo_compress_threadfn()
    737 init_waitqueue_head(&data[thr].done); in save_image_lzo()
    754 init_waitqueue_head(&crc->done); in save_image_lzo()
    820 wait_event(data[thr].done, in save_image_lzo()
    860 wait_event(crc->done, atomic_read_acquire(&crc->stop)); in save_image_lzo()
    [all …]
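In the hibernation image writer done is a wait queue head rather than a completion: the compression and CRC threads wake_up() the queue when they change state, and save_image_lzo() sleeps with wait_event() on an explicit condition such as atomic_read_acquire(&crc->stop). A reduced sketch of that pairing (hypothetical names):

#include <linux/atomic.h>
#include <linux/wait.h>

struct thread_data {
	wait_queue_head_t	done;	/* the main thread sleeps here */
	atomic_t		stop;	/* condition re-checked by wait_event() */
};

static void thread_data_init(struct thread_data *d)
{
	init_waitqueue_head(&d->done);
	atomic_set(&d->stop, 0);
}

static void worker_finished(struct thread_data *d)
{
	atomic_set_release(&d->stop, 1);
	wake_up(&d->done);		/* waiter wakes and re-tests the condition */
}

static void wait_for_worker(struct thread_data *d)
{
	wait_event(d->done, atomic_read_acquire(&d->stop));
}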
|
/kernel/bpf/ |
D | bpf_iter.c |
    110 goto done; in bpf_seq_read()
    119 goto done; in bpf_seq_read()
    124 goto done; in bpf_seq_read()
    135 goto done; in bpf_seq_read()
    150 goto done; in bpf_seq_read()
    180 goto done; in bpf_seq_read()
    195 goto done; in bpf_seq_read()
    208 goto done; in bpf_seq_read()
    219 goto done; in bpf_seq_read()
    228 goto done; in bpf_seq_read()
    [all …]
|
D | inode.c |
    176 bool done; member
    223 if (map_iter(m)->done) in map_seq_next()
    233 map_iter(m)->done = true; in map_seq_next()
    242 if (map_iter(m)->done) in map_seq_start()
|
/kernel/trace/ |
D | preemptirq_delay_test.c |
    35 static struct completion done; variable
    134 complete(&done); in preemptirq_delay_run()
    152 init_completion(&done); in preemptirq_run_test()
    159 wait_for_completion(&done); in preemptirq_run_test()
|
D | trace_uprobe.c |
    1227 bool done; in trace_uprobe_filter_remove() local
    1232 done = filter->nr_systemwide || in trace_uprobe_filter_remove()
    1237 done = filter->nr_systemwide; in trace_uprobe_filter_remove()
    1241 return done; in trace_uprobe_filter_remove()
    1248 bool done; in trace_uprobe_filter_add() local
    1260 done = filter->nr_systemwide || in trace_uprobe_filter_add()
    1265 done = filter->nr_systemwide; in trace_uprobe_filter_add()
    1270 return done; in trace_uprobe_filter_add()
|
/kernel/locking/ |
D | test-ww_mutex.c |
    31 struct completion ready, go, done; member
    53 complete(&mtx->done); in test_mutex_work()
    70 init_completion(&mtx.done); in __test_mutex()
    83 if (completion_done(&mtx.done)) { in __test_mutex()
    90 ret = wait_for_completion_timeout(&mtx.done, TIMEOUT); in __test_mutex()
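The ww_mutex self-test keeps three completions per worker (ready, go, done) so the main thread can sequence the test: wait until the worker is ready, tell it to go, then either observe done immediately with the non-blocking completion_done() or fall back to a bounded wait_for_completion_timeout(). A sketch of that sequencing on the waiting side (hypothetical names; a one-second timeout is assumed):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct worker_sync {
	struct completion ready, go, done;
};

static int drive_worker(struct worker_sync *ws)
{
	init_completion(&ws->ready);
	init_completion(&ws->go);
	init_completion(&ws->done);

	/* The worker would: complete(&ws->ready); wait_for_completion(&ws->go);
	 * do its work; complete(&ws->done).                                    */

	wait_for_completion(&ws->ready);
	complete(&ws->go);

	if (completion_done(&ws->done))		/* already finished, no sleep */
		return 0;

	return wait_for_completion_timeout(&ws->done, HZ) ? 0 : -ETIMEDOUT;
}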
|
/kernel/debug/kdb/ |
D | kdb_bp.c |
    392 int done = 0; in kdb_bc() local
    448 done++; in kdb_bc()
    489 return (!done) ? KDB_BPTNOTFOUND : 0; in kdb_bc()
|
/kernel/rcu/ |
D | rcuscale.c |
    414 bool started = false, done = false, alldone = false; in rcu_scale_writer() local
    477 if (!done && i >= MIN_MEAS) { in rcu_scale_writer()
    478 done = true; in rcu_scale_writer()
    501 if (done && !alldone && in rcu_scale_writer()
|
D | tree_exp.h |
    77 bool done; in sync_exp_reset_tree_hotplug() local
    113 done = false; in sync_exp_reset_tree_hotplug()
    117 done = true; in sync_exp_reset_tree_hotplug()
    120 if (done) in sync_exp_reset_tree_hotplug()
|
/kernel/cgroup/ |
D | cpuset.c |
    951 goto done; in generate_sched_domains()
    961 goto done; in generate_sched_domains()
    966 goto done; in generate_sched_domains()
    1039 goto done; in generate_sched_domains()
    1090 done: in generate_sched_domains()
    2159 goto done; in update_nodemask()
    2173 goto done; in update_nodemask()
    2178 goto done; in update_nodemask()
    2184 goto done; in update_nodemask()
    2188 goto done; in update_nodemask()
    [all …]
|
/kernel/kcsan/ |
D | .kunitconfig |
    2 # Under kunit_tool, this can be done by using the --qemu_args
|
/kernel/debug/ |
D | debug_core.c |
    1053 goto done; in dbg_notify_reboot()
    1055 goto done; in dbg_notify_reboot()
    1059 done: in dbg_notify_reboot()
|