/kernel/time/ |
D | posix-cpu-timers.c |
    108  return pid_task(timer->it.cpu.pid, clock_pid_type(timer->it_clock));  in cpu_timer_task_rcu()
    117  u64 delta, incr, expires = timer->it.cpu.node.expires;  in bump_cpu_timer()
    137  timer->it.cpu.node.expires += incr;  in bump_cpu_timer()
    141  return timer->it.cpu.node.expires;  in bump_cpu_timer()
    404  timerqueue_init(&new_timer->it.cpu.node);  in posix_cpu_timer_create()
    405  new_timer->it.cpu.pid = get_pid(pid);  in posix_cpu_timer_create()
    447  struct cpu_timer *ctmr = &timer->it.cpu;  in disarm_timer()
    467  struct cpu_timer *ctmr = &timer->it.cpu;  in posix_cpu_timer_del()
    490  if (timer->it.cpu.firing)  in posix_cpu_timer_del()
    553  struct cpu_timer *ctmr = &timer->it.cpu;  in arm_timer()
    [all …]
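The bump_cpu_timer() hits above are overrun accounting: the expiry is pushed past the current time in power-of-two multiples of the interval rather than one interval at a time, so re-arming a long-expired periodic timer costs O(log n) additions instead of O(n). A standalone C sketch of that doubling idea, assuming the same expires/incr semantics (simplified types, not the kernel function itself):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t bump(uint64_t expires, uint64_t incr, uint64_t now,
                         uint64_t *overrun)
    {
            uint64_t delta;
            int i;

            if (now < expires || !incr)
                    return expires;

            delta = now + incr - expires;

            /* Scale incr up until one more doubling would overshoot delta. */
            for (i = 0; incr < delta - incr; i++)
                    incr <<= 1;

            /* Walk back down, taking each step that still fits. */
            for (; i >= 0; incr >>= 1, i--) {
                    if (delta < incr)
                            continue;
                    expires += incr;
                    *overrun += 1ULL << i;
                    delta -= incr;
            }
            return expires;
    }

    int main(void)
    {
            uint64_t overrun = 0;
            uint64_t exp = bump(1000, 10, 1995, &overrun);

            /* 1000 + 100*10 = 2000 > 1995: 100 intervals were missed. */
            printf("expires=%llu overruns=%llu\n",
                   (unsigned long long)exp, (unsigned long long)overrun);
            return 0;
    }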
|
D | itimer.c |
    51  struct cpu_itimer *it = &tsk->signal->it[clock_id];  in get_cpu_itimer() local
    55  val = it->expires;  in get_cpu_itimer()
    56  interval = it->incr;  in get_cpu_itimer()
    173  struct cpu_itimer *it = &tsk->signal->it[clock_id];  in set_cpu_itimer() local
    180  oval = it->expires;  in set_cpu_itimer()
    181  ointerval = it->incr;  in set_cpu_itimer()
    187  it->expires = nval;  in set_cpu_itimer()
    188  it->incr = ninterval;  in set_cpu_itimer()
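get_cpu_itimer() and set_cpu_itimer() manage the per-process it->expires/it->incr pairs behind ITIMER_VIRTUAL and ITIMER_PROF. From userspace they are driven by setitimer(2), where it_value corresponds to expires and it_interval to incr. A minimal runnable counterpart (the handler name and tick count are arbitrary):

    #include <signal.h>
    #include <stdio.h>
    #include <sys/time.h>

    static volatile sig_atomic_t ticks;

    static void on_sigprof(int sig)
    {
            (void)sig;
            ticks++;                        /* async-signal-safe: just count */
    }

    int main(void)
    {
            struct itimerval itv = {
                    .it_value    = { .tv_usec = 100000 },   /* -> it->expires */
                    .it_interval = { .tv_usec = 100000 },   /* -> it->incr    */
            };

            signal(SIGPROF, on_sigprof);
            if (setitimer(ITIMER_PROF, &itv, NULL) != 0) {
                    perror("setitimer");
                    return 1;
            }
            while (ticks < 10)      /* burn CPU so the profiling clock runs */
                    ;
            printf("got %d SIGPROF ticks\n", (int)ticks);
            return 0;
    }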
|
D | alarmtimer.c |
    564  it.alarm.alarmtimer);  in alarm_handle_timer()
    597  struct alarm *alarm = &timr->it.alarm.alarmtimer;  in alarm_timer_rearm()
    610  struct alarm *alarm = &timr->it.alarm.alarmtimer;  in alarm_timer_forward()
    622  struct alarm *alarm = &timr->it.alarm.alarmtimer;  in alarm_timer_remaining()
    633  return alarm_try_to_cancel(&timr->it.alarm.alarmtimer);  in alarm_timer_try_to_cancel()
    646  hrtimer_cancel_wait_running(&timr->it.alarm.alarmtimer.timer);  in alarm_timer_wait_running()
    659  struct alarm *alarm = &timr->it.alarm.alarmtimer;  in alarm_timer_arm()
    667  alarm_start(&timr->it.alarm.alarmtimer, expires);  in alarm_timer_arm()
    739  alarm_init(&new_timer->it.alarm.alarmtimer, type, alarm_handle_timer);  in alarm_timer_create()
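The alarm_timer_* functions are the clock ops for CLOCK_REALTIME_ALARM and CLOCK_BOOTTIME_ALARM, with the struct alarm embedded at timr->it.alarm.alarmtimer. A hedged userspace sketch of what ends up in alarm_timer_create() and alarm_timer_arm(); note that alarm clocks require CAP_WAKE_ALARM:

    #define _GNU_SOURCE
    #include <signal.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    static void on_alarm(int sig) { (void)sig; }

    int main(void)
    {
            timer_t tid;
            struct sigevent sev = {
                    .sigev_notify = SIGEV_SIGNAL,
                    .sigev_signo  = SIGALRM,
            };
            struct itimerspec its = {
                    .it_value = { .tv_sec = 2 },    /* one-shot, 2 s from now */
            };

            signal(SIGALRM, on_alarm);
            /* An alarm clock can wake the system from suspend. */
            if (timer_create(CLOCK_BOOTTIME_ALARM, &sev, &tid) != 0) {
                    perror("timer_create"); /* EPERM without CAP_WAKE_ALARM */
                    return 1;
            }
            timer_settime(tid, 0, &its, NULL);
            pause();                        /* returns when SIGALRM arrives */
            puts("alarm fired");
            timer_delete(tid);
            return 0;
    }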
|
D | posix-timers.c |
    295  struct hrtimer *timer = &timr->it.real.timer;  in common_hrtimer_rearm()
    373  timr = container_of(timer, struct k_itimer, it.real.timer);  in posix_timer_fn()
    493  hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);  in common_timer_create()
    642  struct hrtimer *timer = &timr->it.real.timer;  in common_hrtimer_remaining()
    649  struct hrtimer *timer = &timr->it.real.timer;  in common_hrtimer_forward()
    796  struct hrtimer *timer = &timr->it.real.timer;  in common_hrtimer_arm()
    812  hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);  in common_hrtimer_arm()
    813  timr->it.real.timer.function = posix_timer_fn;  in common_hrtimer_arm()
    825  return hrtimer_try_to_cancel(&timr->it.real.timer);  in common_hrtimer_try_to_cancel()
    830  hrtimer_cancel_wait_running(&timer->it.real.timer);  in common_timer_wait_running()
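posix_timer_fn() recovers the k_itimer from the embedded hrtimer with container_of(), the standard member-offset trick for intrusive structures. A self-contained userspace illustration with simplified stand-in types (not the kernel structures):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct hrtimer { long expires; };           /* stand-in */

    struct k_itimer {                           /* stand-in */
            int it_id;
            union {
                    struct { struct hrtimer timer; } real;
            } it;
    };

    static void timer_fn(struct hrtimer *timer)
    {
            /* The callback only gets the embedded member... */
            struct k_itimer *timr =
                    container_of(timer, struct k_itimer, it.real.timer);

            /* ...but can reach the whole enclosing timer object. */
            printf("fired: timer id %d\n", timr->it_id);
    }

    int main(void)
    {
            struct k_itimer t = { .it_id = 7 };

            timer_fn(&t.it.real.timer);
            return 0;
    }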
|
D | time.c |
    861  int get_itimerspec64(struct itimerspec64 *it,  in get_itimerspec64() argument
    866  ret = get_timespec64(&it->it_interval, &uit->it_interval);  in get_itimerspec64()
    870  ret = get_timespec64(&it->it_value, &uit->it_value);  in get_itimerspec64()
    876  int put_itimerspec64(const struct itimerspec64 *it,  in put_itimerspec64() argument
    881  ret = put_timespec64(&it->it_interval, &uit->it_interval);  in put_itimerspec64()
    885  ret = put_timespec64(&it->it_value, &uit->it_value);  in put_itimerspec64()
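get_itimerspec64() and put_itimerspec64() convert the two timespec halves of an itimerspec across the user/kernel boundary, returning on the first failed copy. A userspace model of that early-return shape, with memcpy standing in for the uaccess helpers (illustrative only):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <time.h>

    struct itimerspec64 {               /* stand-in for the kernel type */
            struct timespec it_interval;
            struct timespec it_value;
    };

    /* Models get_timespec64(): copy one timespec, 0 on success. */
    static int get_timespec_model(struct timespec *dst,
                                  const struct timespec *src)
    {
            if (!src)
                    return -EFAULT;
            memcpy(dst, src, sizeof(*dst));
            return 0;
    }

    /* Same shape as get_itimerspec64(): both halves, first error wins. */
    static int get_itimerspec_model(struct itimerspec64 *it,
                                    const struct itimerspec64 *uit)
    {
            int ret = get_timespec_model(&it->it_interval, &uit->it_interval);

            if (ret)
                    return ret;
            return get_timespec_model(&it->it_value, &uit->it_value);
    }

    int main(void)
    {
            struct itimerspec64 src = { .it_value = { .tv_sec = 1 } }, dst;

            printf("ret=%d it_value.tv_sec=%ld\n",
                   get_itimerspec_model(&dst, &src),
                   (long)dst.it_value.tv_sec);
            return 0;
    }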
|
D | Kconfig |
    97  rate, even when the CPU doesn't need it.
    129  desired range of dynticks CPUs to use it. This is implemented at
    157  for context tracking and the subsystems that rely on it: RCU
    173  We keep it around for a little while to enforce backward
|
/kernel/cgroup/ |
D | legacy_freezer.c |
    248  struct css_task_iter it;  in update_if_frozen() local
    271  css_task_iter_start(css, 0, &it);  in update_if_frozen()
    273  while ((task = css_task_iter_next(&it))) {  in update_if_frozen()
    288  css_task_iter_end(&it);  in update_if_frozen()
    320  struct css_task_iter it;  in freeze_cgroup() local
    323  css_task_iter_start(&freezer->css, 0, &it);  in freeze_cgroup()
    324  while ((task = css_task_iter_next(&it)))  in freeze_cgroup()
    326  css_task_iter_end(&it);  in freeze_cgroup()
    331  struct css_task_iter it;  in unfreeze_cgroup() local
    334  css_task_iter_start(&freezer->css, 0, &it);  in unfreeze_cgroup()
    [all …]
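Every cgroup hit in this section uses the same three-call iterator protocol: css_task_iter_start(), a css_task_iter_next() loop until NULL, then css_task_iter_end(). A hedged kernel-side sketch of that protocol (only meaningful in-tree; walk_css_tasks() and visit_task() are illustrative, not kernel API):

    #include <linux/cgroup.h>
    #include <linux/printk.h>
    #include <linux/sched.h>

    /* Hypothetical per-task work, not a kernel function. */
    static void visit_task(struct task_struct *task)
    {
            pr_info("visiting pid %d\n", task->pid);
    }

    /* The start/next/end protocol used by update_if_frozen() and friends. */
    static void walk_css_tasks(struct cgroup_subsys_state *css)
    {
            struct css_task_iter it;
            struct task_struct *task;

            css_task_iter_start(css, 0, &it);   /* flags 0: visit every task */
            while ((task = css_task_iter_next(&it)))
                    visit_task(task);
            css_task_iter_end(&it);             /* must pair with _start */
    }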
|
D | cgroup.c |
    242  static void css_task_iter_skip(struct css_task_iter *it,
    883  struct css_task_iter *it, *pos;  in css_set_skip_task_iters() local
    885  list_for_each_entry_safe(it, pos, &cset->task_iters, iters_node)  in css_set_skip_task_iters()
    886  css_task_iter_skip(it, task);  in css_set_skip_task_iters()
    1793  struct css_task_iter *it;  in rebind_subsystems() local
    1823  list_for_each_entry(it, &cset->task_iters, iters_node)  in rebind_subsystems()
    1824  if (it->cset_head == &scgrp->e_csets[ss->id])  in rebind_subsystems()
    1825  it->cset_head = &dcgrp->e_csets[ss->id];  in rebind_subsystems()
    3826  struct css_task_iter it;  in __cgroup_kill() local
    3835  css_task_iter_start(&cgrp->self, CSS_TASK_ITER_PROCS | CSS_TASK_ITER_THREADED, &it);  in __cgroup_kill()
    [all …]
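__cgroup_kill() shows the flagged variant of the same iterator: CSS_TASK_ITER_PROCS visits one thread-group leader per process, and CSS_TASK_ITER_THREADED extends the walk across a threaded subtree. A short kernel-side sketch of that combination (kill_css_procs() is illustrative, not the kernel function):

    #include <linux/cgroup.h>
    #include <linux/sched/signal.h>

    /* One thread-group leader per process, across the threaded subtree. */
    static void kill_css_procs(struct cgroup *cgrp)
    {
            struct css_task_iter it;
            struct task_struct *task;

            css_task_iter_start(&cgrp->self,
                                CSS_TASK_ITER_PROCS | CSS_TASK_ITER_THREADED,
                                &it);
            while ((task = css_task_iter_next(&it)))
                    send_sig(SIGKILL, task, 0); /* illustrative action */
            css_task_iter_end(&it);
    }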
|
D | cgroup-v1.c |
    104  struct css_task_iter it;  in cgroup_transfer_tasks() local
    134  css_task_iter_start(&from->self, 0, &it);  in cgroup_transfer_tasks()
    137  task = css_task_iter_next(&it);  in cgroup_transfer_tasks()
    142  css_task_iter_end(&it);  in cgroup_transfer_tasks()
    338  struct css_task_iter it;  in pidlist_array_load() local
    355  css_task_iter_start(&cgrp->self, 0, &it);  in pidlist_array_load()
    356  while ((tsk = css_task_iter_next(&it))) {  in pidlist_array_load()
    367  css_task_iter_end(&it);  in pidlist_array_load()
    713  struct css_task_iter it;  in cgroupstats_build() local
    737  css_task_iter_start(&cgrp->self, 0, &it);  in cgroupstats_build()
    [all …]
|
D | freezer.c |
    184  struct css_task_iter it;  in cgroup_do_freeze() local
    201  css_task_iter_start(&cgrp->self, 0, &it);  in cgroup_do_freeze()
    202  while ((task = css_task_iter_next(&it))) {  in cgroup_do_freeze()
    211  css_task_iter_end(&it);  in cgroup_do_freeze()
|
D | cpuset.c |
    980  struct css_task_iter it;  in dl_update_tasks_root_domain() local
    986  css_task_iter_start(&cs->css, 0, &it);  in dl_update_tasks_root_domain()
    988  while ((task = css_task_iter_next(&it)))  in dl_update_tasks_root_domain()
    991  css_task_iter_end(&it);  in dl_update_tasks_root_domain()
    1140  struct css_task_iter it;  in update_tasks_cpumask() local
    1144  css_task_iter_start(&cs->css, 0, &it);  in update_tasks_cpumask()
    1145  while ((task = css_task_iter_next(&it))) {  in update_tasks_cpumask()
    1154  css_task_iter_end(&it);  in update_tasks_cpumask()
    1786  struct css_task_iter it;  in update_tasks_nodemask() local
    1803  css_task_iter_start(&cs->css, 0, &it);  in update_tasks_nodemask()
    [all …]
|
/kernel/sched/ |
D | idle.c |
    353  struct idle_timer *it = container_of(timer, struct idle_timer, timer);  in idle_inject_timer_fn() local
    355  WRITE_ONCE(it->done, 1);  in idle_inject_timer_fn()
    363  struct idle_timer it;  in play_idle_precise() local
    381  it.done = 0;  in play_idle_precise()
    382  hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);  in play_idle_precise()
    383  it.timer.function = idle_inject_timer_fn;  in play_idle_precise()
    384  hrtimer_start(&it.timer, ns_to_ktime(duration_ns),  in play_idle_precise()
    387  while (!READ_ONCE(it.done))  in play_idle_precise()
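play_idle_precise() shows the on-stack hrtimer pattern: the timer lives in the caller's frame, and its callback flips a done flag that the caller polls with READ_ONCE(). A hedged kernel-side sketch of the same shape (wait_ns() is illustrative; the real loop runs the cpuidle path, not cpu_relax()):

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>
    #include <linux/sched.h>

    struct idle_timer {
            struct hrtimer timer;
            int done;
    };

    static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
    {
            struct idle_timer *it =
                    container_of(timer, struct idle_timer, timer);

            WRITE_ONCE(it->done, 1);    /* paired with READ_ONCE() below */
            return HRTIMER_NORESTART;
    }

    static void wait_ns(u64 duration_ns)    /* illustrative wrapper */
    {
            struct idle_timer it = { .done = 0 };

            hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC,
                                  HRTIMER_MODE_REL_HARD);
            it.timer.function = idle_inject_timer_fn;
            hrtimer_start(&it.timer, ns_to_ktime(duration_ns),
                          HRTIMER_MODE_REL_PINNED_HARD);

            while (!READ_ONCE(it.done))
                    cpu_relax();        /* the real loop idles the CPU here */

            hrtimer_cancel(&it.timer);
            destroy_hrtimer_on_stack(&it.timer);
    }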
|
D | rt.c |
    2679  struct css_task_iter it;  in tg_has_rt_tasks() local
    2688  css_task_iter_start(&tg->css, 0, &it);  in tg_has_rt_tasks()
    2689  while (!ret && (task = css_task_iter_next(&it)))  in tg_has_rt_tasks()
    2691  css_task_iter_end(&it);  in tg_has_rt_tasks()
|
D | core.c |
    1726  struct css_task_iter it;  in uclamp_update_active_tasks() local
    1729  css_task_iter_start(css, 0, &it);  in uclamp_update_active_tasks()
    1730  while ((p = css_task_iter_next(&it)))  in uclamp_update_active_tasks()
    1732  css_task_iter_end(&it);  in uclamp_update_active_tasks()
|
/kernel/trace/ |
D | Kconfig |
    110  # options do not appear when something else selects it. We need the two options
    170  tracing is enabled by the administrator. If it's runtime disabled
    248  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.
    253  then it will not have any overhead while the stack tracer
    326  to be scheduled in, starting from the point it has woken up.
    338  time, this tracer will detect it. This is useful for testing
    355  When the tracer is not running, it has no effect on the system,
    356  but when it is running, it can cause the system to be
    361  file. Every time a latency is greater than tracing_thresh, it will
    482  trace recording, as it needs to add some checks to synchronize
    [all …]
|
/kernel/bpf/ |
D | Kconfig |
    71  still reenable it by setting it to 0 later on, or permanently
    72  disable it by setting it to 1 (from which no other transition to
|
D | core.c |
    759  unsigned int it = 0;  in bpf_get_kallsym() local
    767  if (it++ != symnum)  in bpf_get_kallsym()
|
/kernel/rcu/ |
D | Kconfig.debug |
    16  Enable RCU lockdep checking for list usages. By default it is
    19  false-positive splats, we keep it default disabled but once all
    80  Say M if you want to build it as a module instead.
    124  grace periods, making them as short as it can. This limits
    128  But in conjunction with tools like KASAN, it can be helpful
|
/kernel/power/ |
D | Kconfig |
    48  system and powers it off; and restores that checkpoint on reboot.
    57  In principle it does not require ACPI or APM, although for example
    58  ACPI will be used for the final steps when it is available. One
    65  have it detect the saved image, restore memory state from it, and
    136  non-zero numbered CPU, it may define ARCH_SUSPEND_NONZERO_CPU. This
    198  make it wake up a few seconds later using an RTC wakeup alarm.
    202  linked, ensuring that it's available when this test runs.
    250  machine, reboot it and then run
    274  manpage ("man 8 hdparm") for that), and it doesn't turn off
|
/kernel/ |
D | Kconfig.preempt |
    31  low priority process to voluntarily preempt itself even if it
    49  even if it is in kernel mode executing a system call and would
    117  SCHED_CORE is default disabled. When it is enabled and unused,
|
/kernel/dma/ |
D | Kconfig |
    159  default, but it can be enabled by passing cma=size[MG] to the kernel.
    170  If 0 percent is selected, CMA is disabled by default, but it can be
    199  for larger buffers it is just a memory waste. With this parameter you can
|
/kernel/debug/kdb/ |
D | kdb_cmds |
    5  # be safe. Global breakpoint commands affect each cpu as it is booted.
|
/kernel/bpf/preload/ |
D | Kconfig |
    10  # The dependency on !COMPILE_TEST prevents it from being enabled
|