/kernel/time/

posix-cpu-timers.c
  115  return pid_task(timer->it.cpu.pid, clock_pid_type(timer->it_clock));  in cpu_timer_task_rcu()
  124  u64 delta, incr, expires = timer->it.cpu.node.expires;  in bump_cpu_timer()
  144  timer->it.cpu.node.expires += incr;  in bump_cpu_timer()
  148  return timer->it.cpu.node.expires;  in bump_cpu_timer()
  411  timerqueue_init(&new_timer->it.cpu.node);  in posix_cpu_timer_create()
  412  new_timer->it.cpu.pid = get_pid(pid);  in posix_cpu_timer_create()
  454  struct cpu_timer *ctmr = &timer->it.cpu;  in disarm_timer()
  474  struct cpu_timer *ctmr = &timer->it.cpu;  in posix_cpu_timer_del()
  497  if (timer->it.cpu.firing)  in posix_cpu_timer_del()
  560  struct cpu_timer *ctmr = &timer->it.cpu;  in arm_timer()
  [all …]
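These hits are the kernel side of process/thread CPU-clock POSIX timers. For orientation, a minimal user-space sketch that ends up in posix_cpu_timer_create() above; the handler name and the one-second value are illustrative only:

    #include <signal.h>
    #include <stdio.h>
    #include <time.h>

    static volatile sig_atomic_t fired;

    static void on_tick(int sig) { fired = 1; }

    int main(void)
    {
        timer_t tid;
        struct sigevent sev = {
            .sigev_notify = SIGEV_SIGNAL,
            .sigev_signo  = SIGALRM,
        };
        struct itimerspec its = { .it_value = { .tv_sec = 1 } };

        signal(SIGALRM, on_tick);
        /* CLOCK_PROCESS_CPUTIME_ID routes to posix_cpu_timer_create(). */
        if (timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid))
            return 1;
        timer_settime(tid, 0, &its, NULL);
        while (!fired)
            ;   /* spin so the process CPU clock actually advances */
        puts("CPU-time timer fired");
        timer_delete(tid);
        return 0;
    }

Build with -lrt on older glibc. The busy loop matters: a sleeping process accrues no CPU time, so a CPU-clock timer would never expire.
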
itimer.c
  51   struct cpu_itimer *it = &tsk->signal->it[clock_id];  in get_cpu_itimer() local
  55   val = it->expires;  in get_cpu_itimer()
  56   interval = it->incr;  in get_cpu_itimer()
  173  struct cpu_itimer *it = &tsk->signal->it[clock_id];  in set_cpu_itimer() local
  180  oval = it->expires;  in set_cpu_itimer()
  181  ointerval = it->incr;  in set_cpu_itimer()
  187  it->expires = nval;  in set_cpu_itimer()
  188  it->incr = ninterval;  in set_cpu_itimer()
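get_cpu_itimer()/set_cpu_itimer() back the ITIMER_VIRTUAL and ITIMER_PROF cases of getitimer(2)/setitimer(2): the expires/incr pair stored at hit lines 187-188 is exactly the it_value/it_interval pair a caller passes in. A small sketch that drives them from user space (the 100 ms interval is arbitrary):

    #include <signal.h>
    #include <stdio.h>
    #include <sys/time.h>

    static volatile sig_atomic_t ticks;

    static void on_vtalrm(int sig) { ticks++; }

    int main(void)
    {
        struct itimerval iv = {
            .it_value    = { .tv_usec = 100 * 1000 },
            .it_interval = { .tv_usec = 100 * 1000 },
        };
        struct itimerval left;

        signal(SIGVTALRM, on_vtalrm);
        setitimer(ITIMER_VIRTUAL, &iv, NULL);   /* kernel: set_cpu_itimer() */
        while (ticks < 5)
            ;                                   /* burn user-mode CPU time */
        getitimer(ITIMER_VIRTUAL, &left);       /* kernel: get_cpu_itimer() */
        printf("%ld.%06ld s of CPU time to next tick\n",
               (long)left.it_value.tv_sec, (long)left.it_value.tv_usec);
        return 0;
    }
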
alarmtimer.c
  564  it.alarm.alarmtimer);  in alarm_handle_timer()
  597  struct alarm *alarm = &timr->it.alarm.alarmtimer;  in alarm_timer_rearm()
  610  struct alarm *alarm = &timr->it.alarm.alarmtimer;  in alarm_timer_forward()
  622  struct alarm *alarm = &timr->it.alarm.alarmtimer;  in alarm_timer_remaining()
  633  return alarm_try_to_cancel(&timr->it.alarm.alarmtimer);  in alarm_timer_try_to_cancel()
  646  hrtimer_cancel_wait_running(&timr->it.alarm.alarmtimer.timer);  in alarm_timer_wait_running()
  659  struct alarm *alarm = &timr->it.alarm.alarmtimer;  in alarm_timer_arm()
  667  alarm_start(&timr->it.alarm.alarmtimer, expires);  in alarm_timer_arm()
  739  alarm_init(&new_timer->it.alarm.alarmtimer, type, alarm_handle_timer);  in alarm_timer_create()
posix-timers.c
  295  struct hrtimer *timer = &timr->it.real.timer;  in common_hrtimer_rearm()
  373  timr = container_of(timer, struct k_itimer, it.real.timer);  in posix_timer_fn()
  493  hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);  in common_timer_create()
  642  struct hrtimer *timer = &timr->it.real.timer;  in common_hrtimer_remaining()
  649  struct hrtimer *timer = &timr->it.real.timer;  in common_hrtimer_forward()
  796  struct hrtimer *timer = &timr->it.real.timer;  in common_hrtimer_arm()
  812  hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);  in common_hrtimer_arm()
  813  timr->it.real.timer.function = posix_timer_fn;  in common_hrtimer_arm()
  825  return hrtimer_try_to_cancel(&timr->it.real.timer);  in common_hrtimer_try_to_cancel()
  830  hrtimer_cancel_wait_running(&timer->it.real.timer);  in common_timer_wait_running()
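Across the three files above, "it" is the per-type payload union in struct k_itimer: hrtimer-backed timers use it.real, CPU-clock timers it.cpu, and alarmtimers it.alarm. An abridged sketch of that layout, plus the container_of() recovery seen at posix-timers.c line 373 (names suffixed _sketch to flag the simplification; the real struct in include/linux/posix-timers.h carries more fields):

    #include <linux/alarmtimer.h>
    #include <linux/container_of.h>
    #include <linux/hrtimer.h>
    #include <linux/posix-timers.h>

    struct k_itimer_sketch {
        clockid_t it_clock;
        union {
            struct {
                struct hrtimer timer;     /* it.real.timer */
            } real;
            struct cpu_timer cpu;         /* it.cpu */
            struct {
                struct alarm alarmtimer;  /* it.alarm.alarmtimer */
            } alarm;
        } it;
    };

    /* The expiry callback only receives the embedded hrtimer;
     * container_of() walks back to the enclosing timer, as
     * posix_timer_fn() does for the real k_itimer. */
    static enum hrtimer_restart expiry_sketch(struct hrtimer *timer)
    {
        struct k_itimer_sketch *timr =
            container_of(timer, struct k_itimer_sketch, it.real.timer);

        /* ... deliver the signal carried by timr ... */
        return HRTIMER_NORESTART;
    }
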
time.c
  861  int get_itimerspec64(struct itimerspec64 *it,  in get_itimerspec64() argument
  866  ret = get_timespec64(&it->it_interval, &uit->it_interval);  in get_itimerspec64()
  870  ret = get_timespec64(&it->it_value, &uit->it_value);  in get_itimerspec64()
  876  int put_itimerspec64(const struct itimerspec64 *it,  in put_itimerspec64() argument
  881  ret = put_timespec64(&it->it_interval, &uit->it_interval);  in put_itimerspec64()
  885  ret = put_timespec64(&it->it_value, &uit->it_value);  in put_itimerspec64()
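get_itimerspec64()/put_itimerspec64() marshal the two timespec fields of an itimerspec between user and kernel space on the timer_settime(2)/timer_gettime(2) paths. A sketch of the user-visible shape they copy (timings arbitrary):

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        timer_t tid;
        /* it_value: time to first expiry; it_interval: period after that. */
        struct itimerspec its = {
            .it_value    = { .tv_sec  = 2 },
            .it_interval = { .tv_nsec = 500000000 },
        };
        struct itimerspec cur;

        if (timer_create(CLOCK_MONOTONIC, NULL, &tid))  /* default SIGALRM */
            return 1;
        timer_settime(tid, 0, &its, NULL);  /* kernel side: get_itimerspec64() */
        timer_gettime(tid, &cur);           /* kernel side: put_itimerspec64() */
        printf("%lld.%09ld s to first expiry\n",
               (long long)cur.it_value.tv_sec, cur.it_value.tv_nsec);
        timer_delete(tid);
        return 0;
    }
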
Kconfig
  106  rate, even when the CPU doesn't need it.
  138  desired range of dynticks CPUs to use it. This is implemented at
  172  for user context tracking and the subsystems that rely on it: RCU
  188  We keep it around for a little while to enforce backward
/kernel/cgroup/

legacy_freezer.c
  260  struct css_task_iter it;  in update_if_frozen() local
  283  css_task_iter_start(css, 0, &it);  in update_if_frozen()
  285  while ((task = css_task_iter_next(&it))) {  in update_if_frozen()
  292  css_task_iter_end(&it);  in update_if_frozen()
  324  struct css_task_iter it;  in freeze_cgroup() local
  327  css_task_iter_start(&freezer->css, 0, &it);  in freeze_cgroup()
  328  while ((task = css_task_iter_next(&it)))  in freeze_cgroup()
  330  css_task_iter_end(&it);  in freeze_cgroup()
  335  struct css_task_iter it;  in unfreeze_cgroup() local
  338  css_task_iter_start(&freezer->css, 0, &it);  in unfreeze_cgroup()
  [all …]
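The css_task_iter_start()/css_task_iter_next()/css_task_iter_end() triple seen here is the standard way to walk every task attached to a cgroup subsystem state; the same idiom repeats in cgroup.c, cgroup-v1.c, freezer.c, cpuset.c and sched/rt.c below. A minimal kernel-style sketch (walk_css_tasks() is a made-up name, and the caller is assumed to hold a valid reference on the css):

    #include <linux/cgroup.h>
    #include <linux/printk.h>
    #include <linux/sched.h>

    static void walk_css_tasks(struct cgroup_subsys_state *css)
    {
        struct css_task_iter it;
        struct task_struct *task;

        css_task_iter_start(css, 0, &it);        /* 0 = every task; or CSS_TASK_ITER_* flags */
        while ((task = css_task_iter_next(&it)))
            pr_info("pid %d\n", task->pid);      /* task is safe to use inside the loop */
        css_task_iter_end(&it);                  /* releases the iterator's state */
    }
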
cgroup.c
  245   static void css_task_iter_skip(struct css_task_iter *it,
  884   struct css_task_iter *it, *pos;  in css_set_skip_task_iters() local
  886   list_for_each_entry_safe(it, pos, &cset->task_iters, iters_node)  in css_set_skip_task_iters()
  887   css_task_iter_skip(it, task);  in css_set_skip_task_iters()
  1835  struct css_task_iter *it;  in rebind_subsystems() local
  1865  list_for_each_entry(it, &cset->task_iters, iters_node)  in rebind_subsystems()
  1866  if (it->cset_head == &scgrp->e_csets[ss->id])  in rebind_subsystems()
  1867  it->cset_head = &dcgrp->e_csets[ss->id];  in rebind_subsystems()
  3958  struct css_task_iter it;  in __cgroup_kill() local
  3967  css_task_iter_start(&cgrp->self, CSS_TASK_ITER_PROCS | CSS_TASK_ITER_THREADED, &it);  in __cgroup_kill()
  [all …]
cgroup-v1.c
  99   struct css_task_iter it;  in cgroup_transfer_tasks() local
  129  css_task_iter_start(&from->self, 0, &it);  in cgroup_transfer_tasks()
  132  task = css_task_iter_next(&it);  in cgroup_transfer_tasks()
  137  css_task_iter_end(&it);  in cgroup_transfer_tasks()
  333  struct css_task_iter it;  in pidlist_array_load() local
  350  css_task_iter_start(&cgrp->self, 0, &it);  in pidlist_array_load()
  351  while ((tsk = css_task_iter_next(&it))) {  in pidlist_array_load()
  362  css_task_iter_end(&it);  in pidlist_array_load()
  705  struct css_task_iter it;  in cgroupstats_build() local
  726  css_task_iter_start(&cgrp->self, 0, &it);  in cgroupstats_build()
  [all …]
freezer.c
  184  struct css_task_iter it;  in cgroup_do_freeze() local
  201  css_task_iter_start(&cgrp->self, 0, &it);  in cgroup_do_freeze()
  202  while ((task = css_task_iter_next(&it))) {  in cgroup_do_freeze()
  211  css_task_iter_end(&it);  in cgroup_do_freeze()
cpuset.c
  1107  struct css_task_iter it;  in dl_update_tasks_root_domain() local
  1113  css_task_iter_start(&cs->css, 0, &it);  in dl_update_tasks_root_domain()
  1115  while ((task = css_task_iter_next(&it)))  in dl_update_tasks_root_domain()
  1118  css_task_iter_end(&it);  in dl_update_tasks_root_domain()
  1268  struct css_task_iter it;  in update_tasks_cpumask() local
  1272  css_task_iter_start(&cs->css, 0, &it);  in update_tasks_cpumask()
  1273  while ((task = css_task_iter_next(&it))) {  in update_tasks_cpumask()
  1285  css_task_iter_end(&it);  in update_tasks_cpumask()
  2032  struct css_task_iter it;  in update_tasks_nodemask() local
  2049  css_task_iter_start(&cs->css, 0, &it);  in update_tasks_nodemask()
  [all …]
/kernel/sched/

idle.c
  350  struct idle_timer *it = container_of(timer, struct idle_timer, timer);  in idle_inject_timer_fn() local
  352  WRITE_ONCE(it->done, 1);  in idle_inject_timer_fn()
  360  struct idle_timer it;  in play_idle_precise() local
  378  it.done = 0;  in play_idle_precise()
  379  hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);  in play_idle_precise()
  380  it.timer.function = idle_inject_timer_fn;  in play_idle_precise()
  381  hrtimer_start(&it.timer, ns_to_ktime(duration_ns),  in play_idle_precise()
  384  while (!READ_ONCE(it.done))  in play_idle_precise()
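idle.c pairs an on-stack hrtimer with a done flag: the callback publishes completion with WRITE_ONCE() and the waiter polls with READ_ONCE(). A condensed sketch of that shape (names suffixed _sketch; the real play_idle_precise() enters the idle loop instead of spinning and arms the timer in a pinned hard-irq mode):

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>
    #include <linux/sched.h>

    struct idle_timer_sketch {
        struct hrtimer timer;
        int done;
    };

    static enum hrtimer_restart idle_timer_fn_sketch(struct hrtimer *timer)
    {
        struct idle_timer_sketch *it =
            container_of(timer, struct idle_timer_sketch, timer);

        WRITE_ONCE(it->done, 1);    /* publish completion to the waiter */
        return HRTIMER_NORESTART;
    }

    static void wait_ns_sketch(u64 duration_ns)
    {
        struct idle_timer_sketch it = { .done = 0 };

        hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
        it.timer.function = idle_timer_fn_sketch;
        hrtimer_start(&it.timer, ns_to_ktime(duration_ns), HRTIMER_MODE_REL_HARD);

        while (!READ_ONCE(it.done))
            cpu_relax();            /* the real code idles here instead */

        hrtimer_cancel(&it.timer);
        destroy_hrtimer_on_stack(&it.timer);  /* required for on-stack timers */
    }
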
rt.c
  2842  struct css_task_iter it;  in tg_has_rt_tasks() local
  2851  css_task_iter_start(&tg->css, 0, &it);  in tg_has_rt_tasks()
  2852  while (!ret && (task = css_task_iter_next(&it)))  in tg_has_rt_tasks()
  2854  css_task_iter_end(&it);  in tg_has_rt_tasks()
/kernel/trace/

Kconfig
  93   An architecture selects this if it sorts the mcount_loc section
  140  # options do not appear when something else selects it. We need the two options
  201  tracing is enabled by the administrator. If it's runtime disabled
  295  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.
  300  then it will not have any overhead while the stack tracer
  373  to be scheduled in, starting from the point it has woken up.
  385  time, this tracer will detect it. This is useful for testing
  402  When the tracer is not running, it has no effect on the system,
  403  but when it is running, it can cause the system to be
  408  file. Every time a latency is greater than tracing_thresh, it will
  [all …]
/kernel/bpf/

Kconfig
  78  still reenable it by setting it to 0 later on, or permanently
  79  disable it by setting it to 1 (from which no other transition to
core.c
  756  unsigned int it = 0;  in bpf_get_kallsym() local
  764  if (it++ != symnum)  in bpf_get_kallsym()
/kernel/power/

Kconfig
  48   system and powers it off; and restores that checkpoint on reboot.
  57   In principle it does not require ACPI or APM, although for example
  58   ACPI will be used for the final steps when it is available. One
  65   have it detect the saved image, restore memory state from it, and
  136  non-zero numbered CPU, it may define ARCH_SUSPEND_NONZERO_CPU. This
  156  enabled. In particular, only enable this if it is very common to be
  218  make it wake up a few seconds later using an RTC wakeup alarm.
  222  linked, ensuring that it's available when this test runs.
  270  machine, reboot it and then run
  294  manpage ("man 8 hdparm") for that), and it doesn't turn off
/kernel/rcu/

Kconfig.debug
  16   Enable RCU lockdep checking for list usages. By default it is
  19   false-positive splats, we keep it default disabled but once all
  71   Say M if you want to build it as a module instead.
  128  grace periods, making them as short as it can. This limits
  132  But in conjunction with tools like KASAN, it can be helpful
Kconfig
  290  it is your responsibility to ensure that latency-sensitive
  323  Use rcutree.enable_rcu_lazy=0 to turn it off at boot time.
  330  Allows building the kernel with CONFIG_RCU_LAZY=y yet keep it default
  332  it back on.
  350  higher than this value if the kernel takes a long time to initialize but it
/kernel/module/

Kconfig
  46   kernel believes it is unsafe: the kernel will remove the module
  47   without waiting for anyone to stop using it (using the -f option to
  66   Saying Y here makes it sometimes possible to use modules
  78   supports it.
  85   sum of the source files which made it. This helps maintainers
  209  For modules inside an initrd or initramfs, it's more efficient to
  269  but it creates consistency between symbols defining namespaces and
  279  When kernel code requests a module, it does so by calling
/kernel/trace/rv/

Kconfig
  22  actual execution, comparing it against a formal specification of
  48  The model is broken on purpose: it serves to test reactors.
/kernel/

Kconfig.preempt
  44   low priority process to voluntarily preempt itself even if it
  60   even if it is in kernel mode executing a system call and would
  132  SCHED_CORE is default disabled. When it is enabled and unused,
/kernel/dma/

Kconfig
  154  default, but it can be enabled by passing cma=size[MG] to the kernel.
  165  If 0 percent is selected, CMA is disabled by default, but it can be
  194  for larger buffers it is just a memory waste. With this parameter you can
/kernel/debug/kdb/

kdb_cmds
  5  # be safe. Global breakpoint commands affect each cpu as it is booted.
/kernel/bpf/preload/

Kconfig
  10  # The dependency on !COMPILE_TEST prevents it from being enabled