/kernel/trace/
trace_power.c
     52  struct power_trace *it;                                            in power_print_line() (local)
     58  it = &field->state_data;                                           in power_print_line()
     59  stamp = ktime_to_timespec(it->stamp);                              in power_print_line()
     60  duration = ktime_to_timespec(ktime_sub(it->end, it->stamp));       in power_print_line()
     63  if (it->type == POWER_CSTATE)                                      in power_print_line()
     67  it->state, iter->cpu,                                              in power_print_line()
     70  if (it->type == POWER_PSTATE)                                      in power_print_line()
     74  it->state, iter->cpu);                                             in power_print_line()
     98  void trace_power_start(struct power_trace *it, unsigned int type,  in trace_power_start() (argument)
    104  memset(it, 0, sizeof(struct power_trace));                         in trace_power_start()
    [all …]
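The power_print_line() hits above compute an event's duration by subtracting one ktime stamp from another and converting the result with ktime_to_timespec(). A minimal userspace analogue of that stamp/end arithmetic, using POSIX clock_gettime() in place of the kernel's ktime helpers; the timespec_sub() helper here is hypothetical, not a kernel or libc API:

```c
#include <stdio.h>
#include <time.h>

/* Userspace analogue of ktime_sub()/ktime_to_timespec(): the duration
 * of an event is the end stamp minus the start stamp. timespec_sub()
 * is a hypothetical helper for this sketch. */
static struct timespec timespec_sub(struct timespec end, struct timespec start)
{
    struct timespec d;

    d.tv_sec  = end.tv_sec - start.tv_sec;
    d.tv_nsec = end.tv_nsec - start.tv_nsec;
    if (d.tv_nsec < 0) {            /* borrow one second */
        d.tv_sec  -= 1;
        d.tv_nsec += 1000000000L;
    }
    return d;
}

int main(void)
{
    struct timespec stamp, end, duration;

    clock_gettime(CLOCK_MONOTONIC, &stamp);
    /* ... the traced power event would happen here ... */
    clock_gettime(CLOCK_MONOTONIC, &end);

    duration = timespec_sub(end, stamp);
    printf("duration: %ld.%09ld s\n",
           (long)duration.tv_sec, duration.tv_nsec);
    return 0;
}
```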
trace_hw_branches.c
     89  struct hw_branch_entry *it;                                in bts_trace_print_line() (local)
     91  trace_assign_type(it, entry);                              in bts_trace_print_line()
     96  it->from, it->to) &&                                       in bts_trace_print_line()
     97  (!it->from ||                                              in bts_trace_print_line()
     98  seq_print_ip_sym(seq, it->from, /* sym_flags = */ 0)) &&   in bts_trace_print_line()
Kconfig
     63  tracing is enabled by the administrator. If it's runtime disabled
    142  to be scheduled in, starting from the point it has woken up.
    159  This tracer helps developers to optimize boot times: it records
    192  taken in the kernel is recorded whether it hit or miss.
    245  kernel and displays it in debugfs/tracing/stack_trace.
    250  then it will not have any overhead while the stack tracer
    286  were made. If so, it runs stop_machine (stops all CPUS)
    325  as it will write garbage to IO memory starting at a given address.
    326  However, it should be safe to use on e.g. unused portion of VRAM.
/kernel/
posix-cpu-timers.c
    128  if (timer->it.cpu.incr.sched == 0)                         in bump_cpu_timer()
    134  if (now.sched < timer->it.cpu.expires.sched)               in bump_cpu_timer()
    136  incr = timer->it.cpu.incr.sched;                           in bump_cpu_timer()
    137  delta = now.sched + incr - timer->it.cpu.expires.sched;    in bump_cpu_timer()
    144  timer->it.cpu.expires.sched += incr;                       in bump_cpu_timer()
    151  if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu))        in bump_cpu_timer()
    153  incr = timer->it.cpu.incr.cpu;                             in bump_cpu_timer()
    155  timer->it.cpu.expires.cpu);                                in bump_cpu_timer()
    162  timer->it.cpu.expires.cpu =                                in bump_cpu_timer()
    163  cputime_add(timer->it.cpu.expires.cpu, incr);              in bump_cpu_timer()
    [all …]
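The bump_cpu_timer() hits sketch the interval-timer catch-up logic: when a periodic timer's expiry has fallen behind `now`, the expiry is advanced by whole increments until it lies in the future again, and each skipped period counts as an overrun. A simplified userspace illustration of that arithmetic; the real function advances in power-of-two batches to bound the loop and handles both the sched-clock and cputime cases, which this sketch omits:

```c
#include <stdio.h>

/* Simplified echo of bump_cpu_timer(): advance 'expires' past 'now'
 * in whole 'incr' steps, counting how many periods were skipped. */
static long bump_timer(long long *expires, long long now, long long incr)
{
    long overrun = 0;

    if (incr == 0)              /* one-shot timer: nothing to do */
        return 0;
    if (now < *expires)         /* not expired yet */
        return 0;

    /* delta = now + incr - expires: how far past expiry we are,
     * rounded up to the next period boundary (cf. the hit at 137). */
    long long delta = now + incr - *expires;

    while (delta >= incr) {     /* kernel uses doubling steps here */
        *expires += incr;
        overrun++;
        delta -= incr;
    }
    return overrun;
}

int main(void)
{
    long long expires = 100;
    long n = bump_timer(&expires, 350, 100);    /* three periods late */

    printf("overrun=%ld new expires=%lld\n", n, expires);
    return 0;
}
```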
cgroup_freezer.c
    225  struct cgroup_iter it;                                     in update_freezer_state() (local)
    229  cgroup_iter_start(cgroup, &it);                            in update_freezer_state()
    230  while ((task = cgroup_iter_next(cgroup, &it))) {           in update_freezer_state()
    247  cgroup_iter_end(cgroup, &it);                              in update_freezer_state()
    278  struct cgroup_iter it;                                     in try_to_freeze_cgroup() (local)
    283  cgroup_iter_start(cgroup, &it);                            in try_to_freeze_cgroup()
    284  while ((task = cgroup_iter_next(cgroup, &it))) {           in try_to_freeze_cgroup()
    292  cgroup_iter_end(cgroup, &it);                              in try_to_freeze_cgroup()
    299  struct cgroup_iter it;                                     in unfreeze_cgroup() (local)
    302  cgroup_iter_start(cgroup, &it);                            in unfreeze_cgroup()
    [all …]
posix-timers.c
    196  hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);   in common_timer_create()
    274  struct hrtimer *timer = &timr->it.real.timer;                      in schedule_next_timer()
    276  if (timr->it.real.interval.tv64 == 0)                              in schedule_next_timer()
    281  timr->it.real.interval);                                           in schedule_next_timer()
    363  timr = container_of(timer, struct k_itimer, it.real.timer);        in posix_timer_fn()
    366  if (timr->it.real.interval.tv64 != 0)                              in posix_timer_fn()
    375  if (timr->it.real.interval.tv64 != 0) {                            in posix_timer_fn()
    404  if (timr->it.real.interval.tv64 < kj.tv64)                         in posix_timer_fn()
    410  timr->it.real.interval);                                           in posix_timer_fn()
    625  struct hrtimer *timer = &timr->it.real.timer;                      in common_timer_get()
    [all …]
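The posix_timer_fn() hit at line 363 shows how the hrtimer callback recovers the enclosing k_itimer from the embedded `it.real.timer` field via container_of(). A self-contained userspace sketch of that idiom, with deliberately simplified stand-ins for the kernel structs and the kernel's container_of() minus its type checking:

```c
#include <stddef.h>
#include <stdio.h>

/* Same pointer arithmetic the kernel macro performs. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified stand-ins for struct hrtimer / struct k_itimer. */
struct hrtimer { long expires; };
struct k_itimer {
    int it_id;
    struct { struct hrtimer timer; long interval; } real;
};

/* Like posix_timer_fn(), the callback only receives the embedded
 * timer pointer and must recover the containing object. */
static void timer_fired(struct hrtimer *timer)
{
    struct k_itimer *timr = container_of(timer, struct k_itimer, real.timer);

    printf("timer %d fired, interval %ld\n", timr->it_id, timr->real.interval);
}

int main(void)
{
    struct k_itimer t = { .it_id = 7, .real = { { 100 }, 250 } };

    timer_fired(&t.real.timer);
    return 0;
}
```

This is why the hrtimer needs no back-pointer to its owner: the offset of the embedded member is known at compile time, so the enclosing structure is recoverable from the member's address alone.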
cgroup.c
   1757  struct cgroup_iter *it)                                             in cgroup_advance_iter() (argument)
   1759  struct list_head *l = it->cg_link;                                  in cgroup_advance_iter()
   1767  it->cg_link = NULL;                                                 in cgroup_advance_iter()
   1773  it->cg_link = l;                                                    in cgroup_advance_iter()
   1774  it->task = cg->tasks.next;                                          in cgroup_advance_iter()
   1805  void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it) in cgroup_iter_start() (argument)
   1816  it->cg_link = &cgrp->css_sets;                                      in cgroup_iter_start()
   1817  cgroup_advance_iter(cgrp, it);                                      in cgroup_iter_start()
   1821  struct cgroup_iter *it)                                             in cgroup_iter_next() (argument)
   1824  struct list_head *l = it->task;                                     in cgroup_iter_next()
   [all …]
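Taken together with the cgroup_freezer.c and cpuset.c hits, these lines show the cgroup task-iteration protocol: cgroup_iter_start(), a loop on cgroup_iter_next() until it returns NULL, then cgroup_iter_end(). A minimal userspace sketch of that protocol, with hypothetical stand-in types in place of the kernel's css_set machinery and locking:

```c
#include <stdio.h>

/* Hypothetical stand-ins; the real iterator walks css_set links. */
struct task { const char *comm; struct task *next; };
struct cgroup { struct task *tasks; };
struct cgroup_iter { struct task *pos; };

static void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it)
{
    it->pos = cgrp->tasks;          /* kernel: also takes css_set_lock */
}

static struct task *cgroup_iter_next(struct cgroup *cgrp, struct cgroup_iter *it)
{
    struct task *t = it->pos;

    if (t)
        it->pos = t->next;          /* advance for the next call */
    return t;                       /* NULL terminates the loop */
}

static void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it)
{
    it->pos = NULL;                 /* kernel: drops css_set_lock */
}

int main(void)
{
    struct task c = { "c", NULL }, b = { "b", &c }, a = { "a", &b };
    struct cgroup cg = { &a };
    struct cgroup_iter it;
    struct task *t;

    /* Same shape as the loops in cgroup_freezer.c and cpuset.c. */
    cgroup_iter_start(&cg, &it);
    while ((t = cgroup_iter_next(&cg, &it)))
        printf("%s\n", t->comm);
    cgroup_iter_end(&cg, &it);
    return 0;
}
```

The start/end pairing matters in the kernel because cgroup_iter_start() takes css_set_lock and cgroup_iter_end() releases it, which is why every loop in the hits above is bracketed by the pair.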
Kconfig.preempt
     29  low priority process to voluntarily preempt itself even if it
     43  even if it is in kernel mode executing a system call and would
cpuset.c
   1046  struct cgroup_iter it;                                     in update_tasks_nodemask() (local)
   1078  cgroup_iter_start(cs->css.cgroup, &it);                    in update_tasks_nodemask()
   1079  while ((p = cgroup_iter_next(cs->css.cgroup, &it))) {      in update_tasks_nodemask()
   1092  cgroup_iter_end(cs->css.cgroup, &it);                      in update_tasks_nodemask()
compat.c
    181  struct compat_itimerval __user *it)                        in compat_sys_getitimer() (argument)
    187  if (!error && put_compat_itimerval(it, &kit))              in compat_sys_getitimer()
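compat_sys_getitimer() follows the usual compat-syscall shape: run the native getitimer path into a kernel struct itimerval, then copy the result out to the 32-bit layout only if the native call succeeded, hence the `!error && put_compat_itimerval(...)` chain at line 187. A hedged userspace illustration of that narrowing copy; the 32/64-bit struct layouts here are illustrative stand-ins, not the kernel definitions:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative native (64-bit) layout, as in struct itimerval. */
struct timeval64   { int64_t tv_sec, tv_usec; };
struct itimerval64 { struct timeval64 it_interval, it_value; };

/* Illustrative 32-bit layout, as in struct compat_itimerval. */
struct timeval32   { int32_t tv_sec, tv_usec; };
struct itimerval32 { struct timeval32 it_interval, it_value; };

/* Field-by-field narrowing copy; nonzero return signals failure, the
 * convention the kernel helper uses for a faulting copy_to_user(). */
static int put_compat_itimerval(struct itimerval32 *dst,
                                const struct itimerval64 *src)
{
    dst->it_interval.tv_sec  = (int32_t)src->it_interval.tv_sec;
    dst->it_interval.tv_usec = (int32_t)src->it_interval.tv_usec;
    dst->it_value.tv_sec     = (int32_t)src->it_value.tv_sec;
    dst->it_value.tv_usec    = (int32_t)src->it_value.tv_usec;
    return 0;   /* the kernel version can fail on a bad user pointer */
}

int main(void)
{
    struct itimerval64 kit = { { 1, 500000 }, { 0, 250000 } };
    struct itimerval32 it;
    int error = 0;          /* pretend the native call succeeded */

    /* Same shape as the hit at line 187: only copy out on success. */
    if (!error && put_compat_itimerval(&it, &kit))
        error = -14;        /* -EFAULT */
    printf("error=%d it_value=%d.%06d\n", error,
           (int)it.it_value.tv_sec, (int)it.it_value.tv_usec);
    return 0;
}
```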
/kernel/power/
Kconfig
     68  machine, reboot it and then run
    102  make it wake up a few seconds later using an RTC wakeup alarm.
    106  linked, ensuring that it's available when this test runs.
    175  back when it should resume.
    183  to the screen and notifies user-space when it should resume.
    192  system and powers it off; and restores that checkpoint on reboot.
    201  In principle it does not require ACPI or APM, although for example
    202  ACPI will be used for the final steps when it is available. One
    209  have it detect the saved image, restore memory state from it, and
    266  manpage ("man 8 hdparm") for that), and it doesn't turn off