/kernel/sched/ |
D | pelt.c |
    111   unsigned long load, unsigned long runnable, int running)  in accumulate_sum() argument
    141   if (running)  in accumulate_sum()
    177   unsigned long load, unsigned long runnable, int running)  in ___update_load_sum() argument
    211   runnable = running = 0;  in ___update_load_sum()
    220   if (!accumulate_sum(delta, sa, load, runnable, running))  in ___update_load_sum()
    317   int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)  in update_rt_rq_load_avg() argument
    320   running,  in update_rt_rq_load_avg()
    321   running,  in update_rt_rq_load_avg()
    322   running)) {  in update_rt_rq_load_avg()
    341   int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)  in update_dl_rq_load_avg() argument
    [all …]
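The pelt.c hits above are the core of per-entity load tracking (PELT): ___update_load_sum() splits elapsed time into roughly 1 ms periods, accumulate_sum() only adds a contribution when the running flag is set, and older periods decay geometrically with a 32-period half-life. The toy userspace program below sketches that idea only; the names, the single load_sum field, and the floating-point decay are simplifications, not the kernel's fixed-point, segment-by-segment math.

    /*
     * Toy sketch of PELT-style accumulation: elapsed time is folded in
     * per ~1 ms period, and the existing sum is aged so that its weight
     * halves every 32 periods.  Not the kernel's implementation.
     */
    #include <math.h>
    #include <stdio.h>

    #define PELT_PERIOD_US    1024      /* one accumulation period */
    #define HALF_LIFE_PERIODS 32        /* weight halves every 32 periods */

    struct toy_avg {
        double load_sum;                /* decayed sum of running time */
    };

    /* Decay factor y chosen so that y^32 == 0.5. */
    static double decay_y(void)
    {
        return pow(0.5, 1.0 / HALF_LIFE_PERIODS);
    }

    /* Fold delta_us microseconds of elapsed time into the average. */
    static void toy_accumulate(struct toy_avg *sa, unsigned int delta_us, int running)
    {
        unsigned int periods = delta_us / PELT_PERIOD_US;

        /* Age the existing sum by the number of elapsed periods. */
        sa->load_sum *= pow(decay_y(), periods);

        /* Only a running entity contributes new time, mirroring the
         * "if (running)" checks visible in accumulate_sum(). */
        if (running)
            sa->load_sum += delta_us;
    }

    int main(void)
    {
        struct toy_avg sa = { 0 };

        toy_accumulate(&sa, 4096, 1);   /* 4 busy periods */
        toy_accumulate(&sa, 32768, 0);  /* 32 idle periods: sum halves */
        printf("load_sum = %f\n", sa.load_sum);
        return 0;
    }

With the 32-period half-life, the second call (32 idle periods) halves the accumulated sum, which is the behaviour the decay constant is chosen for.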
|
D | pelt.h |
    7     int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
    8     int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
    11    int update_irq_load_avg(struct rq *rq, u64 running);
    14    update_irq_load_avg(struct rq *rq, u64 running)  in update_irq_load_avg() argument
    150   update_rt_rq_load_avg(u64 now, struct rq *rq, int running)  in update_rt_rq_load_avg() argument
    156   update_dl_rq_load_avg(u64 now, struct rq *rq, int running)  in update_dl_rq_load_avg() argument
    162   update_irq_load_avg(struct rq *rq, u64 running)  in update_irq_load_avg() argument
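pelt.h pairs each of these declarations with a static inline fallback used when the corresponding tracking is not built in, so call sites compile unchanged either way. A compilable sketch of that idiom, with an invented CONFIG_TOY_AVG option and toy_update_load_avg() standing in for the real functions:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t u64;
    struct rq;                    /* opaque stand-in for the scheduler's runqueue */

    #ifdef CONFIG_TOY_AVG
    /* Real implementation lives in a .c file when the feature is built in. */
    int toy_update_load_avg(u64 now, struct rq *rq, int running);
    #else
    /* Feature compiled out: stub reports "nothing was updated". */
    static inline int toy_update_load_avg(u64 now, struct rq *rq, int running)
    {
        (void)now; (void)rq; (void)running;
        return 0;
    }
    #endif

    int main(void)
    {
        /* Without -DCONFIG_TOY_AVG this resolves to the stub and prints 0. */
        printf("%d\n", toy_update_load_avg(0, (struct rq *)0, 1));
        return 0;
    }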
|
D | core.c |
    1705  bool queued, running;  in do_set_cpus_allowed() local
    1710  running = task_current(rq, p);  in do_set_cpus_allowed()
    1720  if (running)  in do_set_cpus_allowed()
    1727  if (running)  in do_set_cpus_allowed()
    2005  int running, queued;  in wait_task_inactive() local
    2043  running = task_running(rq, p);  in wait_task_inactive()
    2062  if (unlikely(running)) {  in wait_task_inactive()
    4501  int prio, oldprio, queued, running, queue_flag =  in rt_mutex_setprio() local
    4563  running = task_current(rq, p);  in rt_mutex_setprio()
    4566  if (running)  in rt_mutex_setprio()
    [all …]
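do_set_cpus_allowed() and rt_mutex_setprio() above share one shape: record whether the task is queued on its runqueue and whether it is the CPU's current task (task_current()), take it out of both roles, apply the attribute change, then restore whatever was true before. The sketch below reproduces only that shape with invented toy_* types and helpers; the real code holds the runqueue lock and uses the scheduler's own enqueue/dequeue primitives.

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy stand-ins for the scheduler's task and runqueue state. */
    struct toy_task { int prio; bool on_rq; };
    struct toy_rq   { struct toy_task *curr; };

    static void dequeue(struct toy_rq *rq, struct toy_task *p)  { (void)rq; p->on_rq = false; }
    static void enqueue(struct toy_rq *rq, struct toy_task *p)  { (void)rq; p->on_rq = true; }
    static void put_prev(struct toy_rq *rq, struct toy_task *p) { (void)rq; (void)p; }
    static void set_curr(struct toy_rq *rq, struct toy_task *p) { rq->curr = p; }

    /*
     * Change an attribute of a possibly-running task.  Mirrors the
     * queued/running bookkeeping in the hits above: the task is removed
     * from the runqueue (and from "current" duty) before the change and
     * restored afterwards, so the scheduler never sees it half-changed.
     */
    static void change_prio(struct toy_rq *rq, struct toy_task *p, int prio)
    {
        bool queued  = p->on_rq;
        bool running = (rq->curr == p);   /* cf. task_current(rq, p) */

        if (queued)
            dequeue(rq, p);
        if (running)
            put_prev(rq, p);

        p->prio = prio;                   /* the actual attribute change */

        if (queued)
            enqueue(rq, p);
        if (running)
            set_curr(rq, p);
    }

    int main(void)
    {
        struct toy_task t = { .prio = 120, .on_rq = true };
        struct toy_rq rq = { .curr = &t };

        change_prio(&rq, &t, 100);
        printf("prio=%d on_rq=%d curr=%s\n", t.prio, t.on_rq,
               rq.curr == &t ? "yes" : "no");
        return 0;
    }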
|
/kernel/time/ |
D | Kconfig |
    76    This option keeps the tick running periodically at a constant
    106   the CPU is running tasks. Typically this requires running a single
    107   task on the CPU. Chances for running tickless are maximized when
|
D | hrtimer.c |
    1513  base->running == timer)  in hrtimer_active()
    1552  base->running = timer;  in __run_hrtimer()
    1607  WARN_ON_ONCE(base->running != timer);  in __run_hrtimer()
    1608  base->running = NULL;  in __run_hrtimer()
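These hits show why base->running exists: __run_hrtimer() records the timer before invoking its callback and clears the field afterwards, so hrtimer_active() can report a timer as active while its callback runs even though it is no longer enqueued. The sketch below mimics only that bookkeeping with invented toy_* names; the locking and memory-ordering the real code needs for concurrent readers is omitted.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Toy stand-ins for a timer and its per-CPU base. */
    struct toy_timer { bool enqueued; void (*fn)(struct toy_timer *); };
    struct toy_base  { struct toy_timer *running; };

    static struct toy_base base;

    /* Active means: still queued, or currently executing its callback. */
    static bool toy_timer_active(struct toy_base *b, struct toy_timer *t)
    {
        return t->enqueued || b->running == t;
    }

    static void toy_run_timer(struct toy_base *b, struct toy_timer *t)
    {
        b->running = t;          /* cf. base->running = timer  */
        t->enqueued = false;     /* dequeued before the callback runs */
        t->fn(t);                /* callback still observes itself as active */
        b->running = NULL;       /* cf. base->running = NULL   */
    }

    static void my_callback(struct toy_timer *t)
    {
        printf("active during callback: %d\n", toy_timer_active(&base, t));
    }

    int main(void)
    {
        struct toy_timer t = { .enqueued = true, .fn = my_callback };

        toy_run_timer(&base, &t);
        printf("active after callback: %d\n", toy_timer_active(&base, &t));
        return 0;
    }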
|
/kernel/rcu/ |
D | Kconfig.debug |
    36    after the fact on the running kernel to be tested, if desired.
    53    after the fact on the running kernel to be tested, if desired.
|
D | Kconfig |
    204   (!PREEMPT kernels). Nothing prevents this kthread from running
|
/kernel/events/ |
D | core.c |
    626   __perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running)  in __perf_update_times() argument
    635   *running = event->total_time_running;  in __perf_update_times()
    637   *running += delta;  in __perf_update_times()
    4057  u64 *enabled, u64 *running)  in perf_event_read_local() argument
    4106  if (enabled || running) {  in perf_event_read_local()
    4113  if (running)  in perf_event_read_local()
    4114  *running = __running;  in perf_event_read_local()
    4831  static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)  in __perf_event_read_value() argument
    4837  *running = 0;  in __perf_event_read_value()
    4846  *running += event->total_time_running +  in __perf_event_read_value()
    [all …]
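__perf_update_times() and __perf_event_read_value() maintain the enabled/running time pair that tells a reader how long an event was actually scheduled on the PMU versus merely enabled; perf_event_read_local() exposes the same pair to in-kernel callers. From userspace the pair arrives via the read format, and its usual purpose is to scale the raw count when the event was multiplexed. A minimal, self-contained example (the workload loop and the chosen hardware event are arbitrary):

    #include <linux/perf_event.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Layout selected by PERF_FORMAT_TOTAL_TIME_ENABLED|RUNNING below. */
    struct read_format { uint64_t value, time_enabled, time_running; };

    int main(void)
    {
        struct perf_event_attr attr;
        struct read_format rf;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_INSTRUCTIONS;
        attr.disabled = 1;
        attr.exclude_kernel = 1;
        attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
                           PERF_FORMAT_TOTAL_TIME_RUNNING;

        fd = syscall(SYS_perf_event_open, &attr, 0 /* this task */,
                     -1 /* any CPU */, -1 /* no group */, 0);
        if (fd < 0) {
            perror("perf_event_open");
            return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        for (volatile long i = 0; i < 10000000; i++)
            ;                               /* arbitrary work to count */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &rf, sizeof(rf)) != (ssize_t)sizeof(rf)) {
            perror("read");
            return 1;
        }

        /* Scale the raw count if the event was multiplexed off the PMU. */
        if (rf.time_running && rf.time_running < rf.time_enabled)
            rf.value = rf.value * rf.time_enabled / rf.time_running;

        printf("instructions: %llu (enabled %llu ns, running %llu ns)\n",
               (unsigned long long)rf.value,
               (unsigned long long)rf.time_enabled,
               (unsigned long long)rf.time_running);
        close(fd);
        return 0;
    }

If the event stayed on the PMU the whole time, time_enabled equals time_running and the scaling step is a no-op.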
|
/kernel/trace/ |
D | bpf_trace.c |
    348   u64 *value, u64 *enabled, u64 *running)  in get_map_perf_counter() argument
    366   return perf_event_read_local(ee->event, value, enabled, running);  in get_map_perf_counter()
    400   &buf->running);  in BPF_CALL_4()
    908   &buf->running);  in BPF_CALL_3()
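The &buf->running arguments here are the kernel filling a struct bpf_perf_event_value (counter, enabled, running) on behalf of the bpf_perf_event_read_value() helper, with get_map_perf_counter() forwarding to perf_event_read_local(). Below is a minimal BPF-side sketch of calling that helper; the attach point, map size, and libbpf-style map definition are illustrative assumptions, and userspace still has to open the perf events and install their file descriptors in the map.

    /* SPDX-License-Identifier: GPL-2.0 */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* Per-CPU slots for perf event FDs installed by userspace. */
    struct {
        __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
        __uint(key_size, sizeof(int));
        __uint(value_size, sizeof(int));
        __uint(max_entries, 64);
    } counters SEC(".maps");

    SEC("kprobe/do_sys_openat2")           /* illustrative attach point */
    int read_counter(void *ctx)
    {
        struct bpf_perf_event_value val = {};

        /* Fills val.counter, val.enabled and val.running for the event
         * stored at this CPU's index in the map. */
        if (bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
                                      &val, sizeof(val)))
            return 0;                      /* no event installed, or error */

        /* val.enabled / val.running allow scaling val.counter when the
         * underlying hardware counter was multiplexed. */
        bpf_printk("counter=%llu running=%llu", val.counter, val.running);
        return 0;
    }

    char LICENSE[] SEC("license") = "GPL";

The enabled/running fields can then feed the same multiplexing scaling shown in the userspace example above.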
|
D | Kconfig |
    273   When the tracer is not running, it has no affect on the system,
    274   but when it is running, it can cause the system to be
    404   events into a running trace buffer to see when and where the
    717   affected by processes that are running.
    783   increase the memory footprint of the running kernel.
|