
Search results for refs:running (1 – 10 of 10, sorted by relevance)

/kernel/sched/
pelt.c
166 unsigned long load, unsigned long runnable, int running) in accumulate_sum() argument
208 if (running) in accumulate_sum()
244 unsigned long load, unsigned long runnable, int running) in ___update_load_sum() argument
280 runnable = running = 0; in ___update_load_sum()
289 if (!accumulate_sum(delta, sa, load, runnable, running)) in ___update_load_sum()
410 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running) in update_rt_rq_load_avg() argument
413 running, in update_rt_rq_load_avg()
414 running, in update_rt_rq_load_avg()
415 running)) { in update_rt_rq_load_avg()
436 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running) in update_dl_rq_load_avg() argument
[all …]
pelt.h
7 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
8 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
31 int update_irq_load_avg(struct rq *rq, u64 running);
34 update_irq_load_avg(struct rq *rq, u64 running) in update_irq_load_avg() argument
175 update_rt_rq_load_avg(u64 now, struct rq *rq, int running) in update_rt_rq_load_avg() argument
181 update_dl_rq_load_avg(u64 now, struct rq *rq, int running) in update_dl_rq_load_avg() argument
198 update_irq_load_avg(struct rq *rq, u64 running) in update_irq_load_avg() argument
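The pelt.c and pelt.h hits are the PELT (per-entity load tracking) paths: update_rt_rq_load_avg() and update_dl_rq_load_avg() feed a single running flag into ___update_load_sum(), and accumulate_sum() only adds a contribution for periods in which the class was actually running (line 208). Below is a minimal user-space model of that geometric accumulation; the names and constants are illustrative, not the kernel's, which uses fixed-point arithmetic with a half-life of 32 periods (y^32 = 0.5):

```c
/*
 * Minimal model of the PELT-style accumulation seen in
 * kernel/sched/pelt.c: each ~1ms period the old sum decays by y
 * (y^32 == 0.5) and a new contribution is added only while
 * "running" is set. ___update_load_sum() does the real work with
 * fixed-point arithmetic and partial-period handling.
 * Build with: cc pelt_model.c -lm
 */
#include <stdio.h>
#include <math.h>

#define PELT_HALFLIFE 32	/* periods until a contribution halves */

static double decay_factor(void)
{
	return pow(0.5, 1.0 / PELT_HALFLIFE);	/* y, approx 0.97857 */
}

/* Advance the tracked sum by one full period. */
static double accumulate_one_period(double sum, int running)
{
	sum *= decay_factor();
	if (running)		/* mirrors "if (running)" in accumulate_sum() */
		sum += 1024.0;	/* full-period contribution */
	return sum;
}

int main(void)
{
	double sum = 0.0;
	int p;

	/* 64 busy periods followed by 64 idle ones. */
	for (p = 0; p < 128; p++) {
		sum = accumulate_one_period(sum, p < 64);
		if ((p & 15) == 15)
			printf("period %3d: sum = %8.1f\n", p, sum);
	}
	return 0;
}
```

The sum saturates while running and decays geometrically once the flag drops, which is why the rt and dl callers at lines 410-436 can pass one flag for all three inputs.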
core.c
1952 bool queued, running; in do_set_cpus_allowed() local
1957 running = task_current(rq, p); in do_set_cpus_allowed()
1967 if (running) in do_set_cpus_allowed()
1974 if (running) in do_set_cpus_allowed()
2343 int running, queued; in wait_task_inactive() local
2381 running = task_running(rq, p); in wait_task_inactive()
2400 if (unlikely(running)) { in wait_task_inactive()
5117 int prio, oldprio, queued, running, queue_flag = in rt_mutex_setprio() local
5179 running = task_current(rq, p); in rt_mutex_setprio()
5182 if (running) in rt_mutex_setprio()
[all …]
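The core.c hits share one pattern: before changing a task's attributes, do_set_cpus_allowed() and rt_mutex_setprio() snapshot whether the task is queued and whether it is the one currently running, detach it, apply the change, then restore exactly the state that was snapshotted. A stand-alone sketch of that bracket, with stand-in types and helpers rather than the kernel's:

```c
/*
 * The "queued/running bracket": snapshot the task's state, take it
 * out of the runqueue structures, mutate it, then put back whatever
 * was there before. All types and helpers are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

struct task { bool queued; bool current_on_cpu; int prio; };

static void dequeue(struct task *t)  { t->queued = false; }
static void enqueue(struct task *t)  { t->queued = true; }
static void put_prev(struct task *t) { t->current_on_cpu = false; }
static void set_next(struct task *t) { t->current_on_cpu = true; }

static void change_priority(struct task *t, int prio)
{
	bool queued  = t->queued;          /* like task_on_rq_queued(p) */
	bool running = t->current_on_cpu;  /* like task_current(rq, p)  */

	if (queued)
		dequeue(t);
	if (running)
		put_prev(t);

	t->prio = prio;		/* the actual attribute change */

	if (queued)
		enqueue(t);
	if (running)
		set_next(t);
}

int main(void)
{
	struct task t = { .queued = true, .current_on_cpu = true, .prio = 120 };

	change_priority(&t, 100);
	printf("prio=%d queued=%d running=%d\n",
	       t.prio, t.queued, t.current_on_cpu);
	return 0;
}
```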
/kernel/time/
Kconfig
85 This option keeps the tick running periodically at a constant
115 the CPU is running tasks. Typically this requires running a single
116 task on the CPU. Chances for running tickless are maximized when
hrtimer.c
1512 base->running == timer) in hrtimer_active()
1552 base->running = timer; in __run_hrtimer()
1611 WARN_ON_ONCE(base->running != timer); in __run_hrtimer()
1612 base->running = NULL; in __run_hrtimer()
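The hrtimer.c hits show why base->running exists: while a timer's callback executes, the timer is no longer enqueued, so hrtimer_active() (line 1512) must also treat "callback in flight" as active. __run_hrtimer() publishes the timer in base->running before invoking the callback (line 1552) and clears it afterwards (lines 1611-1612). A single-threaded sketch of that handshake; the kernel additionally serializes it against concurrent readers with a seqcount:

```c
/*
 * Model of the base->running handshake: "active" means enqueued OR
 * currently executing its callback. Simplified and single-threaded.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct timer;

struct clock_base {
	const struct timer *running;	/* callback currently executing */
};

struct timer {
	struct clock_base *base;
	bool enqueued;
	void (*fn)(struct timer *);
};

/* Mirrors the hrtimer_active() check at line 1512. */
static bool timer_active(const struct timer *t)
{
	return t->enqueued || t->base->running == t;
}

static void run_timer(struct timer *t)
{
	t->enqueued = false;
	t->base->running = t;		/* line 1552: base->running = timer */
	t->fn(t);			/* callback may re-arm the timer    */
	t->base->running = NULL;	/* line 1612: base->running = NULL  */
}

static void callback(struct timer *t)
{
	printf("in callback, active=%d\n", timer_active(t));	/* prints 1 */
}

int main(void)
{
	struct clock_base base = { .running = NULL };
	struct timer t = { .base = &base, .enqueued = true, .fn = callback };

	run_timer(&t);
	printf("after expiry, active=%d\n", timer_active(&t));	/* prints 0 */
	return 0;
}
```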
/kernel/rcu/
Kconfig.debug
38 after the fact on the running kernel to be tested, if desired.
57 after the fact on the running kernel to be tested, if desired.
76 The kernel module may be built after the fact on the running kernel to be
Kconfig
232 (!PREEMPTION kernels). Nothing prevents this kthread from running
/kernel/events/
core.c
628 __perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running) in __perf_update_times() argument
637 *running = event->total_time_running; in __perf_update_times()
639 *running += delta; in __perf_update_times()
4359 u64 *enabled, u64 *running) in perf_event_read_local() argument
4408 if (enabled || running) { in perf_event_read_local()
4415 if (running) in perf_event_read_local()
4416 *running = __running; in perf_event_read_local()
5138 static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in __perf_event_read_value() argument
5144 *running = 0; in __perf_event_read_value()
5153 *running += event->total_time_running + in __perf_event_read_value()
[all …]
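The events/core.c hits thread an enabled/running pair through __perf_update_times() and perf_event_read_local() so callers can tell how long the event was scheduled on the PMU versus merely enabled. The same pair is visible from user space via perf_event_open(2) with PERF_FORMAT_TOTAL_TIME_ENABLED and PERF_FORMAT_TOTAL_TIME_RUNNING; a minimal sketch that uses them to scale a multiplexed counter (error handling reduced to perror):

```c
/* User-space counterpart of the enabled/running out-parameters. */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

struct read_format {
	uint64_t value;
	uint64_t time_enabled;	/* like *enabled in the kernel hits */
	uint64_t time_running;	/* like *running in the kernel hits */
};

int main(void)
{
	struct perf_event_attr attr;
	struct read_format rf;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;
	attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			   PERF_FORMAT_TOTAL_TIME_RUNNING;

	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (volatile long i = 0; i < 10000000; i++)
		;	/* work to count */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &rf, sizeof(rf)) != sizeof(rf)) {
		perror("read");
		return 1;
	}

	/* Scale up if the counter was not running the whole time. */
	if (rf.time_running)
		printf("instructions ~= %llu (enabled %llu ns, running %llu ns)\n",
		       (unsigned long long)(rf.value *
				(double)rf.time_enabled / rf.time_running),
		       (unsigned long long)rf.time_enabled,
		       (unsigned long long)rf.time_running);

	close(fd);
	return 0;
}
```

The value * enabled / running scaling is the standard estimate when the PMU multiplexes events, which is exactly the situation the kernel's running time tracks.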
/kernel/trace/
Kconfig
356 When the tracer is not running, it has no effect on the system,
357 but when it is running, it can cause the system to be
500 events into a running trace buffer to see when and where the
740 affected by processes that are running.
768 increase the memory footprint of the running kernel.
911 running histograms.
bpf_trace.c
805 u64 *value, u64 *enabled, u64 *running) in get_map_perf_counter() argument
823 return perf_event_read_local(ee->event, value, enabled, running); in get_map_perf_counter()
857 &buf->running); in BPF_CALL_4()
1517 &buf->running); in BPF_CALL_3()
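The bpf_trace.c hits are the kernel side of the bpf_perf_event_read_value() helper: get_map_perf_counter() forwards to perf_event_read_local(), and the &buf->running arguments at lines 857 and 1517 land in a struct bpf_perf_event_value. A sketch of the BPF-program side, assuming libbpf; the map layout is the standard perf-event array, but the attach point is illustrative:

```c
/* BPF-program side of the enabled/running plumbing shown above. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(max_entries, 64);	/* one slot per possible CPU */
	__type(key, int);
	__type(value, int);		/* perf event fd set from user space */
} counters SEC(".maps");

SEC("kprobe/do_sys_openat2")		/* illustrative attach point */
int read_counter(void *ctx)
{
	struct bpf_perf_event_value val = {};
	long err;

	/* Fills val.counter, val.enabled and val.running, matching the
	 * enabled/running out-parameters in the kernel-side hits. */
	err = bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
					&val, sizeof(val));
	if (err)
		return 0;

	bpf_printk("counter=%llu running=%llu", val.counter, val.running);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```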