
Searched refs:running (Results 1 – 12 of 12) sorted by relevance

/kernel/sched/
pelt.c 108 unsigned long load, unsigned long runnable, int running) in accumulate_sum() argument
150 if (running) in accumulate_sum()
185 unsigned long load, unsigned long runnable, int running) in ___update_load_sum() argument
223 runnable = running = 0; in ___update_load_sum()
232 if (!accumulate_sum(delta, sa, load, runnable, running)) in ___update_load_sum()
354 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running) in update_rt_rq_load_avg() argument
357 running, in update_rt_rq_load_avg()
358 running, in update_rt_rq_load_avg()
359 running)) { in update_rt_rq_load_avg()
380 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running) in update_dl_rq_load_avg() argument
[all …]
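
These pelt.c hits trace PELT's accumulation path: ___update_load_sum() clamps load, runnable and running to zero for time the entity was not running, and accumulate_sum() only adds the elapsed delta to the sum while the running flag is set. A minimal standalone sketch of that gating, with illustrative names and a deliberately simplified decay (the real code uses fixed-point y^n lookup tables, where y^32 is about 1/2, not a plain shift):

#include <stdint.h>

struct toy_avg {
        uint64_t util_sum;      /* decayed sum of time spent running */
        uint64_t last_update;   /* timestamp of the last accumulation, in us */
};

static void toy_accumulate(struct toy_avg *sa, uint64_t now, int running)
{
        uint64_t delta = now - sa->last_update;
        uint64_t periods = delta / 1024;        /* full 1024 us periods elapsed */

        /* decay old history: roughly halve the sum per 32 elapsed periods */
        while (periods >= 32) {
                sa->util_sum >>= 1;
                periods -= 32;
        }

        /* the running flag gates the contribution, as in accumulate_sum() */
        if (running)
                sa->util_sum += delta;

        sa->last_update = now;
}
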
pelt.h 7 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
8 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
31 int update_irq_load_avg(struct rq *rq, u64 running);
34 update_irq_load_avg(struct rq *rq, u64 running) in update_irq_load_avg() argument
175 update_rt_rq_load_avg(u64 now, struct rq *rq, int running) in update_rt_rq_load_avg() argument
181 update_dl_rq_load_avg(u64 now, struct rq *rq, int running) in update_dl_rq_load_avg() argument
198 update_irq_load_avg(struct rq *rq, u64 running) in update_irq_load_avg() argument
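
The paired pelt.h hits show a standard kernel configuration pattern: real prototypes when the PELT consumers are built in (lines 7, 8, 31) and no-op static inline stubs otherwise (lines 175, 181, 198), so call sites need no #ifdefs. A schematic, self-contained version with an illustrative config symbol:

#include <stdint.h>

typedef uint64_t u64;
struct rq;                      /* opaque here; its fields don't matter */

#ifdef CONFIG_TOY_PELT          /* illustrative config symbol */
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
#else
static inline int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
        return 0;               /* class compiled out: nothing to track */
}
#endif
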
core.c 2544 bool queued, running; in __do_set_cpus_allowed() local
2564 running = task_current(rq, p); in __do_set_cpus_allowed()
2574 if (running) in __do_set_cpus_allowed()
2581 if (running) in __do_set_cpus_allowed()
3305 int running, queued; in wait_task_inactive() local
3343 running = task_running(rq, p); in wait_task_inactive()
3362 if (unlikely(running)) { in wait_task_inactive()
6987 int prio, oldprio, queued, running, queue_flag = in rt_mutex_setprio() local
7049 running = task_current(rq, p); in rt_mutex_setprio()
7052 if (running) in rt_mutex_setprio()
[all …]
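
The core.c hits all follow one idiom: before changing a task attribute, take the task out of whatever scheduler state it occupies (dequeue if queued, put_prev if it is what the CPU is currently running), mutate it, then reverse the steps. A self-contained toy of that sequence; the helpers stand in for the real dequeue_task(), put_prev_task(), enqueue_task() and set_next_task():

#include <stdbool.h>
#include <stddef.h>

struct task { bool on_rq; int prio; };
struct rq { struct task *curr; };

static void dequeue(struct rq *rq, struct task *p)  { (void)rq; p->on_rq = false; }
static void enqueue(struct rq *rq, struct task *p)  { (void)rq; p->on_rq = true; }
static void put_prev(struct rq *rq, struct task *p) { (void)p; rq->curr = NULL; }
static void set_next(struct rq *rq, struct task *p) { rq->curr = p; }

static void change_prio(struct rq *rq, struct task *p, int new_prio)
{
        bool queued  = p->on_rq;                /* sitting on a runqueue? */
        bool running = (rq->curr == p);         /* what the CPU executes? */

        if (queued)
                dequeue(rq, p);
        if (running)
                put_prev(rq, p);

        p->prio = new_prio;                     /* the actual attribute change */

        if (queued)
                enqueue(rq, p);
        if (running)
                set_next(rq, p);
}

This ordering means no queue bookkeeping ever observes the attribute mid-change: the task is fully out of the structures while it is mutated.
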
fair.c 9453 allow_numa_imbalance(unsigned int running, unsigned int weight) in allow_numa_imbalance() argument
9455 return (running < (weight >> 2)); in allow_numa_imbalance()
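
The excerpt shows the whole predicate: an imbalance toward a node is tolerated while it runs fewer tasks than a quarter of its CPU weight. A small standalone demo of the arithmetic:

#include <stdbool.h>
#include <stdio.h>

static bool allow_numa_imbalance(unsigned int running, unsigned int weight)
{
        return running < (weight >> 2);         /* under 25% of CPUs busy */
}

int main(void)
{
        /* a 16-CPU node: imbalance tolerated at 3 running tasks, not at 4 */
        printf("%d\n", allow_numa_imbalance(3, 16));    /* prints 1 */
        printf("%d\n", allow_numa_imbalance(4, 16));    /* prints 0 */
        return 0;
}
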
/kernel/time/
Kconfig 96 This option keeps the tick running periodically at a constant
124 the CPU is running tasks. Typically this requires running a single
125 task on the CPU. Chances for running tickless are maximized when
hrtimer.c 1614 base->running == timer) in hrtimer_active()
1654 base->running = timer; in __run_hrtimer()
1713 WARN_ON_ONCE(base->running != timer); in __run_hrtimer()
1714 base->running = NULL; in __run_hrtimer()
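
The hrtimer.c hits outline the base->running handshake: __run_hrtimer() publishes the timer before invoking its callback and clears the field only afterwards, so hrtimer_active() can report a timer as active while its callback is still executing, even though it has been dequeued. A toy model of that protocol (the real code additionally guards the check with a seqcount to make it race-free):

#include <stdbool.h>
#include <stddef.h>

struct timer;                           /* opaque toy timer */
struct clock_base {
        struct timer *running;          /* callback executing now, or NULL */
};

/* "active" means enqueued OR its callback is mid-flight */
static bool timer_active(struct clock_base *base, struct timer *t, bool enqueued)
{
        return enqueued || base->running == t;
}

static void run_timer(struct clock_base *base, struct timer *t,
                      void (*fn)(struct timer *))
{
        base->running = t;              /* publish before calling out */
        fn(t);                          /* callback may re-arm or cancel t */
        base->running = NULL;           /* cleared only after fn() returns */
}
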
/kernel/rcu/
Kconfig.debug 38 after the fact on the running kernel to be tested, if desired.
57 after the fact on the running kernel to be tested, if desired.
76 The kernel module may be built after the fact on the running kernel to be
Kconfig 251 from running on the specified CPUs, but (1) the kthreads may be
/kernel/events/
core.c 630 __perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running) in __perf_update_times() argument
639 *running = event->total_time_running; in __perf_update_times()
641 *running += delta; in __perf_update_times()
4551 u64 *running) in calc_timer_values() argument
4557 __perf_update_times(event, ctx_time, enabled, running); in calc_timer_values()
4569 u64 *enabled, u64 *running) in perf_event_read_local() argument
4618 if (enabled || running) { in perf_event_read_local()
4624 if (running) in perf_event_read_local()
4625 *running = __running; in perf_event_read_local()
5345 static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in __perf_event_read_value() argument
[all …]
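
The events/core.c hits show perf_event_read_local() filling the counter value plus the enabled/running times, with both time out-parameters optional (note the "if (enabled || running)" guard at line 4618). A hypothetical in-kernel caller that uses them for the standard multiplexing extrapolation, value * enabled / running; the function name is illustrative:

#include <linux/perf_event.h>
#include <linux/math64.h>

static u64 read_scaled(struct perf_event *event)
{
        u64 value, enabled, running;

        if (perf_event_read_local(event, &value, &enabled, &running))
                return 0;       /* wrong CPU/context or event gone */

        /* counter was scheduled in only part of the time: extrapolate */
        if (running && running < enabled)
                value = div64_u64(value * enabled, running);

        return value;
}
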
/kernel/trace/
trace_osnoise.c 1749 int running, err; in osnoise_cpus_write() local
1770 running = osnoise_busy; in osnoise_cpus_write()
1771 if (running) in osnoise_cpus_write()
1785 if (running) in osnoise_cpus_write()
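
Per these hits, osnoise_cpus_write() snapshots whether the tracer is busy, stops it, rewrites the tracing cpumask, and restarts it only if it had been running. The same pause-update-resume shape as a standalone toy, with illustrative names:

#include <stdbool.h>

static bool tracer_busy;                /* stand-in for osnoise_busy */

static void stop_tracer(void)  { tracer_busy = false; }
static void start_tracer(void) { tracer_busy = true; }

static void toy_cpus_write(unsigned long new_mask, unsigned long *mask)
{
        bool running = tracer_busy;     /* snapshot before changing state */

        if (running)
                stop_tracer();

        *mask = new_mask;               /* safe: tracer threads are parked */

        if (running)
                start_tracer();
}
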
Kconfig 362 When the tracer is not running, it has no effect on the system,
363 but when it is running, it can cause the system to be
383 The osnoise tracer leverages the hwlat_detector by running a similar
569 events into a running trace buffer to see when and where the
809 affected by processes that are running.
837 increase the memory footprint of the running kernel.
994 tracer is running on, specify cpu_affinity=cpu_num at the end of the
1043 running histograms.
bpf_trace.c 494 u64 *value, u64 *enabled, u64 *running) in get_map_perf_counter() argument
512 return perf_event_read_local(ee->event, value, enabled, running); in get_map_perf_counter()
546 &buf->running); in BPF_CALL_4()
1319 &buf->running); in BPF_CALL_3()
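
These bpf_trace.c hits are the kernel side of the bpf_perf_event_read_value() helper: get_map_perf_counter() forwards to perf_event_read_local(), which fills the counter/enabled/running triple of struct bpf_perf_event_value. A minimal BPF-side consumer; the map is assumed to be populated with perf event FDs from userspace via perf_event_open(), and the attach point is illustrative:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
        __uint(max_entries, 64);
        __uint(key_size, sizeof(int));
        __uint(value_size, sizeof(int));
} counters SEC(".maps");

SEC("kprobe/do_sys_openat2")
int probe(void *ctx)
{
        struct bpf_perf_event_value v = {};

        /* BPF_F_CURRENT_CPU selects this CPU's slot in the event array */
        if (bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
                                      &v, sizeof(v)) == 0 && v.running)
                bpf_printk("cnt=%llu enabled=%llu running=%llu",
                           v.counter, v.enabled, v.running);
        return 0;
}

char LICENSE[] SEC("license") = "GPL";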