Searched refs:running (Results 1 – 14 of 14) sorted by relevance

/kernel/sched/
pelt.c
105 unsigned long load, unsigned long runnable, int running) in accumulate_sum() argument
147 if (running) in accumulate_sum()
183 unsigned long load, unsigned long runnable, int running) in ___update_load_sum() argument
221 runnable = running = 0; in ___update_load_sum()
230 if (!accumulate_sum(delta, sa, load, runnable, running)) in ___update_load_sum()
353 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running) in update_rt_rq_load_avg() argument
356 running, in update_rt_rq_load_avg()
357 running, in update_rt_rq_load_avg()
358 running)) { in update_rt_rq_load_avg()
379 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running) in update_dl_rq_load_avg() argument
[all …]
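
The accumulate_sum() hits above are the core of PELT (per-entity load tracking): time is split into 1024us periods, a geometrically decayed sum is kept, and the running flag decides whether a period contributes utilization. A minimal standalone sketch of that idea, with a single 1/1024 fixed-point constant standing in for the kernel's precomputed decay tables:

#include <stdint.h>
#include <stdio.h>

#define PERIOD_US 1024	/* PELT period length */
#define DECAY_Y   1002	/* ~0.5^(1/32) in 1/1024 fixed point: the sum
			 * halves across 32 periods */

struct toy_avg {
	uint64_t util_sum;	/* decayed sum of time spent running */
	uint64_t last_us;	/* timestamp of the previous update */
};

static void toy_update(struct toy_avg *sa, uint64_t now_us, int running)
{
	uint64_t delta = now_us - sa->last_us;

	/* Cross whole periods: decay the sum, then credit a full
	 * period if the entity was running (cf. the if (running)
	 * at pelt.c line 147). */
	while (delta >= PERIOD_US) {
		sa->util_sum = sa->util_sum * DECAY_Y / 1024;
		if (running)
			sa->util_sum += PERIOD_US;
		delta -= PERIOD_US;
	}
	/* Partial period, credited undecayed; the kernel tracks this
	 * remainder more precisely via period_contrib. */
	if (running)
		sa->util_sum += delta;
	sa->last_us = now_us;
}

int main(void)
{
	struct toy_avg sa = { 0, 0 };

	toy_update(&sa, 10 * PERIOD_US, 1);	/* 10 periods running */
	printf("util_sum: %llu\n", (unsigned long long)sa.util_sum);
	return 0;
}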
pelt.h
7 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
8 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
31 int update_irq_load_avg(struct rq *rq, u64 running);
34 update_irq_load_avg(struct rq *rq, u64 running) in update_irq_load_avg() argument
222 update_rt_rq_load_avg(u64 now, struct rq *rq, int running) in update_rt_rq_load_avg() argument
228 update_dl_rq_load_avg(u64 now, struct rq *rq, int running) in update_dl_rq_load_avg() argument
245 update_irq_load_avg(struct rq *rq, u64 running) in update_irq_load_avg() argument
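
pelt.h pairs each declaration near the top (lines 7-31) with a static inline stub near the bottom (lines 222-245), so call sites never need their own #ifdefs. A sketch of that pattern, assuming the CONFIG_SMP guard used in mainline (update_irq_load_avg() sits behind its own config symbol):

#ifdef CONFIG_SMP
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
#else
/* Stub keeps callers unconditional when PELT is compiled out. */
static inline int
update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}
#endif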
core.c
2299 int running, queued, match; in wait_task_inactive() local
2337 running = task_on_cpu(rq, p); in wait_task_inactive()
2363 if (unlikely(running)) { in wait_task_inactive()
2773 bool queued, running; in __do_set_cpus_allowed() local
2793 running = task_current(rq, p); in __do_set_cpus_allowed()
2803 if (running) in __do_set_cpus_allowed()
2810 if (running) in __do_set_cpus_allowed()
7099 int prio, oldprio, queued, running, queue_flag = in rt_mutex_setprio() local
7163 running = task_current(rq, p); in rt_mutex_setprio()
7166 if (running) in rt_mutex_setprio()
[all …]
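
The core.c hits share one choreography: record whether the task is queued and whether it is the CPU's current task, take it out of circulation, change the attribute, then put everything back. Condensed to the bare pattern (kernel-internal fragment with flags abbreviated, not a compilable excerpt):

queued = task_on_rq_queued(p);
running = task_current(rq, p);

if (queued)
	dequeue_task(rq, p, DEQUEUE_SAVE);	/* off the runqueue */
if (running)
	put_prev_task(rq, p);			/* off the CPU */

/* ... change priority, scheduling class, or cpumask here ... */

if (queued)
	enqueue_task(rq, p, ENQUEUE_RESTORE);	/* back on the runqueue */
if (running)
	set_next_task(rq, p);			/* back on the CPU */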
/kernel/time/
Kconfig
105 This option keeps the tick running periodically at a constant
133 the CPU is running tasks. Typically this requires running a single
134 task on the CPU. Chances for running tickless are maximized when
hrtimer.c
1614 base->running == timer) in hrtimer_active()
1654 base->running = timer; in __run_hrtimer()
1713 WARN_ON_ONCE(base->running != timer); in __run_hrtimer()
1714 base->running = NULL; in __run_hrtimer()
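
These hrtimer.c hits explain why a timer whose callback is mid-flight still counts as active: __run_hrtimer() parks the timer in base->running for the duration of the callback, and hrtimer_active() (line 1614) checks that field alongside the enqueued state. A simplified kernel-context sketch, with the base lock and the seqcount that orders these writes omitted:

#include <linux/hrtimer.h>

static void run_one_timer(struct hrtimer_clock_base *base,
			  struct hrtimer *timer)
{
	enum hrtimer_restart restart;

	base->running = timer;		  /* hrtimer_active() sees this */
	restart = timer->function(timer); /* callback runs unlocked */
	WARN_ON_ONCE(base->running != timer);
	base->running = NULL;		  /* active again only if re-armed */
	(void)restart;			  /* re-arming elided in this sketch */
}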
/kernel/rcu/
Kconfig.debug
35 after the fact on the running kernel to be tested, if desired.
51 after the fact on the running kernel to be tested, if desired.
67 The kernel module may be built after the fact on the running kernel to be
Kconfig
259 from running on the specified CPUs, but (1) the kthreads may be
287 Of course, running as SCHED_FIFO during callback floods will
/kernel/configs/
debug.config
1 # The config is based on running daily CI for enterprise Linux distros to
/kernel/events/
core.c
629 __perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running) in __perf_update_times() argument
638 *running = event->total_time_running; in __perf_update_times()
640 *running += delta; in __perf_update_times()
4470 u64 *running) in calc_timer_values() argument
4476 __perf_update_times(event, ctx_time, enabled, running); in calc_timer_values()
4488 u64 *enabled, u64 *running) in perf_event_read_local() argument
4537 if (enabled || running) { in perf_event_read_local()
4543 if (running) in perf_event_read_local()
4544 *running = __running; in perf_event_read_local()
5264 static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in __perf_event_read_value() argument
[all …]
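
__perf_update_times() maintains the pair of clocks that make multiplexed counters usable: total time the event was enabled and total time it actually ran on a PMU; perf_event_read_local() then hands both to in-kernel readers such as BPF. From userspace the same two values arrive via read_format, and the raw count can be scaled by enabled/running. A minimal sketch, assuming an instruction counter on the current task:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* read() layout for PERF_FORMAT_TOTAL_TIME_ENABLED|RUNNING */
struct read_fmt { uint64_t value, time_enabled, time_running; };

int main(void)
{
	struct perf_event_attr attr;
	struct read_fmt r;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			   PERF_FORMAT_TOTAL_TIME_RUNNING;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	for (volatile int i = 0; i < 1000000; i++)
		;	/* workload under measurement */

	if (read(fd, &r, sizeof(r)) == sizeof(r) && r.time_running)
		printf("instructions ~= %llu (scaled by enabled/running)\n",
		       (unsigned long long)(r.value * r.time_enabled /
					    r.time_running));
	close(fd);
	return 0;
}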
/kernel/trace/
trace_osnoise.c
1928 int running, err; in osnoise_cpus_write() local
1948 running = osnoise_has_registered_instances(); in osnoise_cpus_write()
1949 if (running) in osnoise_cpus_write()
1963 if (running) in osnoise_cpus_write()
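
osnoise_cpus_write() uses its running flag for a stop/update/restart sequence: if any tracing instance is active, the measurement workload is stopped before the CPU mask changes and resumed afterwards. Schematically (the two workload helpers below are illustrative stand-ins for the kthread stop/start calls in that file):

running = osnoise_has_registered_instances();
if (running)
	stop_workload_kthreads();	/* illustrative name */

cpumask_copy(&osnoise_cpumask, new_mask);	/* apply the new mask */

if (running)
	start_workload_kthreads();	/* illustrative name */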
Kconfig
402 When the tracer is not running, it has no effect on the system,
403 but when it is running, it can cause the system to be
423 The osnoise tracer leverages the hwlat_detector by running a similar
609 events into a running trace buffer to see when and where the
865 affected by processes that are running.
893 increase the memory footprint of the running kernel.
1064 tracer is running on, specify cpu_affinity=cpu_num at the end of the
1113 running histograms.
bpf_trace.c
549 u64 *value, u64 *enabled, u64 *running) in get_map_perf_counter() argument
567 return perf_event_read_local(ee->event, value, enabled, running); in get_map_perf_counter()
601 &buf->running); in BPF_CALL_4()
1702 &buf->running); in BPF_CALL_3()
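
get_map_perf_counter() is the kernel half of the bpf_perf_event_read_value() helper: it resolves the per-CPU event from a BPF_MAP_TYPE_PERF_EVENT_ARRAY and forwards to perf_event_read_local(), which fills counter, enabled, and running. A sketch of the BPF-program side; the kprobe attach point is an arbitrary example:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(max_entries, 1);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} counters SEC(".maps");

SEC("kprobe/do_sys_openat2")
int read_counter(void *ctx)
{
	struct bpf_perf_event_value v = {};

	/* Fills v.counter plus the enabled/running times maintained
	 * by __perf_update_times() on the kernel side. */
	if (!bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
				       &v, sizeof(v)))
		bpf_printk("val=%llu enabled=%llu running=%llu",
			   v.counter, v.enabled, v.running);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";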
/kernel/trace/rv/
Kconfig
46 Enable wwnr (wakeup while not running) sample monitor, this is a
/kernel/module/
Kconfig
7 be inserted in the running kernel, rather than being
69 make them incompatible with the kernel you are running. If
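
The MODULES help text excerpted above describes exactly this workflow: code built separately, then inserted into and removed from the running kernel. A minimal sketch of such a module:

#include <linux/module.h>
#include <linux/init.h>

static int __init hello_init(void)
{
	pr_info("hello: loaded into the running kernel\n");
	return 0;
}

static void __exit hello_exit(void)
{
	pr_info("hello: unloaded\n");
}

module_init(hello_init);
module_exit(hello_exit);
MODULE_LICENSE("GPL");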