/kernel/sched/

pelt.c
     108  unsigned long load, unsigned long runnable, int running)    in accumulate_sum() argument
     150  if (running)    in accumulate_sum()
     185  unsigned long load, unsigned long runnable, int running)    in ___update_load_sum() argument
     223  runnable = running = 0;    in ___update_load_sum()
     232  if (!accumulate_sum(delta, sa, load, runnable, running))    in ___update_load_sum()
     354  int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)    in update_rt_rq_load_avg() argument
     357  running,    in update_rt_rq_load_avg()
     358  running,    in update_rt_rq_load_avg()
     359  running)) {    in update_rt_rq_load_avg()
     380  int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)    in update_dl_rq_load_avg() argument
     [all …]

pelt.h
       7  int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
       8  int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
      31  int update_irq_load_avg(struct rq *rq, u64 running);
      34  update_irq_load_avg(struct rq *rq, u64 running)    in update_irq_load_avg() argument
     175  update_rt_rq_load_avg(u64 now, struct rq *rq, int running)    in update_rt_rq_load_avg() argument
     181  update_dl_rq_load_avg(u64 now, struct rq *rq, int running)    in update_dl_rq_load_avg() argument
     198  update_irq_load_avg(struct rq *rq, u64 running)    in update_irq_load_avg() argument
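
The pelt.c and pelt.h hits above are the PELT (Per-Entity Load Tracking) paths, where the `running` argument tells the accumulator whether the rq or entity was actually executing during the elapsed periods; update_rt_rq_load_avg() and update_dl_rq_load_avg() simply forward that flag into the sum (lines 357-359). As a rough standalone illustration only, not the kernel's code (the decay factor and all names below are invented for the sketch), the core idea is a geometrically decayed sum that only gains contribution while `running` is set:

#include <stdio.h>
#include <stdint.h>

/*
 * Toy model of a PELT-style running average: every elapsed period ages
 * the accumulated sum, and the sum only grows while the entity was
 * running.  Names and constants are illustrative, not the kernel's.
 */
#define DECAY_NUM  1002   /* ~0.9785 per period: half-life of about 32 periods */
#define DECAY_DEN  1024

struct toy_avg {
	uint64_t sum;    /* decayed accumulation */
};

static void toy_accumulate(struct toy_avg *sa, unsigned int periods, int running)
{
	while (periods--) {
		sa->sum = sa->sum * DECAY_NUM / DECAY_DEN;  /* age old contribution */
		if (running)
			sa->sum += DECAY_DEN;               /* full contribution for a busy period */
	}
}

int main(void)
{
	struct toy_avg sa = { 0 };

	toy_accumulate(&sa, 64, 1);   /* 64 busy periods: sum ramps up */
	printf("after busy phase: %llu\n", (unsigned long long)sa.sum);
	toy_accumulate(&sa, 64, 0);   /* 64 idle periods: sum only decays */
	printf("after idle phase: %llu\n", (unsigned long long)sa.sum);
	return 0;
}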

core.c
    2544  bool queued, running;    in __do_set_cpus_allowed() local
    2564  running = task_current(rq, p);    in __do_set_cpus_allowed()
    2574  if (running)    in __do_set_cpus_allowed()
    2581  if (running)    in __do_set_cpus_allowed()
    3305  int running, queued;    in wait_task_inactive() local
    3343  running = task_running(rq, p);    in wait_task_inactive()
    3362  if (unlikely(running)) {    in wait_task_inactive()
    6987  int prio, oldprio, queued, running, queue_flag =    in rt_mutex_setprio() local
    7049  running = task_current(rq, p);    in rt_mutex_setprio()
    7052  if (running)    in rt_mutex_setprio()
    [all …]
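
The core.c hits follow the scheduler's usual change pattern: record whether the task is queued and whether it is the CPU's current task, take it out of the runqueue structures, change its attributes, then put it back, restoring the `running` state last. A minimal standalone model of that idiom (stub types and stub helpers, not the kernel's functions):

#include <stdbool.h>
#include <stdio.h>

/* Stub model of the dequeue/put -> change -> enqueue/set idiom seen in
 * __do_set_cpus_allowed() and rt_mutex_setprio(); everything here is a
 * placeholder, not kernel code. */
struct task { bool on_rq; bool is_current; int prio; };

static void dequeue_task(struct task *p)  { p->on_rq = false; }
static void enqueue_task(struct task *p)  { p->on_rq = true;  }
static void put_prev_task(struct task *p) { printf("put prev (prio %d)\n", p->prio); }
static void set_next_task(struct task *p) { printf("set next (prio %d)\n", p->prio); }

static void change_prio(struct task *p, int prio)
{
	bool queued  = p->on_rq;        /* was it on a runqueue? */
	bool running = p->is_current;   /* is it what the CPU is executing? */

	if (queued)
		dequeue_task(p);
	if (running)
		put_prev_task(p);

	p->prio = prio;                 /* the actual attribute change */

	if (queued)
		enqueue_task(p);
	if (running)
		set_next_task(p);       /* re-pick it as the current task */
}

int main(void)
{
	struct task p = { .on_rq = true, .is_current = true, .prio = 120 };

	change_prio(&p, 100);
	return 0;
}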

fair.c
    9453  allow_numa_imbalance(unsigned int running, unsigned int weight)    in allow_numa_imbalance() argument
    9455  return (running < (weight >> 2));    in allow_numa_imbalance()
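
allow_numa_imbalance() is self-describing from the two hits: a NUMA imbalance is tolerated only while the number of running tasks stays below a quarter of the group's weight (`weight >> 2`). A quick standalone check of that threshold (the driver loop and sample weight are illustrative):

#include <stdio.h>

/* Same predicate as the fair.c hit above: imbalance is allowed while
 * fewer than weight/4 tasks are running. */
static int allow_numa_imbalance(unsigned int running, unsigned int weight)
{
	return running < (weight >> 2);
}

int main(void)
{
	/* e.g. weight 16: imbalance is allowed with up to 3 running tasks */
	for (unsigned int running = 0; running <= 5; running++)
		printf("running=%u weight=16 -> %s\n", running,
		       allow_numa_imbalance(running, 16) ? "allow" : "balance");
	return 0;
}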

/kernel/time/

Kconfig
      96  This option keeps the tick running periodically at a constant
     124  the CPU is running tasks. Typically this requires running a single
     125  task on the CPU. Chances for running tickless are maximized when

hrtimer.c
    1614  base->running == timer)    in hrtimer_active()
    1654  base->running = timer;    in __run_hrtimer()
    1713  WARN_ON_ONCE(base->running != timer);    in __run_hrtimer()
    1714  base->running = NULL;    in __run_hrtimer()
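
Here `running` is a pointer rather than a flag: __run_hrtimer() publishes the timer it is about to fire in `base->running` and clears it afterwards, so hrtimer_active() can report a timer as active while its callback is still executing. A stripped-down, single-threaded illustration of that marker pattern (the real code also pairs it with a sequence counter for lockless readers, which is omitted here; the types are stubs):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified model of the "currently running callback" marker.  The
 * field names mirror the hits above, but these are not kernel types. */
struct timer;

struct timer_base {
	struct timer *running;   /* timer whose callback is executing, or NULL */
};

struct timer {
	struct timer_base *base;
	int enqueued;
	void (*fn)(struct timer *);
};

static int timer_active(const struct timer *t)
{
	/* Active if still queued *or* its callback is currently running. */
	return t->enqueued || t->base->running == t;
}

static void run_timer(struct timer_base *base, struct timer *t)
{
	base->running = t;       /* publish before calling the callback */
	t->enqueued = 0;
	t->fn(t);                /* the callback still sees itself as active */
	assert(base->running == t);
	base->running = NULL;    /* clear once the callback has returned */
}

static void callback(struct timer *t)
{
	printf("in callback, active=%d\n", timer_active(t));     /* prints 1 */
}

int main(void)
{
	struct timer_base base = { .running = NULL };
	struct timer t = { .base = &base, .enqueued = 1, .fn = callback };

	run_timer(&base, &t);
	printf("after callback, active=%d\n", timer_active(&t)); /* prints 0 */
	return 0;
}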

/kernel/rcu/

Kconfig.debug
      38  after the fact on the running kernel to be tested, if desired.
      57  after the fact on the running kernel to be tested, if desired.
      76  The kernel module may be built after the fact on the running kernel to be

Kconfig
     251  from running on the specified CPUs, but (1) the kthreads may be

/kernel/events/

core.c
     630  __perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running)    in __perf_update_times() argument
     639  *running = event->total_time_running;    in __perf_update_times()
     641  *running += delta;    in __perf_update_times()
    4551  u64 *running)    in calc_timer_values() argument
    4557  __perf_update_times(event, ctx_time, enabled, running);    in calc_timer_values()
    4569  u64 *enabled, u64 *running)    in perf_event_read_local() argument
    4618  if (enabled || running) {    in perf_event_read_local()
    4624  if (running)    in perf_event_read_local()
    4625  *running = __running;    in perf_event_read_local()
    5345  static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)    in __perf_event_read_value() argument
    [all …]
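
In perf, `running` is an out-parameter for the event's total_time_running, reported next to total_time_enabled; both pointers are optional (note the NULL checks at lines 4618 and 4624). Consumers conventionally use the pair to rescale a raw count when the event was multiplexed, i.e. running for only part of the time it was enabled. A small sketch of that scaling, with made-up sample values:

#include <stdint.h>
#include <stdio.h>

/* Conventional scaling of a multiplexed perf count: extrapolate the raw
 * value from the time the event was actually on the PMU ("running") to
 * the time it was enabled.  The numbers below are invented. */
static uint64_t perf_scale(uint64_t raw, uint64_t enabled, uint64_t running)
{
	if (!running)
		return 0;              /* event never got PMU time */
	return (uint64_t)((double)raw * enabled / running);
}

int main(void)
{
	uint64_t raw     = 1200000;      /* counted while on the PMU */
	uint64_t enabled = 1000000000;   /* ns the event was enabled */
	uint64_t running = 250000000;    /* ns it actually ran (4x multiplexed) */

	printf("estimated full-period count: %llu\n",
	       (unsigned long long)perf_scale(raw, enabled, running));
	return 0;
}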

/kernel/trace/

trace_osnoise.c
    1749  int running, err;    in osnoise_cpus_write() local
    1770  running = osnoise_busy;    in osnoise_cpus_write()
    1771  if (running)    in osnoise_cpus_write()
    1785  if (running)    in osnoise_cpus_write()
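
osnoise_cpus_write() snapshots whether the tracer workload is currently busy, quiesces it if so, applies the new CPU mask, and restarts it only if it had been running. That "stop if running, reconfigure, resume" idiom in a generic standalone form (the worker and all names are placeholders, not the tracer's functions):

#include <stdbool.h>
#include <stdio.h>

/* Generic quiesce-if-running / reconfigure / resume idiom; the worker
 * here is a stand-in, not the osnoise tracer's kthreads. */
static bool worker_busy;

static void stop_worker(void)  { worker_busy = false; printf("worker stopped\n"); }
static void start_worker(void) { worker_busy = true;  printf("worker started\n"); }
static void apply_cpumask(const char *mask) { printf("cpumask set to %s\n", mask); }

static void set_cpus(const char *mask)
{
	bool running = worker_busy;   /* snapshot the current state */

	if (running)
		stop_worker();        /* never reconfigure under a live worker */

	apply_cpumask(mask);

	if (running)
		start_worker();       /* restore the previous state */
}

int main(void)
{
	start_worker();
	set_cpus("0-3");
	return 0;
}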

Kconfig
     362  When the tracer is not running, it has no affect on the system,
     363  but when it is running, it can cause the system to be
     383  The osnoise tracer leverages the hwlat_detector by running a similar
     569  events into a running trace buffer to see when and where the
     809  affected by processes that are running.
     837  increase the memory footprint of the running kernel.
     994  tracer is running on, specify cpu_affinity=cpu_num at the end of the
    1043  running histograms.

bpf_trace.c
     494  u64 *value, u64 *enabled, u64 *running)    in get_map_perf_counter() argument
     512  return perf_event_read_local(ee->event, value, enabled, running);    in get_map_perf_counter()
     546  &buf->running);    in BPF_CALL_4()
    1319  &buf->running);    in BPF_CALL_3()
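
On the BPF side these hits are the plumbing that hands enabled/running back to programs in struct bpf_perf_event_value, via perf_event_read_local(). A minimal BPF program sketch consuming that struct through bpf_perf_event_read_value() (the kprobe target, map name, and build setup are illustrative assumptions, not taken from the listing):

// SPDX-License-Identifier: GPL-2.0
/* Sketch only: assumes a libbpf-style build.  The attach point and map
 * name are arbitrary choices for illustration. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
	/* max_entries left 0: libbpf typically sizes a perf event array
	 * to the number of possible CPUs. */
} counters SEC(".maps");

SEC("kprobe/__x64_sys_nanosleep")
int read_counter(void *ctx)
{
	struct bpf_perf_event_value v = {};

	/* Fills v.counter, v.enabled and v.running for this CPU's event. */
	if (bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
				      &v, sizeof(v)))
		return 0;

	bpf_printk("cnt=%llu en=%llu run=%llu", v.counter, v.enabled, v.running);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";

User space would open the hardware counters with perf_event_open() and store their file descriptors in the map before the program fires; the enabled/running pair can then be used for the same multiplexing rescale shown in the /kernel/events/ sketch above.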