
Searched for refs:runtime (results 1 – 8 of 8), sorted by relevance

/kernel/sched/
deadline.c
71 void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime) in init_dl_bandwidth() argument
75 dl_b->dl_runtime = runtime; in init_dl_bandwidth()
358 dl_se->runtime = pi_se->dl_runtime; in setup_new_dl_entity()
394 dl_se->runtime = pi_se->dl_runtime; in replenish_dl_entity()
403 while (dl_se->runtime <= 0) { in replenish_dl_entity()
405 dl_se->runtime += pi_se->dl_runtime; in replenish_dl_entity()
420 dl_se->runtime = pi_se->dl_runtime; in replenish_dl_entity()
476 left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE); in dl_entity_overflow()
513 dl_se->runtime = pi_se->dl_runtime; in update_dl_entity()
708 return (dl_se->runtime <= 0); in dl_runtime_exceeded()
[all …]
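
The replenish_dl_entity() hits show the heart of SCHED_DEADLINE's budget accounting: whenever the remaining runtime is exhausted (dl_runtime_exceeded() simply checks runtime <= 0), the deadline is pushed forward one period at a time and the budget is topped up by dl_runtime until it is positive again. A minimal standalone sketch of that loop, using simplified stand-in types rather than the kernel's real sched_dl_entity:

#include <stdint.h>

/* Hypothetical reduced entity; the real fields live in struct sched_dl_entity. */
struct dl_entity {
	int64_t  runtime;     /* remaining budget; may go negative */
	uint64_t deadline;    /* absolute scheduling deadline */
	uint64_t dl_runtime;  /* budget granted per period */
	uint64_t dl_period;   /* replenishment period */
};

/* Mirrors the loop visible in replenish_dl_entity(): push the deadline
 * and refill the budget until the entity can run again. */
static void replenish(struct dl_entity *dl_se)
{
	while (dl_se->runtime <= 0) {
		dl_se->deadline += dl_se->dl_period;
		dl_se->runtime  += dl_se->dl_runtime;
	}
}

/* Mirrors dl_runtime_exceeded(): the budget is spent once non-positive. */
static int runtime_exceeded(const struct dl_entity *dl_se)
{
	return dl_se->runtime <= 0;
}
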
rt.c
39 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) in init_rt_bandwidth() argument
42 rt_b->rt_runtime = runtime; in init_rt_bandwidth()
824 u64 runtime; in do_sched_rt_period_timer() local
829 runtime = rt_rq->rt_runtime; in do_sched_rt_period_timer()
830 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime); in do_sched_rt_period_timer()
831 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) { in do_sched_rt_period_timer()
923 u64 runtime = sched_rt_runtime(rt_rq); in sched_rt_runtime_exceeded() local
928 if (runtime >= sched_rt_period(rt_rq)) in sched_rt_runtime_exceeded()
932 runtime = sched_rt_runtime(rt_rq); in sched_rt_runtime_exceeded()
933 if (runtime == RUNTIME_INF) in sched_rt_runtime_exceeded()
[all …]
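
The rt.c hits outline RT throttling: sched_rt_runtime_exceeded() compares consumed rt_time against the per-period rt_runtime budget (RUNTIME_INF disables the check), and do_sched_rt_period_timer() pays back up to overrun*runtime each period, clearing the throttle once consumption falls below the budget. A simplified sketch of both paths, with a hypothetical reduced rt_rq:

#include <stdint.h>
#include <stdbool.h>

#define RUNTIME_INF ((uint64_t)~0ULL)  /* matches the kernel's sentinel */

/* Hypothetical reduced rt_rq: only the fields the hits above touch. */
struct rt_rq_sketch {
	uint64_t rt_time;     /* RT time consumed this period */
	uint64_t rt_runtime;  /* budget allowed per period */
	bool     rt_throttled;
};

/* Sketch of the check in sched_rt_runtime_exceeded(): an unlimited
 * budget never throttles; otherwise throttle once consumption
 * exceeds the per-period budget. */
static bool rt_runtime_exceeded(struct rt_rq_sketch *rt_rq)
{
	if (rt_rq->rt_runtime == RUNTIME_INF)
		return false;
	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		rt_rq->rt_throttled = true;
		return true;
	}
	return false;
}

/* Sketch of the period-timer path in do_sched_rt_period_timer():
 * pay back 'overrun' periods worth of budget and unthrottle once
 * consumption drops below the budget again. */
static void rt_period_tick(struct rt_rq_sketch *rt_rq, int overrun)
{
	uint64_t runtime = rt_rq->rt_runtime;
	uint64_t pay = overrun * runtime;

	rt_rq->rt_time -= (rt_rq->rt_time < pay) ? rt_rq->rt_time : pay;
	if (rt_rq->rt_throttled && rt_rq->rt_time < runtime)
		rt_rq->rt_throttled = false;
}
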
walt.c
560 u32 runtime, int samples, int event) in update_history() argument
568 if (!runtime || is_idle_task(p) || exiting_task(p) || !samples) in update_history()
582 hist[widx] = runtime; in update_history()
591 demand = runtime; in update_history()
599 demand = max(avg, runtime); in update_history()
614 trace_walt_update_history(rq, p, runtime, samples, event); in update_history()
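
The update_history() hits sketch how WALT turns per-window runtime samples into a demand estimate: the newest runtime is recorded in a history array, and demand is either the most recent sample or max(average, recent), depending on the configured window policy. A simplified single-sample, two-policy sketch (the real code records 'samples' entries via a ring index, skips idle and exiting tasks, and supports more policies):

#include <stdint.h>

#define HIST_SIZE 5  /* assumption: a short fixed-size history window */

/* Hypothetical reduction of update_history(): shift the history,
 * record the newest runtime, then pick demand per policy. */
static uint32_t walt_demand(uint32_t hist[HIST_SIZE], uint32_t runtime,
			    int use_max_recent_avg)
{
	uint64_t sum = 0;
	uint32_t avg;
	int i;

	/* shift history and record the newest sample */
	for (i = HIST_SIZE - 1; i > 0; i--)
		hist[i] = hist[i - 1];
	hist[0] = runtime;

	for (i = 0; i < HIST_SIZE; i++)
		sum += hist[i];
	avg = sum / HIST_SIZE;

	/* demand = runtime, or max(avg, runtime), as in the hits above */
	return use_max_recent_avg ? (avg > runtime ? avg : runtime) : runtime;
}
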
fair.c
1586 u64 runtime, delta, now; in numa_get_avg_runtime() local
1589 runtime = p->se.sum_exec_runtime; in numa_get_avg_runtime()
1592 delta = runtime - p->last_sum_exec_runtime; in numa_get_avg_runtime()
1599 p->last_sum_exec_runtime = runtime; in numa_get_avg_runtime()
1611 u64 runtime, period; in task_numa_placement() local
1622 runtime = numa_get_avg_runtime(p, &period); in task_numa_placement()
1652 f_weight = div64_u64(runtime << 16, period + 1); in task_numa_placement()
3239 cfs_b->runtime = cfs_b->quota; in __refill_cfs_bandwidth_runtime()
3282 if (cfs_b->runtime > 0) { in assign_cfs_rq_runtime()
3283 amount = min(cfs_b->runtime, min_amount); in assign_cfs_rq_runtime()
[all …]
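
Two mechanisms appear in the fair.c hits. task_numa_placement() weights NUMA faults by recent CPU use as a 16-bit fixed-point fraction runtime/(period + 1), with the +1 guarding against a zero period; the CFS bandwidth code refills a per-group pool to the quota each period, from which runqueues draw slices. A rough sketch of both, with plain division standing in for the kernel's div64_u64():

#include <stdint.h>

/* Sketch of the 16.16 fixed-point fault weight from task_numa_placement(). */
static uint64_t numa_faults_weight(uint64_t runtime, uint64_t period)
{
	return (runtime << 16) / (period + 1);
}

/* Sketch of the CFS bandwidth flow in the hits above: a period refill
 * resets the pool to the quota (__refill_cfs_bandwidth_runtime), and a
 * runqueue draws at most min(pool, requested slice) from it
 * (assign_cfs_rq_runtime). Draining the pool by the grant is an
 * assumption consistent with the elided remainder of that function. */
static uint64_t refill_and_assign(uint64_t *pool, uint64_t quota,
				  uint64_t min_amount)
{
	uint64_t amount = 0;

	*pool = quota;  /* per-period refill */
	if (*pool > 0) {
		amount = (*pool < min_amount) ? *pool : min_amount;
		*pool -= amount;  /* assumed: the grant drains the pool */
	}
	return amount;
}
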
core.c
2260 unsigned long to_ratio(u64 period, u64 runtime) in to_ratio() argument
2262 if (runtime == RUNTIME_INF) in to_ratio()
2273 return div64_u64(runtime << 20, period); in to_ratio()
2344 u64 runtime = attr->sched_runtime; in dl_overflow() local
2345 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0; in dl_overflow()
7928 u64 period, runtime; in tg_rt_schedulable() local
7931 runtime = tg->rt_bandwidth.rt_runtime; in tg_rt_schedulable()
7935 runtime = d->rt_runtime; in tg_rt_schedulable()
7941 if (runtime > period && runtime != RUNTIME_INF) in tg_rt_schedulable()
7947 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg)) in tg_rt_schedulable()
[all …]
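
to_ratio() converts a (period, runtime) pair into a 20-bit fixed-point utilization, so 1 << 20 represents 100% of a CPU; dl_overflow() then uses it to compute the bandwidth a deadline task would consume. The RUNTIME_INF branch body is elided in the hits above; the sketch below treats an unlimited budget as full utilization, an assumption consistent with the sentinel's meaning:

#include <stdint.h>

#define RUNTIME_INF ((uint64_t)~0ULL)  /* matches the kernel's sentinel */

/* Sketch of to_ratio(): runtime/period as a 20-bit fixed-point fraction. */
static uint64_t to_ratio_sketch(uint64_t period, uint64_t runtime)
{
	if (runtime == RUNTIME_INF)
		return 1ULL << 20;  /* assumption: "no limit" reads as 100% */
	if (period == 0)            /* assumed guard against dividing by zero */
		return 0;
	return (runtime << 20) / period;
}

In the dl_overflow() hit, new_bw would then be to_ratio_sketch(period, runtime) for a deadline policy and 0 otherwise, ready to compare against the available bandwidth.
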
sched.h
200 u64 quota, runtime; member
1320 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
1323 extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
1326 unsigned long to_ratio(u64 period, u64 runtime);
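
The sched.h hits tie the classes together: each throttling scheme pairs a period with a runtime budget, and CFS additionally distinguishes the configured quota from the remaining pool. A reduced sketch of that bookkeeping (field names follow the hits; the real structs also carry locks, timers, and hierarchy state):

#include <stdint.h>

/* Hypothetical reductions of the bandwidth bookkeeping in sched.h. */
struct rt_bandwidth_sketch {
	uint64_t rt_period;
	uint64_t rt_runtime;  /* RT budget allowed per period */
};

struct cfs_bandwidth_sketch {
	uint64_t period;
	uint64_t quota;    /* budget granted per period */
	uint64_t runtime;  /* remaining pool, refilled each period */
};
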
/kernel/
Kconfig.preempt
49 and a slight runtime overhead to kernel code.
/kernel/trace/
Kconfig
149 tracing is enabled by the administrator. If it's runtime disabled
184 disabled by default and can be runtime (re-)started
208 disabled by default and can be runtime (re-)started