Searched refs:runtime (Results 1 – 14 of 14) sorted by relevance
/kernel/trace/rv/

  Kconfig
      18  Enable the kernel runtime verification infrastructure. RV is a
      26  Documentation/trace/rv/runtime-verification.rst
      58  Enables the online runtime verification reactors. A runtime

/kernel/sched/

  rt.c
      96  void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)  in init_rt_bandwidth() argument
      99  rt_b->rt_runtime = runtime;  in init_rt_bandwidth()
     949  u64 runtime;  in do_sched_rt_period_timer() local
     954  runtime = rt_rq->rt_runtime;  in do_sched_rt_period_timer()
     955  rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);  in do_sched_rt_period_timer()
     956  if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {  in do_sched_rt_period_timer()
    1006  u64 runtime = sched_rt_runtime(rt_rq);  in sched_rt_runtime_exceeded() local
    1011  if (runtime >= sched_rt_period(rt_rq))  in sched_rt_runtime_exceeded()
    1015  runtime = sched_rt_runtime(rt_rq);  in sched_rt_runtime_exceeded()
    1016  if (runtime == RUNTIME_INF)  in sched_rt_runtime_exceeded()
    [all …]

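The rt.c hits above are all part of one mechanism: an RT runqueue may consume at most rt_runtime nanoseconds of CPU per rt_period, and the period timer pays back what was consumed and lifts the throttle once the queue is back under budget. Below is a minimal userspace sketch of that accounting; the struct and helper names (rt_budget, rt_runtime_exceeded, rt_period_timer) are illustrative stand-ins for the kernel's rt_rq fields, not the real API.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the per-runqueue bandwidth state in rt.c. */
struct rt_budget {
    uint64_t period;     /* replenishment period, ns  */
    uint64_t runtime;    /* allowed budget per period */
    uint64_t rt_time;    /* CPU time consumed so far  */
    int      throttled;
};

/* Roughly the test sched_rt_runtime_exceeded() performs. */
static int rt_runtime_exceeded(struct rt_budget *b)
{
    if (b->runtime >= b->period)    /* budget covers the whole period */
        return 0;
    if (b->rt_time > b->runtime) {  /* over budget: throttle          */
        b->throttled = 1;
        return 1;
    }
    return 0;
}

/* Roughly the replenishment done by do_sched_rt_period_timer(). */
static void rt_period_timer(struct rt_budget *b, uint64_t overrun)
{
    uint64_t pay = overrun * b->runtime;

    b->rt_time -= (b->rt_time < pay) ? b->rt_time : pay;
    if (b->throttled && b->rt_time < b->runtime)
        b->throttled = 0;
}

int main(void)
{
    struct rt_budget b = { .period = 1000000000ULL,    /* 1 s    */
                           .runtime = 950000000ULL };  /* 950 ms */

    b.rt_time = 960000000ULL;                          /* overran the budget */
    printf("throttled: %d\n", rt_runtime_exceeded(&b));
    rt_period_timer(&b, 1);
    printf("throttled after period timer: %d\n", b.throttled);
    return 0;
}
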
  deadline.c
     413  div64_long((dl_se->runtime * dl_se->dl_period),  in task_non_contending()
     494  void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)  in init_dl_bandwidth() argument
     498  dl_b->dl_runtime = runtime;  in init_dl_bandwidth()
     780  dl_se->runtime = pi_of(dl_se)->dl_runtime;  in replenish_dl_new_period()
     851  if (dl_se->dl_yielded && dl_se->runtime > 0)  in replenish_dl_entity()
     852  dl_se->runtime = 0;  in replenish_dl_entity()
     860  while (dl_se->runtime <= 0) {  in replenish_dl_entity()
     862  dl_se->runtime += pi_of(dl_se)->dl_runtime;  in replenish_dl_entity()
     931  left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);  in dl_entity_overflow()
     969  dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;  in update_dl_revised_wakeup()
    [all …]

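The replenish_dl_entity() hits show the core of SCHED_DEADLINE budget replenishment: when an entity's runtime has been exhausted, the kernel keeps adding dl_runtime and pushing the deadline out by dl_period until the budget is positive again. A hedged sketch of just that loop follows; struct dl_budget is a simplified stand-in for sched_dl_entity, and the real code also deals with yielding, priority inheritance (pi_of()) and deadlines already in the past.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the sched_dl_entity fields used here. */
struct dl_budget {
    int64_t  runtime;     /* remaining budget, ns; may go negative */
    uint64_t deadline;    /* absolute deadline, ns                 */
    uint64_t dl_runtime;  /* budget granted per period, ns         */
    uint64_t dl_period;   /* period length, ns                     */
};

/* Mirrors the loop the hits at lines 860 and 862 come from. */
static void replenish(struct dl_budget *dl)
{
    while (dl->runtime <= 0) {
        dl->deadline += dl->dl_period;
        dl->runtime  += dl->dl_runtime;
    }
}

int main(void)
{
    struct dl_budget dl = {
        .runtime    = -3000000,      /* 3 ms overrun    */
        .deadline   = 100000000,     /* 100 ms absolute */
        .dl_runtime = 10000000,      /* 10 ms budget    */
        .dl_period  = 30000000,      /* every 30 ms     */
    };

    replenish(&dl);
    printf("runtime=%lld deadline=%llu\n",
           (long long)dl.runtime, (unsigned long long)dl.deadline);
    return 0;
}
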
  fair.c
    2441  u64 runtime, delta, now;  in numa_get_avg_runtime() local
    2444  runtime = p->se.sum_exec_runtime;  in numa_get_avg_runtime()
    2447  delta = runtime - p->last_sum_exec_runtime;  in numa_get_avg_runtime()
    2458  p->last_sum_exec_runtime = runtime;  in numa_get_avg_runtime()
    2558  u64 runtime, period;  in task_numa_placement() local
    2575  runtime = numa_get_avg_runtime(p, &period);  in task_numa_placement()
    2611  f_weight = div64_u64(runtime << 16, period + 1);  in task_numa_placement()
    2940  u64 runtime = p->se.sum_exec_runtime;  in task_numa_work() local
    3075  if (unlikely(p->se.sum_exec_runtime != runtime)) {  in task_numa_work()
    3076  u64 diff = p->se.sum_exec_runtime - runtime;  in task_numa_work()
    [all …]

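The fair.c hits belong to NUMA balancing: numa_get_avg_runtime() measures how much sum_exec_runtime grew over the last scan period, and task_numa_placement() converts that into a fixed-point weight via runtime << 16 / (period + 1). A small arithmetic sketch of that weight calculation; the helper name numa_weight and the sample numbers are mine, not the kernel's.

#include <stdint.h>
#include <stdio.h>

/*
 * f_weight = (runtime << 16) / (period + 1): the share of the scan
 * period the task actually ran, as a 16-bit fixed-point fraction.
 * Mirrors the expression at fair.c line 2611 in the hits above.
 */
static uint64_t numa_weight(uint64_t runtime_ns, uint64_t period_ns)
{
    return (runtime_ns << 16) / (period_ns + 1);
}

int main(void)
{
    /* Ran 12 ms out of a 40 ms period -> about 0.3 of full weight. */
    uint64_t w = numa_weight(12000000ULL, 40000000ULL);

    printf("weight = %llu / 65536 = %.2f\n",
           (unsigned long long)w, (double)w / 65536.0);
    return 0;
}
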
  sched.h
     350  u64 runtime;  member
    2404  extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
    2407  extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
    2416  unsigned long to_ratio(u64 period, u64 runtime);

  debug.c
    1049  P(dl.runtime);  in proc_sched_show_task()

  core.c
    4846  unsigned long to_ratio(u64 period, u64 runtime)  in to_ratio() argument
    4848  if (runtime == RUNTIME_INF)  in to_ratio()
    4859  return div64_u64(runtime << BW_SHIFT, period);  in to_ratio()
   10814  static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);

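to_ratio() in core.c is the common helper behind the RT and deadline bandwidth checks: it expresses runtime/period as a fixed-point value with BW_SHIFT fractional bits and treats RUNTIME_INF as a full share. A userspace sketch follows, assuming BW_SHIFT is 20 and RUNTIME_INF is ~0ULL as in the scheduler headers; treat those constants as assumptions of the sketch.

#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT     20
#define RUNTIME_INF  ((uint64_t)~0ULL)

/* Userspace version of the ratio computed by to_ratio() in core.c. */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
    if (runtime == RUNTIME_INF)
        return 1ULL << BW_SHIFT;    /* unlimited: a full CPU share */
    if (period == 0)
        return 0;
    return (runtime << BW_SHIFT) / period;
}

int main(void)
{
    /* 950 us of runtime every 1000 us -> about 0.95 in fixed point. */
    uint64_t r = to_ratio(1000000, 950000);

    printf("%llu / %llu = %.3f\n",
           (unsigned long long)r, 1ULL << BW_SHIFT,
           (double)r / (1 << BW_SHIFT));
    return 0;
}
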
/kernel/

  Kconfig.preempt
      64  and a slight runtime overhead to kernel code.
     111  The runtime overhead is negligible with HAVE_STATIC_CALL_INLINE enabled
     112  but if runtime patching is not available for the specific architecture

/kernel/trace/

  trace_osnoise.c
     288  u64 runtime; /* runtime */  member
     481  entry->runtime = sample->runtime;  in __trace_osnoise_sample()
    1298  u64 runtime, stop_in;  in run_osnoise() local
    1328  runtime = osnoise_data.sample_runtime * NSEC_PER_USEC;  in run_osnoise()
    1419  } while (total < runtime && !kthread_should_stop());  in run_osnoise()
    1437  s.runtime = time_to_us(total);  in run_osnoise()

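The run_osnoise() hits describe the sampling window: sample_runtime (microseconds) is converted to nanoseconds, the measurement loop keeps reading timestamps until that much time has elapsed, and the total is then reported back in microseconds. A rough userspace analogue of that window loop, using clock_gettime() in place of the tracer's time sources; the noise accounting itself is omitted.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_USEC 1000ULL

static uint64_t now_ns(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

int main(void)
{
    uint64_t sample_runtime_us = 1000;   /* 1 ms window */
    uint64_t runtime = sample_runtime_us * NSEC_PER_USEC;
    uint64_t start = now_ns(), total = 0;

    /* Keep sampling until the requested window has elapsed. */
    do {
        total = now_ns() - start;
    } while (total < runtime);

    printf("sampled for %llu us\n", (unsigned long long)(total / 1000));
    return 0;
}
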
  trace_output.c
    1254  net_runtime = field->runtime - field->noise;  in trace_osnoise_print()
    1256  do_div(ratio, field->runtime);  in trace_osnoise_print()
    1260  field->runtime,  in trace_osnoise_print()
    1286  field->runtime,  in trace_osnoise_raw()

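The trace_output.c hits are where an osnoise sample is printed: the noise observed during the window is subtracted from the window's runtime, and the remainder is reported as the percentage of CPU still available. A small illustration of that arithmetic with made-up numbers; the tracer itself does the scaling with integer math and do_div() rather than floating point.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t runtime = 1000000;   /* sampling window, us */
    uint64_t noise   = 1237;      /* noise observed, us  */
    uint64_t net_runtime = runtime - noise;

    printf("available CPU: %.5f%%\n",
           100.0 * (double)net_runtime / (double)runtime);
    return 0;
}
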
  trace_entries.h
     370  __field( u64, runtime )

  Kconfig
     201  tracing is enabled by the administrator. If it's runtime disabled
     332  disabled by default and can be runtime (re-)started
     356  disabled by default and can be runtime (re-)started
     920  size at runtime.

/kernel/livepatch/

  Kconfig
      17  This option has no runtime impact until a kernel "patch"

/kernel/module/

  Kconfig
     282  at runtime via the sysctl file