/kernel/irq/
  timings.c
      319  int period; in irq_timings_next_event_index() local
      337  for (period = period_max; period >= PREDICTION_PERIOD_MIN; period--) { in irq_timings_next_event_index()
      345  int idx = period; in irq_timings_next_event_index()
      346  size_t size = period; in irq_timings_next_event_index()
      367  return buffer[len % period]; in irq_timings_next_event_index()
      374  if (len - idx < period) in irq_timings_next_event_index()
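The matches above are the heart of the IRQ timings predictor: it slides a candidate period from period_max down to PREDICTION_PERIOD_MIN, tests whether the recorded inter-IRQ intervals repeat with that stride, and extrapolates the next interval as buffer[len % period]. Below is a minimal standalone sketch of that idea, exact-repeat matching only; the kernel version compares whole windows and tolerates noise, and the helper name and sample values here are made up.

    #include <stddef.h>
    #include <stdio.h>

    #define PREDICTION_PERIOD_MIN 3   /* assumed floor for a candidate period */

    /* Hypothetical helper: find a stride the interval buffer repeats with
     * and return the interval expected next; -1 if nothing repeats. */
    static int predict_next_interval(const int *buffer, size_t len, int period_max)
    {
        if ((size_t)period_max >= len)
            return -1;   /* not enough history to validate any candidate */

        for (int period = period_max; period >= PREDICTION_PERIOD_MIN; period--) {
            size_t idx;

            /* Require every sample to match the one a full period later. */
            for (idx = 0; idx + period < len; idx++) {
                if (buffer[idx] != buffer[idx + period])
                    break;
            }
            if (idx + period >= len)
                return buffer[len % period];   /* extrapolate the next slot */
        }
        return -1;
    }

    int main(void)
    {
        int intervals[] = { 10, 20, 30, 10, 20, 30, 10, 20 };

        /* Repeats with period 3; the next interval should be 30. */
        printf("next: %d\n", predict_next_interval(intervals, 8, 4));
        return 0;
    }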

/kernel/sched/
  psi.c
      287  u64 time, u64 period) in calc_avgs() argument
      299  pct = div_u64(time * 100, period); in calc_avgs()
      364  u64 expires, period; in update_averages() local
      381  period = now - (group->avg_last_update + (missed_periods * psi_period)); in update_averages()
      405  if (sample > period) in update_averages()
      406  sample = period; in update_averages()
      408  calc_avgs(group->avg[s], missed_periods, sample, period); in update_averages()
      1331 u64 period = ULLONG_MAX; in psi_trigger_destroy() local
      1339 period = min(period, div_u64(tmp->win.size, in psi_trigger_destroy()
      1341 group->poll_min_period = period; in psi_trigger_destroy()
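calc_avgs() and update_averages() turn stall time accumulated over one averaging period into a percentage, clamping the sample first so a late update can never report more than 100%. A minimal sketch of just that computation follows; it uses plain integer percent, whereas the kernel keeps fixed-point averages and feeds them into an exponential decay (omitted here), and the helper name and values are illustrative.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical helper mirroring the clamp-then-scale step above. */
    static uint64_t stall_pct(uint64_t stall_ns, uint64_t period_ns)
    {
        /* "if (sample > period) sample = period;" -- never report > 100% */
        if (stall_ns > period_ns)
            stall_ns = period_ns;

        /* "pct = div_u64(time * 100, period);" */
        return stall_ns * 100 / period_ns;
    }

    int main(void)
    {
        uint64_t period = 2000000000ULL;   /* 2 s averaging period, in ns */

        printf("%llu%%\n", (unsigned long long)stall_pct(500000000ULL, period));  /* 25% */
        printf("%llu%%\n", (unsigned long long)stall_pct(3000000000ULL, period)); /* clamped: 100% */
        return 0;
    }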
  deadline.c
      494  void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime) in init_dl_bandwidth() argument
      497  dl_b->dl_period = period; in init_dl_bandwidth()
      2753 u64 period = global_rt_period(); in sched_dl_global_validate() local
      2754 u64 new_bw = to_ratio(period, runtime); in sched_dl_global_validate()
      2843 u64 period = attr->sched_period ?: attr->sched_deadline; in sched_dl_overflow() local
      2845 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0; in sched_dl_overflow()
      2942 u64 period, max, min; in __checkparam_dl() local
      2967 period = attr->sched_period; in __checkparam_dl()
      2968 if (!period) in __checkparam_dl()
      2969 period = attr->sched_deadline; in __checkparam_dl()
      [all …]
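The sched_dl_overflow() and __checkparam_dl() hits show the implicit-deadline convention: attr->sched_period ?: attr->sched_deadline makes an unset period default to the deadline. Here is a hedged sketch of the resulting parameter check, assuming the runtime <= deadline <= period ordering that SCHED_DEADLINE enforces; the real __checkparam_dl() also bounds the values against minimum and maximum granularity, and the struct and helper names below are invented.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Invented reduced form of sched_attr: just the deadline triple. */
    struct dl_params {
        uint64_t sched_runtime;
        uint64_t sched_deadline;
        uint64_t sched_period;    /* 0 means "use the deadline" */
    };

    static bool checkparam_dl(const struct dl_params *attr)
    {
        uint64_t period = attr->sched_period;

        /* "period = attr->sched_period ?: attr->sched_deadline;" */
        if (!period)
            period = attr->sched_deadline;

        /* Ordering constraint: runtime <= deadline <= period. */
        return attr->sched_runtime <= attr->sched_deadline &&
               attr->sched_deadline <= period;
    }

    int main(void)
    {
        struct dl_params ok  = { 30000000, 100000000, 0 };          /* 30ms in 100ms */
        struct dl_params bad = { 30000000, 100000000, 50000000 };   /* deadline > period */

        printf("ok=%d bad=%d\n", checkparam_dl(&ok), checkparam_dl(&bad));
        return 0;
    }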
  core.c
      4846  unsigned long to_ratio(u64 period, u64 runtime) in to_ratio() argument
      4856  if (period == 0) in to_ratio()
      4859  return div64_u64(runtime << BW_SHIFT, period); in to_ratio()
      10814 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
      10816 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, in tg_set_cfs_bandwidth() argument
      10830 if (quota < min_cfs_quota_period || period < min_cfs_quota_period) in tg_set_cfs_bandwidth()
      10838 if (period > max_cfs_quota_period) in tg_set_cfs_bandwidth()
      10857 ret = __cfs_schedulable(tg, period, quota); in tg_set_cfs_bandwidth()
      10870 cfs_b->period = ns_to_ktime(period); in tg_set_cfs_bandwidth()
      10906 u64 quota, period, burst; in tg_set_cfs_quota() local
      [all …]
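to_ratio() converts a runtime/period pair into a fixed-point utilization with BW_SHIFT fractional bits, so bandwidth checks can sum and compare ratios in pure integer arithmetic; a zero period is short-circuited to zero bandwidth. A standalone rendition, assuming BW_SHIFT is 20 as defined in kernel/sched/sched.h:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BW_SHIFT 20                   /* assumed, per kernel/sched/sched.h */
    #define BW_UNIT  (1ULL << BW_SHIFT)   /* fixed-point 1.0 == one full CPU */

    static uint64_t to_ratio(uint64_t period, uint64_t runtime)
    {
        /* Zero (infinite) period contributes zero bandwidth. */
        if (period == 0)
            return 0;

        /* "return div64_u64(runtime << BW_SHIFT, period);" */
        return (runtime << BW_SHIFT) / period;
    }

    int main(void)
    {
        /* 25 ms of runtime every 100 ms -> 0.25 of a CPU. */
        uint64_t bw = to_ratio(100000000ULL, 25000000ULL);

        printf("ratio = %" PRIu64 " (%.2f CPUs)\n", bw, (double)bw / BW_UNIT);
        return 0;
    }

The rt.c entry below shows the typical consumer: tg_rt_schedulable() sums these ratios over a group's children ("sum += to_ratio(period, runtime)") and rejects configurations whose total exceeds the parent's allotment.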
  rt.c
      96   void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) in init_rt_bandwidth() argument
      98   rt_b->rt_period = ns_to_ktime(period); in init_rt_bandwidth()
      2870 u64 period, runtime; in tg_rt_schedulable() local
      2872 period = ktime_to_ns(tg->rt_bandwidth.rt_period); in tg_rt_schedulable()
      2876 period = d->rt_period; in tg_rt_schedulable()
      2883 if (runtime > period && runtime != RUNTIME_INF) in tg_rt_schedulable()
      2893 total = to_ratio(period, runtime); in tg_rt_schedulable()
      2905 period = ktime_to_ns(child->rt_bandwidth.rt_period); in tg_rt_schedulable()
      2909 period = d->rt_period; in tg_rt_schedulable()
      2913 sum += to_ratio(period, runtime); in tg_rt_schedulable()
      [all …]
  fair.c
      1190 unsigned long period = smin; in task_scan_start() local
      1200 period *= refcount_read(&ng->refcount); in task_scan_start()
      1201 period *= shared + 1; in task_scan_start()
      1202 period /= private + shared + 1; in task_scan_start()
      1206 return max(smin, period); in task_scan_start()
      1223 unsigned long period = smax; in task_scan_max() local
      1225 period *= refcount_read(&ng->refcount); in task_scan_max()
      1226 period *= shared + 1; in task_scan_max()
      1227 period /= private + shared + 1; in task_scan_max()
      1229 smax = max(smax, period); in task_scan_max()
      [all …]
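task_scan_start() and task_scan_max() stretch the NUMA balancing scan period for tasks in a numa_group: the base period is multiplied by the group's refcount (each member scans its share) and by the proportion of shared faults, so heavily shared workloads scan less aggressively per task. A sketch of just that arithmetic; the helper name and example values are illustrative, not kernel defaults.

    #include <stdio.h>

    /* Hypothetical helper with the scaling steps quoted above. */
    static unsigned long scale_scan_period(unsigned long smin,
                                           unsigned long refcount,
                                           unsigned long shared,
                                           unsigned long priv)
    {
        unsigned long period = smin;

        period *= refcount;             /* "period *= refcount_read(&ng->refcount);" */
        period *= shared + 1;           /* "period *= shared + 1;" */
        period /= priv + shared + 1;    /* "period /= private + shared + 1;" */

        return period > smin ? period : smin;   /* "return max(smin, period);" */
    }

    int main(void)
    {
        /* 4 tasks in the group, faults mostly shared: scan ~3x less often. */
        printf("%lu ms\n", scale_scan_period(1000, 4, 30, 10));   /* 3024 ms */
        return 0;
    }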
  sched.h
      348  ktime_t period; member
      2404 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
      2407 extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
      2416 unsigned long to_ratio(u64 period, u64 runtime);

/kernel/rcu/
  Kconfig.debug
      80   If a given RCU grace period extends more than the specified
      82   RCU grace period persists, additional CPU stall warnings are
      91   If a given expedited RCU grace period extends more than the
      93   If the RCU grace period persists, additional CPU stall warnings
  Kconfig
      181  number of cache misses incurred during RCU's grace-period
      205  block the current preemptible RCU grace period for too long.
      213  int "Milliseconds to delay boosting after RCU grace-period start"
      219  a given grace period before priority-boosting preempted RCU
      220  readers blocking that grace period. Note that any RCU reader
      221  blocking an expedited RCU grace period is boosted immediately.

/kernel/
  watchdog_hld.c
      50   void watchdog_update_hrtimer_threshold(u64 period) in watchdog_update_hrtimer_threshold() argument
      73   watchdog_hrtimer_sample_threshold = period * 2; in watchdog_update_hrtimer_threshold()

/kernel/time/
  tick-sched.c
      160  ktime_t period; in tick_init_jiffy_update() local
      178  period = last_jiffies_update; in tick_init_jiffy_update()
      181  return period; in tick_init_jiffy_update()

/kernel/events/
  core.c
      1857 size += sizeof(data->period); in __perf_event_header_size()
      4067 s64 period, sample_period; in perf_adjust_period() local
      4070 period = perf_calculate_period(event, nsec, count); in perf_adjust_period()
      4072 delta = (s64)(period - hwc->sample_period); in perf_adjust_period()
      4103 u64 now, period = TICK_NSEC; in perf_adjust_freq_unthr_context() local
      4154 perf_adjust_period(event, period, delta, false); in perf_adjust_freq_unthr_context()
      7137 perf_output_put(handle, data->period); in perf_output_sample()
      9451 u64 period = hwc->last_period; in perf_swevent_set_period() local
      9462 nr = div64_u64(period + val, period); in perf_swevent_set_period()
      9463 offset = nr * period; in perf_swevent_set_period()
      [all …]
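perf_swevent_set_period() is the overflow bookkeeping for software events: once the countdown in hwc->period_left has been consumed, nr = div64_u64(period + val, period) counts how many sample periods elapsed and offset = nr * period advances the boundary, leaving a negative remainder to count down against next time. A single-threaded sketch of that arithmetic under those assumptions; the kernel wraps it in a local64 cmpxchg retry loop, dropped here, and the helper name is invented.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Single-threaded take on the arithmetic in perf_swevent_set_period(). */
    static uint64_t swevent_set_period(int64_t *period_left, uint64_t period)
    {
        int64_t val = *period_left;

        if (val < 0)                 /* still counting down: no sample due */
            return 0;

        /* "nr = div64_u64(period + val, period);" and "offset = nr * period;" */
        uint64_t nr = (period + (uint64_t)val) / period;
        uint64_t offset = nr * period;

        *period_left = val - (int64_t)offset;   /* goes negative again */
        return nr;                              /* samples to emit */
    }

    int main(void)
    {
        int64_t left = 250;          /* overshot the boundary by 250 events */
        uint64_t nr = swevent_set_period(&left, 100);

        printf("samples=%" PRIu64 " period_left=%" PRId64 "\n", nr, left);
        return 0;                    /* prints: samples=3 period_left=-50 */
    }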

/kernel/power/
  Kconfig
      188  (low power) states, for example after a specified period of inactivity

/kernel/trace/
  bpf_trace.c
      2089 bpf_target_off(struct perf_sample_data, period, 8, in pe_prog_convert_ctx_access()
  Kconfig
      434  interference. At the end of the period, the osnoise tracer prints