Searched refs:period (Results 1 – 14 of 14) sorted by relevance

/kernel/irq/
timings.c 319 int period; in irq_timings_next_event_index() local
337 for (period = period_max; period >= PREDICTION_PERIOD_MIN; period--) { in irq_timings_next_event_index()
345 int idx = period; in irq_timings_next_event_index()
346 size_t size = period; in irq_timings_next_event_index()
367 return buffer[len % period]; in irq_timings_next_event_index()
374 if (len - idx < period) in irq_timings_next_event_index()
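
The loop above scans candidate period lengths from largest to smallest and, once the recent samples repeat with that period, predicts the next event from buffer[len % period]. A minimal userspace sketch of that idea (names, bounds, and the exact-match test are illustrative; the kernel's version is more involved):

#include <stddef.h>
#include <string.h>

#define PERIOD_MIN 2	/* illustrative stand-in for PREDICTION_PERIOD_MIN */

/* Find a repeating period in the sample buffer and predict the next value. */
static int next_event_index(const int *buffer, size_t len, size_t period_max)
{
	size_t period;

	for (period = period_max; period >= PERIOD_MIN; period--) {
		/* Need two full windows of this length to compare. */
		if (len < 2 * period)
			continue;

		/* Does the last window repeat the one before it? */
		if (!memcmp(&buffer[len - period],
			    &buffer[len - 2 * period],
			    period * sizeof(*buffer)))
			return buffer[len % period];
	}

	return -1;	/* sketch only: no repeating pattern found */
}
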
/kernel/sched/
psi.c 283 u64 time, u64 period) in calc_avgs() argument
295 pct = div_u64(time * 100, period); in calc_avgs()
360 u64 expires, period; in update_averages() local
377 period = now - (group->avg_last_update + (missed_periods * psi_period)); in update_averages()
401 if (sample > period) in update_averages()
402 sample = period; in update_averages()
404 calc_avgs(group->avg[s], missed_periods, sample, period); in update_averages()
1101 u64 period = ULLONG_MAX; in psi_trigger_destroy() local
1109 period = min(period, div_u64(tmp->win.size, in psi_trigger_destroy()
1111 group->poll_min_period = period; in psi_trigger_destroy()
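
calc_avgs() turns the stalled time collected in one averaging window into a percentage of that window; update_averages() first clamps each sample to the period it was gathered in (lines 401-402 above). A minimal sketch of that step, omitting the fixed-point moving-average machinery:

#include <stdint.h>

/* Percentage of one averaging window spent stalled. */
static uint64_t stall_pct(uint64_t time, uint64_t period)
{
	if (time > period)	/* a sample can never exceed its window */
		time = period;
	return time * 100 / period;
}
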
core.c 2921 unsigned long to_ratio(u64 period, u64 runtime) in to_ratio() argument
2931 if (period == 0) in to_ratio()
2934 return div64_u64(runtime << BW_SHIFT, period); in to_ratio()
7388 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
7390 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) in tg_set_cfs_bandwidth() argument
7403 if (quota < min_cfs_quota_period || period < min_cfs_quota_period) in tg_set_cfs_bandwidth()
7411 if (period > max_cfs_quota_period) in tg_set_cfs_bandwidth()
7420 ret = __cfs_schedulable(tg, period, quota); in tg_set_cfs_bandwidth()
7433 cfs_b->period = ns_to_ktime(period); in tg_set_cfs_bandwidth()
7468 u64 quota, period; in tg_set_cfs_quota() local
[all …]
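
to_ratio() expresses runtime/period as a fixed-point fraction that the bandwidth checks can add and compare. A userspace sketch, assuming the kernel's BW_SHIFT of 20 and RUNTIME_INF spelled as ~0ULL (both taken from sched.h, not shown in this listing):

#include <stdint.h>

#define BW_SHIFT	20		/* fixed-point scale, as in sched.h */
#define RUNTIME_INF	(~0ULL)

uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	if (runtime == RUNTIME_INF)	/* unconstrained: one full unit */
		return 1ULL << BW_SHIFT;
	if (period == 0)		/* avoid dividing by zero */
		return 0;
	return (runtime << BW_SHIFT) / period;
}

With period = 100ms and runtime = 25ms this yields (1 << 20) / 4, i.e. a quarter of one CPU's bandwidth.
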
rt.c 41 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) in init_rt_bandwidth() argument
43 rt_b->rt_period = ns_to_ktime(period); in init_rt_bandwidth()
2485 u64 period, runtime; in tg_rt_schedulable() local
2487 period = ktime_to_ns(tg->rt_bandwidth.rt_period); in tg_rt_schedulable()
2491 period = d->rt_period; in tg_rt_schedulable()
2498 if (runtime > period && runtime != RUNTIME_INF) in tg_rt_schedulable()
2507 total = to_ratio(period, runtime); in tg_rt_schedulable()
2519 period = ktime_to_ns(child->rt_bandwidth.rt_period); in tg_rt_schedulable()
2523 period = d->rt_period; in tg_rt_schedulable()
2527 sum += to_ratio(period, runtime); in tg_rt_schedulable()
[all …]
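
tg_rt_schedulable() is the admission test behind these lines: a group's runtime may not exceed its period, and the bandwidth claimed by its children, summed with to_ratio(), must fit inside the group's own allowance. A flattened sketch (the real code walks the task-group hierarchy):

#include <stdint.h>

uint64_t to_ratio(uint64_t period, uint64_t runtime);	/* sketch above */

/* Return 1 if the children's combined bandwidth fits the parent's. */
static int rt_schedulable(uint64_t period, uint64_t runtime,
			  const uint64_t child_period[],
			  const uint64_t child_runtime[], int nr_children)
{
	uint64_t total = 0;
	int i;

	/* runtime > period can never be met (unless unconstrained). */
	if (runtime > period && runtime != ~0ULL)
		return 0;

	for (i = 0; i < nr_children; i++)
		total += to_ratio(child_period[i], child_runtime[i]);

	return total <= to_ratio(period, runtime);
}
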
fair.c 1145 unsigned long period = smin; in task_scan_start() local
1155 period *= refcount_read(&ng->refcount); in task_scan_start()
1156 period *= shared + 1; in task_scan_start()
1157 period /= private + shared + 1; in task_scan_start()
1161 return max(smin, period); in task_scan_start()
1178 unsigned long period = smax; in task_scan_max() local
1180 period *= refcount_read(&ng->refcount); in task_scan_max()
1181 period *= shared + 1; in task_scan_max()
1182 period /= private + shared + 1; in task_scan_max()
1184 smax = max(smax, period); in task_scan_max()
[all …]
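
task_scan_start() and task_scan_max() scale the NUMA-balancing scan period by the size of the task's numa_group and by its shared/private fault mix, so each task in a large, sharing-heavy group scans less often. A sketch of the scaling (parameter names are illustrative):

/* Scale a base scan period by group size and fault sharing. */
static unsigned long scan_period(unsigned long smin, unsigned long refs,
				 unsigned long shared, unsigned long private)
{
	unsigned long period = smin;

	period *= refs;			/* the scan is shared by all tasks */
	period *= shared + 1;		/* more shared faults: scan less */
	period /= private + shared + 1;

	return period > smin ? period : smin;	/* never below the floor */
}
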
deadline.c 337 void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime) in init_dl_bandwidth() argument
340 dl_b->dl_period = period; in init_dl_bandwidth()
2469 u64 period = global_rt_period(); in sched_dl_global_validate() local
2470 u64 new_bw = to_ratio(period, runtime); in sched_dl_global_validate()
2556 u64 period = attr->sched_period ?: attr->sched_deadline; in sched_dl_overflow() local
2558 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0; in sched_dl_overflow()
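
sched_dl_overflow() applies the implicit-deadline convention visible on line 2556: a zero sched_period falls back to sched_deadline, and the requested bandwidth is runtime/period in the same fixed point as to_ratio(). Sketch:

#include <stdint.h>

uint64_t to_ratio(uint64_t period, uint64_t runtime);	/* sketch above */

/* Bandwidth a SCHED_DEADLINE request asks for. */
static uint64_t dl_requested_bw(uint64_t sched_period, uint64_t sched_deadline,
				uint64_t sched_runtime)
{
	uint64_t period = sched_period ? sched_period : sched_deadline;

	return to_ratio(period, sched_runtime);
}
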
sched.h 334 ktime_t period; member
1885 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
1888 extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
1896 unsigned long to_ratio(u64 period, u64 runtime);
/kernel/rcu/
Kconfig 126 number of cache misses incurred during RCU's grace-period
158 don't care about increased grace-period durations.
168 block the current preemptible RCU grace period for too long.
176 int "Milliseconds to delay boosting after RCU grace-period start"
182 a given grace period before priority-boosting preempted RCU
183 readers blocking that grace period. Note that any RCU reader
184 blocking an expedited RCU grace period is boosted immediately.
Kconfig.debug 66 If a given RCU grace period extends more than the specified
68 RCU grace period persists, additional CPU stall warnings are
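
For context on what these options tune: a grace period must outlast every read-side critical section that began before it, so a long-running or preempted reader is what "blocks" a grace period here. A kernel-context illustration (a fragment, not a standalone program):

#include <linux/rcupdate.h>

static void reader(void)
{
	rcu_read_lock();
	/*
	 * Any grace period started before this point, e.g. by a concurrent
	 * synchronize_rcu(), cannot complete until rcu_read_unlock(). With
	 * preemptible RCU and RCU_BOOST, a reader preempted here gets
	 * priority-boosted after the configured delay.
	 */
	rcu_read_unlock();
}
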
/kernel/
watchdog_hld.c 50 void watchdog_update_hrtimer_threshold(u64 period) in watchdog_update_hrtimer_threshold() argument
73 watchdog_hrtimer_sample_threshold = period * 2; in watchdog_update_hrtimer_threshold()
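
The sample threshold of twice the perf period gives the hardlockup detector a way to discard NMI samples that fire implausibly fast. A hedged sketch of that filtering pattern (names and logic are illustrative, not the kernel's exact check):

#include <stdint.h>
#include <stdbool.h>

static uint64_t sample_threshold;	/* = perf period * 2 */
static uint64_t last_timestamp;

static void update_threshold(uint64_t period)
{
	sample_threshold = period * 2;
}

/* Accept a perf NMI sample only if enough time passed since the last one. */
static bool sample_valid(uint64_t now)
{
	if (now - last_timestamp < sample_threshold)
		return false;	/* too soon: likely a spurious sample */
	last_timestamp = now;
	return true;
}
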
/kernel/time/
tick-sched.c 106 ktime_t period; in tick_init_jiffy_update() local
112 period = last_jiffies_update; in tick_init_jiffy_update()
114 return period; in tick_init_jiffy_update()
/kernel/events/
core.c 1739 size += sizeof(data->period); in __perf_event_header_size()
3668 s64 period, sample_period; in perf_adjust_period() local
3671 period = perf_calculate_period(event, nsec, count); in perf_adjust_period()
3673 delta = (s64)(period - hwc->sample_period); in perf_adjust_period()
3704 u64 now, period = TICK_NSEC; in perf_adjust_freq_unthr_context() local
3755 perf_adjust_period(event, period, delta, false); in perf_adjust_freq_unthr_context()
6396 perf_output_put(handle, data->period); in perf_output_sample()
8246 u64 period = hwc->last_period; in perf_swevent_set_period() local
8257 nr = div64_u64(period + val, period); in perf_swevent_set_period()
8258 offset = nr * period; in perf_swevent_set_period()
[all …]
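
perf_swevent_set_period() computes how many whole sample periods an accumulated count covers and where the next overflow boundary lies, exactly the arithmetic on lines 8257-8258. A sketch of just that arithmetic, without the atomics the kernel uses on hwc->period_left:

#include <stdint.h>

/*
 * Given `val` events accumulated past the previous overflow point, return
 * how many periods elapsed; *offset is where the counter is rearmed.
 */
static uint64_t periods_elapsed(uint64_t val, uint64_t period, uint64_t *offset)
{
	uint64_t nr = (period + val) / period;	/* always at least one */

	*offset = nr * period;
	return nr;
}
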
/kernel/power/
Kconfig 158 (low power) states, for example after a specified period of inactivity
/kernel/trace/
bpf_trace.c 1158 bpf_target_off(struct perf_sample_data, period, 8, in pe_prog_convert_ctx_access()