/kernel/ |
D | hung_task.c |
      374  unsigned long interval = sysctl_hung_task_check_interval_secs;  in watchdog() local
      377  if (interval == 0)  in watchdog()
      378          interval = timeout;  in watchdog()
      379  interval = min_t(unsigned long, interval, timeout);  in watchdog()
      380  t = hung_timeout_jiffies(hung_last_checked, interval);  in watchdog()
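The watchdog() lines above pick the effective check interval: a sysctl value of 0 means "check once per timeout", and the result is clamped so checks never happen less often than the hang timeout itself. A minimal userspace sketch of that selection, with plain C standing in for the kernel's min_t():

#include <stdio.h>

/* sketch only: mirrors the logic at hung_task.c:374-379, not kernel code */
static unsigned long pick_check_interval(unsigned long interval,
                                         unsigned long timeout)
{
        if (interval == 0)              /* 0 means "use the timeout" */
                interval = timeout;
        /* never let the check interval exceed the hang timeout */
        return interval < timeout ? interval : timeout;
}

int main(void)
{
        printf("%lu\n", pick_check_interval(0, 120));   /* -> 120 */
        printf("%lu\n", pick_check_interval(300, 120)); /* -> 120 */
        printf("%lu\n", pick_check_interval(60, 120));  /* -> 60 */
        return 0;
}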
|
/kernel/irq/ |
D | timings.c |
      435  static __always_inline int irq_timings_interval_index(u64 interval)  in irq_timings_interval_index() argument
      441  u64 interval_us = (interval >> 10) / PREDICTION_FACTOR;  in irq_timings_interval_index()
      447  u64 interval)  in __irq_timings_store() argument
      454  index = irq_timings_interval_index(interval);  in __irq_timings_store()
      467  irqs->ema_time[index] = irq_timings_ema_new(interval,  in __irq_timings_store()
      476  u64 interval;  in irq_timings_store() local
      489  interval = ts - old_ts;  in irq_timings_store()
      502  if (interval >= NSEC_PER_SEC) {  in irq_timings_store()
      507  __irq_timings_store(irq, irqs, interval);  in irq_timings_store()
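At timings.c:441 the nanosecond interval is converted to (scaled) microseconds with a shift rather than a division: interval >> 10 divides by 1024, a cheap approximation of /1000 that is close enough for bucketing. A standalone sketch of just that conversion; the PREDICTION_FACTOR value of 4 is an assumption here, and the real function goes on to compute a log-scale bucket from this value:

#include <stdio.h>
#include <stdint.h>

#define PREDICTION_FACTOR 4   /* assumed value, for illustration only */

/* ns -> scaled us, as on timings.c line 441 */
static uint64_t interval_us(uint64_t interval_ns)
{
        return (interval_ns >> 10) / PREDICTION_FACTOR;
}

int main(void)
{
        /* a 1 ms gap between interrupts: 1000000 >> 10 = 976, / 4 = 244 */
        printf("%llu\n", (unsigned long long)interval_us(1000000));
        return 0;
}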
|
/kernel/time/ |
D | alarmtimer.c |
      441  u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval)  in alarm_forward() argument
      451  if (unlikely(delta >= interval)) {  in alarm_forward()
      452  s64 incr = ktime_to_ns(interval);  in alarm_forward()
      468  alarm->node.expires = ktime_add_safe(alarm->node.expires, interval);  in alarm_forward()
      473  static u64 __alarm_forward_now(struct alarm *alarm, ktime_t interval, bool throttle)  in __alarm_forward_now() argument
      492  if (interval < kj)  in __alarm_forward_now()
      496  return alarm_forward(alarm, now, interval);  in __alarm_forward_now()
      499  u64 alarm_forward_now(struct alarm *alarm, ktime_t interval)  in alarm_forward_now() argument
      501  return __alarm_forward_now(alarm, interval, false);  in alarm_forward_now()
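alarm_forward() advances an expired alarm by whole multiples of the interval until it is in the future again, returning the number of periods skipped; hrtimer_forward() below (hrtimer.c:1037) has the same shape. A userspace sketch of the arithmetic, modeling ktime_t as signed 64-bit nanoseconds and ignoring the overflow protection ktime_add_safe() provides:

#include <stdio.h>
#include <stdint.h>

/* sketch of the alarm_forward()/hrtimer_forward() overrun arithmetic */
static uint64_t forward(int64_t *expires, int64_t now, int64_t interval)
{
        uint64_t orun = 1;
        int64_t delta = now - *expires;

        if (delta < 0)
                return 0;                /* not expired: nothing to do */

        if (delta >= interval) {
                /* skip all fully missed periods with one division */
                uint64_t periods = (uint64_t)(delta / interval);

                *expires += (int64_t)periods * interval;
                if (*expires > now)
                        return periods;
                orun = periods + 1;      /* correction for exact hits */
        }
        *expires += interval;            /* land strictly in the future */
        return orun;
}

int main(void)
{
        int64_t exp = 1000;
        /* due at t=1000 ns, period 100 ns, serviced late at t=1350 ns */
        uint64_t orun = forward(&exp, 1350, 100);

        printf("overruns=%llu next=%lld\n",
               (unsigned long long)orun, (long long)exp);
        return 0;                        /* prints: overruns=4 next=1400 */
}

The throttled variant (__alarm_forward_now, comparison at line 492) additionally appears to raise very small intervals to a floor kj before forwarding, presumably one jiffy.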
|
D | timekeeping.c |
      311  u64 interval;  in tk_setup_internals() local
      334  interval = (u64) tmp;  in tk_setup_internals()
      335  tk->cycle_interval = interval;  in tk_setup_internals()
      338  tk->xtime_interval = interval * clock->mult;  in tk_setup_internals()
      340  tk->raw_interval = interval * clock->mult;  in tk_setup_internals()
     1937  s64 interval = tk->cycle_interval;  in timekeeping_apply_adjustment() local
     1942  interval = -interval;  in timekeeping_apply_adjustment()
     1945  interval *= mult_adj;  in timekeeping_apply_adjustment()
     2003  tk->xtime_interval += interval;  in timekeeping_apply_adjustment()
     2124  u64 interval = tk->cycle_interval << shift;  in logarithmic_accumulation() local
    [all …]
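tk_setup_internals() precomputes per-tick quantities: cycle_interval is the number of raw clocksource cycles in one tick, and xtime_interval is that same span as shifted nanoseconds, interval * clock->mult. The underlying fixed-point conversion is ns = (cycles * mult) >> shift. A self-contained example for a hypothetical 24 MHz clocksource with shift 24 (both values made up for illustration):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* hypothetical clocksource: 24 MHz counter, shift chosen as 24 */
        uint32_t shift = 24;
        uint32_t mult  = (uint32_t)(((uint64_t)1000000000 << shift) / 24000000);

        /* ns = (cycles * mult) >> shift: 24000 cycles is ~1 ms */
        uint64_t cycles = 24000;
        uint64_t ns = (cycles * mult) >> shift;

        printf("mult=%u ns=%llu\n", mult, (unsigned long long)ns);
        return 0;   /* prints mult=699050666 ns=999999 (floor rounding) */
}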
|
D | itimer.c |
       50  u64 val, interval;  in get_cpu_itimer() local
       56  interval = it->incr;  in get_cpu_itimer()
       73  value->it_interval = ns_to_timespec64(interval);  in get_cpu_itimer()
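The interval is kept internally in nanoseconds (it->incr) and only converted to a timespec64 at the syscall boundary. For nonnegative values, ns_to_timespec64() reduces to a div/mod by NSEC_PER_SEC; the kernel also handles negative inputs, which this sketch ignores:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000LL

struct timespec_sketch { int64_t tv_sec; long tv_nsec; };

/* nonnegative-only sketch of the ns_to_timespec64() conversion */
static struct timespec_sketch ns_to_ts(int64_t ns)
{
        struct timespec_sketch t = {
                .tv_sec  = ns / NSEC_PER_SEC,
                .tv_nsec = (long)(ns % NSEC_PER_SEC),
        };
        return t;
}

int main(void)
{
        struct timespec_sketch t = ns_to_ts(1500000000); /* 1.5 s */
        printf("%lld s + %ld ns\n", (long long)t.tv_sec, t.tv_nsec);
        return 0;
}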
|
D | clocksource.c |
      401  int64_t wd_nsec, cs_nsec, interval;  in clocksource_watchdog() local
      481  interval = max(cs_nsec, wd_nsec);  in clocksource_watchdog()
      482  if (unlikely(interval > WATCHDOG_INTERVAL_MAX_NS)) {  in clocksource_watchdog()
      484  interval > 2 * watchdog_max_interval) {  in clocksource_watchdog()
      485  watchdog_max_interval = interval;  in clocksource_watchdog()
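The watchdog compares how much time elapsed on the clocksource under test (cs_nsec) against the watchdog clock (wd_nsec). If the larger of the two exceeds WATCHDOG_INTERVAL_MAX_NS, the pass was delayed too long to prove anything and is skipped rather than used to mark the clocksource unstable. A sketch of that gate; the constant's value below is an assumption:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* assumed to be about one second; the kernel derives it from the
 * ~0.5 s watchdog period */
#define WATCHDOG_INTERVAL_MAX_NS 1000000000LL

static bool watchdog_pass_usable(int64_t cs_nsec, int64_t wd_nsec)
{
        int64_t interval = cs_nsec > wd_nsec ? cs_nsec : wd_nsec;

        /* a badly delayed pass proves nothing about the clocksource */
        return interval <= WATCHDOG_INTERVAL_MAX_NS;
}

int main(void)
{
        printf("%d\n", watchdog_pass_usable(500000000, 500100000));    /* 1 */
        printf("%d\n", watchdog_pass_usable(3000000000LL, 500000000)); /* 0 */
        return 0;
}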
|
D | hrtimer.c |
     1037  u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)  in hrtimer_forward() argument
     1050  if (interval < hrtimer_resolution)  in hrtimer_forward()
     1051          interval = hrtimer_resolution;  in hrtimer_forward()
     1053  if (unlikely(delta >= interval)) {  in hrtimer_forward()
     1054  s64 incr = ktime_to_ns(interval);  in hrtimer_forward()
     1066  hrtimer_add_expires(timer, interval);  in hrtimer_forward()
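hrtimer_forward() is the generic version of the forwarding logic sketched under alarmtimer.c above, with one extra rule: the interval is raised to at least hrtimer_resolution (lines 1050-1051) so a timer cannot be re-armed faster than the clock can fire. The overrun count it computes surfaces to userspace through POSIX interval timers; a runnable demonstration (link with -lrt on older glibc):

#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
        timer_t tid;
        struct sigevent sev = {
                .sigev_notify = SIGEV_SIGNAL,
                .sigev_signo  = SIGALRM,
        };
        struct itimerspec its = {
                .it_value    = { 0, 1000000 },   /* first expiry: 1 ms */
                .it_interval = { 0, 1000000 },   /* period: 1 ms */
        };
        sigset_t set;
        int sig;

        sigemptyset(&set);
        sigaddset(&set, SIGALRM);
        sigprocmask(SIG_BLOCK, &set, NULL);      /* keep the signal pending */

        timer_create(CLOCK_MONOTONIC, &sev, &tid);
        timer_settime(tid, 0, &its, NULL);

        sleep(1);                  /* ~1000 periods expire unserviced */
        sigwait(&set, &sig);       /* consume one pending SIGALRM */

        /* periods missed while the signal was pending: roughly 999 here */
        printf("overruns: %d\n", timer_getoverrun(tid));
        return 0;
}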
|
/kernel/trace/ |
D | trace_hwlat.c |
      362  u64 interval;  in kthread_fn() local
      374  interval = hwlat_data.sample_window - hwlat_data.sample_width;  in kthread_fn()
      377  do_div(interval, USEC_PER_MSEC); /* modifies interval value */  in kthread_fn()
      380  if (interval < 1)  in kthread_fn()
      381          interval = 1;  in kthread_fn()
      383  if (msleep_interruptible(interval))  in kthread_fn()
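The hwlat kthread sleeps for whatever is left of the sampling window after the busy-polling width, converted from microseconds to milliseconds; do_div() divides its first argument in place, hence the comment in the source. A plain-C equivalent, with the window and width values assumed (they are tunable via tracefs):

#include <stdio.h>
#include <stdint.h>

#define USEC_PER_MSEC 1000ULL

int main(void)
{
        uint64_t sample_window = 1000000; /* assumed: 1 s window, in us */
        uint64_t sample_width  = 500000;  /* assumed: 0.5 s busy width */

        uint64_t interval = sample_window - sample_width;
        interval /= USEC_PER_MSEC;        /* us -> ms, as do_div() does */

        if (interval < 1)                 /* never ask msleep for 0 ms */
                interval = 1;

        printf("sleep %llu ms between samples\n",
               (unsigned long long)interval);
        return 0;
}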
|
D | trace_osnoise.c |
     1465  u64 interval;  in osnoise_sleep() local
     1469  interval = osnoise_data.sample_period - osnoise_data.sample_runtime;  in osnoise_sleep()
     1476  if (!interval) {  in osnoise_sleep()
     1482  wake_time = ktime_add_us(ktime_get(), interval);  in osnoise_sleep()
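osnoise_sleep() computes the time left in the sampling period, gives up the CPU without sleeping when it is zero (line 1476), and otherwise sleeps until an absolute deadline built with ktime_add_us(). The userspace analogue of an absolute-deadline sleep is clock_nanosleep() with TIMER_ABSTIME; the interval value here is made up for the example:

#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec wake;
        long interval_us = 4000;  /* assumed: period minus runtime */

        /* deadline = now + interval, like ktime_add_us(ktime_get(), ...) */
        clock_gettime(CLOCK_MONOTONIC, &wake);
        wake.tv_nsec += interval_us * 1000L;
        if (wake.tv_nsec >= 1000000000L) {       /* normalize */
                wake.tv_sec  += 1;
                wake.tv_nsec -= 1000000000L;
        }
        clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &wake, NULL);
        puts("woke at the absolute deadline");
        return 0;
}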
|
D | Kconfig |
      861  10 seconds. Each interval it will print out the number of events
|
/kernel/printk/ |
D | sysctl.c |
       33  .data = &printk_ratelimit_state.interval,
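printk_ratelimit_state pairs this interval with a burst count: at most 'burst' messages per 'interval' (in jiffies), anything beyond is suppressed until a new window begins. A self-contained sketch of that policy, not the kernel's exact implementation, with made-up units:

#include <stdbool.h>
#include <stdio.h>

struct ratelimit {
        int interval;   /* window length, in ticks for this sketch */
        int burst;      /* messages allowed per window */
        int begin;      /* start of the current window */
        int printed;    /* messages attempted in this window */
};

static bool ratelimit_ok(struct ratelimit *rs, int now)
{
        if (now - rs->begin >= rs->interval) {
                rs->begin = now;        /* open a new window */
                rs->printed = 0;
        }
        return rs->printed++ < rs->burst;
}

int main(void)
{
        struct ratelimit rs = { .interval = 5, .burst = 2 };

        for (int t = 0; t < 10; t++)
                printf("t=%d %s\n", t,
                       ratelimit_ok(&rs, t) ? "print" : "suppress");
        return 0;
}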
|
/kernel/sched/ |
D | fair.c |
     2300  unsigned long interval = HZ;  in numa_migrate_preferred() local
     2307  interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);  in numa_migrate_preferred()
     2308  p->numa_migrate_retry = jiffies + interval;  in numa_migrate_preferred()
     9053  unsigned long interval;  in update_group_capacity() local
     9055  interval = msecs_to_jiffies(sd->balance_interval);  in update_group_capacity()
     9056  interval = clamp(interval, 1UL, max_load_balance_interval);  in update_group_capacity()
     9057  sdg->sgc->next_update = jiffies + interval;  in update_group_capacity()
    10919  unsigned long interval = sd->balance_interval;  in get_sd_balance_interval() local
    10922  interval *= sd->busy_factor;  in get_sd_balance_interval()
    10925  interval = msecs_to_jiffies(interval);  in get_sd_balance_interval()
    [all …]
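The get_sd_balance_interval() lines (10919-10925) show the load balancer backing off: a per-domain base interval in milliseconds is multiplied by busy_factor when the CPU is busy, converted to jiffies, and clamped (the clamp itself is visible in update_group_capacity at line 9056). A sketch of that pipeline; HZ, busy_factor, and the clamp ceiling below are assumed values:

#include <stdio.h>

#define HZ 250 /* assumed kernel config */

/* round-up ms -> jiffies, as msecs_to_jiffies() does */
static unsigned long ms_to_jiffies_sketch(unsigned long ms)
{
        return (ms * HZ + 999) / 1000;
}

static unsigned long balance_interval(unsigned long interval_ms, int busy,
                                      unsigned int busy_factor,
                                      unsigned long max_interval)
{
        unsigned long j;

        if (busy)
                interval_ms *= busy_factor; /* balance less often while busy */

        j = ms_to_jiffies_sketch(interval_ms);
        if (j < 1)                          /* clamp(j, 1, max_interval) */
                j = 1;
        if (j > max_interval)
                j = max_interval;
        return j;
}

int main(void)
{
        /* 8 ms base, busy_factor 16, ceiling 250 jiffies (all assumed) */
        printf("%lu jiffies\n", balance_interval(8, 1, 16, 250)); /* 32 */
        return 0;
}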
|
D | core.c |
     9090  struct __kernel_timespec __user *, interval)  in SYSCALL_DEFINE2()
     9096  retval = put_timespec64(&t, interval);  in SYSCALL_DEFINE2()
     9103  struct old_timespec32 __user *, interval)  in SYSCALL_DEFINE2()
     9109  retval = put_old_timespec32(&t, interval);  in SYSCALL_DEFINE2()
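These two SYSCALL_DEFINE2 bodies are the native and 32-bit-compat entry points of sched_rr_get_interval(2), which copies the round-robin timeslice out to a user-supplied timespec. From userspace:

#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec ts;

        /* pid 0 queries the calling thread */
        if (sched_rr_get_interval(0, &ts) == 0)
                printf("timeslice: %ld.%09ld s\n",
                       (long)ts.tv_sec, ts.tv_nsec);
        return 0;
}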
|
/kernel/events/ |
D | core.c |
     1094  u64 interval;  in __perf_mux_hrtimer_init() local
     1104  interval = pmu->hrtimer_interval_ms;  in __perf_mux_hrtimer_init()
     1105  if (interval < 1)  in __perf_mux_hrtimer_init()
     1106          interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;  in __perf_mux_hrtimer_init()
     1108  cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);  in __perf_mux_hrtimer_init()
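The perf event-multiplexing hrtimer period comes from a per-PMU value in milliseconds; anything below 1 ms falls back to the PERF_CPU_HRTIMER default before the value is widened to nanoseconds for ktime. A sketch of that selection; the HZ and PERF_CPU_HRTIMER values below are assumptions:

#include <stdio.h>
#include <stdint.h>

#define HZ 250                        /* assumed kernel config */
#define PERF_CPU_HRTIMER (1000 / HZ)  /* assumed default: one tick, in ms */
#define NSEC_PER_MSEC 1000000ULL

int main(void)
{
        long interval_ms = 0;         /* pmu->hrtimer_interval_ms unset */

        if (interval_ms < 1)          /* < 1 ms means "use the default" */
                interval_ms = PERF_CPU_HRTIMER;

        /* ns_to_ktime(NSEC_PER_MSEC * interval): ktime_t is plain ns */
        uint64_t period_ns = NSEC_PER_MSEC * (uint64_t)interval_ms;

        printf("mux period: %llu ns\n", (unsigned long long)period_ns);
        return 0;                     /* 4 ms -> 4000000 ns with HZ=250 */
}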
|