/kernel/time/ |
D | tick-sched.c |
      54  static void tick_do_update_jiffies64(ktime_t now)    in tick_do_update_jiffies64() argument
      63  delta = ktime_sub(now, READ_ONCE(last_jiffies_update));    in tick_do_update_jiffies64()
      70  delta = ktime_sub(now, last_jiffies_update);    in tick_do_update_jiffies64()
     117  static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)    in tick_sched_do_timer() argument
     142  tick_do_update_jiffies64(now);    in tick_sched_do_timer()
     502  static void tick_nohz_update_jiffies(ktime_t now)    in tick_nohz_update_jiffies() argument
     506  __this_cpu_write(tick_cpu_sched.idle_waketime, now);    in tick_nohz_update_jiffies()
     509  tick_do_update_jiffies64(now);    in tick_nohz_update_jiffies()
     519  update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)    in update_ts_time_stats() argument
     524  delta = ktime_sub(now, ts->idle_entrytime);    in update_ts_time_stats()
          [all …]
|
D | timer_list.c |
      23  u64 now;    member
      57  int idx, u64 now)    in print_timer() argument
      68  (long long)(ktime_to_ns(hrtimer_get_softexpires(timer)) - now),    in print_timer()
      69  (long long)(ktime_to_ns(hrtimer_get_expires(timer)) - now));    in print_timer()
      74  u64 now)    in print_active_timers() argument
     104  print_timer(m, timer, &tmp, i, now);    in print_active_timers()
     112  print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)    in print_base() argument
     127  print_active_timers(m, base, now + ktime_to_ns(base->offset));    in print_base()
     130  static void print_cpu(struct seq_file *m, int cpu, u64 now)    in print_cpu() argument
     138  print_base(m, cpu_base->clock_base + i, now);    in print_cpu()
          [all …]
|
D | timekeeping_internal.h |
      17  static inline u64 clocksource_delta(u64 now, u64 last, u64 mask)    in clocksource_delta() argument
      19  u64 ret = (now - last) & mask;    in clocksource_delta()
      28  static inline u64 clocksource_delta(u64 now, u64 last, u64 mask)    in clocksource_delta() argument
      30  return (now - last) & mask;    in clocksource_delta()
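Both clocksource_delta() variants listed above reduce to a masked subtraction, which stays correct when the hardware counter wraps within its mask. A minimal standalone sketch of that wrap-safe delta follows; the 24-bit mask and the sample readings are made up for illustration.

    #include <stdint.h>
    #include <stdio.h>

    /* Wrap-safe delta between two reads of a counter limited to 'mask' bits. */
    static inline uint64_t masked_delta(uint64_t now, uint64_t last, uint64_t mask)
    {
            return (now - last) & mask;
    }

    int main(void)
    {
            uint64_t mask = 0xffffff;   /* hypothetical 24-bit free-running counter */
            uint64_t last = 0xfffff0;   /* read shortly before the counter wrapped  */
            uint64_t now  = 0x000010;   /* read shortly after the wrap              */

            /* Prints 32: the wrap is absorbed by the mask. */
            printf("%llu\n", (unsigned long long)masked_delta(now, last, mask));
            return 0;
    }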
|
D | alarmtimer.c |
     244  ktime_t min, now, expires;    in alarmtimer_suspend() local
     293  now = rtc_tm_to_ktime(tm);    in alarmtimer_suspend()
     294  now = ktime_add(now, min);    in alarmtimer_suspend()
     297  ret = rtc_timer_start(rtc, &rtctimer, now, 0);    in alarmtimer_suspend()
     441  u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval)    in alarm_forward() argument
     446  delta = ktime_sub(now, alarm->node.expires);    in alarm_forward()
     459  if (alarm->node.expires > now)    in alarm_forward()
     534  ktime_t now)    in alarm_handle_timer() argument
     580  static s64 alarm_timer_forward(struct k_itimer *timr, ktime_t now)    in alarm_timer_forward() argument
     584  return alarm_forward(alarm, timr->it_interval, now);    in alarm_timer_forward()
          [all …]
|
D | hrtimer.c |
     598  ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq,    in hrtimer_update_base() local
     605  return now;    in hrtimer_update_base()
     918  u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)    in hrtimer_forward() argument
     923  delta = ktime_sub(now, hrtimer_get_expires(timer));    in hrtimer_forward()
     939  if (hrtimer_get_expires_tv64(timer) > now)    in hrtimer_forward()
    1479  struct hrtimer *timer, ktime_t *now,    in __run_hrtimer() argument
    1516  trace_hrtimer_expire_entry(timer, now);    in __run_hrtimer()
    1547  static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,    in __hrtimer_run_queues() argument
    1557  basenow = ktime_add(now, base->offset);    in __hrtimer_run_queues()
    1590  ktime_t now;    in hrtimer_run_softirq() local
          [all …]
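The hrtimer_forward() lines above show the usual way a periodic timer that has fallen behind is re-armed: if the expiry is already in the past, it is pushed forward by whole intervals and the number of skipped periods is returned as overruns. A simplified, self-contained sketch of that idea, using plain signed nanosecond counts instead of ktime_t and ignoring the rounding the kernel also applies:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Advance *expires past 'now' in whole 'interval' steps and return how
     * many periods were missed. Sketch only; not the kernel implementation.
     */
    static int64_t timer_forward(int64_t *expires, int64_t now, int64_t interval)
    {
            int64_t delta, overruns;

            if (*expires > now)
                    return 0;                    /* still in the future */

            delta = now - *expires;
            overruns = delta / interval + 1;
            *expires += overruns * interval;
            return overruns;
    }

    int main(void)
    {
            int64_t expires = 1000, now = 3500, interval = 1000;

            /* Periods at 1000, 2000 and 3000 were missed; next expiry is 4000. */
            printf("overruns=%lld next=%lld\n",
                   (long long)timer_forward(&expires, now, interval),
                   (long long)expires);
            return 0;
    }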
|
D | posix-cpu-timers.c |
     125  static u64 bump_cpu_timer(struct k_itimer *timer, u64 now)    in bump_cpu_timer() argument
     133  if (now < expires)    in bump_cpu_timer()
     137  delta = now + incr - expires;    in bump_cpu_timer()
     706  u64 now, expires = cpu_timer_getexpires(ctmr);    in posix_cpu_timer_get() local
     724  now = cpu_clock_sample(clkid, p);    in posix_cpu_timer_get()
     744  now = cpu_clock_sample_group(clkid, p, false);    in posix_cpu_timer_get()
     749  if (now < expires) {    in posix_cpu_timer_get()
     750  itp->it_value = ns_to_timespec64(expires - now);    in posix_cpu_timer_get()
     764  struct list_head *firing, u64 now)    in collect_timerqueue() argument
     776  if (++i == MAX_COLLECTED || now < expires)    in collect_timerqueue()
          [all …]
|
D | timekeeping.c |
     221  u64 now, last, mask, max, delta;    in timekeeping_get_delta() local
     233  now = tk_clock_read(tkr);    in timekeeping_get_delta()
     239  delta = clocksource_delta(now, last, mask);    in timekeeping_get_delta()
     455  u64 now;    in __ktime_get_fast_ns() local
     460  now = ktime_to_ns(tkr->base);    in __ktime_get_fast_ns()
     462  now += timekeeping_delta_to_ns(tkr,    in __ktime_get_fast_ns()
     469  return now;    in __ktime_get_fast_ns()
     521  u64 now;    in __ktime_get_real_fast_ns() local
     526  now = ktime_to_ns(tkr->base_real);    in __ktime_get_real_fast_ns()
     528  now += timekeeping_delta_to_ns(tkr,    in __ktime_get_real_fast_ns()
          [all …]
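The __ktime_get_fast_ns() and __ktime_get_real_fast_ns() lines follow the common timekeeping fast path: start from a base value in nanoseconds captured at the last update and add the cycles elapsed since that snapshot, converted with a fixed-point mult/shift. A rough sketch of that conversion; the structure, field names and the 1 MHz counter below are illustrative stand-ins, and the real code additionally wraps the read in a seqcount retry loop:

    #include <stdint.h>

    /* Illustrative snapshot of a clocksource, loosely modelled on the tkr fields. */
    struct tk_read_sketch {
            uint64_t base_ns;     /* nanoseconds at the time of the snapshot   */
            uint64_t cycle_last;  /* counter value at the time of the snapshot */
            uint64_t mask;        /* width of the hardware counter             */
            uint32_t mult;        /* fixed-point cycles -> ns multiplier       */
            uint32_t shift;       /* fixed-point scale                         */
    };

    /* base + (cycles since snapshot) * mult >> shift, wrap-safe via the mask. */
    static uint64_t fast_ns(const struct tk_read_sketch *tkr, uint64_t cycles_now)
    {
            uint64_t delta = (cycles_now - tkr->cycle_last) & tkr->mask;

            return tkr->base_ns + ((delta * tkr->mult) >> tkr->shift);
    }

    int main(void)
    {
            /* Hypothetical 1 MHz counter: one cycle is 1000 ns (mult=1000, shift=0). */
            struct tk_read_sketch tkr = {
                    .base_ns = 5000, .cycle_last = 10,
                    .mask = ~0ULL, .mult = 1000, .shift = 0,
            };

            return fast_ns(&tkr, 14) == 9000 ? 0 : 1;    /* 5000 + 4 * 1000 */
    }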
|
D | ntp.c |
     500  static void sched_sync_hw_clock(struct timespec64 now,    in sched_sync_hw_clock() argument
     535  struct timespec64 adjust, now;    in sync_rtc_clock() local
     541  ktime_get_real_ts64(&now);    in sync_rtc_clock()
     543  adjust = now;    in sync_rtc_clock()
     555  sched_sync_hw_clock(now, target_nsec, rc);    in sync_rtc_clock()
     568  struct timespec64 now;    in sync_cmos_clock() local
     587  ktime_get_real_ts64(&now);    in sync_cmos_clock()
     588  if (rtc_tv_nsec_ok(-1 * target_nsec, &adjust, &now)) {    in sync_cmos_clock()
     602  sched_sync_hw_clock(now, target_nsec, rc);    in sync_cmos_clock()
|
D | posix-timers.c |
     361  ktime_t now = hrtimer_cb_get_time(timer);    in posix_timer_fn() local
     390  now = ktime_add(now, kj);    in posix_timer_fn()
     393  timr->it_overrun += hrtimer_forward(timer, now,    in posix_timer_fn()
     614  static ktime_t common_hrtimer_remaining(struct k_itimer *timr, ktime_t now)    in common_hrtimer_remaining() argument
     618  return __hrtimer_expires_remaining_adjusted(timer, now);    in common_hrtimer_remaining()
     621  static s64 common_hrtimer_forward(struct k_itimer *timr, ktime_t now)    in common_hrtimer_forward() argument
     625  return hrtimer_forward(timer, now, timr->it_interval);    in common_hrtimer_forward()
     647  ktime_t now, remaining, iv;    in common_timer_get() local
     671  now = timespec64_to_ktime(ts64);    in common_timer_get()
     678  timr->it_overrun += kc->timer_forward(timr, now);    in common_timer_get()
          [all …]
|
D | tick-broadcast.c |
     607  ktime_t now, next_event;    in tick_handle_oneshot_broadcast() local
     615  now = ktime_get();    in tick_handle_oneshot_broadcast()
     627  if (td->evtdev->next_event <= now) {    in tick_handle_oneshot_broadcast()
     714  ktime_t now;    in __tick_broadcast_oneshot_control() local
     839  now = ktime_get();    in __tick_broadcast_oneshot_control()
     840  if (dev->next_event <= now) {    in __tick_broadcast_oneshot_control()
|
D | posix-timers.h |
      22  s64 (*timer_forward)(struct k_itimer *timr, ktime_t now);
      23  ktime_t (*timer_remaining)(struct k_itimer *timr, ktime_t now);
|
D | clocksource.c |
     558  u64 now, delta, nsec = 0;    in clocksource_stop_suspend_timing() local
     569  now = cycle_now;    in clocksource_stop_suspend_timing()
     571  now = suspend_clocksource->read(suspend_clocksource);    in clocksource_stop_suspend_timing()
     573  if (now > suspend_start) {    in clocksource_stop_suspend_timing()
     574  delta = clocksource_delta(now, suspend_start,    in clocksource_stop_suspend_timing()
|
/kernel/sched/ |
D | psi.c |
     242  u64 now, state_start;    in get_recent_times() local
     252  now = cpu_clock(cpu);    in get_recent_times()
     271  times[s] += now - state_start;    in get_recent_times()
     357  static u64 update_averages(struct psi_group *group, u64 now)    in update_averages() argument
     366  if (now - expires >= psi_period)    in update_averages()
     367  missed_periods = div_u64(now - expires, psi_period);    in update_averages()
     377  period = now - (group->avg_last_update + (missed_periods * psi_period));    in update_averages()
     378  group->avg_last_update = now;    in update_averages()
     416  u64 now;    in psi_avgs_work() local
     423  now = sched_clock();    in psi_avgs_work()
          [all …]
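The update_averages() lines show how a periodic averaging job catches up after running late: whole missed periods are counted, the start of the current sampling window is realigned, and only then is the new window folded into the averages. A hedged sketch of just that bookkeeping; the 2-second period and the field names are stand-ins, not the real psi_group layout:

    #include <stdint.h>
    #include <stdio.h>

    #define PERIOD_NS 2000000000ULL        /* hypothetical 2 s averaging period */

    struct avg_state {
            uint64_t last_update;          /* start of the current window  */
            uint64_t next_update;          /* when the next update was due */
    };

    /* Returns the length of the sampling window that ends at 'now'. */
    static uint64_t realign_window(struct avg_state *st, uint64_t now)
    {
            uint64_t missed_periods = 0, period;

            if (now - st->next_update >= PERIOD_NS)
                    missed_periods = (now - st->next_update) / PERIOD_NS;

            /* The window starts where the last on-time update would have been. */
            period = now - (st->last_update + missed_periods * PERIOD_NS);
            st->last_update = now;
            st->next_update = now + PERIOD_NS;
            return period;
    }

    int main(void)
    {
            struct avg_state st = { .last_update = 0, .next_update = PERIOD_NS };

            /* The worker runs 5.5 periods late: 5 whole periods are skipped
             * and the remaining 1.5-period (3 s) window is returned. */
            printf("window=%llu ns\n",
                   (unsigned long long)realign_window(&st, 13 * PERIOD_NS / 2));
            return 0;
    }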
|
D | pelt.c |
     176  ___update_load_sum(u64 now, struct sched_avg *sa,    in ___update_load_sum() argument
     181  delta = now - sa->last_update_time;    in ___update_load_sum()
     187  sa->last_update_time = now;    in ___update_load_sum()
     266  int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)    in __update_load_avg_blocked_se() argument
     268  if (___update_load_sum(now, &se->avg, 0, 0, 0)) {    in __update_load_avg_blocked_se()
     277  int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)    in __update_load_avg_se() argument
     279  if (___update_load_sum(now, &se->avg, !!se->on_rq, !!se->on_rq,    in __update_load_avg_se()
     291  int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)    in __update_load_avg_cfs_rq() argument
     293  if (___update_load_sum(now, &cfs_rq->avg,    in __update_load_avg_cfs_rq()
     317  int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)    in update_rt_rq_load_avg() argument
          [all …]
|
D | pelt.h |
       4  int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
       5  int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
       6  int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
       7  int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
       8  int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
     144  update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)    in update_cfs_rq_load_avg() argument
     150  update_rt_rq_load_avg(u64 now, struct rq *rq, int running)    in update_rt_rq_load_avg() argument
     156  update_dl_rq_load_avg(u64 now, struct rq *rq, int running)    in update_dl_rq_load_avg() argument
|
D | wait_bit.c |
     218  unsigned long now = READ_ONCE(jiffies);    in bit_wait_timeout() local
     220  if (time_after_eq(now, word->timeout))    in bit_wait_timeout()
     222  schedule_timeout(word->timeout - now);    in bit_wait_timeout()
     232  unsigned long now = READ_ONCE(jiffies);    in bit_wait_io_timeout() local
     234  if (time_after_eq(now, word->timeout))    in bit_wait_io_timeout()
     236  io_schedule_timeout(word->timeout - now);    in bit_wait_io_timeout()
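Both helpers above compare the free-running jiffies counter against a stored deadline with time_after_eq(), which remains correct across counter wrap-around because it is a signed comparison of the difference. A standalone sketch of that idiom, with userspace types and made-up tick values:

    #include <stdio.h>

    /* Wrap-safe "a is at or after b" for a free-running unsigned tick counter. */
    #define tick_after_eq(a, b)   ((long)((a) - (b)) >= 0)

    int main(void)
    {
            unsigned long timeout = 10UL;    /* deadline just after a wrap   */
            unsigned long now     = -5UL;    /* counter just before the wrap */

            /* A plain 'now >= timeout' would wrongly report the deadline as hit;
             * the signed difference says there are still 15 ticks to sleep. */
            printf("expired=%d remaining=%lu\n",
                   tick_after_eq(now, timeout), timeout - now);
            return 0;
    }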
|
D | stats.h |
     158  unsigned long long now = rq_clock(rq), delta = 0;    in sched_info_dequeued() local
     162  delta = now - t->sched_info.last_queued;    in sched_info_dequeued()
     177  unsigned long long now = rq_clock(rq), delta = 0;    in sched_info_arrive() local
     180  delta = now - t->sched_info.last_queued;    in sched_info_arrive()
     183  t->sched_info.last_arrival = now;    in sched_info_arrive()
|
D | clock.c |
     267  u64 now, clock, old_clock, min_clock, max_clock, gtod;    in sched_clock_local() local
     271  now = sched_clock();    in sched_clock_local()
     272  delta = now - scd->tick_raw;    in sched_clock_local()
|
/kernel/trace/ |
D | trace_clock.c |
      98  u64 now;    in trace_clock_global() local
     103  now = sched_clock_cpu(this_cpu);    in trace_clock_global()
     118  if ((s64)(now - trace_clock_struct.prev_time) < 0)    in trace_clock_global()
     119  now = trace_clock_struct.prev_time + 1;    in trace_clock_global()
     121  trace_clock_struct.prev_time = now;    in trace_clock_global()
     128  return now;    in trace_clock_global()
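The trace_clock_global() lines show how a per-CPU scheduler clock is turned into a globally monotonic trace clock: under a lock, a reading that would move backwards relative to the last value handed out is clamped to that value plus one. A minimal sketch of the clamp, using a pthread mutex in place of the kernel's arch spinlock:

    #include <stdint.h>
    #include <pthread.h>

    static pthread_mutex_t clock_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint64_t prev_time;           /* last timestamp handed out globally */

    /* 'raw' is a per-CPU clock reading that may lag slightly behind other CPUs. */
    uint64_t global_clock(uint64_t raw)
    {
            uint64_t now = raw;

            pthread_mutex_lock(&clock_lock);

            /* Never let the global clock move backwards between callers. */
            if ((int64_t)(now - prev_time) < 0)
                    now = prev_time + 1;
            prev_time = now;

            pthread_mutex_unlock(&clock_lock);
            return now;
    }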
|
/kernel/irq/ |
D | timings.c |
     382  static u64 __irq_timings_next_event(struct irqt_stat *irqs, int irq, u64 now)    in __irq_timings_next_event() argument
     386  if ((now - irqs->last_ts) >= NSEC_PER_SEC) {    in __irq_timings_next_event()
     531  u64 irq_timings_next_event(u64 now)    in irq_timings_next_event() argument
     578  ts = __irq_timings_next_event(irqs, i, now);    in irq_timings_next_event()
     579  if (ts <= now)    in irq_timings_next_event()
     580  return now;    in irq_timings_next_event()
|
/kernel/ |
D | watchdog_hld.c |
      78  ktime_t delta, now = ktime_get_mono_fast_ns();    in watchdog_check_timestamp() local
      80  delta = now - __this_cpu_read(last_timestamp);    in watchdog_check_timestamp()
      91  __this_cpu_write(last_timestamp, now);    in watchdog_check_timestamp()
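watchdog_check_timestamp() above filters hard-lockup NMIs by the time elapsed since the previous one: events that arrive sooner than the watchdog period could plausibly allow are treated as spurious. A hedged sketch of that filter, with a made-up threshold and a plain global instead of per-CPU data:

    #include <stdint.h>
    #include <stdbool.h>

    #define MIN_PERIOD_NS  600000000ULL      /* hypothetical 0.6 s lower bound */

    static uint64_t last_timestamp;

    /* Accept the event only if enough time passed since the last accepted one. */
    static bool check_timestamp(uint64_t now)
    {
            uint64_t delta = now - last_timestamp;

            if (delta < MIN_PERIOD_NS)
                    return false;            /* too soon: treat as spurious */

            last_timestamp = now;
            return true;
    }

    int main(void)
    {
            /* An event at t=0.6 s is accepted; a second one 1 ms later is not. */
            return check_timestamp(600000000ULL) &&
                   !check_timestamp(601000000ULL) ? 0 : 1;
    }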
|
D | watchdog.c |
     313  unsigned long now = get_timestamp();    in is_softlockup() local
     317  if (time_after(now, touch_ts + get_softlockup_thresh()))    in is_softlockup()
     318  return now - touch_ts;    in is_softlockup()
|
/kernel/power/ |
D | suspend_test.c |
      73  unsigned long now;    in test_wakealarm() local
      84  rtc_tm_to_time(&alm.time, &now);    in test_wakealarm()
      87  rtc_time_to_tm(now + TEST_SUSPEND_SECONDS, &alm.time);    in test_wakealarm()
|
D | wakelock.c |
     106  ktime_t now;    in __wakelocks_gc() local
     110  now = ktime_get();    in __wakelocks_gc()
     116  idle_time_ns = ktime_to_ns(ktime_sub(now, wl->ws->last_time));    in __wakelocks_gc()
|
/kernel/events/ |
D | core.c |
     620  __perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running)    in __perf_update_times() argument
     623  u64 delta = now - event->tstamp;    in __perf_update_times()
     636  u64 now = perf_event_time(event);    in perf_event_update_time() local
     638  __perf_update_times(event, now, &event->total_time_enabled,    in perf_event_update_time()
     640  event->tstamp = now;    in perf_event_update_time()
     716  u64 now;    in __update_cgrp_time() local
     718  now = perf_clock();    in __update_cgrp_time()
     722  info->time += now - info->timestamp;    in __update_cgrp_time()
     723  info->timestamp = now;    in __update_cgrp_time()
     931  perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)    in perf_cgroup_set_shadow_time() argument
          [all …]
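__perf_update_times() and perf_event_update_time() above are plain delta accounting: the time elapsed since the last update is added to the enabled total, to the running total as well while the event is actively counting, and the stamp is reset to now. A simplified sketch with a reduced event structure of my own invention:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Reduced stand-in for the few perf_event fields used by this pattern. */
    struct event_times {
            uint64_t tstamp;            /* last time the totals were updated    */
            uint64_t total_enabled;     /* time the event has been enabled      */
            uint64_t total_running;     /* time the event was actually counting */
            bool     active;            /* currently scheduled on a PMU?        */
    };

    /* Fold the time since the last update into the totals and restamp. */
    static void update_times(struct event_times *e, uint64_t now)
    {
            uint64_t delta = now - e->tstamp;

            e->total_enabled += delta;
            if (e->active)
                    e->total_running += delta;
            e->tstamp = now;
    }

    int main(void)
    {
            struct event_times e = { .tstamp = 100, .active = true };

            update_times(&e, 150);      /* 50 units enabled and running  */
            e.active = false;
            update_times(&e, 200);      /* 50 more enabled, none running */
            printf("enabled=%llu running=%llu\n",
                   (unsigned long long)e.total_enabled,
                   (unsigned long long)e.total_running);
            return 0;
    }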
|