/kernel/time/ |
D | tick-sched.c |
    117   static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)   in tick_sched_do_timer() argument
    144   if (ts->inidle)   in tick_sched_do_timer()
    145   ts->got_idle_tick = 1;   in tick_sched_do_timer()
    148   static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)   in tick_sched_handle() argument
    159   if (ts->tick_stopped) {   in tick_sched_handle()
    162   ts->idle_jiffies++;   in tick_sched_handle()
    168   ts->next_tick = 0;   in tick_sched_handle()
    208   static bool can_stop_full_tick(int cpu, struct tick_sched *ts)   in can_stop_full_tick() argument
    218   if (check_tick_dependency(&ts->tick_dep_mask))   in can_stop_full_tick()
    313   struct tick_sched *ts;   in tick_nohz_dep_set_cpu() local
    [all …]
|
D | time.c |
    144   struct timespec64 ts;   in SYSCALL_DEFINE2() local
    146   ktime_get_real_ts64(&ts);   in SYSCALL_DEFINE2()
    147   if (put_user(ts.tv_sec, &tv->tv_sec) ||   in SYSCALL_DEFINE2()
    148   put_user(ts.tv_nsec / 1000, &tv->tv_usec))   in SYSCALL_DEFINE2()
    229   struct timespec64 ts;   in COMPAT_SYSCALL_DEFINE2() local
    231   ktime_get_real_ts64(&ts);   in COMPAT_SYSCALL_DEFINE2()
    232   if (put_user(ts.tv_sec, &tv->tv_sec) ||   in COMPAT_SYSCALL_DEFINE2()
    233   put_user(ts.tv_nsec / 1000, &tv->tv_usec))   in COMPAT_SYSCALL_DEFINE2()
    462   struct timespec ts;   in ns_to_timespec() local
    468   ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);   in ns_to_timespec()
    [all …]
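For context, the SYSCALL_DEFINE2() and ns_to_timespec() lines above split a nanosecond count into seconds plus a sub-second remainder and derive microseconds as tv_nsec / 1000. A minimal user-space sketch of that split, assuming plain C arithmetic rather than the kernel's div_s64_rem() helper:

/* Illustrative sketch only; not the kernel implementation. */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000LL

static struct timespec ns_to_timespec_demo(int64_t nsec)
{
    struct timespec ts;
    int64_t rem;

    ts.tv_sec = nsec / NSEC_PER_SEC;
    rem = nsec % NSEC_PER_SEC;
    if (rem < 0) {              /* keep tv_nsec in [0, NSEC_PER_SEC) */
        ts.tv_sec--;
        rem += NSEC_PER_SEC;
    }
    ts.tv_nsec = rem;
    return ts;
}

int main(void)
{
    struct timespec ts = ns_to_timespec_demo(-1500000000LL);

    /* tv_usec in the syscalls above is just tv_nsec / 1000 */
    printf("%lld s, %ld ns (%ld us)\n",
           (long long)ts.tv_sec, ts.tv_nsec, ts.tv_nsec / 1000);
    return 0;
}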
|
D | timekeeping.c |
    109   struct timespec64 ts;   in tk_xtime() local
    111   ts.tv_sec = tk->xtime_sec;   in tk_xtime()
    112   ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);   in tk_xtime()
    113   return ts;   in tk_xtime()
    116   static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)   in tk_set_xtime() argument
    118   tk->xtime_sec = ts->tv_sec;   in tk_set_xtime()
    119   tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;   in tk_set_xtime()
    122   static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)   in tk_xtime_add() argument
    124   tk->xtime_sec += ts->tv_sec;   in tk_xtime_add()
    125   tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;   in tk_xtime_add()
    [all …]
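The tk_set_xtime()/tk_xtime() lines above keep tv_nsec left-shifted by the clocksource shift so fractional nanoseconds can accumulate. A small sketch of that round trip, with a shift value assumed only for the demo:

/* Illustrative sketch only; the shift is arbitrary, not a real clocksource's. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t shift = 8;                 /* assumed example shift */
    long     tv_nsec = 123456789;

    uint64_t xtime_nsec = (uint64_t)tv_nsec << shift;   /* as in tk_set_xtime() */
    long     recovered = (long)(xtime_nsec >> shift);    /* as in tk_xtime() */

    printf("stored=%llu recovered=%ld\n",
           (unsigned long long)xtime_nsec, recovered);
    return 0;
}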
|
D | posix-clock.c |
    250   static int pc_clock_gettime(clockid_t id, struct timespec64 *ts)   in pc_clock_gettime() argument
    260   err = cd.clk->ops.clock_gettime(cd.clk, ts);   in pc_clock_gettime()
    269   static int pc_clock_getres(clockid_t id, struct timespec64 *ts)   in pc_clock_getres() argument
    279   err = cd.clk->ops.clock_getres(cd.clk, ts);   in pc_clock_getres()
    288   static int pc_clock_settime(clockid_t id, const struct timespec64 *ts)   in pc_clock_settime() argument
    303   err = cd.clk->ops.clock_settime(cd.clk, ts);   in pc_clock_settime()
|
D | test_udelay.c |
    82   struct timespec64 ts;   in udelay_test_show() local
    84   ktime_get_ts64(&ts);   in udelay_test_show()
    86   loops_per_jiffy, (s64)ts.tv_sec, ts.tv_nsec);   in udelay_test_show()
|
D | ntp.c |
    714   int __do_adjtimex(struct __kernel_timex *txc, const struct timespec64 *ts,   in __do_adjtimex() argument
    774   txc->time.tv_sec = ts->tv_sec;   in __do_adjtimex()
    775   txc->time.tv_usec = ts->tv_nsec;   in __do_adjtimex()
    777   txc->time.tv_usec = ts->tv_nsec / NSEC_PER_USEC;   in __do_adjtimex()
    780   if (unlikely(ts->tv_sec >= ntp_next_leap_sec)) {   in __do_adjtimex()
    792   (ts->tv_sec == ntp_next_leap_sec)) {   in __do_adjtimex()
    813   static inline struct pps_normtime pps_normalize_ts(struct timespec64 ts)   in pps_normalize_ts() argument
    816   .sec = ts.tv_sec,   in pps_normalize_ts()
    817   .nsec = ts.tv_nsec   in pps_normalize_ts()
|
D | posix-timers.c |
    1131  struct timespec64 ts;   in SYSCALL_DEFINE2() local
    1136  if (get_old_timespec32(&ts, tp))   in SYSCALL_DEFINE2()
    1139  return kc->clock_set(which_clock, &ts);   in SYSCALL_DEFINE2()
    1146  struct timespec64 ts;   in SYSCALL_DEFINE2() local
    1152  err = kc->clock_get(which_clock, &ts);   in SYSCALL_DEFINE2()
    1154  if (!err && put_old_timespec32(&ts, tp))   in SYSCALL_DEFINE2()
    1182  struct timespec64 ts;   in SYSCALL_DEFINE2() local
    1188  err = kc->clock_getres(which_clock, &ts);   in SYSCALL_DEFINE2()
    1189  if (!err && tp && put_old_timespec32(&ts, tp))   in SYSCALL_DEFINE2()
|
D | timer_list.c |
    161   (unsigned long long)(ts->x))   in print_cpu()
    164   (unsigned long long)(ktime_to_ns(ts->x)))   in print_cpu()
    166   struct tick_sched *ts = tick_get_tick_sched(cpu);   in print_cpu() local
|
D | ntp_internal.h |
    12    const struct timespec64 *ts,
|
D | hrtimer.c |
    1846  int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts)   in nanosleep_copyout() argument
    1851  if (put_old_timespec32(ts, restart->nanosleep.compat_rmtp))   in nanosleep_copyout()
    1856  if (put_timespec64(ts, restart->nanosleep.rmtp))   in nanosleep_copyout()
|
/kernel/trace/ |
D | trace_stat.c |
    34    struct tracer_stat *ts;   member
    52    if (session->ts->stat_release)   in __reset_stat_session()
    53    session->ts->stat_release(snode->stat);   in __reset_stat_session()
    127   struct tracer_stat *ts = session->ts;   in stat_seq_init() local
    136   if (!ts->stat_cmp)   in stat_seq_init()
    137   ts->stat_cmp = dummy_cmp;   in stat_seq_init()
    139   stat = ts->stat_start(ts);   in stat_seq_init()
    143   ret = insert_stat(root, stat, ts->stat_cmp);   in stat_seq_init()
    151   stat = ts->stat_next(stat, i);   in stat_seq_init()
    157   ret = insert_stat(root, stat, ts->stat_cmp);   in stat_seq_init()
    [all …]
|
D | ring_buffer.c |
    289   u64 ts;   in ring_buffer_event_time_stamp() local
    291   ts = event->array[0];   in ring_buffer_event_time_stamp()
    292   ts <<= TS_SHIFT;   in ring_buffer_event_time_stamp()
    293   ts += event->time_delta;   in ring_buffer_event_time_stamp()
    295   return ts;   in ring_buffer_event_time_stamp()
    416   u64 ts;   member
    764   int cpu, u64 *ts)   in ring_buffer_normalize_time_stamp() argument
    767   *ts >>= DEBUG_SHIFT;   in ring_buffer_normalize_time_stamp()
    2805  (unsigned long long)info->ts,   in rb_handle_timestamp()
    2862  tail_page->page->time_stamp = info->ts;   in __rb_reserve_next()
    [all …]
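ring_buffer_event_time_stamp() above rebuilds a 64-bit timestamp from a high part in event->array[0] and a low part in event->time_delta. A hedged sketch of that reconstruction; the TS_SHIFT value below is an assumption for illustration, not necessarily the ring buffer's actual constant:

/* Illustrative sketch only; field widths and shift are demo assumptions. */
#include <stdint.h>
#include <stdio.h>

#define TS_SHIFT 27     /* assumed for the demo */

int main(void)
{
    uint32_t high  = 0x12345;       /* stands in for event->array[0]   */
    uint32_t delta = 0x6789A;       /* stands in for event->time_delta */

    uint64_t ts = high;
    ts <<= TS_SHIFT;                /* move the high bits into place */
    ts += delta;                    /* add the low-order delta       */

    printf("ts = %llu\n", (unsigned long long)ts);
    return 0;
}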
|
D | ring_buffer_benchmark.c |
    16    u64 ts;   member
    89    u64 ts;   in read_event() local
    91    event = ring_buffer_consume(buffer, cpu, &ts, NULL);   in read_event()
|
D | trace_mmiotrace.c |
    173   unsigned long long t = ns2usecs(iter->ts);   in mmio_print_rw()
    218   unsigned long long t = ns2usecs(iter->ts);   in mmio_print_map()
    252   unsigned long long t = ns2usecs(iter->ts);   in mmio_print_mark()
|
D | trace_output.c |
    526   unsigned long long abs_ts = iter->ts - iter->trace_buffer->time_start;   in lat_print_timestamp()
    527   unsigned long long rel_ts = next_ts - iter->ts;   in lat_print_timestamp()
    543   ns2usecs(iter->ts),   in lat_print_timestamp()
    550   iter->ts, abs_ts, rel_ts);   in lat_print_timestamp()
    593   t = ns2usecs(iter->ts);   in trace_print_context()
    598   trace_seq_printf(s, " %12llu: ", iter->ts);   in trace_print_context()
    619   next_ts = iter->ts;   in trace_print_lat_context()
|
D | trace.c |
    593   u64 ts;   in buffer_ftrace_now() local
    599   ts = ring_buffer_time_stamp(buf->buffer, cpu);   in buffer_ftrace_now()
    600   ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);   in buffer_ftrace_now()
    602   return ts;   in buffer_ftrace_now()
    3274  peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,   in peek_next_entry() argument
    3281  event = ring_buffer_iter_peek(buf_iter, ts);   in peek_next_entry()
    3283  event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,   in peek_next_entry()
    3302  u64 next_ts = 0, ts;   in __find_next_entry() local
    3326  ent = peek_next_entry(iter, cpu, &ts, &lost_events);   in __find_next_entry()
    3331  if (ent && (!next || ts < next_ts)) {   in __find_next_entry()
    [all …]
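__find_next_entry() above scans the per-CPU buffers and keeps the entry with the smallest timestamp (ent && (!next || ts < next_ts)). A minimal sketch of that oldest-first selection, using made-up per-CPU timestamps:

/* Illustrative sketch only; timestamps are invented for the demo. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t per_cpu_ts[] = { 930, 875, 910, 890 };
    int ncpus = 4, next_cpu = -1;
    uint64_t next_ts = 0;

    for (int cpu = 0; cpu < ncpus; cpu++) {
        uint64_t ts = per_cpu_ts[cpu];

        /* keep the earliest timestamp; ties keep the first CPU seen */
        if (next_cpu < 0 || ts < next_ts) {
            next_cpu = cpu;
            next_ts = ts;
        }
    }

    printf("next entry: cpu=%d ts=%llu\n",
           next_cpu, (unsigned long long)next_ts);
    return 0;
}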
|
D | blktrace.c |
    1239  unsigned long long ts = iter->ts;   in blk_log_action_classic() local
    1240  unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);   in blk_log_action_classic()
    1241  unsigned secs = (unsigned long)ts;   in blk_log_action_classic()
    1505  .time = iter->ts,   in blk_trace_synthesize_old_trace()
|
D | trace_functions_graph.c |
    506   usecs = iter->ts - iter->trace_buffer->time_start;   in print_graph_rel_time()
    527   print_graph_abs_time(iter->ts, s);   in print_graph_irq()
    739   print_graph_abs_time(iter->ts, s);   in print_graph_prologue()
|
/kernel/ |
D | compat.c |
    43    static int __compat_get_timespec(struct timespec *ts, const struct old_timespec32 __user *cts)   in __compat_get_timespec() argument
    46    __get_user(ts->tv_sec, &cts->tv_sec) ||   in __compat_get_timespec()
    47    __get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;   in __compat_get_timespec()
    50    static int __compat_put_timespec(const struct timespec *ts, struct old_timespec32 __user *cts)   in __compat_put_timespec() argument
    53    __put_user(ts->tv_sec, &cts->tv_sec) ||   in __compat_put_timespec()
    54    __put_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;   in __compat_put_timespec()
    75    int compat_get_timespec(struct timespec *ts, const void __user *uts)   in compat_get_timespec() argument
    78    return copy_from_user(ts, uts, sizeof(*ts)) ? -EFAULT : 0;   in compat_get_timespec()
    80    return __compat_get_timespec(ts, uts);   in compat_get_timespec()
    84    int compat_put_timespec(const struct timespec *ts, void __user *uts)   in compat_put_timespec() argument
    [all …]
|
D | futex.c |
    3914  struct timespec64 ts;   in SYSCALL_DEFINE6() local
    3924  if (get_timespec64(&ts, utime))   in SYSCALL_DEFINE6()
    3926  if (!timespec64_valid(&ts))   in SYSCALL_DEFINE6()
    3929  t = timespec64_to_ktime(ts);   in SYSCALL_DEFINE6()
    4108  struct timespec64 ts;   in SYSCALL_DEFINE6() local
    4116  if (get_old_timespec32(&ts, utime))   in SYSCALL_DEFINE6()
    4118  if (!timespec64_valid(&ts))   in SYSCALL_DEFINE6()
    4121  t = timespec64_to_ktime(ts);   in SYSCALL_DEFINE6()
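Both futex syscall variants above copy in a timespec, reject invalid values with timespec64_valid(), and convert the result to a ktime. A user-space sketch of that validate-then-convert pattern; the checks mirror the usual tv_sec >= 0 and 0 <= tv_nsec < NSEC_PER_SEC rules, but this is only an analogue, not the kernel helpers:

/* Illustrative sketch only; not the kernel's timespec64_valid()/to_ktime(). */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000LL

static int timespec_valid_demo(const struct timespec *ts)
{
    if (ts->tv_sec < 0)
        return 0;
    if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
        return 0;
    return 1;
}

int main(void)
{
    struct timespec ts = { .tv_sec = 2, .tv_nsec = 500000000 };

    if (!timespec_valid_demo(&ts))
        return 1;       /* the syscall path would return -EINVAL here */

    /* flatten to nanoseconds, as timespec64_to_ktime() does */
    int64_t t = (int64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
    printf("timeout = %lld ns\n", (long long)t);
    return 0;
}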
|
D | signal.c |
    3433  const struct timespec64 *ts)   in do_sigtimedwait() argument
    3440  if (ts) {   in do_sigtimedwait()
    3441  if (!timespec64_valid(ts))   in do_sigtimedwait()
    3443  timeout = timespec64_to_ktime(*ts);   in do_sigtimedwait()
    3496  struct timespec64 ts;   in SYSCALL_DEFINE4() local
    3508  if (get_timespec64(&ts, uts))   in SYSCALL_DEFINE4()
    3512  ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);   in SYSCALL_DEFINE4()
    3529  struct timespec64 ts;   in SYSCALL_DEFINE4() local
    3540  if (get_old_timespec32(&ts, uts))   in SYSCALL_DEFINE4()
    3544  ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);   in SYSCALL_DEFINE4()
|
D | workqueue.c |
    5738  unsigned long pool_ts, touched, ts;   in wq_watchdog_timer_fn() local
    5748  ts = pool_ts;   in wq_watchdog_timer_fn()
    5750  ts = touched;   in wq_watchdog_timer_fn()
    5756  if (time_after(cpu_touched, ts))   in wq_watchdog_timer_fn()
    5757  ts = cpu_touched;   in wq_watchdog_timer_fn()
    5761  if (time_after(jiffies, ts + thresh)) {   in wq_watchdog_timer_fn()
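wq_watchdog_timer_fn() above compares jiffies against a deadline with time_after(), which stays correct across counter wraparound. A small sketch of the signed-difference idiom behind that comparison, with invented counter values:

/* Illustrative sketch only; the macro mirrors the usual wrap-safe idiom. */
#include <stdio.h>

/* true if a is after b, even if the counter has wrapped */
#define time_after_demo(a, b)  ((long)((b) - (a)) < 0)

int main(void)
{
    unsigned long ts = (unsigned long)-10;   /* last touch, just before wrap */
    unsigned long thresh = 30;
    unsigned long jiffies_now = 25;          /* counter has already wrapped  */

    if (time_after_demo(jiffies_now, ts + thresh))
        printf("watchdog threshold exceeded\n");
    else
        printf("still within threshold\n");
    return 0;
}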
|
/kernel/irq/ |
D | timings.c |
    468   static inline void irq_timings_store(int irq, struct irqt_stat *irqs, u64 ts)   in irq_timings_store() argument
    477   irqs->last_ts = ts;   in irq_timings_store()
    484   interval = ts - old_ts;   in irq_timings_store()
    536   u64 ts, next_evt = U64_MAX;   in irq_timings_next_event() local
    564   irq = irq_timing_decode(irqts->values[i], &ts);   in irq_timings_next_event()
    567   irq_timings_store(irq, this_cpu_ptr(s), ts);   in irq_timings_next_event()
    578   ts = __irq_timings_next_event(irqs, i, now);   in irq_timings_next_event()
    579   if (ts <= now)   in irq_timings_next_event()
    582   if (ts < next_evt)   in irq_timings_next_event()
    583   next_evt = ts;   in irq_timings_next_event()
    [all …]
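irq_timings_store() above diffs each new timestamp against the remembered last_ts to get an inter-arrival interval. A minimal sketch of that bookkeeping, with invented sample timestamps:

/* Illustrative sketch only; sample values are made up for the demo. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t samples[] = { 1000, 1250, 1490, 1760 };
    uint64_t last_ts = 0;

    for (int i = 0; i < 4; i++) {
        uint64_t ts = samples[i];

        if (last_ts)    /* the first sample has no previous timestamp */
            printf("interval = %llu\n",
                   (unsigned long long)(ts - last_ts));
        last_ts = ts;   /* corresponds to irqs->last_ts = ts above */
    }
    return 0;
}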
|
D | internals.h |
    362   static __always_inline void irq_timings_push(u64 ts, int irq)   in irq_timings_push() argument
    367   irq_timing_encode(ts, irq);   in irq_timings_push()
|
/kernel/rcu/ |
D | rcutorture.c |
    220   u64 ts = trace_clock_local();   in rcu_trace_clock_local() local
    222   (void)do_div(ts, NSEC_PER_USEC);   in rcu_trace_clock_local()
    223   return ts;   in rcu_trace_clock_local()
    335   unsigned long long ts;   in rcu_read_delay() local
    344   ts = rcu_trace_clock_local();   in rcu_read_delay()
    350   do_trace_rcu_torture_read(cur_ops->name, NULL, ts,   in rcu_read_delay()
    1272  unsigned long long ts;   in rcu_torture_one_read() local
    1277  ts = rcu_trace_clock_local();   in rcu_torture_one_read()
    1300  ts, started, completed);   in rcu_torture_one_read()
|