/kernel/sched/ |
D | clock.c |
      91  u64 clock;   member
     161  scd->clock = scd->tick_gtod + __gtod_offset;   in __sched_clock_work()
     265  u64 now, clock, old_clock, min_clock, max_clock, gtod;   in sched_clock_local() local
     274  old_clock = scd->clock;   in sched_clock_local()
     283  clock = gtod + delta;   in sched_clock_local()
     287  clock = wrap_max(clock, min_clock);   in sched_clock_local()
     288  clock = wrap_min(clock, max_clock);   in sched_clock_local()
     290  if (!try_cmpxchg64(&scd->clock, &old_clock, clock))   in sched_clock_local()
     293  return clock;   in sched_clock_local()
     321  remote_clock = cmpxchg64(&scd->clock, 0, 0);   in sched_clock_remote()
          [all …]
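The sched_clock_local() lines above are the heart of the per-CPU scheduler clock: a candidate timestamp is built from the GTOD base plus a raw delta, clamped so it never runs backwards and never gets more than roughly one tick ahead of the base, and then published with try_cmpxchg64(), retrying if another updater raced in. The following is a minimal userspace sketch of that clamp-and-publish loop, assuming C11 atomics and clock_gettime() as stand-ins for the kernel primitives; every name in it (scd_clock, ns_of, the TICK_NSEC value) is illustrative rather than the kernel's.

/*
 * Userspace model of the clamp-and-publish pattern seen in sched_clock_local().
 * Not kernel code: names and the 1 ms tick length are assumptions.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define TICK_NSEC 1000000ULL                    /* assumed 1 ms tick length */

static _Atomic uint64_t scd_clock;              /* last published clock value */

static uint64_t ns_of(clockid_t id)
{
    struct timespec ts;

    clock_gettime(id, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static uint64_t u64_max(uint64_t a, uint64_t b) { return a > b ? a : b; }
static uint64_t u64_min(uint64_t a, uint64_t b) { return a < b ? a : b; }

static uint64_t local_clock_update(uint64_t tick_raw, uint64_t tick_gtod)
{
    uint64_t old_clock, clock, gtod, min_clock, max_clock;
    int64_t delta;

again:
    old_clock = atomic_load(&scd_clock);

    delta = (int64_t)(ns_of(CLOCK_MONOTONIC_RAW) - tick_raw);
    if (delta < 0)
        delta = 0;

    gtod      = tick_gtod;
    clock     = gtod + (uint64_t)delta;
    min_clock = u64_max(gtod, old_clock);             /* never go backwards */
    max_clock = u64_max(old_clock, gtod + TICK_NSEC); /* at most ~one tick ahead */

    clock = u64_max(clock, min_clock);
    clock = u64_min(clock, max_clock);

    /* Publish; if another updater raced us, redo the computation. */
    if (!atomic_compare_exchange_strong(&scd_clock, &old_clock, clock))
        goto again;

    return clock;
}

int main(void)
{
    uint64_t raw_base  = ns_of(CLOCK_MONOTONIC_RAW);
    uint64_t gtod_base = ns_of(CLOCK_MONOTONIC);

    printf("clock: %llu ns\n",
           (unsigned long long)local_clock_update(raw_base, gtod_base));
    return 0;
}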
|
D | pelt.c |
     460  ret = ___update_load_sum(rq->clock - running, &rq->avg_irq,   in update_irq_load_avg()
     464  ret += ___update_load_sum(rq->clock, &rq->avg_irq,   in update_irq_load_avg()
|
D | cputime.c |
     675  unsigned long long clock;   in vtime_delta() local
     677  clock = sched_clock();   in vtime_delta()
     678  if (clock < vtime->starttime)   in vtime_delta()
     681  return clock - vtime->starttime;   in vtime_delta()
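vtime_delta() is just a guarded subtraction: if the fresh sched_clock() reading lands behind the recorded start time, it reports zero instead of a huge unsigned wrap-around. A tiny userspace sketch of that guard (the helper name is made up):

#include <stdint.h>
#include <stdio.h>

/* Elapsed time since 'starttime'; returns 0 when the clock reading is
 * behind it, mirroring the 'if (clock < vtime->starttime)' check. */
static uint64_t vtime_delta_model(uint64_t clock, uint64_t starttime)
{
    if (clock < starttime)
        return 0;
    return clock - starttime;
}

int main(void)
{
    printf("%llu\n", (unsigned long long)vtime_delta_model(1500, 1000));  /* 500 */
    printf("%llu\n", (unsigned long long)vtime_delta_model(900, 1000));   /* 0 */
    return 0;
}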
|
D | sched.h |
    1034  u64 clock;   member
    1532  return rq->clock;   in rq_clock()
|
D | debug.c |
     749  PN(clock);   in print_cpu()
|
D | core.c |
     763  delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;   in update_rq_clock()
     766  rq->clock += delta;   in update_rq_clock()
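Together with the 'u64 clock' member and the rq_clock() accessor listed under sched.h, these two lines describe the cached runqueue clock: readers take the cached value, and update_rq_clock() advances the cache by the delta read from the underlying per-CPU clock. A hedged userspace model of that caching pattern; the struct and function names here are stand-ins, not kernel definitions:

#include <stdint.h>
#include <stdio.h>

struct rq_model {
    uint64_t clock;                     /* cached clock, advanced in steps */
};

/* Stand-in for sched_clock_cpu(): pretend the source advances 1 us per call. */
static uint64_t fake_sched_clock_cpu(void)
{
    static uint64_t ns;

    return ns += 1000;
}

static void update_rq_clock_model(struct rq_model *rq)
{
    uint64_t delta = fake_sched_clock_cpu() - rq->clock;

    rq->clock += delta;                 /* mirrors rq->clock += delta */
}

static uint64_t rq_clock_model(const struct rq_model *rq)
{
    return rq->clock;                   /* the accessor just returns the cache */
}

int main(void)
{
    struct rq_model rq = { .clock = 0 };

    update_rq_clock_model(&rq);
    printf("rq clock: %llu ns\n", (unsigned long long)rq_clock_model(&rq));
    return 0;
}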
|
/kernel/time/ |
D | timekeeping.c |
      99  .clock = &dummy_clock, \
     193  struct clocksource *clock = READ_ONCE(tkr->clock);   in tk_clock_read() local
     195  return clock->read(clock);   in tk_clock_read()
     204  u64 max_cycles = tk->tkr_mono.clock->max_cycles;   in timekeeping_check_update()
     205  const char *name = tk->tkr_mono.clock->name;   in timekeeping_check_update()
     258  max = tkr->clock->max_cycles;   in timekeeping_get_delta()
     275  delta = tkr->clock->max_cycles;   in timekeeping_get_delta()
     309  static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)   in tk_setup_internals() argument
     316  old_clock = tk->tkr_mono.clock;   in tk_setup_internals()
     317  tk->tkr_mono.clock = clock;   in tk_setup_internals()
          [all …]
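tk_clock_read() loads the clocksource pointer exactly once (via READ_ONCE()) and then calls through its ->read() hook, so a concurrent clocksource switch is observed either completely or not at all. A userspace sketch of that shape, using a C11 atomic pointer as the READ_ONCE() stand-in; the struct layout and all names are illustrative only:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct clocksource_model {
    uint64_t (*read)(const struct clocksource_model *cs);
    const char *name;
    uint64_t max_cycles;
};

static uint64_t monotonic_read(const struct clocksource_model *cs)
{
    struct timespec ts;

    (void)cs;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static struct clocksource_model mono_cs = {
    .read = monotonic_read, .name = "mono-model", .max_cycles = UINT64_MAX,
};

/* The currently installed clocksource; a setup routine would swap it. */
static struct clocksource_model *_Atomic active_cs = &mono_cs;

static uint64_t tk_clock_read_model(void)
{
    /* Load the pointer once, then call through it. */
    struct clocksource_model *cs =
        atomic_load_explicit(&active_cs, memory_order_relaxed);

    return cs->read(cs);
}

int main(void)
{
    printf("%s: %llu ns\n", mono_cs.name,
           (unsigned long long)tk_clock_read_model());
    return 0;
}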
|
D | posix-cpu-timers.c |
      57  static struct pid *pid_for_clock(const clockid_t clock, bool gettime)   in pid_for_clock() argument
      59  const bool thread = !!CPUCLOCK_PERTHREAD(clock);   in pid_for_clock()
      60  const pid_t upid = CPUCLOCK_PID(clock);   in pid_for_clock()
      63  if (CPUCLOCK_WHICH(clock) >= CPUCLOCK_MAX)   in pid_for_clock()
      97  static inline int validate_clock_permissions(const clockid_t clock)   in validate_clock_permissions() argument
     102  ret = pid_for_clock(clock, false) ? 0 : -EINVAL;   in validate_clock_permissions()
     108  static inline enum pid_type clock_pid_type(const clockid_t clock)   in clock_pid_type() argument
     110  return CPUCLOCK_PERTHREAD(clock) ? PIDTYPE_PID : PIDTYPE_TGID;   in clock_pid_type()
     180  posix_cpu_clock_set(const clockid_t clock, const struct timespec64 *tp)   in posix_cpu_clock_set() argument
     182  int error = validate_clock_permissions(clock);   in posix_cpu_clock_set()
          [all …]
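pid_for_clock() and its helpers decode a CPU-clock clockid: a "which clock" field, a per-thread flag, and a PID are packed into one integer, and the helpers pull those fields back out before validating them (the CPUCLOCK_WHICH() >= CPUCLOCK_MAX check). The sketch below uses an illustrative bit layout of its own; the kernel's real encoding lives in include/linux/posix-timers.h and differs in detail:

#include <stdbool.h>
#include <stdio.h>

enum which_clock { CLK_PROF, CLK_VIRT, CLK_SCHED, CLK_MAX };

#define CLK_WHICH(id)      ((id) & 0x3)          /* low 2 bits: which clock */
#define CLK_PERTHREAD(id)  (((id) & 0x4) != 0)   /* bit 2: thread vs. process */
#define CLK_PID(id)        ((id) >> 3)           /* remaining bits: the pid */

static bool clock_is_valid(int id)
{
    return CLK_WHICH(id) < CLK_MAX;              /* mirrors the >= CPUCLOCK_MAX test */
}

int main(void)
{
    int id = (1234 << 3) | 0x4 | CLK_SCHED;      /* per-thread sched clock of pid 1234 */

    printf("pid=%d perthread=%d which=%d valid=%d\n",
           CLK_PID(id), CLK_PERTHREAD(id), CLK_WHICH(id), clock_is_valid(id));
    return 0;
}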
|
D | namespace.c |
     331  char *clock;   in show_offset() local
     335  clock = "boottime";   in show_offset()
     338  clock = "monotonic";   in show_offset()
     341  clock = "unknown";   in show_offset()
     344  seq_printf(m, "%-10s %10lld %9ld\n", clock, ts->tv_sec, ts->tv_nsec);   in show_offset()
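show_offset() maps each namespace offset's clock id to a printable name ("boottime", "monotonic", or "unknown") before formatting the row. A small userspace equivalent with printf() standing in for seq_printf(); the sample offset value is made up:

#include <stdio.h>
#include <time.h>

static const char *clock_name(clockid_t id)
{
    switch (id) {
    case CLOCK_BOOTTIME:  return "boottime";
    case CLOCK_MONOTONIC: return "monotonic";
    default:              return "unknown";
    }
}

int main(void)
{
    struct timespec off = { .tv_sec = 3600, .tv_nsec = 0 };   /* example offset */

    printf("%-10s %10lld %9ld\n", clock_name(CLOCK_MONOTONIC),
           (long long)off.tv_sec, off.tv_nsec);
    return 0;
}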
|
D | Kconfig |
      29  # The generic clock events infrastructure
      43  # clock event device
      47  # Generic update of CMOS clock
|
D | Makefile |
       7  obj-y += posix-timers.o posix-cpu-timers.o posix-clock.o itimer.o
|
D | vsyscall.c |
      82  clock_mode = tk->tkr_mono.clock->vdso_clock_mode;   in update_vsyscall()
|
/kernel/trace/ |
D | trace_clock.c |
      34  u64 clock;   in trace_clock_local() local
      42  clock = sched_clock();   in trace_clock_local()
      45  return clock;   in trace_clock_local()
|
D | trace_events_hist.c |
     520  char *clock;   member
    1431  kfree(attrs->clock);   in destroy_hist_trigger_attrs()
    1492  attrs->clock = kstrdup(str, GFP_KERNEL);   in parse_assignment()
    1493  if (!attrs->clock) {   in parse_assignment()
    1569  if (!attrs->clock) {   in parse_hist_trigger_attrs()
    1570  attrs->clock = kstrdup("global", GFP_KERNEL);   in parse_hist_trigger_attrs()
    1571  if (!attrs->clock) {   in parse_hist_trigger_attrs()
    5954  seq_printf(m, ":clock=%s", hist_data->attrs->clock);   in event_hist_trigger_print()
    6271  char *clock = hist_data->attrs->clock;   in hist_register_trigger() local
    6273  ret = tracing_set_clock(file->tr, hist_data->attrs->clock);   in hist_register_trigger()
          [all …]
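The hist trigger keeps its clock= assignment as a duplicated string, defaults it to "global" when no clock= was given, and later hands it to tracing_set_clock() and prints it back as :clock=<name>. A sketch of just the string handling in plain userspace C; the struct and helper names (and the "x86-tsc" example) are illustrative:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct hist_attrs_model {
    char *clock;                                  /* value of clock=<name> */
};

static int parse_clock_assignment(struct hist_attrs_model *attrs, const char *str)
{
    attrs->clock = strdup(str);                   /* kstrdup(str, GFP_KERNEL) analogue */
    return attrs->clock ? 0 : -ENOMEM;
}

static int finalize_attrs(struct hist_attrs_model *attrs)
{
    if (!attrs->clock) {                          /* no clock= given: default to "global" */
        attrs->clock = strdup("global");
        if (!attrs->clock)
            return -ENOMEM;
    }
    return 0;
}

int main(void)
{
    struct hist_attrs_model with_clock = { 0 }, defaulted = { 0 };

    parse_clock_assignment(&with_clock, "x86-tsc");   /* as if clock=x86-tsc was parsed */
    finalize_attrs(&defaulted);                       /* no assignment at all */

    printf(":clock=%s\n:clock=%s\n", with_clock.clock, defaulted.clock);
    free(with_clock.clock);
    free(defaulted.clock);
    return 0;
}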
|
D | ring_buffer.c |
     518  u64 (*clock)(void);   member
    1134  if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local))   in rb_time_stamp()
    1137  ts = buffer->clock();   in rb_time_stamp()
    1772  buffer->clock = trace_clock_local;   in __ring_buffer_alloc()
    1866  u64 (*clock)(void))   in ring_buffer_set_clock()
    1868  buffer->clock = clock;   in ring_buffer_set_clock()
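The ring buffer stores its timestamp source as a 'u64 (*clock)(void)' member, defaults it to trace_clock_local in __ring_buffer_alloc(), and lets ring_buffer_set_clock() install a different one; rb_time_stamp() then just calls through the pointer (with a special-cased direct call for the default under retpolines). A userspace sketch of that pluggable-callback shape, with illustrative names and clock_gettime() as the time source:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct ring_buffer_model {
    uint64_t (*clock)(void);            /* pluggable timestamp callback */
};

static uint64_t ns_of(clockid_t id)
{
    struct timespec ts;

    clock_gettime(id, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* Default source, playing the role of trace_clock_local. */
static uint64_t local_clock_ns(void)  { return ns_of(CLOCK_MONOTONIC); }
/* Alternative source a caller might install instead. */
static uint64_t global_clock_ns(void) { return ns_of(CLOCK_REALTIME); }

static void rb_init(struct ring_buffer_model *rb)
{
    rb->clock = local_clock_ns;         /* mirrors buffer->clock = trace_clock_local */
}

static void rb_set_clock(struct ring_buffer_model *rb, uint64_t (*clock)(void))
{
    rb->clock = clock;                  /* mirrors ring_buffer_set_clock() */
}

static uint64_t rb_time_stamp_model(struct ring_buffer_model *rb)
{
    return rb->clock();                 /* plain indirect call; the kernel adds a
                                           direct-call fast path for the default */
}

int main(void)
{
    struct ring_buffer_model rb;

    rb_init(&rb);
    printf("local:  %llu\n", (unsigned long long)rb_time_stamp_model(&rb));

    rb_set_clock(&rb, global_clock_ns);
    printf("global: %llu\n", (unsigned long long)rb_time_stamp_model(&rb));
    return 0;
}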
|
/kernel/events/ |
D | core.c |
     592  return event->clock();   in perf_event_clock()
   11787  event->clock = &local_clock;   in perf_event_alloc()
   11789  event->clock = parent_event->clock;   in perf_event_alloc()
   12104  if (output_event->clock != event->clock)   in perf_event_set_output()
   12165  event->clock = &ktime_get_mono_fast_ns;   in perf_event_set_clock()
   12170  event->clock = &ktime_get_raw_fast_ns;   in perf_event_set_clock()
   12175  event->clock = &ktime_get_real_ns;   in perf_event_set_clock()
   12179  event->clock = &ktime_get_boottime_ns;   in perf_event_set_clock()
   12183  event->clock = &ktime_get_clocktai_ns;   in perf_event_set_clock()
   12451  if (group_leader->clock != event->clock)   in SYSCALL_DEFINE5()
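perf gives every event its own clock callback: perf_event_set_clock() maps the requested clockid to one of the ktime_get_*_ns() helpers, perf_event_alloc() defaults to local_clock or inherits the parent's choice, and both perf_event_set_output() and the group check in the syscall refuse to mix events whose clocks differ. Below is a userspace sketch of that select-and-match pattern, using clock_gettime() wrappers instead of the kernel helpers; the names and the supported-clock list are assumptions:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

typedef uint64_t (*clock_fn)(void);

struct event_model {
    clock_fn clock;                              /* per-event time source */
};

static uint64_t read_monotonic(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static uint64_t read_realtime(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_REALTIME, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static int event_set_clock(struct event_model *ev, clockid_t id)
{
    switch (id) {
    case CLOCK_MONOTONIC: ev->clock = read_monotonic; break;
    case CLOCK_REALTIME:  ev->clock = read_realtime;  break;
    default:              return -EINVAL;        /* unsupported clockid */
    }
    return 0;
}

/* Grouping/output rule: both events must read time the same way. */
static int check_same_clock(const struct event_model *a,
                            const struct event_model *b)
{
    return a->clock == b->clock ? 0 : -EINVAL;
}

int main(void)
{
    struct event_model leader = { 0 }, member = { 0 };

    event_set_clock(&leader, CLOCK_MONOTONIC);
    event_set_clock(&member, CLOCK_REALTIME);

    printf("can group: %s\n", check_same_clock(&leader, &member) ? "no" : "yes");
    printf("leader ts: %llu ns\n", (unsigned long long)leader.clock());
    return 0;
}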
|
/kernel/power/ |
D | Kconfig |
     274  CAUTION: this option will cause your machine's real-time clock to be
|
/kernel/rcu/ |
D | Kconfig |
     176  scheduling-clock interrupts for energy-efficiency reasons will
|