/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 *
 */

#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
#include <linux/pvclock_gtod.h>
#include <linux/compiler.h>

#include "tick-internal.h"
#include "ntp_internal.h"
#include "timekeeping_internal.h"

#define TK_CLEAR_NTP		(1 << 0)
#define TK_MIRROR		(1 << 1)
#define TK_CLOCK_WAS_SET	(1 << 2)

/*
 * The most important data for readout fits into a single 64 byte
 * cache line.
 */
static struct {
        seqcount_t seq;
        struct timekeeper timekeeper;
} tk_core ____cacheline_aligned;

static DEFINE_RAW_SPINLOCK(timekeeper_lock);
static struct timekeeper shadow_timekeeper;

/**
 * struct tk_fast - NMI safe timekeeper
 * @seq:	Sequence counter for protecting updates. The lowest bit
 *		is the index for the tk_read_base array
 * @base:	tk_read_base array. Access is indexed by the lowest bit of
 *		@seq.
 *
 * See @update_fast_timekeeper() below.
 */
struct tk_fast {
        seqcount_t seq;
        struct tk_read_base base[2];
};

static struct tk_fast tk_fast_mono ____cacheline_aligned;

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

/* Flag for if there is a persistent clock on this platform */
bool __read_mostly persistent_clock_exist = false;
static inline void tk_normalize_xtime(struct timekeeper *tk)
{
        while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
                tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
                tk->xtime_sec++;
        }
        while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) {
                tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
                tk->raw_sec++;
        }
}

static inline struct timespec64 tk_xtime(struct timekeeper *tk)
{
        struct timespec64 ts;

        ts.tv_sec = tk->xtime_sec;
        ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
        return ts;
}

static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
{
        tk->xtime_sec = ts->tv_sec;
        tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
}

static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
{
        tk->xtime_sec += ts->tv_sec;
        tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
        tk_normalize_xtime(tk);
}

static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
{
        struct timespec64 tmp;

        /*
         * Verify consistency of: offset_real = -wall_to_monotonic
         * before modifying anything
         */
        set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
                                        -tk->wall_to_monotonic.tv_nsec);
        WARN_ON_ONCE(tk->offs_real.tv64 != timespec64_to_ktime(tmp).tv64);
        tk->wall_to_monotonic = wtm;
        set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
        tk->offs_real = timespec64_to_ktime(tmp);
        tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
}

static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
{
        tk->offs_boot = ktime_add(tk->offs_boot, delta);
}

/**
 * tk_setup_internals - Set up internals to use clocksource clock.
 *
 * @tk:		The target timekeeper to setup.
 * @clock:	Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
        cycle_t interval;
        u64 tmp, ntpinterval;
        struct clocksource *old_clock;

        old_clock = tk->tkr_mono.clock;
        tk->tkr_mono.clock = clock;
        tk->tkr_mono.read = clock->read;
        tk->tkr_mono.mask = clock->mask;
        tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);

        tk->tkr_raw.clock = clock;
        tk->tkr_raw.read = clock->read;
        tk->tkr_raw.mask = clock->mask;
        tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;

        /* Do the ns -> cycle conversion first, using original mult */
        tmp = NTP_INTERVAL_LENGTH;
        tmp <<= clock->shift;
        ntpinterval = tmp;
        tmp += clock->mult/2;
        do_div(tmp, clock->mult);
        if (tmp == 0)
                tmp = 1;

        interval = (cycle_t) tmp;
        tk->cycle_interval = interval;

        /* Go back from cycles -> shifted ns */
        tk->xtime_interval = (u64) interval * clock->mult;
        tk->xtime_remainder = ntpinterval - tk->xtime_interval;
        tk->raw_interval = interval * clock->mult;

        /* if changing clocks, convert xtime_nsec shift units */
        if (old_clock) {
                int shift_change = clock->shift - old_clock->shift;
                if (shift_change < 0) {
                        tk->tkr_mono.xtime_nsec >>= -shift_change;
                        tk->tkr_raw.xtime_nsec >>= -shift_change;
                } else {
                        tk->tkr_mono.xtime_nsec <<= shift_change;
                        tk->tkr_raw.xtime_nsec <<= shift_change;
                }
        }

        tk->tkr_mono.shift = clock->shift;
        tk->tkr_raw.shift = clock->shift;

        tk->ntp_error = 0;
        tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
        tk->ntp_tick = ntpinterval << tk->ntp_error_shift;

        /*
         * The timekeeper keeps its own mult values for the currently
         * active clocksource. These values will be adjusted via NTP
         * to counteract clock drifting.
         */
        tk->tkr_mono.mult = clock->mult;
        tk->tkr_raw.mult = clock->mult;
        tk->ntp_err_mult = 0;
}

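/*
 * Worked example of the interval math above (illustrative only; the
 * mult/shift values are made up, not taken from a real clocksource):
 * with NTP_INTERVAL_LENGTH = 10000000 ns (HZ=100), mult = 1 << 22 and
 * shift = 22, the ns -> cycle conversion is
 *
 *      cycle_interval = ((10000000 << 22) + mult/2) / mult = 10000000
 *
 * i.e. a 1 GHz-like counter (1 ns/cycle, since mult / 2^shift = 1)
 * accumulates ~10 ms worth of cycles per tick, and
 * xtime_interval = cycle_interval * mult gives those 10 ms back in
 * shifted (2^-22 ns) units.
 */
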
/* Timekeeper helper functions. */

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
static u32 default_arch_gettimeoffset(void) { return 0; }
u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
#else
static inline u32 arch_gettimeoffset(void) { return 0; }
#endif

static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
{
        cycle_t cycle_now, delta;
        s64 nsec;

        /* read clocksource: */
        cycle_now = tkr->read(tkr->clock);

        /* calculate the delta since the last update_wall_time: */
        delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);

        nsec = (delta * tkr->mult + tkr->xtime_nsec) >> tkr->shift;

        /* If arch requires, add in arch_gettimeoffset() */
        return nsec + arch_gettimeoffset();
}

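/*
 * The readout above is fixed-point arithmetic:
 * nsec = (delta * mult + xtime_nsec) >> shift, where xtime_nsec carries
 * the not-yet-accumulated fraction in 2^-shift ns units. Hand-run
 * sketch with made-up values (not from a real clocksource): a 1 MHz
 * counter with shift = 20 gets mult = 1000 << 20, so a delta of 3
 * cycles yields (3 * (1000 << 20)) >> 20 = 3000 ns, as expected for
 * 1000 ns per cycle.
 */
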
/**
 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
 * @tk:		The timekeeper from which we take the update
 * @tkf:	The fast timekeeper to update
 * @tbase:	The time base for the fast timekeeper (mono/raw)
 *
 * We want to use this from any context including NMI and tracing /
 * instrumenting the timekeeping code itself.
 *
 * So we handle this differently than the other timekeeping accessor
 * functions which retry when the sequence count has changed. The
 * update side does:
 *
 * smp_wmb();	<- Ensure that the last base[1] update is visible
 * tkf->seq++;
 * smp_wmb();	<- Ensure that the seqcount update is visible
 * update(tkf->base[0], tk);
 * smp_wmb();	<- Ensure that the base[0] update is visible
 * tkf->seq++;
 * smp_wmb();	<- Ensure that the seqcount update is visible
 * update(tkf->base[1], tk);
 *
 * The reader side does:
 *
 * do {
 *	seq = tkf->seq;
 *	smp_rmb();
 *	idx = seq & 0x01;
 *	now = now(tkf->base[idx]);
 *	smp_rmb();
 * } while (seq != tkf->seq)
 *
 * As long as we update base[0] readers are forced off to
 * base[1]. Once base[0] is updated readers are redirected to base[0]
 * and the base[1] update takes place.
 *
 * So if a NMI hits the update of base[0] then it will use base[1]
 * which is still consistent. In the worst case this can result in a
 * slightly wrong timestamp (a few nanoseconds). See
 * @ktime_get_mono_fast_ns.
 */
static void update_fast_timekeeper(struct timekeeper *tk)
{
        struct tk_read_base *base = tk_fast_mono.base;

        /* Force readers off to base[1] */
        raw_write_seqcount_latch(&tk_fast_mono.seq);

        /* Update base[0] */
        memcpy(base, &tk->tkr_mono, sizeof(*base));

        /* Force readers back to base[0] */
        raw_write_seqcount_latch(&tk_fast_mono.seq);

        /* Update base[1] */
        memcpy(base + 1, base, sizeof(*base));
}

/**
 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
 *
 * This timestamp is not guaranteed to be monotonic across an update.
 * The timestamp is calculated by:
 *
 *	now = base_mono + clock_delta * slope
 *
 * So if the update lowers the slope, readers who are forced to the
 * not yet updated second array are still using the old steeper slope.
 *
 * tmono
 * ^
 * |    o  n
 * |   o n
 * |  u
 * | o
 * |o
 * |12345678---> reader order
 *
 * o = old slope
 * u = update
 * n = new slope
 *
 * So reader 6 will observe time going backwards versus reader 5.
 *
 * While other CPUs are likely to be able to observe that, the only way
 * for a CPU local observation is when an NMI hits in the middle of
 * the update. Timestamps taken from that NMI context might be ahead
 * of the following timestamps. Callers need to be aware of that and
 * deal with it.
 */
u64 notrace ktime_get_mono_fast_ns(void)
{
        struct tk_read_base *tkr;
        unsigned int seq;
        u64 now;

        do {
                seq = raw_read_seqcount(&tk_fast_mono.seq);
                tkr = tk_fast_mono.base + (seq & 0x01);
                now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr);

        } while (read_seqcount_retry(&tk_fast_mono.seq, seq));
        return now;
}
EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);

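/*
 * Illustrative caller sketch (not part of this file): because the
 * latch scheme above never makes the reader spin, this accessor is
 * usable where ktime_get() is not, e.g. from an NMI handler. The
 * handler name below is hypothetical:
 *
 *	static void my_nmi_handler(void)
 *	{
 *		u64 ts = ktime_get_mono_fast_ns();
 *		// record ts; never call the retrying accessors here
 *	}
 *
 * Timestamps taken this way may be off by a few ns across an update,
 * as described above.
 */
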
#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD

static inline void update_vsyscall(struct timekeeper *tk)
{
        struct timespec xt, wm;

        xt = timespec64_to_timespec(tk_xtime(tk));
        wm = timespec64_to_timespec(tk->wall_to_monotonic);
        update_vsyscall_old(&xt, &wm, tk->tkr_mono.clock, tk->tkr_mono.mult,
                            tk->tkr_mono.cycle_last);
}

static inline void old_vsyscall_fixup(struct timekeeper *tk)
{
        s64 remainder;

        /*
         * Store only full nanoseconds into xtime_nsec after rounding
         * it up and add the remainder to the error difference.
         * XXX - This is necessary to avoid small 1ns inconsistencies caused
         * by truncating the remainder in vsyscalls. However, it causes
         * additional work to be done in timekeeping_adjust(). Once
         * the vsyscall implementations are converted to use xtime_nsec
         * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
         * users are removed, this can be killed.
         */
        remainder = tk->tkr_mono.xtime_nsec & ((1ULL << tk->tkr_mono.shift) - 1);
        tk->tkr_mono.xtime_nsec -= remainder;
        tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift;
        tk->ntp_error += remainder << tk->ntp_error_shift;
        tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift;
}
#else
#define old_vsyscall_fixup(tk)
#endif

static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);

static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
{
        raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
}

/**
 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 */
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
        update_pvclock_gtod(tk, true);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);

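/*
 * Minimal registration sketch (hypothetical names, shown for
 * illustration; real users live in e.g. KVM/Xen code):
 *
 *	static int my_gtod_notify(struct notifier_block *nb,
 *				  unsigned long was_set, void *priv)
 *	{
 *		struct timekeeper *tk = priv;
 *		// re-derive paravirt time data from tk here
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_gtod_nb = {
 *		.notifier_call = my_gtod_notify,
 *	};
 *
 *	pvclock_gtod_register_notifier(&my_gtod_nb);
 *
 * The callback runs under timekeeper_lock, so it must not sleep.
 */
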
/**
 * pvclock_gtod_unregister_notifier - unregister a pvclock
 * timedata update listener
 */
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);

/*
 * Update the ktime_t based scalar nsec members of the timekeeper
 */
static inline void tk_update_ktime_data(struct timekeeper *tk)
{
        u64 seconds;
        s64 nsec;

        /*
         * The xtime based monotonic readout is:
         *	nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
         * The ktime based monotonic readout is:
         *	nsec = base_mono + now();
         * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
         */
        nsec = (s64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
        nsec *= NSEC_PER_SEC;
        nsec += tk->wall_to_monotonic.tv_nsec;
        tk->tkr_mono.base = ns_to_ktime(nsec);

        /* Update the monotonic raw base */
        seconds = tk->raw_sec;
        nsec = (u32)(tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift);
        tk->tkr_raw.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
}

/* must hold timekeeper_lock */
static void timekeeping_update(struct timekeeper *tk, unsigned int action)
{
        if (action & TK_CLEAR_NTP) {
                tk->ntp_error = 0;
                ntp_clear();
        }

        tk_update_ktime_data(tk);

        update_vsyscall(tk);
        update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);

        if (action & TK_MIRROR)
                memcpy(&shadow_timekeeper, &tk_core.timekeeper,
                       sizeof(tk_core.timekeeper));

        update_fast_timekeeper(tk);
}

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(struct timekeeper *tk)
{
        struct clocksource *clock = tk->tkr_mono.clock;
        cycle_t cycle_now, delta;

        cycle_now = tk->tkr_mono.read(clock);
        delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
        tk->tkr_mono.cycle_last = cycle_now;
        tk->tkr_raw.cycle_last = cycle_now;

        tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;

        /* If arch requires, add in arch_gettimeoffset() */
        tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;

        tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult;

        /* If arch requires, add in arch_gettimeoffset() */
        tk->tkr_raw.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_raw.shift;

        tk_normalize_xtime(tk);
}

/**
 * __getnstimeofday64 - Returns the time of day in a timespec64.
 * @ts:		pointer to the timespec to be set
 *
 * Updates the time of day in the timespec.
 * Returns 0 on success, or -ve when suspended (timespec will be undefined).
 */
int __getnstimeofday64(struct timespec64 *ts)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long seq;
        s64 nsecs = 0;

        do {
                seq = read_seqcount_begin(&tk_core.seq);

                ts->tv_sec = tk->xtime_sec;
                nsecs = timekeeping_get_ns(&tk->tkr_mono);

        } while (read_seqcount_retry(&tk_core.seq, seq));

        ts->tv_nsec = 0;
        timespec64_add_ns(ts, nsecs);

        /*
         * Do not bail out early, in case there were callers still using
         * the value, even in the face of the WARN_ON.
         */
        if (unlikely(timekeeping_suspended))
                return -EAGAIN;
        return 0;
}
EXPORT_SYMBOL(__getnstimeofday64);

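/*
 * Caller sketch (illustrative): unlike getnstimeofday64() below, which
 * WARNs, a caller that can legitimately race with suspend should check
 * the return value itself:
 *
 *	struct timespec64 ts;
 *
 *	if (__getnstimeofday64(&ts))
 *		return;		// suspended; ts contents are undefined
 *	// use ts.tv_sec / ts.tv_nsec
 */
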
/**
 * getnstimeofday64 - Returns the time of day in a timespec64.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec (WARN if suspended).
 */
void getnstimeofday64(struct timespec64 *ts)
{
        WARN_ON(__getnstimeofday64(ts));
}
EXPORT_SYMBOL(getnstimeofday64);

ktime_t ktime_get(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        ktime_t base;
        s64 nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                base = tk->tkr_mono.base;
                nsecs = timekeeping_get_ns(&tk->tkr_mono);

        } while (read_seqcount_retry(&tk_core.seq, seq));

        return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);

static ktime_t *offsets[TK_OFFS_MAX] = {
        [TK_OFFS_REAL]	= &tk_core.timekeeper.offs_real,
        [TK_OFFS_BOOT]	= &tk_core.timekeeper.offs_boot,
        [TK_OFFS_TAI]	= &tk_core.timekeeper.offs_tai,
};

ktime_t ktime_get_with_offset(enum tk_offsets offs)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        ktime_t base, *offset = offsets[offs];
        s64 nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                base = ktime_add(tk->tkr_mono.base, *offset);
                nsecs = timekeeping_get_ns(&tk->tkr_mono);

        } while (read_seqcount_retry(&tk_core.seq, seq));

        return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_with_offset);

/**
 * ktime_mono_to_any() - convert monotonic time to any other time
 * @tmono:	time to convert.
 * @offs:	which offset to use
 */
ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
{
        ktime_t *offset = offsets[offs];
        unsigned long seq;
        ktime_t tconv;

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                tconv = ktime_add(tmono, *offset);
        } while (read_seqcount_retry(&tk_core.seq, seq));

        return tconv;
}
EXPORT_SYMBOL_GPL(ktime_mono_to_any);

/**
 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
 */
ktime_t ktime_get_raw(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        ktime_t base;
        s64 nsecs;

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                base = tk->tkr_raw.base;
                nsecs = timekeeping_get_ns(&tk->tkr_raw);

        } while (read_seqcount_retry(&tk_core.seq, seq));

        return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_raw);

/**
 * ktime_get_ts64 - get the monotonic clock in timespec64 format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts64(struct timespec64 *ts)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        struct timespec64 tomono;
        s64 nsec;
        unsigned int seq;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                ts->tv_sec = tk->xtime_sec;
                nsec = timekeeping_get_ns(&tk->tkr_mono);
                tomono = tk->wall_to_monotonic;

        } while (read_seqcount_retry(&tk_core.seq, seq));

        ts->tv_sec += tomono.tv_sec;
        ts->tv_nsec = 0;
        timespec64_add_ns(ts, nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts64);

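/*
 * Typical interval-measurement sketch (illustrative): the monotonic
 * clock is the right base for durations, because settimeofday() and
 * offset injection only jump the realtime clock:
 *
 *	ktime_t start, delta;
 *
 *	start = ktime_get();
 *	do_something();		// hypothetical work
 *	delta = ktime_sub(ktime_get(), start);
 *	pr_debug("took %lld ns\n", ktime_to_ns(delta));
 */
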
#ifdef CONFIG_NTP_PPS

/**
 * getnstime_raw_and_real - get day and raw monotonic time in timespec format
 * @ts_raw:	pointer to the timespec to be set to raw monotonic time
 * @ts_real:	pointer to the timespec to be set to the time of day
 *
 * This function reads both the time of day and raw monotonic time at the
 * same time atomically and stores the resulting timestamps in timespec
 * format.
 */
void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long seq;
        s64 nsecs_raw, nsecs_real;

        WARN_ON_ONCE(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&tk_core.seq);

                ts_raw->tv_sec = tk->raw_sec;
                ts_raw->tv_nsec = 0;
                ts_real->tv_sec = tk->xtime_sec;
                ts_real->tv_nsec = 0;

                nsecs_raw = timekeeping_get_ns(&tk->tkr_raw);
                nsecs_real = timekeeping_get_ns(&tk->tkr_mono);

        } while (read_seqcount_retry(&tk_core.seq, seq));

        timespec_add_ns(ts_raw, nsecs_raw);
        timespec_add_ns(ts_real, nsecs_real);
}
EXPORT_SYMBOL(getnstime_raw_and_real);

#endif /* CONFIG_NTP_PPS */

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
        struct timespec64 now;

        getnstimeofday64(&now);
        tv->tv_sec = now.tv_sec;
        tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday - Sets the time of day
 * @tv:		pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday(const struct timespec *tv)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        struct timespec64 ts_delta, xt, tmp;
        unsigned long flags;
        int ret = 0;

        if (!timespec_valid_strict(tv))
                return -EINVAL;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&tk_core.seq);

        timekeeping_forward_now(tk);

        xt = tk_xtime(tk);
        ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
        ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;

        if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
                ret = -EINVAL;
                goto out;
        }

        tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));

        tmp = timespec_to_timespec64(*tv);
        tk_set_xtime(tk, &tmp);
out:
        timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();

        return ret;
}
EXPORT_SYMBOL(do_settimeofday);

/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts:		pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
int timekeeping_inject_offset(struct timespec *ts)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long flags;
        struct timespec64 ts64, tmp;
        int ret = 0;

        if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;

        ts64 = timespec_to_timespec64(*ts);

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&tk_core.seq);

        timekeeping_forward_now(tk);

        /* Make sure the proposed value is valid */
        tmp = timespec64_add(tk_xtime(tk), ts64);
        if (timespec64_compare(&tk->wall_to_monotonic, &ts64) > 0 ||
            !timespec64_valid_strict(&tmp)) {
                ret = -EINVAL;
                goto error;
        }

        tk_xtime_add(tk, &ts64);
        tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts64));

error: /* even if we error out, we forwarded the time, so call update */
        timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();

        return ret;
}
EXPORT_SYMBOL(timekeeping_inject_offset);

/**
 * timekeeping_get_tai_offset - Returns current TAI offset from UTC
 *
 */
s32 timekeeping_get_tai_offset(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        s32 ret;

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                ret = tk->tai_offset;
        } while (read_seqcount_retry(&tk_core.seq, seq));

        return ret;
}

/**
 * __timekeeping_set_tai_offset - Lock free worker function
 *
 */
static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
{
        tk->tai_offset = tai_offset;
        tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
}

/**
 * timekeeping_set_tai_offset - Sets the current TAI offset from UTC
 *
 */
void timekeeping_set_tai_offset(s32 tai_offset)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long flags;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&tk_core.seq);
        __timekeeping_set_tai_offset(tk, tai_offset);
        timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
        clock_was_set();
}

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        struct clocksource *new, *old;
        unsigned long flags;

        new = (struct clocksource *) data;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&tk_core.seq);

        timekeeping_forward_now(tk);
        /*
         * If the clocksource is in a module, get a module reference.
         * Succeeds for built-in code (owner == NULL) as well.
         */
        if (try_module_get(new->owner)) {
                if (!new->enable || new->enable(new) == 0) {
                        old = tk->tkr_mono.clock;
                        tk_setup_internals(tk, new);
                        if (old->disable)
                                old->disable(old);
                        module_put(old->owner);
                } else {
                        module_put(new->owner);
                }
        }
        timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock:	pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
int timekeeping_notify(struct clocksource *clock)
{
        struct timekeeper *tk = &tk_core.timekeeper;

        if (tk->tkr_mono.clock == clock)
                return 0;
        stop_machine(change_clocksource, clock, NULL);
        tick_clock_notify();
        return tk->tkr_mono.clock == clock ? 0 : -1;
}

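/*
 * Context sketch (hypothetical driver, not part of this file): a
 * clocksource that registers itself and rates better than the current
 * one ends up here via timekeeping_notify() -> stop_machine():
 *
 *	static cycle_t my_cs_read(struct clocksource *cs)
 *	{
 *		return (cycle_t)read_hw_counter();	// hypothetical hw read
 *	}
 *
 *	static struct clocksource my_cs = {
 *		.name	= "my_cs",
 *		.rating	= 300,
 *		.read	= my_cs_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 *	};
 *
 *	clocksource_register_hz(&my_cs, 1000000);	// 1 MHz counter
 */
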
/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        struct timespec64 ts64;
        unsigned long seq;
        s64 nsecs;

        do {
                seq = read_seqcount_begin(&tk_core.seq);
                ts64.tv_sec = tk->raw_sec;
                nsecs = timekeeping_get_ns(&tk->tkr_raw);

        } while (read_seqcount_retry(&tk_core.seq, seq));

        ts64.tv_nsec = 0;
        timespec64_add_ns(&ts64, nsecs);
        *ts = timespec64_to_timespec(ts64);
}
EXPORT_SYMBOL(getrawmonotonic);

/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long seq;
        int ret;

        do {
                seq = read_seqcount_begin(&tk_core.seq);

                ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

        } while (read_seqcount_retry(&tk_core.seq, seq));

        return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 */
u64 timekeeping_max_deferment(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long seq;
        u64 ret;

        do {
                seq = read_seqcount_begin(&tk_core.seq);

                ret = tk->tkr_mono.clock->max_idle_ns;

        } while (read_seqcount_retry(&tk_core.seq, seq));

        return ret;
}

/**
 * read_persistent_clock - Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __weak read_persistent_clock(struct timespec *ts)
{
        ts->tv_sec = 0;
        ts->tv_nsec = 0;
}

/**
 * read_boot_clock - Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __weak read_boot_clock(struct timespec *ts)
{
        ts->tv_sec = 0;
        ts->tv_nsec = 0;
}

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        struct clocksource *clock;
        unsigned long flags;
        struct timespec64 now, boot, tmp;
        struct timespec ts;

        read_persistent_clock(&ts);
        now = timespec_to_timespec64(ts);
        if (!timespec64_valid_strict(&now)) {
                pr_warn("WARNING: Persistent clock returned invalid value!\n"
                        "         Check your CMOS/BIOS settings.\n");
                now.tv_sec = 0;
                now.tv_nsec = 0;
        } else if (now.tv_sec || now.tv_nsec)
                persistent_clock_exist = true;

        read_boot_clock(&ts);
        boot = timespec_to_timespec64(ts);
        if (!timespec64_valid_strict(&boot)) {
                pr_warn("WARNING: Boot clock returned invalid value!\n"
                        "         Check your CMOS/BIOS settings.\n");
                boot.tv_sec = 0;
                boot.tv_nsec = 0;
        }

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&tk_core.seq);
        ntp_init();

        clock = clocksource_default_clock();
        if (clock->enable)
                clock->enable(clock);
        tk_setup_internals(tk, clock);

        tk_set_xtime(tk, &now);
        tk->raw_sec = 0;
        if (boot.tv_sec == 0 && boot.tv_nsec == 0)
                boot = tk_xtime(tk);

        set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec);
        tk_set_wall_to_mono(tk, tmp);

        timekeeping_update(tk, TK_MIRROR);

        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}

/* time in seconds when suspend began */
static struct timespec64 timekeeping_suspend_time;

/**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @delta: pointer to a timespec delta value
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables.
 */
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
                                           struct timespec64 *delta)
{
        if (!timespec64_valid_strict(delta)) {
                printk_deferred(KERN_WARNING
                                "__timekeeping_inject_sleeptime: Invalid "
                                "sleep delta value!\n");
                return;
        }
        tk_xtime_add(tk, delta);
        tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
        tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
        tk_debug_account_sleep_time(delta);
}

/**
 * timekeeping_inject_sleeptime - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec delta value
 *
 * This hook is for architectures that cannot support read_persistent_clock
 * because their RTC/persistent clock is only accessible when irqs are enabled.
 *
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
void timekeeping_inject_sleeptime(struct timespec *delta)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        struct timespec64 tmp;
        unsigned long flags;

        /*
         * Make sure we don't set the clock twice, as timekeeping_resume()
         * already did it
         */
        if (has_persistent_clock())
                return;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&tk_core.seq);

        timekeeping_forward_now(tk);

        tmp = timespec_to_timespec64(*delta);
        __timekeeping_inject_sleeptime(tk, &tmp);

        timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();
}

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static void timekeeping_resume(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        struct clocksource *clock = tk->tkr_mono.clock;
        unsigned long flags;
        struct timespec64 ts_new, ts_delta;
        struct timespec tmp;
        cycle_t cycle_now, cycle_delta;
        bool suspendtime_found = false;

        read_persistent_clock(&tmp);
        ts_new = timespec_to_timespec64(tmp);

        clockevents_resume();
        clocksource_resume();

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&tk_core.seq);

        /*
         * After system resumes, we need to calculate the suspended time and
         * compensate it for the OS time. There are 3 sources that could be
         * used: Nonstop clocksource during suspend, persistent clock and rtc
         * device.
         *
         * One specific platform may have 1 or 2 or all of them, and the
         * preference will be:
         *	suspend-nonstop clocksource -> persistent clock -> rtc
         * The less preferred source will only be tried if there is no better
         * usable source. The rtc part is handled separately in rtc core code.
         */
        cycle_now = tk->tkr_mono.read(clock);
        if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
                cycle_now > tk->tkr_mono.cycle_last) {
                u64 num, max = ULLONG_MAX;
                u32 mult = clock->mult;
                u32 shift = clock->shift;
                s64 nsec = 0;

                cycle_delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last,
                                                tk->tkr_mono.mask);

                /*
                 * "cycle_delta * mult" may overflow 64 bits if the
                 * suspended time is too long. In that case we need to
                 * do the 64-bit math carefully
                 */
                do_div(max, mult);
                if (cycle_delta > max) {
                        num = div64_u64(cycle_delta, max);
                        nsec = (((u64) max * mult) >> shift) * num;
                        cycle_delta -= num * max;
                }
                nsec += ((u64) cycle_delta * mult) >> shift;

                ts_delta = ns_to_timespec64(nsec);
                suspendtime_found = true;
        } else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
                ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
                suspendtime_found = true;
        }

        if (suspendtime_found)
                __timekeeping_inject_sleeptime(tk, &ts_delta);

        /* Re-base the last cycle value */
        tk->tkr_mono.cycle_last = cycle_now;
        tk->tkr_raw.cycle_last = cycle_now;

        tk->ntp_error = 0;
        timekeeping_suspended = 0;
        timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        touch_softlockup_watchdog();

        clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

        /* Resume hrtimers */
        hrtimers_resume();
}

static int timekeeping_suspend(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long flags;
        struct timespec64 delta, delta_delta;
        static struct timespec64 old_delta;
        struct timespec tmp;

        read_persistent_clock(&tmp);
        timekeeping_suspend_time = timespec_to_timespec64(tmp);

        /*
         * On some systems the persistent_clock cannot be detected at
         * timekeeping_init by its return value, so if we see a valid
         * value returned, update the persistent_clock_exists flag.
         */
        if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
                persistent_clock_exist = true;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&tk_core.seq);
        timekeeping_forward_now(tk);
        timekeeping_suspended = 1;

        /*
         * To avoid drift caused by repeated suspend/resumes,
         * which each can add ~1 second drift error,
         * try to compensate so the difference in system time
         * and persistent_clock time stays close to constant.
         */
        delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
        delta_delta = timespec64_sub(delta, old_delta);
        if (abs(delta_delta.tv_sec) >= 2) {
                /*
                 * if delta_delta is too large, assume time correction
                 * has occurred and set old_delta to the current delta.
                 */
                old_delta = delta;
        } else {
                /* Otherwise try to adjust the suspend time to compensate */
                timekeeping_suspend_time =
                        timespec64_add(timekeeping_suspend_time, delta_delta);
        }

        timekeeping_update(tk, TK_MIRROR);
        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
        clocksource_suspend();
        clockevents_suspend();

        return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
        .resume		= timekeeping_resume,
        .suspend	= timekeeping_suspend,
};

static int __init timekeeping_init_ops(void)
{
        register_syscore_ops(&timekeeping_syscore_ops);
        return 0;
}
device_initcall(timekeeping_init_ops);

/*
 * Apply a multiplier adjustment to the timekeeper
 */
static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
                                                         s64 offset,
                                                         bool negative,
                                                         int adj_scale)
{
        s64 interval = tk->cycle_interval;
        s32 mult_adj = 1;

        if (negative) {
                mult_adj = -mult_adj;
                interval = -interval;
                offset = -offset;
        }
        mult_adj <<= adj_scale;
        interval <<= adj_scale;
        offset <<= adj_scale;

        /*
         * So the following can be confusing.
         *
         * To keep things simple, let's assume mult_adj == 1 for now.
         *
         * When mult_adj != 1, remember that the interval and offset values
         * have been appropriately scaled so the math is the same.
         *
         * The basic idea here is that we're increasing the multiplier
         * by one, this causes the xtime_interval to be incremented by
         * one cycle_interval. This is because:
         *	xtime_interval = cycle_interval * mult
         * So if mult is being incremented by one:
         *	xtime_interval = cycle_interval * (mult + 1)
         * It's the same as:
         *	xtime_interval = (cycle_interval * mult) + cycle_interval
         * Which can be shortened to:
         *	xtime_interval += cycle_interval
         *
         * So offset stores the non-accumulated cycles. Thus the current
         * time (in shifted nanoseconds) is:
         *	now = (offset * adj) + xtime_nsec
         * Now, even though we're adjusting the clock frequency, we have
         * to keep time consistent. In other words, we can't jump back
         * in time, and we also want to avoid jumping forward in time.
         *
         * So given the same offset value, we need the time to be the same
         * both before and after the freq adjustment.
         *	now = (offset * adj_1) + xtime_nsec_1
         *	now = (offset * adj_2) + xtime_nsec_2
         * So:
         *	(offset * adj_1) + xtime_nsec_1 =
         *		(offset * adj_2) + xtime_nsec_2
         * And we know:
         *	adj_2 = adj_1 + 1
         * So:
         *	(offset * adj_1) + xtime_nsec_1 =
         *		(offset * (adj_1+1)) + xtime_nsec_2
         *	(offset * adj_1) + xtime_nsec_1 =
         *		(offset * adj_1) + offset + xtime_nsec_2
         * Canceling the sides:
         *	xtime_nsec_1 = offset + xtime_nsec_2
         * Which gives us:
         *	xtime_nsec_2 = xtime_nsec_1 - offset
         * Which simplifies to:
         *	xtime_nsec -= offset
         *
         * XXX - TODO: Doc ntp_error calculation.
         */
        tk->tkr_mono.mult += mult_adj;
        tk->xtime_interval += interval;
        tk->tkr_mono.xtime_nsec -= offset;
        tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
}

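/*
 * Tiny numeric check of the identity above (made-up values): suppose
 * offset = 1000 cycles are pending, xtime_nsec_1 = 5000 and adj_1 = 4.
 * Before the adjustment, now = 1000 * 4 + 5000 = 9000. After bumping
 * the multiplier to adj_2 = 5, keeping now = 9000 requires
 * xtime_nsec_2 = 9000 - 1000 * 5 = 4000 = xtime_nsec_1 - offset, which
 * is exactly the "xtime_nsec -= offset" step applied by the code above.
 */
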
/*
 * Calculate the multiplier adjustment needed to match the frequency
 * specified by NTP
 */
static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
                                                   s64 offset)
{
        s64 interval = tk->cycle_interval;
        s64 xinterval = tk->xtime_interval;
        s64 tick_error;
        bool negative;
        u32 adj;

        /* Remove any current error adj from freq calculation */
        if (tk->ntp_err_mult)
                xinterval -= tk->cycle_interval;

        tk->ntp_tick = ntp_tick_length();

        /* Calculate current error per tick */
        tick_error = ntp_tick_length() >> tk->ntp_error_shift;
        tick_error -= (xinterval + tk->xtime_remainder);

        /* Don't worry about correcting it if it's small */
        if (likely((tick_error >= 0) && (tick_error <= interval)))
                return;

        /* preserve the direction of correction */
        negative = (tick_error < 0);

        /* Sort out the magnitude of the correction */
        tick_error = abs64(tick_error);
        for (adj = 0; tick_error > interval; adj++)
                tick_error >>= 1;

        /* scale the corrections */
        timekeeping_apply_adjustment(tk, offset, negative, adj);
}

/*
 * Adjust the timekeeper's multiplier to the correct frequency
 * and also to reduce the accumulated error value.
 */
static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
{
        /* Correct for the current frequency error */
        timekeeping_freqadjust(tk, offset);

        /* Next make a small adjustment to fix any cumulative error */
        if (!tk->ntp_err_mult && (tk->ntp_error > 0)) {
                tk->ntp_err_mult = 1;
                timekeeping_apply_adjustment(tk, offset, 0, 0);
        } else if (tk->ntp_err_mult && (tk->ntp_error <= 0)) {
                /* Undo any existing error adjustment */
                timekeeping_apply_adjustment(tk, offset, 1, 0);
                tk->ntp_err_mult = 0;
        }

        if (unlikely(tk->tkr_mono.clock->maxadj &&
                (abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
                        > tk->tkr_mono.clock->maxadj))) {
                printk_once(KERN_WARNING
                        "Adjusting %s more than 11%% (%ld vs %ld)\n",
                        tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
                        (long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
        }

        /*
         * It may be possible that when we entered this function, xtime_nsec
         * was very small. Further, if we're slightly speeding the clocksource
         * in the code above, it's possible the required corrective factor to
         * xtime_nsec could cause it to underflow.
         *
         * Now, since we already accumulated the second, the NTP subsystem has
         * been notified via second_overflow, so we cannot simply roll the
         * accumulated second back. Instead we push xtime_nsec forward by the
         * amount we underflowed, and add that amount into the error.
         *
         * We'll correct this error next time through this function, when
         * xtime_nsec is not as small.
         */
        if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
                s64 neg = -(s64)tk->tkr_mono.xtime_nsec;
                tk->tkr_mono.xtime_nsec = 0;
                tk->ntp_error += neg << tk->ntp_error_shift;
        }
}

/**
 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
 *
 * Helper function that accumulates the nsecs greater than a second
 * from the xtime_nsec field to the xtime_secs field.
 * It also calls into the NTP code to handle leapsecond processing.
 *
 */
static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
{
        u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
        unsigned int clock_set = 0;

        while (tk->tkr_mono.xtime_nsec >= nsecps) {
                int leap;

                tk->tkr_mono.xtime_nsec -= nsecps;
                tk->xtime_sec++;

                /* Figure out if it's a leap sec and apply if needed */
                leap = second_overflow(tk->xtime_sec);
                if (unlikely(leap)) {
                        struct timespec64 ts;

                        tk->xtime_sec += leap;

                        ts.tv_sec = leap;
                        ts.tv_nsec = 0;
                        tk_set_wall_to_mono(tk,
                                timespec64_sub(tk->wall_to_monotonic, ts));

                        __timekeeping_set_tai_offset(tk, tk->tai_offset - leap);

                        clock_set = TK_CLOCK_WAS_SET;
                }
        }
        return clock_set;
}

/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds. Allows for O(log) accumulation
 * loop.
 *
 * Returns the unconsumed cycles.
 */
static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
                                        u32 shift,
                                        unsigned int *clock_set)
{
        cycle_t interval = tk->cycle_interval << shift;
        u64 snsec_per_sec;

        /* If the offset is smaller than a shifted interval, do nothing */
        if (offset < interval)
                return offset;

        /* Accumulate one shifted interval */
        offset -= interval;
        tk->tkr_mono.cycle_last += interval;
        tk->tkr_raw.cycle_last += interval;

        tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
        *clock_set |= accumulate_nsecs_to_secs(tk);

        /* Accumulate raw time */
        tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
        snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
        while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
                tk->tkr_raw.xtime_nsec -= snsec_per_sec;
                tk->raw_sec++;
        }

        /* Accumulate error between NTP and clock interval */
        tk->ntp_error += tk->ntp_tick << shift;
        tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
                                                (tk->ntp_error_shift + shift);

        return offset;
}

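/*
 * Worked example of the O(log) accumulation (made-up numbers): with
 * cycle_interval equal to 1 tick worth of cycles and offset = 100 such
 * ticks pending, update_wall_time() below starts at
 * shift = ilog2(100) = 6 and the loop consumes chunks of 64, 32 and
 * finally 4 ticks in a handful of passes rather than 100 single-tick
 * passes, halving the chunk size whenever the remaining offset no
 * longer covers cycle_interval << shift.
 */
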
/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 */
void update_wall_time(void)
{
        struct timekeeper *real_tk = &tk_core.timekeeper;
        struct timekeeper *tk = &shadow_timekeeper;
        cycle_t offset;
        int shift = 0, maxshift;
        unsigned int clock_set = 0;
        unsigned long flags;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);

        /* Make sure we're fully resumed: */
        if (unlikely(timekeeping_suspended))
                goto out;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
        offset = real_tk->cycle_interval;
#else
        offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
                                   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
#endif

        /* Check if there's really nothing to do */
        if (offset < real_tk->cycle_interval)
                goto out;

        /*
         * With NO_HZ we may have to accumulate many cycle_intervals
         * (think "ticks") worth of time at once. To do this efficiently,
         * we calculate the largest doubling multiple of cycle_intervals
         * that is smaller than the offset. We then accumulate that
         * chunk in one go, and then try to consume the next smaller
         * doubled multiple.
         */
        shift = ilog2(offset) - ilog2(tk->cycle_interval);
        shift = max(0, shift);
        /* Bound shift to one less than what overflows tick_length */
        maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
        shift = min(shift, maxshift);
        while (offset >= tk->cycle_interval) {
                offset = logarithmic_accumulation(tk, offset, shift,
                                                  &clock_set);
                if (offset < tk->cycle_interval<<shift)
                        shift--;
        }

        /* correct the clock when NTP error is too big */
        timekeeping_adjust(tk, offset);

        /*
         * XXX This can be killed once everyone converts
         * to the new update_vsyscall.
         */
        old_vsyscall_fixup(tk);

        /*
         * Finally, make sure that after the rounding
         * xtime_nsec isn't larger than NSEC_PER_SEC
         */
        clock_set |= accumulate_nsecs_to_secs(tk);

        write_seqcount_begin(&tk_core.seq);
        /*
         * Update the real timekeeper.
         *
         * We could avoid this memcpy by switching pointers, but that
         * requires changes to all other timekeeper usage sites as
         * well, i.e. move the timekeeper pointer getter into the
         * spinlocked/seqcount protected sections. And we trade this
         * memcpy under the tk_core.seq against one before we start
         * updating.
         */
        memcpy(real_tk, tk, sizeof(*tk));
        timekeeping_update(real_tk, clock_set);
        write_seqcount_end(&tk_core.seq);
out:
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
        if (clock_set)
                /* Have to call _delayed version, since in irq context */
                clock_was_set_delayed();
}

/**
 * getboottime - Return the real time of system boot.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the wall-time of boot in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);

        *ts = ktime_to_timespec(t);
}
EXPORT_SYMBOL_GPL(getboottime);

unsigned long get_seconds(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;

        return tk->xtime_sec;
}
EXPORT_SYMBOL(get_seconds);

struct timespec __current_kernel_time(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;

        return timespec64_to_timespec(tk_xtime(tk));
}

struct timespec current_kernel_time(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        struct timespec64 now;
        unsigned long seq;

        do {
                seq = read_seqcount_begin(&tk_core.seq);

                now = tk_xtime(tk);
        } while (read_seqcount_retry(&tk_core.seq, seq));

        return timespec64_to_timespec(now);
}
EXPORT_SYMBOL(current_kernel_time);

struct timespec get_monotonic_coarse(void)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        struct timespec64 now, mono;
        unsigned long seq;

        do {
                seq = read_seqcount_begin(&tk_core.seq);

                now = tk_xtime(tk);
                mono = tk->wall_to_monotonic;
        } while (read_seqcount_retry(&tk_core.seq, seq));

        set_normalized_timespec64(&now, now.tv_sec + mono.tv_sec,
                                  now.tv_nsec + mono.tv_nsec);

        return timespec64_to_timespec(now);
}

/*
 * Must hold jiffies_lock
 */
void do_timer(unsigned long ticks)
{
        jiffies_64 += ticks;
        calc_global_load(ticks);
}

/**
 * ktime_get_update_offsets_tick - hrtimer helper
 * @offs_real:	pointer to storage for monotonic -> realtime offset
 * @offs_boot:	pointer to storage for monotonic -> boottime offset
 * @offs_tai:	pointer to storage for monotonic -> clock tai offset
 *
 * Returns monotonic time at last tick and various offsets
 */
ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real, ktime_t *offs_boot,
                                      ktime_t *offs_tai)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        ktime_t base;
        u64 nsecs;

        do {
                seq = read_seqcount_begin(&tk_core.seq);

                base = tk->tkr_mono.base;
                nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;

                *offs_real = tk->offs_real;
                *offs_boot = tk->offs_boot;
                *offs_tai = tk->offs_tai;
        } while (read_seqcount_retry(&tk_core.seq, seq));

        return ktime_add_ns(base, nsecs);
}

#ifdef CONFIG_HIGH_RES_TIMERS
/**
 * ktime_get_update_offsets_now - hrtimer helper
 * @offs_real:	pointer to storage for monotonic -> realtime offset
 * @offs_boot:	pointer to storage for monotonic -> boottime offset
 * @offs_tai:	pointer to storage for monotonic -> clock tai offset
 *
 * Returns current monotonic time and updates the offsets
 * Called from hrtimer_interrupt() or retrigger_next_event()
 */
ktime_t ktime_get_update_offsets_now(ktime_t *offs_real, ktime_t *offs_boot,
                                     ktime_t *offs_tai)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        ktime_t base;
        u64 nsecs;

        do {
                seq = read_seqcount_begin(&tk_core.seq);

                base = tk->tkr_mono.base;
                nsecs = timekeeping_get_ns(&tk->tkr_mono);

                *offs_real = tk->offs_real;
                *offs_boot = tk->offs_boot;
                *offs_tai = tk->offs_tai;
        } while (read_seqcount_retry(&tk_core.seq, seq));

        return ktime_add_ns(base, nsecs);
}
#endif

/**
 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
 */
int do_adjtimex(struct timex *txc)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long flags;
        struct timespec64 ts;
        s32 orig_tai, tai;
        int ret;

        /* Validate the data before disabling interrupts */
        ret = ntp_validate_timex(txc);
        if (ret)
                return ret;

        if (txc->modes & ADJ_SETOFFSET) {
                struct timespec delta;
                delta.tv_sec = txc->time.tv_sec;
                delta.tv_nsec = txc->time.tv_usec;
                if (!(txc->modes & ADJ_NANO))
                        delta.tv_nsec *= 1000;
                ret = timekeeping_inject_offset(&delta);
                if (ret)
                        return ret;
        }

        getnstimeofday64(&ts);

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&tk_core.seq);

        orig_tai = tai = tk->tai_offset;
        ret = __do_adjtimex(txc, &ts, &tai);

        if (tai != orig_tai) {
                __timekeeping_set_tai_offset(tk, tai);
                timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
        }
        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        if (tai != orig_tai)
                clock_was_set();

        ntp_notify_cmos_timer();

        return ret;
}

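/*
 * Offset-injection sketch (illustrative): the ADJ_SETOFFSET branch
 * above corresponds to a caller filling a struct timex like
 *
 *	struct timex txc = {
 *		.modes = ADJ_SETOFFSET | ADJ_NANO,
 *	};
 *	txc.time.tv_sec = 0;
 *	txc.time.tv_usec = 500000000;	// +0.5s; ns units with ADJ_NANO
 *	do_adjtimex(&txc);
 *
 * With ADJ_NANO set, time.tv_usec carries nanoseconds and is passed
 * through to timekeeping_inject_offset() unscaled; otherwise it is
 * treated as microseconds and multiplied by 1000 above.
 */
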
#ifdef CONFIG_NTP_PPS
/**
 * hardpps() - Accessor function to NTP __hardpps function
 */
void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&tk_core.seq);

        __hardpps(phase_ts, raw_ts);

        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}
EXPORT_SYMBOL(hardpps);
#endif

/**
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks:	number of ticks that have elapsed since the last call.
 *
 * Must be called with interrupts disabled.
 */
void xtime_update(unsigned long ticks)
{
        write_seqlock(&jiffies_lock);
        do_timer(ticks);
        write_sequnlock(&jiffies_lock);
        update_wall_time();
}