/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 *
 */

#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
#include <linux/pvclock_gtod.h>

#include "tick-internal.h"
#include "ntp_internal.h"

static struct timekeeper timekeeper;
static DEFINE_RAW_SPINLOCK(timekeeper_lock);
static seqcount_t timekeeper_seq;
static struct timekeeper shadow_timekeeper;
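
/*
 * Locking overview (inferred from the code below): timekeeper_lock
 * serializes all writers, while timekeeper_seq lets lock-free readers
 * detect a concurrent update and retry. shadow_timekeeper is a scratch
 * copy: update_wall_time() accumulates into it outside the seqcount
 * write section and memcpy()s the result back, and writers that modify
 * the real timekeeper directly mirror it back into the shadow, keeping
 * the read-side retry window short.
 */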

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

/* Flag for if there is a persistent clock on this platform */
bool __read_mostly persistent_clock_exist = false;

static inline void tk_normalize_xtime(struct timekeeper *tk)
{
        while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) {
                tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift;
                tk->xtime_sec++;
        }
}

static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts)
{
        tk->xtime_sec = ts->tv_sec;
        tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift;
}

static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
{
        tk->xtime_sec += ts->tv_sec;
        tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift;
        tk_normalize_xtime(tk);
}

static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
{
        struct timespec tmp;

        /*
         * Verify consistency of: offset_real = -wall_to_monotonic
         * before modifying anything
         */
        set_normalized_timespec(&tmp, -tk->wall_to_monotonic.tv_sec,
                                        -tk->wall_to_monotonic.tv_nsec);
        WARN_ON_ONCE(tk->offs_real.tv64 != timespec_to_ktime(tmp).tv64);
        tk->wall_to_monotonic = wtm;
        set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
        tk->offs_real = timespec_to_ktime(tmp);
        tk->offs_tai = ktime_sub(tk->offs_real, ktime_set(tk->tai_offset, 0));
}

static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
{
        /* Verify consistency before modifying */
        WARN_ON_ONCE(tk->offs_boot.tv64 != timespec_to_ktime(tk->total_sleep_time).tv64);

        tk->total_sleep_time    = t;
        tk->offs_boot           = timespec_to_ktime(t);
}

/**
 * tk_setup_internals - Set up internals to use clocksource clock.
 *
 * @tk:         Pointer to the timekeeper to configure.
 * @clock:      Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
        cycle_t interval;
        u64 tmp, ntpinterval;
        struct clocksource *old_clock;

        old_clock = tk->clock;
        tk->clock = clock;
        tk->cycle_last = clock->cycle_last = clock->read(clock);

        /* Do the ns -> cycle conversion first, using original mult */
        tmp = NTP_INTERVAL_LENGTH;
        tmp <<= clock->shift;
        ntpinterval = tmp;
        tmp += clock->mult/2;
        do_div(tmp, clock->mult);
        if (tmp == 0)
                tmp = 1;

        interval = (cycle_t) tmp;
        tk->cycle_interval = interval;

        /* Go back from cycles -> shifted ns */
        tk->xtime_interval = (u64) interval * clock->mult;
        tk->xtime_remainder = ntpinterval - tk->xtime_interval;
        tk->raw_interval =
                ((u64) interval * clock->mult) >> clock->shift;

         /* if changing clocks, convert xtime_nsec shift units */
        if (old_clock) {
                int shift_change = clock->shift - old_clock->shift;
                if (shift_change < 0)
                        tk->xtime_nsec >>= -shift_change;
                else
                        tk->xtime_nsec <<= shift_change;
        }
        tk->shift = clock->shift;

        tk->ntp_error = 0;
        tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;

        /*
         * The timekeeper keeps its own mult values for the currently
         * active clocksource. These values will be adjusted via NTP
         * to counteract clock drifting.
         */
        tk->mult = clock->mult;
}
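
/*
 * Worked example (illustrative numbers, not from the original source):
 * a clocksource ticking at 10 MHz with shift = 0 has mult = 100, since
 * cyc2ns(c) = (c * mult) >> shift must yield 100 ns per cycle. For an
 * NTP interval of 10,000,000 ns (10 ms), tk_setup_internals() computes:
 *
 *      tmp  = (10000000 << 0) + 100/2 = 10000050;
 *      do_div(tmp, 100);               // -> cycle_interval = 100000 cycles
 *      xtime_interval = 100000 * 100;  // -> 10000000 shifted ns
 *
 * i.e. one NTP tick consumes 100000 hardware cycles; xtime_remainder
 * (0 here) carries whatever rounding error is left over.
 */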

/* Timekeeper helper functions. */

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
u32 (*arch_gettimeoffset)(void);

u32 get_arch_timeoffset(void)
{
        if (likely(arch_gettimeoffset))
                return arch_gettimeoffset();
        return 0;
}
#else
static inline u32 get_arch_timeoffset(void) { return 0; }
#endif

static inline s64 timekeeping_get_ns(struct timekeeper *tk)
{
        cycle_t cycle_now, cycle_delta;
        struct clocksource *clock;
        s64 nsec;

        /* read clocksource: */
        clock = tk->clock;
        cycle_now = clock->read(clock);

        /* calculate the delta since the last update_wall_time: */
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

        nsec = cycle_delta * tk->mult + tk->xtime_nsec;
        nsec >>= tk->shift;

        /* If arch requires, add in get_arch_timeoffset() */
        return nsec + get_arch_timeoffset();
}

static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
{
        cycle_t cycle_now, cycle_delta;
        struct clocksource *clock;
        s64 nsec;

        /* read clocksource: */
        clock = tk->clock;
        cycle_now = clock->read(clock);

        /* calculate the delta since the last update_wall_time: */
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

        /* convert delta to nanoseconds. */
        nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);

        /* If arch requires, add in get_arch_timeoffset() */
        return nsec + get_arch_timeoffset();
}
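
/*
 * Note on the masked subtraction above (illustrative example, not from
 * the original source): "(cycle_now - cycle_last) & mask" stays correct
 * across one counter wrap because the subtraction is done modulo the
 * counter width. E.g. for a 16-bit counter (mask = 0xffff), with
 * cycle_last = 0xfff0 and cycle_now = 0x0010 after wrapping:
 *
 *      (0x0010 - 0xfff0) & 0xffff = 0x0020     // 32 cycles elapsed
 *
 * This only holds while fewer than mask+1 cycles pass between updates.
 */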

static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);

static void update_pvclock_gtod(struct timekeeper *tk)
{
        raw_notifier_call_chain(&pvclock_gtod_chain, 0, tk);
}

/**
 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 */
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
        update_pvclock_gtod(tk);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
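
/*
 * Example caller (hypothetical, for illustration only): a paravirt
 * clock driver that wants a callback on every timekeeping update
 * would register roughly like this:
 *
 *      static int my_gtod_update(struct notifier_block *nb,
 *                                unsigned long unused, void *priv)
 *      {
 *              struct timekeeper *tk = priv;
 *              // ... push tk->xtime_sec etc. to the hypervisor ...
 *              return NOTIFY_OK;
 *      }
 *      static struct notifier_block my_nb = { .notifier_call = my_gtod_update };
 *
 *      pvclock_gtod_register_notifier(&my_nb);
 *
 * Registration also fires the chain once, so the listener starts with
 * current timedata.
 */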

/**
 * pvclock_gtod_unregister_notifier - unregister a pvclock
 * timedata update listener
 */
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);

/* must hold timekeeper_lock */
static void timekeeping_update(struct timekeeper *tk, bool clearntp, bool mirror)
{
        if (clearntp) {
                tk->ntp_error = 0;
                ntp_clear();
        }
        update_vsyscall(tk);
        update_pvclock_gtod(tk);

        if (mirror)
                memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper));
}

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(struct timekeeper *tk)
{
        cycle_t cycle_now, cycle_delta;
        struct clocksource *clock;
        s64 nsec;

        clock = tk->clock;
        cycle_now = clock->read(clock);
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
        tk->cycle_last = clock->cycle_last = cycle_now;

        tk->xtime_nsec += cycle_delta * tk->mult;

        /* If arch requires, add in get_arch_timeoffset() */
        tk->xtime_nsec += (u64)get_arch_timeoffset() << tk->shift;

        tk_normalize_xtime(tk);

        nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
        timespec_add_ns(&tk->raw_time, nsec);
}

/**
 * __getnstimeofday - Returns the time of day in a timespec.
 * @ts:         pointer to the timespec to be set
 *
 * Updates the time of day in the timespec.
 * Returns 0 on success, or -ve when suspended (timespec will be undefined).
 */
int __getnstimeofday(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;
        s64 nsecs = 0;

        do {
                seq = read_seqcount_begin(&timekeeper_seq);

                ts->tv_sec = tk->xtime_sec;
                nsecs = timekeeping_get_ns(tk);

        } while (read_seqcount_retry(&timekeeper_seq, seq));

        ts->tv_nsec = 0;
        timespec_add_ns(ts, nsecs);

        /*
         * Do not bail out early, in case there were callers still using
         * the value, even in the face of the WARN_ON.
         */
        if (unlikely(timekeeping_suspended))
                return -EAGAIN;
        return 0;
}
EXPORT_SYMBOL(__getnstimeofday);
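
/*
 * Example caller (hypothetical, for illustration only): code that may
 * run across suspend and wants to skip timestamping rather than WARN
 * can check the return value:
 *
 *      struct timespec ts;
 *
 *      if (__getnstimeofday(&ts))
 *              return;                 // suspended: ts is undefined
 *      record_timestamp(&ts);          // record_timestamp() is made up
 */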

/**
 * getnstimeofday - Returns the time of day in a timespec.
 * @ts:         pointer to the timespec to be set
 *
 * Returns the time of day in a timespec (WARN if suspended).
 */
void getnstimeofday(struct timespec *ts)
{
        WARN_ON(__getnstimeofday(ts));
}
EXPORT_SYMBOL(getnstimeofday);

ktime_t ktime_get(void)
{
        struct timekeeper *tk = &timekeeper;
        unsigned int seq;
        s64 secs, nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&timekeeper_seq);
                secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
                nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec;

        } while (read_seqcount_retry(&timekeeper_seq, seq));
        /*
         * Use ktime_set/ktime_add_ns to create a proper ktime on
         * 32-bit architectures without CONFIG_KTIME_SCALAR.
         */
        return ktime_add_ns(ktime_set(secs, 0), nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:         pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;
        struct timespec tomono;
        s64 nsec;
        unsigned int seq;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&timekeeper_seq);
                ts->tv_sec = tk->xtime_sec;
                nsec = timekeeping_get_ns(tk);
                tomono = tk->wall_to_monotonic;

        } while (read_seqcount_retry(&timekeeper_seq, seq));

        ts->tv_sec += tomono.tv_sec;
        ts->tv_nsec = 0;
        timespec_add_ns(ts, nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);


/**
 * timekeeping_clocktai - Returns the TAI time of day in a timespec
 * @ts:         pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void timekeeping_clocktai(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;
        u64 nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&timekeeper_seq);

                ts->tv_sec = tk->xtime_sec + tk->tai_offset;
                nsecs = timekeeping_get_ns(tk);

        } while (read_seqcount_retry(&timekeeper_seq, seq));

        ts->tv_nsec = 0;
        timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(timekeeping_clocktai);


/**
 * ktime_get_clocktai - Returns the TAI time of day in a ktime
 *
 * Returns the time of day in a ktime.
 */
ktime_t ktime_get_clocktai(void)
{
        struct timespec ts;

        timekeeping_clocktai(&ts);
        return timespec_to_ktime(ts);
}
EXPORT_SYMBOL(ktime_get_clocktai);

#ifdef CONFIG_NTP_PPS

/**
 * getnstime_raw_and_real - get day and raw monotonic time in timespec format
 * @ts_raw:     pointer to the timespec to be set to raw monotonic time
 * @ts_real:    pointer to the timespec to be set to the time of day
 *
 * This function reads both the time of day and raw monotonic time at the
 * same time atomically and stores the resulting timestamps in timespec
 * format.
 */
void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;
        s64 nsecs_raw, nsecs_real;

        WARN_ON_ONCE(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&timekeeper_seq);

                *ts_raw = tk->raw_time;
                ts_real->tv_sec = tk->xtime_sec;
                ts_real->tv_nsec = 0;

                nsecs_raw = timekeeping_get_ns_raw(tk);
                nsecs_real = timekeeping_get_ns(tk);

        } while (read_seqcount_retry(&timekeeper_seq, seq));

        timespec_add_ns(ts_raw, nsecs_raw);
        timespec_add_ns(ts_real, nsecs_real);
}
EXPORT_SYMBOL(getnstime_raw_and_real);

#endif /* CONFIG_NTP_PPS */

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:         pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
        struct timespec now;

        getnstimeofday(&now);
        tv->tv_sec = now.tv_sec;
        tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday - Sets the time of day
 * @tv:         pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday(const struct timespec *tv)
{
        struct timekeeper *tk = &timekeeper;
        struct timespec ts_delta, xt;
        unsigned long flags;

        if (!timespec_valid_strict(tv))
                return -EINVAL;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&timekeeper_seq);

        timekeeping_forward_now(tk);

        xt = tk_xtime(tk);
        ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
        ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;

        tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, ts_delta));

        tk_set_xtime(tk, tv);

        timekeeping_update(tk, true, true);

        write_seqcount_end(&timekeeper_seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();

        return 0;
}
EXPORT_SYMBOL(do_settimeofday);
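
/*
 * Example caller (hypothetical, for illustration only): kernel code
 * setting the wall clock to a value obtained elsewhere, e.g. from an
 * RTC, must pass a normalized timespec and check for -EINVAL:
 *
 *      struct timespec new_time = { .tv_sec = 1000000000, .tv_nsec = 0 };
 *      int err = do_settimeofday(&new_time);
 *
 *      if (err)
 *              pr_warn("settimeofday rejected: %d\n", err);
 */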

/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts:         pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
int timekeeping_inject_offset(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long flags;
        struct timespec tmp;
        int ret = 0;

        if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&timekeeper_seq);

        timekeeping_forward_now(tk);

        /* Make sure the proposed value is valid */
        tmp = timespec_add(tk_xtime(tk), *ts);
        if (!timespec_valid_strict(&tmp)) {
                ret = -EINVAL;
                goto error;
        }

        tk_xtime_add(tk, ts);
        tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts));

error: /* even if we error out, we forwarded the time, so call update */
        timekeeping_update(tk, true, true);

        write_seqcount_end(&timekeeper_seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();

        return ret;
}
EXPORT_SYMBOL(timekeeping_inject_offset);
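
/*
 * Example caller (hypothetical, for illustration only): stepping the
 * clock back by 500 ms. Note tv_nsec must stay in [0, NSEC_PER_SEC),
 * so a negative offset is expressed with a negative tv_sec:
 *
 *      struct timespec delta = { .tv_sec = -1, .tv_nsec = 500000000 };
 *
 *      timekeeping_inject_offset(&delta);      // net effect: -0.5 s
 *
 * do_adjtimex() further down in this file hands this function the same
 * kind of timespec for ADJ_SETOFFSET.
 */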


/**
 * timekeeping_get_tai_offset - Returns current TAI offset from UTC
 *
 */
s32 timekeeping_get_tai_offset(void)
{
        struct timekeeper *tk = &timekeeper;
        unsigned int seq;
        s32 ret;

        do {
                seq = read_seqcount_begin(&timekeeper_seq);
                ret = tk->tai_offset;
        } while (read_seqcount_retry(&timekeeper_seq, seq));

        return ret;
}

/**
 * __timekeeping_set_tai_offset - Lock free worker function
 *
 */
static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
{
        tk->tai_offset = tai_offset;
        tk->offs_tai = ktime_sub(tk->offs_real, ktime_set(tai_offset, 0));
}

/**
 * timekeeping_set_tai_offset - Sets the current TAI offset from UTC
 *
 */
void timekeeping_set_tai_offset(s32 tai_offset)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long flags;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&timekeeper_seq);
        __timekeeping_set_tai_offset(tk, tai_offset);
        write_seqcount_end(&timekeeper_seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
        clock_was_set();
}

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
        struct timekeeper *tk = &timekeeper;
        struct clocksource *new, *old;
        unsigned long flags;

        new = (struct clocksource *) data;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&timekeeper_seq);

        timekeeping_forward_now(tk);
        if (!new->enable || new->enable(new) == 0) {
                old = tk->clock;
                tk_setup_internals(tk, new);
                if (old->disable)
                        old->disable(old);
        }
        timekeeping_update(tk, true, true);

        write_seqcount_end(&timekeeper_seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock:              pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
void timekeeping_notify(struct clocksource *clock)
{
        struct timekeeper *tk = &timekeeper;

        if (tk->clock == clock)
                return;
        stop_machine(change_clocksource, clock, NULL);
        tick_clock_notify();
}

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
        struct timespec now;

        getnstimeofday(&now);

        return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);

/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts:         pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;
        s64 nsecs;

        do {
                seq = read_seqcount_begin(&timekeeper_seq);
                nsecs = timekeeping_get_ns_raw(tk);
                *ts = tk->raw_time;

        } while (read_seqcount_retry(&timekeeper_seq, seq));

        timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);

/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;
        int ret;

        do {
                seq = read_seqcount_begin(&timekeeper_seq);

                ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

        } while (read_seqcount_retry(&timekeeper_seq, seq));

        return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 */
u64 timekeeping_max_deferment(void)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;
        u64 ret;

        do {
                seq = read_seqcount_begin(&timekeeper_seq);

                ret = tk->clock->max_idle_ns;

        } while (read_seqcount_retry(&timekeeper_seq, seq));

        return ret;
}

/**
 * read_persistent_clock -  Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_persistent_clock(struct timespec *ts)
{
        ts->tv_sec = 0;
        ts->tv_nsec = 0;
}

/**
 * read_boot_clock -  Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_boot_clock(struct timespec *ts)
{
        ts->tv_sec = 0;
        ts->tv_nsec = 0;
}

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
        struct timekeeper *tk = &timekeeper;
        struct clocksource *clock;
        unsigned long flags;
        struct timespec now, boot, tmp;

        read_persistent_clock(&now);

        if (!timespec_valid_strict(&now)) {
                pr_warn("WARNING: Persistent clock returned invalid value!\n"
                        "         Check your CMOS/BIOS settings.\n");
                now.tv_sec = 0;
                now.tv_nsec = 0;
        } else if (now.tv_sec || now.tv_nsec)
                persistent_clock_exist = true;

        read_boot_clock(&boot);
        if (!timespec_valid_strict(&boot)) {
                pr_warn("WARNING: Boot clock returned invalid value!\n"
                        "         Check your CMOS/BIOS settings.\n");
                boot.tv_sec = 0;
                boot.tv_nsec = 0;
        }

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&timekeeper_seq);
        ntp_init();

        clock = clocksource_default_clock();
        if (clock->enable)
                clock->enable(clock);
        tk_setup_internals(tk, clock);

        tk_set_xtime(tk, &now);
        tk->raw_time.tv_sec = 0;
        tk->raw_time.tv_nsec = 0;
        if (boot.tv_sec == 0 && boot.tv_nsec == 0)
                boot = tk_xtime(tk);

        set_normalized_timespec(&tmp, -boot.tv_sec, -boot.tv_nsec);
        tk_set_wall_to_mono(tk, tmp);

        tmp.tv_sec = 0;
        tmp.tv_nsec = 0;
        tk_set_sleep_time(tk, tmp);

        memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper));

        write_seqcount_end(&timekeeper_seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}

/* time in seconds when suspend began */
static struct timespec timekeeping_suspend_time;

/**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @delta: pointer to a timespec delta value
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables.
 */
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
                                                        struct timespec *delta)
{
        if (!timespec_valid_strict(delta)) {
                printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
                                        "sleep delta value!\n");
                return;
        }
        tk_xtime_add(tk, delta);
        tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *delta));
        tk_set_sleep_time(tk, timespec_add(tk->total_sleep_time, *delta));
}

/**
 * timekeeping_inject_sleeptime - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec delta value
 *
 * This hook is for architectures that cannot support read_persistent_clock
 * because their RTC/persistent clock is only accessible when irqs are enabled.
 *
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
void timekeeping_inject_sleeptime(struct timespec *delta)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long flags;

        /*
         * Make sure we don't set the clock twice, as timekeeping_resume()
         * already did it
         */
        if (has_persistent_clock())
                return;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&timekeeper_seq);

        timekeeping_forward_now(tk);

        __timekeeping_inject_sleeptime(tk, delta);

        timekeeping_update(tk, true, true);

        write_seqcount_end(&timekeeper_seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();
}
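
/*
 * Example caller (hypothetical sketch of the rtc_resume() path named
 * above): the RTC core reads the clock before suspend and after resume,
 * then injects the difference:
 *
 *      struct timespec sleep = timespec_sub(rtc_now, rtc_at_suspend);
 *
 *      if (sleep.tv_sec >= 0)
 *              timekeeping_inject_sleeptime(&sleep);
 *
 * where rtc_now/rtc_at_suspend are illustrative variables, not ones
 * from this file.
 */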

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static void timekeeping_resume(void)
{
        struct timekeeper *tk = &timekeeper;
        struct clocksource *clock = tk->clock;
        unsigned long flags;
        struct timespec ts_new, ts_delta;
        cycle_t cycle_now, cycle_delta;
        bool suspendtime_found = false;

        read_persistent_clock(&ts_new);

        clockevents_resume();
        clocksource_resume();

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&timekeeper_seq);

        /*
         * After system resumes, we need to calculate the suspended time and
         * compensate it for the OS time. There are 3 sources that could be
         * used: Nonstop clocksource during suspend, persistent clock and rtc
         * device.
         *
         * One specific platform may have 1 or 2 or all of them, and the
         * preference will be:
         *      suspend-nonstop clocksource -> persistent clock -> rtc
         * The less preferred source will only be tried if there is no better
         * usable source. The rtc part is handled separately in rtc core code.
         */
        cycle_now = clock->read(clock);
        if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
                cycle_now > clock->cycle_last) {
                u64 num, max = ULLONG_MAX;
                u32 mult = clock->mult;
                u32 shift = clock->shift;
                s64 nsec = 0;

                cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

                /*
                 * "cycle_delta * mult" may overflow 64 bits if the
                 * suspended time is too long. In that case we need to do
                 * the 64-bit math carefully
                 */
                do_div(max, mult);
                if (cycle_delta > max) {
                        num = div64_u64(cycle_delta, max);
                        nsec = (((u64) max * mult) >> shift) * num;
                        cycle_delta -= num * max;
                }
                nsec += ((u64) cycle_delta * mult) >> shift;

                ts_delta = ns_to_timespec(nsec);
                suspendtime_found = true;
        } else if (timespec_compare(&ts_new, &timekeeping_suspend_time) > 0) {
                ts_delta = timespec_sub(ts_new, timekeeping_suspend_time);
                suspendtime_found = true;
        }

        if (suspendtime_found)
                __timekeeping_inject_sleeptime(tk, &ts_delta);

        /* Re-base the last cycle value */
        tk->cycle_last = clock->cycle_last = cycle_now;
        tk->ntp_error = 0;
        timekeeping_suspended = 0;
        timekeeping_update(tk, false, true);
        write_seqcount_end(&timekeeper_seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        touch_softlockup_watchdog();

        clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

        /* Resume hrtimers */
        hrtimers_resume();
}

static int timekeeping_suspend(void)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long flags;
        struct timespec         delta, delta_delta;
        static struct timespec  old_delta;

        read_persistent_clock(&timekeeping_suspend_time);

        /*
         * On some systems the persistent clock cannot be detected at
         * timekeeping_init by its return value, so if we see a valid
         * value returned, update the persistent_clock_exist flag.
         */
        if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
                persistent_clock_exist = true;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&timekeeper_seq);
        timekeeping_forward_now(tk);
        timekeeping_suspended = 1;

        /*
         * To avoid drift caused by repeated suspend/resumes,
         * which each can add ~1 second drift error,
         * try to compensate so the difference in system time
         * and persistent_clock time stays close to constant.
         */
        delta = timespec_sub(tk_xtime(tk), timekeeping_suspend_time);
        delta_delta = timespec_sub(delta, old_delta);
        if (abs(delta_delta.tv_sec) >= 2) {
                /*
                 * if delta_delta is too large, assume time correction
                 * has occurred and set old_delta to the current delta.
                 */
                old_delta = delta;
        } else {
                /* Otherwise try to adjust the suspend time to compensate */
                timekeeping_suspend_time =
                        timespec_add(timekeeping_suspend_time, delta_delta);
        }
        write_seqcount_end(&timekeeper_seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
        clocksource_suspend();
        clockevents_suspend();

        return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
        .resume         = timekeeping_resume,
        .suspend        = timekeeping_suspend,
};

static int __init timekeeping_init_ops(void)
{
        register_syscore_ops(&timekeeping_syscore_ops);
        return 0;
}

device_initcall(timekeeping_init_ops);

/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int timekeeping_bigadjust(struct timekeeper *tk,
                                                 s64 error, s64 *interval,
                                                 s64 *offset)
{
        s64 tick_error, i;
        u32 look_ahead, adj;
        s32 error2, mult;

        /*
         * Use the current error value to determine how much to look ahead.
         * The larger the error the slower we adjust for it to avoid problems
         * with losing too many ticks, otherwise we would overadjust and
         * produce an even larger error.  The smaller the adjustment the
         * faster we try to adjust for it, as lost ticks can do less harm
         * here.  This is tuned so that an error of about 1 msec is adjusted
         * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
         */
        error2 = tk->ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
        error2 = abs(error2);
        for (look_ahead = 0; error2 > 0; look_ahead++)
                error2 >>= 2;

        /*
         * Now calculate the error in (1 << look_ahead) ticks, but first
         * remove the single look ahead already included in the error.
         */
        tick_error = ntp_tick_length() >> (tk->ntp_error_shift + 1);
        tick_error -= tk->xtime_interval >> 1;
        error = ((error - tick_error) >> look_ahead) + tick_error;

        /* Finally calculate the adjustment shift value.  */
        i = *interval;
        mult = 1;
        if (error < 0) {
                error = -error;
                *interval = -*interval;
                *offset = -*offset;
                mult = -1;
        }
        for (adj = 0; error > i; adj++)
                error >>= 1;

        *interval <<= adj;
        *offset <<= adj;
        return mult << adj;
}

/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
{
        s64 error, interval = tk->cycle_interval;
        int adj;

        /*
         * The point of this is to check if the error is greater than half
         * an interval.
         *
         * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
         *
         * Note we subtract one in the shift, so that error is really error*2.
         * This "saves" dividing(shifting) interval twice, but keeps the
         * (error > interval) comparison as still measuring if error is
         * larger than half an interval.
         *
         * Note: It does not "save" on aggravation when reading the code.
         */
        error = tk->ntp_error >> (tk->ntp_error_shift - 1);
        if (error > interval) {
                /*
                 * We now divide error by 4 (via shift), which checks if
                 * the error is greater than twice the interval.
                 * If it is greater, we need a bigadjust; if it's smaller,
                 * we can adjust by 1.
                 */
                error >>= 2;
                /*
                 * XXX - In update_wall_time, we round up to the next
                 * nanosecond, and store the amount rounded up into
                 * the error. This causes the likely below to be unlikely.
                 *
                 * The proper fix is to avoid rounding up by using
                 * the high precision tk->xtime_nsec instead of
                 * xtime.tv_nsec everywhere. Fixing this will take some
                 * time.
                 */
                if (likely(error <= interval))
                        adj = 1;
                else
                        adj = timekeeping_bigadjust(tk, error, &interval, &offset);
        } else {
                if (error < -interval) {
                        /* See comment above, this is just switched for the negative */
                        error >>= 2;
                        if (likely(error >= -interval)) {
                                adj = -1;
                                interval = -interval;
                                offset = -offset;
                        } else {
                                adj = timekeeping_bigadjust(tk, error, &interval, &offset);
                        }
                } else {
                        goto out_adjust;
                }
        }

        if (unlikely(tk->clock->maxadj &&
                (tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
                printk_once(KERN_WARNING
                        "Adjusting %s more than 11%% (%ld vs %ld)\n",
                        tk->clock->name, (long)tk->mult + adj,
                        (long)tk->clock->mult + tk->clock->maxadj);
        }
        /*
         * So the following can be confusing.
         *
         * To keep things simple, let's assume adj == 1 for now.
         *
         * When adj != 1, remember that the interval and offset values
         * have been appropriately scaled so the math is the same.
         *
         * The basic idea here is that we're increasing the multiplier
         * by one, this causes the xtime_interval to be incremented by
         * one cycle_interval. This is because:
         *      xtime_interval = cycle_interval * mult
         * So if mult is being incremented by one:
         *      xtime_interval = cycle_interval * (mult + 1)
         * It's the same as:
         *      xtime_interval = (cycle_interval * mult) + cycle_interval
         * Which can be shortened to:
         *      xtime_interval += cycle_interval
         *
         * So offset stores the non-accumulated cycles. Thus the current
         * time (in shifted nanoseconds) is:
         *      now = (offset * adj) + xtime_nsec
         * Now, even though we're adjusting the clock frequency, we have
         * to keep time consistent. In other words, we can't jump back
         * in time, and we also want to avoid jumping forward in time.
         *
         * So given the same offset value, we need the time to be the same
         * both before and after the freq adjustment.
         *      now = (offset * adj_1) + xtime_nsec_1
         *      now = (offset * adj_2) + xtime_nsec_2
         * So:
         *      (offset * adj_1) + xtime_nsec_1 =
         *              (offset * adj_2) + xtime_nsec_2
         * And we know:
         *      adj_2 = adj_1 + 1
         * So:
         *      (offset * adj_1) + xtime_nsec_1 =
         *              (offset * (adj_1+1)) + xtime_nsec_2
         *      (offset * adj_1) + xtime_nsec_1 =
         *              (offset * adj_1) + offset + xtime_nsec_2
         * Canceling the sides:
         *      xtime_nsec_1 = offset + xtime_nsec_2
         * Which gives us:
         *      xtime_nsec_2 = xtime_nsec_1 - offset
         * Which simplifies to:
         *      xtime_nsec -= offset
         *
         * XXX - TODO: Doc ntp_error calculation.
         */
        tk->mult += adj;
        tk->xtime_interval += interval;
        tk->xtime_nsec -= offset;
        tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;

out_adjust:
        /*
         * It may be possible that when we entered this function, xtime_nsec
         * was very small.  Further, if we're slightly speeding the clocksource
         * in the code above, it's possible the required corrective factor to
         * xtime_nsec could cause it to underflow.
         *
         * Now, since we already accumulated the second, we cannot simply roll
         * the accumulated second back, since the NTP subsystem has been
         * notified via second_overflow. So instead we push xtime_nsec forward
         * by the amount we underflowed, and add that amount into the error.
         *
         * We'll correct this error next time through this function, when
         * xtime_nsec is not as small.
         */
        if (unlikely((s64)tk->xtime_nsec < 0)) {
                s64 neg = -(s64)tk->xtime_nsec;
                tk->xtime_nsec = 0;
                tk->ntp_error += neg << tk->ntp_error_shift;
        }

}

/**
 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
 *
 * Helper function that accumulates the nsecs greater than a second
 * from the xtime_nsec field to the xtime_secs field.
 * It also calls into the NTP code to handle leapsecond processing.
 *
 */
static inline void accumulate_nsecs_to_secs(struct timekeeper *tk)
{
        u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;

        while (tk->xtime_nsec >= nsecps) {
                int leap;

                tk->xtime_nsec -= nsecps;
                tk->xtime_sec++;

                /* Figure out if it's a leap sec and apply if needed */
                leap = second_overflow(tk->xtime_sec);
                if (unlikely(leap)) {
                        struct timespec ts;

                        tk->xtime_sec += leap;

                        ts.tv_sec = leap;
                        ts.tv_nsec = 0;
                        tk_set_wall_to_mono(tk,
                                timespec_sub(tk->wall_to_monotonic, ts));

                        __timekeeping_set_tai_offset(tk, tk->tai_offset - leap);

                        clock_was_set_delayed();
                }
        }
}

/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds. Allows for O(log) accumulation
 * loop.
 *
 * Returns the unconsumed cycles.
 */
static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
                                                u32 shift)
{
        cycle_t interval = tk->cycle_interval << shift;
        u64 raw_nsecs;

        /* If the offset is smaller than a shifted interval, do nothing */
        if (offset < interval)
                return offset;

        /* Accumulate one shifted interval */
        offset -= interval;
        tk->cycle_last += interval;

        tk->xtime_nsec += tk->xtime_interval << shift;
        accumulate_nsecs_to_secs(tk);

        /* Accumulate raw time */
        raw_nsecs = (u64)tk->raw_interval << shift;
        raw_nsecs += tk->raw_time.tv_nsec;
        if (raw_nsecs >= NSEC_PER_SEC) {
                u64 raw_secs = raw_nsecs;
                raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
                tk->raw_time.tv_sec += raw_secs;
        }
        tk->raw_time.tv_nsec = raw_nsecs;

        /* Accumulate error between NTP and clock interval */
        tk->ntp_error += ntp_tick_length() << shift;
        tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
                                                (tk->ntp_error_shift + shift);

        return offset;
}
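
/*
 * Worked example (illustrative numbers, not from the original source):
 * suppose the caller update_wall_time() below finds offset equal to
 * 13 cycle_intervals pending, e.g. after a long NO_HZ idle period.
 * Assuming cycle_interval is a power of two for simplicity, it starts
 * with shift = ilog2(13) = 3 and the loop proceeds:
 *
 *      shift 3: consume 8 intervals -> 5 left; shift drops to 2
 *      shift 2: consume 4 intervals -> 1 left; shift drops to 1
 *      shift 1: 1 < 2, nothing consumed; shift drops to 0
 *      shift 0: consume the last interval -> 0 left
 *
 * Three accumulation steps instead of thirteen, which is the O(log)
 * behavior the comment above describes.
 */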

#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
static inline void old_vsyscall_fixup(struct timekeeper *tk)
{
        s64 remainder;

        /*
         * Store only full nanoseconds into xtime_nsec after rounding
         * it up and add the remainder to the error difference.
         * XXX - This is necessary to avoid small 1ns inconsistencies caused
         * by truncating the remainder in vsyscalls. However, it causes
         * additional work to be done in timekeeping_adjust(). Once
         * the vsyscall implementations are converted to use xtime_nsec
         * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
         * users are removed, this can be killed.
         */
        remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
        tk->xtime_nsec -= remainder;
        tk->xtime_nsec += 1ULL << tk->shift;
        tk->ntp_error += remainder << tk->ntp_error_shift;
}
#else
#define old_vsyscall_fixup(tk)
#endif


/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 */
static void update_wall_time(void)
{
        struct clocksource *clock;
        struct timekeeper *real_tk = &timekeeper;
        struct timekeeper *tk = &shadow_timekeeper;
        cycle_t offset;
        int shift = 0, maxshift;
        unsigned long flags;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);

        /* Make sure we're fully resumed: */
        if (unlikely(timekeeping_suspended))
                goto out;

        clock = real_tk->clock;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
        offset = real_tk->cycle_interval;
#else
        offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#endif

        /* Check if there's really nothing to do */
        if (offset < real_tk->cycle_interval)
                goto out;

        /*
         * With NO_HZ we may have to accumulate many cycle_intervals
         * (think "ticks") worth of time at once. To do this efficiently,
         * we calculate the largest doubling multiple of cycle_intervals
         * that is smaller than the offset.  We then accumulate that
         * chunk in one go, and then try to consume the next smaller
         * doubled multiple.
         */
        shift = ilog2(offset) - ilog2(tk->cycle_interval);
        shift = max(0, shift);
        /* Bound shift to one less than what overflows tick_length */
        maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
        shift = min(shift, maxshift);
        while (offset >= tk->cycle_interval) {
                offset = logarithmic_accumulation(tk, offset, shift);
                if (offset < tk->cycle_interval<<shift)
                        shift--;
        }

        /* correct the clock when NTP error is too big */
        timekeeping_adjust(tk, offset);

        /*
         * XXX This can be killed once everyone converts
         * to the new update_vsyscall.
         */
        old_vsyscall_fixup(tk);

        /*
         * Finally, make sure that after the rounding
         * xtime_nsec isn't larger than NSEC_PER_SEC
         */
        accumulate_nsecs_to_secs(tk);

        write_seqcount_begin(&timekeeper_seq);
        /* Update clock->cycle_last with the new value */
        clock->cycle_last = tk->cycle_last;
        /*
         * Update the real timekeeper.
         *
         * We could avoid this memcpy by switching pointers, but that
         * requires changes to all other timekeeper usage sites as
         * well, i.e. move the timekeeper pointer getter into the
         * spinlocked/seqcount protected sections. And we trade this
         * memcpy under the timekeeper_seq against one before we start
         * updating.
         */
        memcpy(real_tk, tk, sizeof(*tk));
        timekeeping_update(real_tk, false, false);
        write_seqcount_end(&timekeeper_seq);
out:
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}

/**
 * getboottime - Return the real time of system boot.
 * @ts:         pointer to the timespec to be set
 *
 * Returns the wall-time of boot in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;
        struct timespec boottime = {
                .tv_sec = tk->wall_to_monotonic.tv_sec +
                                tk->total_sleep_time.tv_sec,
                .tv_nsec = tk->wall_to_monotonic.tv_nsec +
                                tk->total_sleep_time.tv_nsec
        };

        set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
}
EXPORT_SYMBOL_GPL(getboottime);

/**
 * get_monotonic_boottime - Returns monotonic time since boot
 * @ts:         pointer to the timespec to be set
 *
 * Returns the monotonic time since boot in a timespec.
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get_ts, but also
 * includes the time spent in suspend.
 */
void get_monotonic_boottime(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;
        struct timespec tomono, sleep;
        s64 nsec;
        unsigned int seq;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&timekeeper_seq);
                ts->tv_sec = tk->xtime_sec;
                nsec = timekeeping_get_ns(tk);
                tomono = tk->wall_to_monotonic;
                sleep = tk->total_sleep_time;

        } while (read_seqcount_retry(&timekeeper_seq, seq));

        ts->tv_sec += tomono.tv_sec + sleep.tv_sec;
        ts->tv_nsec = 0;
        timespec_add_ns(ts, nsec + tomono.tv_nsec + sleep.tv_nsec);
}
EXPORT_SYMBOL_GPL(get_monotonic_boottime);

/**
 * ktime_get_boottime - Returns monotonic time since boot in a ktime
 *
 * Returns the monotonic time since boot in a ktime
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get, but also
 * includes the time spent in suspend.
 */
ktime_t ktime_get_boottime(void)
{
        struct timespec ts;

        get_monotonic_boottime(&ts);
        return timespec_to_ktime(ts);
}
EXPORT_SYMBOL_GPL(ktime_get_boottime);

/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts:         pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;

        *ts = timespec_add(*ts, tk->total_sleep_time);
}
EXPORT_SYMBOL_GPL(monotonic_to_bootbased);

unsigned long get_seconds(void)
{
        struct timekeeper *tk = &timekeeper;

        return tk->xtime_sec;
}
EXPORT_SYMBOL(get_seconds);

struct timespec __current_kernel_time(void)
{
        struct timekeeper *tk = &timekeeper;

        return tk_xtime(tk);
}

struct timespec current_kernel_time(void)
{
        struct timekeeper *tk = &timekeeper;
        struct timespec now;
        unsigned long seq;

        do {
                seq = read_seqcount_begin(&timekeeper_seq);

                now = tk_xtime(tk);
        } while (read_seqcount_retry(&timekeeper_seq, seq));

        return now;
}
EXPORT_SYMBOL(current_kernel_time);

struct timespec get_monotonic_coarse(void)
{
        struct timekeeper *tk = &timekeeper;
        struct timespec now, mono;
        unsigned long seq;

        do {
                seq = read_seqcount_begin(&timekeeper_seq);

                now = tk_xtime(tk);
                mono = tk->wall_to_monotonic;
        } while (read_seqcount_retry(&timekeeper_seq, seq));

        set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
                                now.tv_nsec + mono.tv_nsec);
        return now;
}

/*
 * Must hold jiffies_lock
 */
void do_timer(unsigned long ticks)
{
        jiffies_64 += ticks;
        update_wall_time();
        calc_global_load(ticks);
}

/**
 * get_xtime_and_monotonic_and_sleep_offset() - get xtime, wall_to_monotonic,
 *    and sleep offsets.
 * @xtim:       pointer to timespec to be set with xtime
 * @wtom:       pointer to timespec to be set with wall_to_monotonic
 * @sleep:      pointer to timespec to be set with time in suspend
 */
void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
                                struct timespec *wtom, struct timespec *sleep)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;

        do {
                seq = read_seqcount_begin(&timekeeper_seq);
                *xtim = tk_xtime(tk);
                *wtom = tk->wall_to_monotonic;
                *sleep = tk->total_sleep_time;
        } while (read_seqcount_retry(&timekeeper_seq, seq));
}

#ifdef CONFIG_HIGH_RES_TIMERS
/**
 * ktime_get_update_offsets - hrtimer helper
 * @offs_real:  pointer to storage for monotonic -> realtime offset
 * @offs_boot:  pointer to storage for monotonic -> boottime offset
 * @offs_tai:   pointer to storage for monotonic -> clock tai offset
 *
 * Returns current monotonic time and updates the offsets
 * Called from hrtimer_interrupt() or retrigger_next_event()
 */
ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot,
                                                        ktime_t *offs_tai)
{
        struct timekeeper *tk = &timekeeper;
        ktime_t now;
        unsigned int seq;
        u64 secs, nsecs;

        do {
                seq = read_seqcount_begin(&timekeeper_seq);

                secs = tk->xtime_sec;
                nsecs = timekeeping_get_ns(tk);

                *offs_real = tk->offs_real;
                *offs_boot = tk->offs_boot;
                *offs_tai = tk->offs_tai;
        } while (read_seqcount_retry(&timekeeper_seq, seq));

        now = ktime_add_ns(ktime_set(secs, 0), nsecs);
        now = ktime_sub(now, *offs_real);
        return now;
}
#endif

/**
 * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
 */
ktime_t ktime_get_monotonic_offset(void)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;
        struct timespec wtom;

        do {
                seq = read_seqcount_begin(&timekeeper_seq);
                wtom = tk->wall_to_monotonic;
        } while (read_seqcount_retry(&timekeeper_seq, seq));

        return timespec_to_ktime(wtom);
}
EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);

/**
 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
 */
int do_adjtimex(struct timex *txc)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long flags;
        struct timespec ts;
        s32 orig_tai, tai;
        int ret;

        /* Validate the data before disabling interrupts */
        ret = ntp_validate_timex(txc);
        if (ret)
                return ret;

        if (txc->modes & ADJ_SETOFFSET) {
                struct timespec delta;
                delta.tv_sec  = txc->time.tv_sec;
                delta.tv_nsec = txc->time.tv_usec;
                if (!(txc->modes & ADJ_NANO))
                        delta.tv_nsec *= 1000;
                ret = timekeeping_inject_offset(&delta);
                if (ret)
                        return ret;
        }

        getnstimeofday(&ts);

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&timekeeper_seq);

        orig_tai = tai = tk->tai_offset;
        ret = __do_adjtimex(txc, &ts, &tai);

        if (tai != orig_tai) {
                __timekeeping_set_tai_offset(tk, tai);
                clock_was_set_delayed();
        }
        write_seqcount_end(&timekeeper_seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        return ret;
}

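/*
 * Example caller (hypothetical, for illustration only): in-kernel code
 * slewing the clock the way adjtimex(2) does from userspace would fill
 * in a struct timex and hand it to do_adjtimex():
 *
 *      struct timex txc = {
 *              .modes  = ADJ_FREQUENCY,
 *              .freq   = 500 << 16,    // +500 ppm in scaled-ppm units
 *      };
 *      int err = do_adjtimex(&txc);
 *
 * The scaled-ppm encoding (ppm << 16) follows the adjtimex(2) ABI.
 */
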
#ifdef CONFIG_NTP_PPS
/**
 * hardpps() - Accessor function to NTP __hardpps function
 */
void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&timekeeper_seq);

        __hardpps(phase_ts, raw_ts);

        write_seqcount_end(&timekeeper_seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}
EXPORT_SYMBOL(hardpps);
#endif

/**
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks:      number of ticks that have elapsed since the last call.
 *
 * Must be called with interrupts disabled.
 */
void xtime_update(unsigned long ticks)
{
        write_seqlock(&jiffies_lock);
        do_timer(ticks);
        write_sequnlock(&jiffies_lock);
}