/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 * measurement at boot time.
 * - for astronomical applications: add a new function to get
 * unambiguous timestamps even around leap seconds. This needs
 * a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/irq_work.h>
#include <linux/clk-provider.h>
#include <asm/trace.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#include <asm/cputime.h>

/* powerpc clocksource/clockevent code */

#include <linux/clockchips.h>
#include <linux/timekeeper_internal.h>

static cycle_t rtc_read(struct clocksource *);
static struct clocksource clocksource_rtc = {
	.name         = "rtc",
	.rating       = 400,
	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
	.mask         = CLOCKSOURCE_MASK(64),
	.read         = rtc_read,
};

static cycle_t timebase_read(struct clocksource *);
static struct clocksource clocksource_timebase = {
	.name         = "timebase",
	.rating       = 400,
	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
	.mask         = CLOCKSOURCE_MASK(64),
	.read         = timebase_read,
};

#define DECREMENTER_MAX	0x7fffffff

static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev);
static int decrementer_shutdown(struct clock_event_device *evt);

struct clock_event_device decrementer_clockevent = {
	.name			= "decrementer",
	.rating			= 200,
	.irq			= 0,
	.set_next_event		= decrementer_set_next_event,
	.set_state_shutdown	= decrementer_shutdown,
	.tick_resume		= decrementer_shutdown,
	.features		= CLOCK_EVT_FEAT_ONESHOT |
				  CLOCK_EVT_FEAT_C3STOP,
};
EXPORT_SYMBOL(decrementer_clockevent);

DEFINE_PER_CPU(u64, decrementers_next_tb);
static DEFINE_PER_CPU(struct clock_event_device, decrementers);

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
#endif
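/*
 * Worked example (illustrative): an "xsec" is 1/XSEC_PER_SEC of a
 * second, i.e. 1/2^20 s, roughly 0.95us.  Both variants compute
 * xsec * max / 2^20; e.g. on 64-bit, SCALE_XSEC(XSEC_PER_SEC, max)
 * reduces to ((1 << 20) * max) / (1 << 20) == max, as expected for
 * a full second's worth of xsecs.
 */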

unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static u64 boot_tb __read_mostly;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL_GPL(ppc_proc_freq);
unsigned long ppc_tb_freq;
EXPORT_SYMBOL_GPL(ppc_tb_freq);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Factors for converting from cputime_t (timebase ticks) to
 * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds).
 * These are all stored as 0.64 fixed-point binary fractions.
 */
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
u64 __cputime_usec_factor;
EXPORT_SYMBOL(__cputime_usec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);
DEFINE_PER_CPU(unsigned long, cputime_last_delta);
DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);

cputime_t cputime_one_jiffy;

void (*dtl_consumer)(struct dtl_entry *, u64);

static void calc_cputime_factors(void)
{
	struct div_result res;

	div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
	__cputime_jiffies_factor = res.result_low;
	div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
	__cputime_usec_factor = res.result_low;
	div128_by_32(1, 0, tb_ticks_per_sec, &res);
	__cputime_sec_factor = res.result_low;
	div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
	__cputime_clockt_factor = res.result_low;
}
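/*
 * Worked example (illustrative numbers): with a 512MHz timebase,
 * tb_ticks_per_sec = 512000000, so
 *
 *	__cputime_usec_factor = 1000000 * 2^64 / 512000000 = 2^64 / 512
 *
 * and a conversion such as mulhdu(ticks, __cputime_usec_factor)
 * (multiply, keep the high 64 bits) yields ticks / 512, i.e. the
 * number of microseconds in that many timebase ticks.
 */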

/*
 * Read the SPURR on systems that have it, otherwise the PURR,
 * or if that doesn't exist return the timebase value passed in.
 */
static u64 read_spurr(u64 tb)
{
	if (cpu_has_feature(CPU_FTR_SPURR))
		return mfspr(SPRN_SPURR);
	if (cpu_has_feature(CPU_FTR_PURR))
		return mfspr(SPRN_PURR);
	return tb;
}

#ifdef CONFIG_PPC_SPLPAR

/*
 * Scan the dispatch trace log and count up the stolen time.
 * Should be called with interrupts disabled.
 */
static u64 scan_dispatch_log(u64 stop_tb)
{
	u64 i = local_paca->dtl_ridx;
	struct dtl_entry *dtl = local_paca->dtl_curr;
	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
	struct lppaca *vpa = local_paca->lppaca_ptr;
	u64 tb_delta;
	u64 stolen = 0;
	u64 dtb;

	if (!dtl)
		return 0;

	if (i == be64_to_cpu(vpa->dtl_idx))
		return 0;
	while (i < be64_to_cpu(vpa->dtl_idx)) {
		dtb = be64_to_cpu(dtl->timebase);
		tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
			be32_to_cpu(dtl->ready_to_enqueue_time);
		barrier();
		if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
			/* buffer has overflowed */
			i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
			continue;
		}
		if (dtb > stop_tb)
			break;
		if (dtl_consumer)
			dtl_consumer(dtl, i);
		stolen += tb_delta;
		++i;
		++dtl;
		if (dtl == dtl_end)
			dtl = local_paca->dispatch_log;
	}
	local_paca->dtl_ridx = i;
	local_paca->dtl_curr = dtl;
	return stolen;
}

/*
 * Accumulate stolen time by scanning the dispatch trace log.
 * Called on entry from user mode.
 */
void notrace accumulate_stolen_time(void)
{
	u64 sst, ust;

	u8 save_soft_enabled = local_paca->soft_enabled;

	/* We are called early in the exception entry, before
	 * soft/hard_enabled are sync'ed to the expected state
	 * for the exception. We are hard disabled but the PACA
	 * needs to reflect that so various debug stuff doesn't
	 * complain
	 */
	local_paca->soft_enabled = 0;

	sst = scan_dispatch_log(local_paca->starttime_user);
	ust = scan_dispatch_log(local_paca->starttime);
	local_paca->system_time -= sst;
	local_paca->user_time -= ust;
	local_paca->stolen_time += ust + sst;

	local_paca->soft_enabled = save_soft_enabled;
}

static inline u64 calculate_stolen_time(u64 stop_tb)
{
	u64 stolen = 0;

	if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) {
		stolen = scan_dispatch_log(stop_tb);
		get_paca()->system_time -= stolen;
	}

	stolen += get_paca()->stolen_time;
	get_paca()->stolen_time = 0;
	return stolen;
}

#else /* CONFIG_PPC_SPLPAR */
static inline u64 calculate_stolen_time(u64 stop_tb)
{
	return 0;
}

#endif /* CONFIG_PPC_SPLPAR */

/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
static u64 vtime_delta(struct task_struct *tsk,
			u64 *sys_scaled, u64 *stolen)
{
	u64 now, nowscaled, deltascaled;
	u64 udelta, delta, user_scaled;

	WARN_ON_ONCE(!irqs_disabled());

	now = mftb();
	nowscaled = read_spurr(now);
	get_paca()->system_time += now - get_paca()->starttime;
	get_paca()->starttime = now;
	deltascaled = nowscaled - get_paca()->startspurr;
	get_paca()->startspurr = nowscaled;

	*stolen = calculate_stolen_time(now);

	delta = get_paca()->system_time;
	get_paca()->system_time = 0;
	udelta = get_paca()->user_time - get_paca()->utime_sspurr;
	get_paca()->utime_sspurr = get_paca()->user_time;

	/*
	 * Because we don't read the SPURR on every kernel entry/exit,
	 * deltascaled includes both user and system SPURR ticks.
	 * Apportion these ticks to system SPURR ticks and user
	 * SPURR ticks in the same ratio as the system time (delta)
	 * and user time (udelta) values obtained from the timebase
	 * over the same interval.  The system ticks get accounted here;
	 * the user ticks get saved up in paca->user_time_scaled to be
	 * used by account_process_tick.
	 */
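	/*
	 * Worked example (illustrative): if delta = 100 and udelta = 300
	 * timebase ticks but deltascaled = 600 SPURR ticks, then
	 * sys_scaled = 600 * 100 / 400 = 150 and user_scaled = 450,
	 * preserving the 1:3 system:user ratio seen on the timebase.
	 */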
	*sys_scaled = delta;
	user_scaled = udelta;
	if (deltascaled != delta + udelta) {
		if (udelta) {
			*sys_scaled = deltascaled * delta / (delta + udelta);
			user_scaled = deltascaled - *sys_scaled;
		} else {
			*sys_scaled = deltascaled;
		}
	}
	get_paca()->user_time_scaled += user_scaled;

	return delta;
}

void vtime_account_system(struct task_struct *tsk)
{
	u64 delta, sys_scaled, stolen;

	delta = vtime_delta(tsk, &sys_scaled, &stolen);
	account_system_time(tsk, 0, delta, sys_scaled);
	if (stolen)
		account_steal_time(stolen);
}
EXPORT_SYMBOL_GPL(vtime_account_system);

void vtime_account_idle(struct task_struct *tsk)
{
	u64 delta, sys_scaled, stolen;

	delta = vtime_delta(tsk, &sys_scaled, &stolen);
	account_idle_time(delta + stolen);
}

/*
 * Transfer the user time accumulated in the paca
 * by the exception entry and exit code to the generic
 * process user time records.
 * Must be called with interrupts disabled.
 * Assumes that vtime_account_system/idle() has been called
 * recently (i.e. since the last entry from usermode) so that
 * get_paca()->user_time_scaled is up to date.
 */
void vtime_account_user(struct task_struct *tsk)
{
	cputime_t utime, utimescaled;

	utime = get_paca()->user_time;
	utimescaled = get_paca()->user_time_scaled;
	get_paca()->user_time = 0;
	get_paca()->user_time_scaled = 0;
	get_paca()->utime_sspurr = 0;
	account_user_time(tsk, utime, utimescaled);
}

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#define calc_cputime_factors()
#endif

void __delay(unsigned long loops)
{
	unsigned long start;
	int diff;

	if (__USE_RTC()) {
		start = get_rtcl();
		do {
			/* the RTCL register wraps at 1000000000 */
			diff = get_rtcl() - start;
			if (diff < 0)
				diff += 1000000000;
		} while (diff < loops);
	} else {
		start = get_tbl();
		while (get_tbl() - start < loops)
			HMT_low();
		HMT_medium();
	}
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);

#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->link;

	return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

#ifdef CONFIG_IRQ_WORK

/*
 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
 */
#ifdef CONFIG_PPC64
static inline unsigned long test_irq_work_pending(void)
{
	unsigned long x;

	asm volatile("lbz %0,%1(13)"
		: "=r" (x)
		: "i" (offsetof(struct paca_struct, irq_work_pending)));
	return x;
}

static inline void set_irq_work_pending_flag(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (1),
		"i" (offsetof(struct paca_struct, irq_work_pending)));
}

static inline void clear_irq_work_pending(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (0),
		"i" (offsetof(struct paca_struct, irq_work_pending)));
}
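/*
 * Note: on 64-bit, GPR13 always holds the pointer to this CPU's PACA,
 * so the raw lbz/stb through r13 above gives a single-instruction,
 * preemption-safe access to the per-cpu flag without going through
 * the generic per-cpu machinery.
 */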

#else /* 32-bit */

DEFINE_PER_CPU(u8, irq_work_pending);

#define set_irq_work_pending_flag()	__this_cpu_write(irq_work_pending, 1)
#define test_irq_work_pending()		__this_cpu_read(irq_work_pending)
#define clear_irq_work_pending()	__this_cpu_write(irq_work_pending, 0)

#endif /* 32 vs 64 bit */

void arch_irq_work_raise(void)
{
	preempt_disable();
	set_irq_work_pending_flag();
	set_dec(1);
	preempt_enable();
}

#else  /* CONFIG_IRQ_WORK */

#define test_irq_work_pending()	0
#define clear_irq_work_pending()

#endif /* CONFIG_IRQ_WORK */

static void __timer_interrupt(void)
{
	struct pt_regs *regs = get_irq_regs();
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
	struct clock_event_device *evt = this_cpu_ptr(&decrementers);
	u64 now;

	trace_timer_interrupt_entry(regs);

	if (test_irq_work_pending()) {
		clear_irq_work_pending();
		irq_work_run();
	}

	now = get_tb_or_rtc();
	if (now >= *next_tb) {
		*next_tb = ~(u64)0;
		if (evt->event_handler)
			evt->event_handler(evt);
		__this_cpu_inc(irq_stat.timer_irqs_event);
	} else {
		now = *next_tb - now;
		if (now <= DECREMENTER_MAX)
			set_dec((int)now);
		/* We may have raced with new irq work */
		if (test_irq_work_pending())
			set_dec(1);
		__this_cpu_inc(irq_stat.timer_irqs_others);
	}

#ifdef CONFIG_PPC64
	/* collect purr register values often, for accurate calculations */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
		cu->current_tb = mfspr(SPRN_PURR);
	}
#endif

	trace_timer_interrupt_exit(regs);
}

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs * regs)
{
	struct pt_regs *old_regs;
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

	/* Ensure a positive value is written to the decrementer, or else
	 * some CPUs will continue to take decrementer exceptions.
	 */
	set_dec(DECREMENTER_MAX);

	/* Some implementations of hotplug will get timer interrupts while
	 * offline, so just ignore these.  We also need to set
	 * decrementers_next_tb to the maximum so that __check_irq_replay
	 * doesn't replay the timer interrupt on return; otherwise we'd
	 * trap here infinitely :(
	 */
	if (!cpu_online(smp_processor_id())) {
		*next_tb = ~(u64)0;
		return;
	}

	/* Conditionally hard-enable interrupts now that the DEC has been
	 * bumped to its maximum value
	 */
	may_hard_irq_enable();


#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);
#endif

	old_regs = set_irq_regs(regs);
	irq_enter();

	__timer_interrupt();
	irq_exit();
	set_irq_regs(old_regs);
}

/*
 * Hypervisor decrementer interrupts shouldn't occur but are sometimes
 * left pending on exit from a KVM guest.  We don't need to do anything
 * to clear them, as they are edge-triggered.
 */
void hdec_interrupt(struct pt_regs *regs)
{
}

#ifdef CONFIG_SUSPEND
static void generic_suspend_disable_irqs(void)
{
	/* Disable the decrementer, so that it doesn't interfere
	 * with suspending.
	 */

	set_dec(DECREMENTER_MAX);
	local_irq_disable();
	set_dec(DECREMENTER_MAX);
}

static void generic_suspend_enable_irqs(void)
{
	local_irq_enable();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_disable_irqs(void)
{
	if (ppc_md.suspend_disable_irqs)
		ppc_md.suspend_disable_irqs();
	generic_suspend_disable_irqs();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_enable_irqs(void)
{
	generic_suspend_enable_irqs();
	if (ppc_md.suspend_enable_irqs)
		ppc_md.suspend_enable_irqs();
}
#endif

unsigned long long tb_to_ns(unsigned long long ticks)
{
	return mulhdu(ticks, tb_to_ns_scale) << tb_to_ns_shift;
}
EXPORT_SYMBOL_GPL(tb_to_ns);

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
unsigned long long sched_clock(void)
{
	if (__USE_RTC())
		return get_rtc();
	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}


#ifdef CONFIG_PPC_PSERIES

/*
 * Running clock - attempts to give a view of time passing for a
 * virtualised kernel.
 * Uses the VTB register if available, otherwise a next best guess.
 */
unsigned long long running_clock(void)
{
	/*
	 * Don't read the VTB as a host, since KVM does not switch the host
	 * timebase into the VTB when it takes a guest off the CPU; reading
	 * the VTB would give the 'last switched out' guest's VTB.
	 *
	 * Host kernels are often compiled with CONFIG_PPC_PSERIES enabled,
	 * so it would be unsafe to rely only on the #ifdef above.
	 */
	if (firmware_has_feature(FW_FEATURE_LPAR) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		return mulhdu(get_vtb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;

	/*
	 * This is the next best approximation without a VTB.
	 * On a bare-metal host there should never be any stolen time, and
	 * on a host which doesn't do any virtualisation TB *should* equal
	 * VTB, so it makes no difference anyway.
	 */
	return local_clock() - cputime_to_nsecs(kcpustat_this_cpu->cpustat[CPUTIME_STEAL]);
}
#endif

static int __init get_freq(char *name, int cells, unsigned long *val)
{
	struct device_node *cpu;
	const __be32 *fp;
	int found = 0;

	/* The cpu node should have timebase and clock frequency properties */
	cpu = of_find_node_by_type(NULL, "cpu");

	if (cpu) {
		fp = of_get_property(cpu, name, NULL);
		if (fp) {
			found = 1;
			*val = of_read_ulong(fp, cells);
		}

		of_node_put(cpu);
	}

	return found;
}

static void start_cpu_decrementer(void)
{
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
	unsigned int tcr;

	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	tcr = mfspr(SPRN_TCR);
	/*
	 * The watchdog may have already been enabled by u-boot, so leave
	 * TCR[WP] (Watchdog Period) alone.
	 */
	tcr &= TCR_WP_MASK;	/* Clear all bits except for TCR[WP] */
	tcr |= TCR_DIE;		/* Enable decrementer */
	mtspr(SPRN_TCR, tcr);
#endif
}

void __init generic_calibrate_decr(void)
{
	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */

	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(not found)\n");
	}

	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */

	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(not found)\n");
	}
}

int update_persistent_clock(struct timespec now)
{
	struct rtc_time tm;

	if (!ppc_md.set_rtc_time)
		return -ENODEV;

	to_tm(now.tv_sec + 1 + timezone_offset, &tm);
	tm.tm_year -= 1900;
	tm.tm_mon -= 1;

	return ppc_md.set_rtc_time(&tm);
}

static void __read_persistent_clock(struct timespec *ts)
{
	struct rtc_time tm;
	static int first = 1;

	ts->tv_nsec = 0;
	/* XXX this is a little fragile but will work okay in the short term */
	if (first) {
		first = 0;
		if (ppc_md.time_init)
			timezone_offset = ppc_md.time_init();

		/* get_boot_time() isn't guaranteed to be safe to call late */
		if (ppc_md.get_boot_time) {
			ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
			return;
		}
	}
	if (!ppc_md.get_rtc_time) {
		ts->tv_sec = 0;
		return;
	}
	ppc_md.get_rtc_time(&tm);

	ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
			    tm.tm_hour, tm.tm_min, tm.tm_sec);
}

void read_persistent_clock(struct timespec *ts)
{
	__read_persistent_clock(ts);

	/* Sanitize it in case real time clock is set below EPOCH */
	if (ts->tv_sec < 0) {
		ts->tv_sec = 0;
		ts->tv_nsec = 0;
	}

}

/* clocksource code */
static cycle_t rtc_read(struct clocksource *cs)
{
	return (cycle_t)get_rtc();
}

static cycle_t timebase_read(struct clocksource *cs)
{
	return (cycle_t)get_tb();
}

void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
			 struct clocksource *clock, u32 mult, cycle_t cycle_last)
{
	u64 new_tb_to_xs, new_stamp_xsec;
	u32 frac_sec;

	if (clock != &clocksource_timebase)
		return;

	/* Make userspace gettimeofday spin until we're done. */
	++vdso_data->tb_update_count;
	smp_mb();

	/* 19342813113834067 ~= 2^(20+64) / 1e9 */
	new_tb_to_xs = (u64) mult * (19342813113834067ULL >> clock->shift);
	new_stamp_xsec = (u64) wall_time->tv_nsec * XSEC_PER_SEC;
	do_div(new_stamp_xsec, 1000000000);
	new_stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC;

	BUG_ON(wall_time->tv_nsec >= NSEC_PER_SEC);
	/* this is tv_nsec / 1e9 as a 0.32 fraction */
	frac_sec = ((u64) wall_time->tv_nsec * 18446744073ULL) >> 32;

	/*
	 * tb_update_count is used to allow the userspace gettimeofday code
	 * to assure itself that it sees a consistent view of the tb_to_xs and
	 * stamp_xsec variables.  It reads the tb_update_count, then reads
	 * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
	 * the two values of tb_update_count match and are even then the
	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
	 * loops back and reads them again until this criterion is met.
	 * We expect the caller to have done the first increment of
	 * vdso_data->tb_update_count already.
	 */
	vdso_data->tb_orig_stamp = cycle_last;
	vdso_data->stamp_xsec = new_stamp_xsec;
	vdso_data->tb_to_xs = new_tb_to_xs;
	vdso_data->wtom_clock_sec = wtm->tv_sec;
	vdso_data->wtom_clock_nsec = wtm->tv_nsec;
	vdso_data->stamp_xtime = *wall_time;
	vdso_data->stamp_sec_fraction = frac_sec;
	vdso_data->hrtimer_res = hrtimer_resolution;
	smp_wmb();
	++(vdso_data->tb_update_count);
}
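#if 0
/*
 * Minimal sketch (not built) of the reader side of the tb_update_count
 * protocol described above, assuming the usual seqcount-style pairing;
 * the real consumer is the VDSO gettimeofday code, and the helper name
 * here is purely illustrative.
 */
static u64 example_read_stamp_xsec(void)
{
	u64 seq, stamp;

	do {
		seq = vdso_data->tb_update_count;	/* first read */
		smp_rmb();				/* order vs. data read */
		stamp = vdso_data->stamp_xsec;		/* snapshot the data */
		smp_rmb();				/* order vs. re-read */
	} while ((seq & 1) || seq != vdso_data->tb_update_count);

	return stamp;
}
#endif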

void update_vsyscall_tz(void)
{
	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
}

static void __init clocksource_init(void)
{
	struct clocksource *clock;

	if (__USE_RTC())
		clock = &clocksource_rtc;
	else
		clock = &clocksource_timebase;

	if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
		printk(KERN_ERR "clocksource: %s is already registered\n",
		       clock->name);
		return;
	}

	printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
	       clock->name, clock->mult, clock->shift);
}

static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev)
{
	__this_cpu_write(decrementers_next_tb, get_tb_or_rtc() + evt);
	set_dec(evt);

	/* We may have raced with new irq work */
	if (test_irq_work_pending())
		set_dec(1);

	return 0;
}

static int decrementer_shutdown(struct clock_event_device *dev)
{
	decrementer_set_next_event(DECREMENTER_MAX, dev);
	return 0;
}

/* Interrupt handler for the timer broadcast IPI */
void tick_broadcast_ipi_handler(void)
{
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

	*next_tb = get_tb_or_rtc();
	__timer_interrupt();
}

static void register_decrementer_clockevent(int cpu)
{
	struct clock_event_device *dec = &per_cpu(decrementers, cpu);

	*dec = decrementer_clockevent;
	dec->cpumask = cpumask_of(cpu);

	printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
		    dec->name, dec->mult, dec->shift, cpu);

	clockevents_register_device(dec);
}

static void __init init_decrementer_clockevent(void)
{
	int cpu = smp_processor_id();

	clockevents_calc_mult_shift(&decrementer_clockevent, ppc_tb_freq, 4);

	decrementer_clockevent.max_delta_ns =
		clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
	decrementer_clockevent.min_delta_ns =
		clockevent_delta2ns(2, &decrementer_clockevent);

	register_decrementer_clockevent(cpu);
}

void secondary_cpu_time_init(void)
{
	/* Start the decrementer on CPUs that have manual control
	 * such as BookE
	 */
	start_cpu_decrementer();

	/* FIXME: Should make an unrelated change to move the
	 * snapshot_timebase call here! */
	register_decrementer_clockevent(smp_processor_id());
}

/* This function is only called on the boot processor */
void __init time_init(void)
{
	struct div_result res;
	u64 scale;
	unsigned shift;

	if (__USE_RTC()) {
		/* 601 processor: dec counts down by 128 every 128ns */
		ppc_tb_freq = 1000000000;
	} else {
		/* Normal PowerPC with timebase register */
		ppc_md.calibrate_decr();
		printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
		printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
	}

	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
	tb_ticks_per_sec = ppc_tb_freq;
	tb_ticks_per_usec = ppc_tb_freq / 1000000;
	calc_cputime_factors();
	setup_cputime_one_jiffy();

	/*
	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
	 * which is the timebase frequency.
	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
	 * the 128-bit result as a 64.64 fixed-point number.
	 * We then shift that number right until it is less than 1.0,
	 * giving us the scale factor and shift count to use in
	 * sched_clock().
	 */
	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
	scale = res.result_low;
	for (shift = 0; res.result_high != 0; ++shift) {
		scale = (scale >> 1) | (res.result_high << 63);
		res.result_high >>= 1;
	}
	tb_to_ns_scale = scale;
	tb_to_ns_shift = shift;
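	/*
	 * Worked example (illustrative): with a 512MHz timebase,
	 * 1e9 * 2^64 / 512e6 = 1.953125 * 2^64, so result_high == 1 and
	 * the loop runs once, leaving tb_to_ns_scale = 1.953125 * 2^63
	 * and tb_to_ns_shift = 1.  sched_clock() then computes
	 * mulhdu(ticks, scale) << 1 == ticks * 1.953125, i.e. the
	 * number of nanoseconds per tick at 512MHz.
	 */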
	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
	boot_tb = get_tb_or_rtc();

	/* If platform provided a timezone (pmac), we correct the time */
	if (timezone_offset) {
		sys_tz.tz_minuteswest = -timezone_offset / 60;
		sys_tz.tz_dsttime = 0;
	}

	vdso_data->tb_update_count = 0;
	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;

	/* Start the decrementer on CPUs that have manual control
	 * such as BookE
	 */
	start_cpu_decrementer();

	/* Register the clocksource */
	clocksource_init();

	init_decrementer_clockevent();
	tick_setup_hrtimer_broadcast();

#ifdef CONFIG_COMMON_CLK
	of_clk_init(NULL);
#endif
}


#define FEBRUARY	2
#define	STARTOFTIME	1970
#define SECDAY		86400L
#define SECYR		(SECDAY * 365)
#define	leapyear(year)		((year) % 4 == 0 && \
				 ((year) % 100 != 0 || (year) % 400 == 0))
#define	days_in_year(a) 	(leapyear(a) ? 366 : 365)
#define	days_in_month(a) 	(month_days[(a) - 1])

static int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 */
void GregorianDay(struct rtc_time * tm)
{
	int leapsToDate;
	int lastYear;
	int day;
	int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };

	lastYear = tm->tm_year - 1;

	/*
	 * Number of leap corrections to apply up to end of last year
	 */
	leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;

	/*
	 * This year is a leap year if it is divisible by 4 except when it is
	 * divisible by 100 unless it is divisible by 400
	 *
	 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
	 */
	day = tm->tm_mon > 2 && leapyear(tm->tm_year);

	day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
		   tm->tm_mday;

	tm->tm_wday = day % 7;
}
EXPORT_SYMBOL_GPL(GregorianDay);
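/*
 * Worked example (illustrative): for 2000-01-01 (tm_year = 2000,
 * tm_mon = 1, tm_mday = 1), lastYear = 1999, so
 * leapsToDate = 499 - 19 + 4 = 484 and
 * day = 0 + 1999 * 365 + 484 + 0 + 1 = 730120; 730120 % 7 == 6,
 * i.e. tm_wday = 6, a Saturday, which matches the calendar.
 */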

void to_tm(int tim, struct rtc_time * tm)
{
	register int    i;
	register long   hms, day;

	day = tim / SECDAY;
	hms = tim % SECDAY;

	/* Hours, minutes, seconds are easy */
	tm->tm_hour = hms / 3600;
	tm->tm_min = (hms % 3600) / 60;
	tm->tm_sec = (hms % 3600) % 60;

	/* Number of years in days */
	for (i = STARTOFTIME; day >= days_in_year(i); i++)
		day -= days_in_year(i);
	tm->tm_year = i;

	/* Number of months in days left */
	if (leapyear(tm->tm_year))
		days_in_month(FEBRUARY) = 29;
	for (i = 1; day >= days_in_month(i); i++)
		day -= days_in_month(i);
	days_in_month(FEBRUARY) = 28;
	tm->tm_mon = i;

	/* Days are what is left over (+1) from all that. */
	tm->tm_mday = day + 1;

	/*
	 * Determine the day of week
	 */
	GregorianDay(tm);
}
EXPORT_SYMBOL(to_tm);

/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
		  unsigned divisor, struct div_result *dr)
{
	unsigned long a, b, c, d;
	unsigned long w, x, y, z;
	u64 ra, rb, rc;

	a = dividend_high >> 32;
	b = dividend_high & 0xffffffff;
	c = dividend_low >> 32;
	d = dividend_low & 0xffffffff;

	w = a / divisor;
	ra = ((u64)(a - (w * divisor)) << 32) + b;

	rb = ((u64) do_div(ra, divisor) << 32) + c;
	x = ra;

	rc = ((u64) do_div(rb, divisor) << 32) + d;
	y = rb;

	do_div(rc, divisor);
	z = rc;

	dr->result_high = ((u64)w << 32) + x;
	dr->result_low  = ((u64)y << 32) + z;

}
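/*
 * Worked example (illustrative): this is 32-bit-at-a-time long
 * division.  div128_by_32(1, 0, 3, &dr) divides 2^64 by 3, giving
 * dr->result_high = 0 and dr->result_low = 0x5555555555555555
 * (the remainder, here 1, is discarded).  calc_cputime_factors()
 * uses exactly this to build its 0.64 fixed-point factors, e.g.
 * HZ * 2^64 / tb_ticks_per_sec.
 */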

/* We don't need to calibrate delay, we use the CPU timebase for that */
void calibrate_delay(void)
{
	/* Some generic code (such as spinlock debug) uses loops_per_jiffy
	 * as the number of __delay(1) calls in a jiffy, so make it so
	 */
	loops_per_jiffy = tb_ticks_per_jiffy;
}

static int __init rtc_init(void)
{
	struct platform_device *pdev;

	if (!ppc_md.get_rtc_time)
		return -ENODEV;

	pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);

	return PTR_ERR_OR_ZERO(pdev);
}

device_initcall(rtc_init);