1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Common time routines among all ppc machines.
4  *
5  * Written by Cort Dougan (cort@cs.nmt.edu) to merge
6  * Paul Mackerras' version and mine for PReP and Pmac.
7  * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
8  * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
9  *
10  * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
11  * to make clock more stable (2.4.0-test5). The only thing
12  * that this code assumes is that the timebases have been synchronized
13  * by firmware on SMP and are never stopped (never do sleep
14  * on SMP then, nap and doze are OK).
15  *
16  * Speeded up do_gettimeofday by getting rid of references to
17  * xtime (which required locks for consistency). (mikejc@us.ibm.com)
18  *
19  * TODO (not necessarily in this file):
20  * - improve precision and reproducibility of timebase frequency
21  * measurement at boot time.
22  * - for astronomical applications: add a new function to get
23  * unambiguous timestamps even around leap seconds. This needs
24  * a new timestamp format and a good name.
25  *
26  * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
27  *             "A Kernel Model for Precision Timekeeping" by Dave Mills
28  */
29 
30 #include <linux/errno.h>
31 #include <linux/export.h>
32 #include <linux/sched.h>
33 #include <linux/sched/clock.h>
34 #include <linux/kernel.h>
35 #include <linux/param.h>
36 #include <linux/string.h>
37 #include <linux/mm.h>
38 #include <linux/interrupt.h>
39 #include <linux/timex.h>
40 #include <linux/kernel_stat.h>
41 #include <linux/time.h>
42 #include <linux/init.h>
43 #include <linux/profile.h>
44 #include <linux/cpu.h>
45 #include <linux/security.h>
46 #include <linux/percpu.h>
47 #include <linux/rtc.h>
48 #include <linux/jiffies.h>
49 #include <linux/posix-timers.h>
50 #include <linux/irq.h>
51 #include <linux/delay.h>
52 #include <linux/irq_work.h>
53 #include <linux/clk-provider.h>
54 #include <linux/suspend.h>
55 #include <linux/sched/cputime.h>
56 #include <linux/processor.h>
57 #include <asm/trace.h>
58 
59 #include <asm/io.h>
60 #include <asm/nvram.h>
61 #include <asm/cache.h>
62 #include <asm/machdep.h>
63 #include <linux/uaccess.h>
64 #include <asm/time.h>
65 #include <asm/prom.h>
66 #include <asm/irq.h>
67 #include <asm/div64.h>
68 #include <asm/smp.h>
69 #include <asm/vdso_datapage.h>
70 #include <asm/firmware.h>
71 #include <asm/asm-prototypes.h>
72 
73 /* powerpc clocksource/clockevent code */
74 
75 #include <linux/clockchips.h>
76 #include <linux/timekeeper_internal.h>
77 
78 static u64 rtc_read(struct clocksource *);
79 static struct clocksource clocksource_rtc = {
80 	.name         = "rtc",
81 	.rating       = 400,
82 	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
83 	.mask         = CLOCKSOURCE_MASK(64),
84 	.read         = rtc_read,
85 };
86 
87 static u64 timebase_read(struct clocksource *);
88 static struct clocksource clocksource_timebase = {
89 	.name         = "timebase",
90 	.rating       = 400,
91 	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
92 	.mask         = CLOCKSOURCE_MASK(64),
93 	.read         = timebase_read,
94 };
95 
96 #define DECREMENTER_DEFAULT_MAX 0x7FFFFFFF
97 u64 decrementer_max = DECREMENTER_DEFAULT_MAX;
98 
99 static int decrementer_set_next_event(unsigned long evt,
100 				      struct clock_event_device *dev);
101 static int decrementer_shutdown(struct clock_event_device *evt);
102 
103 struct clock_event_device decrementer_clockevent = {
104 	.name			= "decrementer",
105 	.rating			= 200,
106 	.irq			= 0,
107 	.set_next_event		= decrementer_set_next_event,
108 	.set_state_oneshot_stopped = decrementer_shutdown,
109 	.set_state_shutdown	= decrementer_shutdown,
110 	.tick_resume		= decrementer_shutdown,
111 	.features		= CLOCK_EVT_FEAT_ONESHOT |
112 				  CLOCK_EVT_FEAT_C3STOP,
113 };
114 EXPORT_SYMBOL(decrementer_clockevent);
115 
116 DEFINE_PER_CPU(u64, decrementers_next_tb);
117 static DEFINE_PER_CPU(struct clock_event_device, decrementers);
118 
119 #define XSEC_PER_SEC (1024*1024)
120 
121 #ifdef CONFIG_PPC64
122 #define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
123 #else
124 /* compute ((xsec << 12) * max) >> 32 */
125 #define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
126 #endif
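/*
 * A worked example of the xsec unit: one xsec is 1/2^20 of a second
 * (XSEC_PER_SEC above), so half a second is 524288 xsec and
 * SCALE_XSEC(524288, 1000) evaluates to 500 with either the 64-bit or
 * the 32-bit (mulhwu) variant.
 */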
127 
128 unsigned long tb_ticks_per_jiffy;
129 unsigned long tb_ticks_per_usec = 100; /* sane default */
130 EXPORT_SYMBOL(tb_ticks_per_usec);
131 unsigned long tb_ticks_per_sec;
132 EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */
133 
134 DEFINE_SPINLOCK(rtc_lock);
135 EXPORT_SYMBOL_GPL(rtc_lock);
136 
137 static u64 tb_to_ns_scale __read_mostly;
138 static unsigned tb_to_ns_shift __read_mostly;
139 static u64 boot_tb __read_mostly;
140 
141 extern struct timezone sys_tz;
142 static long timezone_offset;
143 
144 unsigned long ppc_proc_freq;
145 EXPORT_SYMBOL_GPL(ppc_proc_freq);
146 unsigned long ppc_tb_freq;
147 EXPORT_SYMBOL_GPL(ppc_tb_freq);
148 
149 bool tb_invalid;
150 
151 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
152 /*
153  * Factor for converting from cputime_t (timebase ticks) to
154  * microseconds. This is stored as 0.64 fixed-point binary fraction.
155  */
156 u64 __cputime_usec_factor;
157 EXPORT_SYMBOL(__cputime_usec_factor);
158 
159 #ifdef CONFIG_PPC_SPLPAR
160 void (*dtl_consumer)(struct dtl_entry *, u64);
161 #endif
162 
163 static void calc_cputime_factors(void)
164 {
165 	struct div_result res;
166 
167 	div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
168 	__cputime_usec_factor = res.result_low;
169 }
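/*
 * For illustration, with a 512 MHz timebase (tb_ticks_per_sec = 512000000):
 * 2^64 * 1000000 / 512000000 = 2^55, so __cputime_usec_factor becomes
 * 1ULL << 55 and the cputime helpers, which effectively compute
 * mulhdu(ticks, __cputime_usec_factor), divide timebase ticks by 512 to
 * obtain microseconds.
 */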
170 
171 /*
172  * Read the SPURR on systems that have it, otherwise the PURR,
173  * or if that doesn't exist return the timebase value passed in.
174  */
175 static inline unsigned long read_spurr(unsigned long tb)
176 {
177 	if (cpu_has_feature(CPU_FTR_SPURR))
178 		return mfspr(SPRN_SPURR);
179 	if (cpu_has_feature(CPU_FTR_PURR))
180 		return mfspr(SPRN_PURR);
181 	return tb;
182 }
183 
184 #ifdef CONFIG_PPC_SPLPAR
185 
186 /*
187  * Scan the dispatch trace log and count up the stolen time.
188  * Should be called with interrupts disabled.
189  */
190 static u64 scan_dispatch_log(u64 stop_tb)
191 {
192 	u64 i = local_paca->dtl_ridx;
193 	struct dtl_entry *dtl = local_paca->dtl_curr;
194 	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
195 	struct lppaca *vpa = local_paca->lppaca_ptr;
196 	u64 tb_delta;
197 	u64 stolen = 0;
198 	u64 dtb;
199 
200 	if (!dtl)
201 		return 0;
202 
203 	if (i == be64_to_cpu(vpa->dtl_idx))
204 		return 0;
205 	while (i < be64_to_cpu(vpa->dtl_idx)) {
206 		dtb = be64_to_cpu(dtl->timebase);
207 		tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
208 			be32_to_cpu(dtl->ready_to_enqueue_time);
209 		barrier();
210 		if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
211 			/* buffer has overflowed */
212 			i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
213 			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
214 			continue;
215 		}
216 		if (dtb > stop_tb)
217 			break;
218 		if (dtl_consumer)
219 			dtl_consumer(dtl, i);
220 		stolen += tb_delta;
221 		++i;
222 		++dtl;
223 		if (dtl == dtl_end)
224 			dtl = local_paca->dispatch_log;
225 	}
226 	local_paca->dtl_ridx = i;
227 	local_paca->dtl_curr = dtl;
228 	return stolen;
229 }
230 
231 /*
232  * Accumulate stolen time by scanning the dispatch trace log.
233  * Called on entry from user mode.
234  */
235 void notrace accumulate_stolen_time(void)
236 {
237 	u64 sst, ust;
238 	unsigned long save_irq_soft_mask = irq_soft_mask_return();
239 	struct cpu_accounting_data *acct = &local_paca->accounting;
240 
241 	/* We are called early in the exception entry, before
242 	 * soft/hard_enabled are sync'ed to the expected state
243 	 * for the exception. We are hard disabled but the PACA
244 	 * needs to reflect that so various debug stuff doesn't
245 	 * complain.
246 	 */
247 	irq_soft_mask_set(IRQS_DISABLED);
248 
249 	sst = scan_dispatch_log(acct->starttime_user);
250 	ust = scan_dispatch_log(acct->starttime);
251 	acct->stime -= sst;
252 	acct->utime -= ust;
253 	acct->steal_time += ust + sst;
254 
255 	irq_soft_mask_set(save_irq_soft_mask);
256 }
257 
258 static inline u64 calculate_stolen_time(u64 stop_tb)
259 {
260 	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
261 		return 0;
262 
263 	if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx))
264 		return scan_dispatch_log(stop_tb);
265 
266 	return 0;
267 }
268 
269 #else /* CONFIG_PPC_SPLPAR */
270 static inline u64 calculate_stolen_time(u64 stop_tb)
271 {
272 	return 0;
273 }
274 
275 #endif /* CONFIG_PPC_SPLPAR */
276 
277 /*
278  * Account time for a transition between system, hard irq
279  * or soft irq state.
280  */
281 static unsigned long vtime_delta_scaled(struct cpu_accounting_data *acct,
282 					unsigned long now, unsigned long stime)
283 {
284 	unsigned long stime_scaled = 0;
285 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
286 	unsigned long nowscaled, deltascaled;
287 	unsigned long utime, utime_scaled;
288 
289 	nowscaled = read_spurr(now);
290 	deltascaled = nowscaled - acct->startspurr;
291 	acct->startspurr = nowscaled;
292 	utime = acct->utime - acct->utime_sspurr;
293 	acct->utime_sspurr = acct->utime;
294 
295 	/*
296 	 * Because we don't read the SPURR on every kernel entry/exit,
297 	 * deltascaled includes both user and system SPURR ticks.
298 	 * Apportion these ticks to system SPURR ticks and user
299 	 * SPURR ticks in the same ratio as the system time (delta)
300 	 * and user time (udelta) values obtained from the timebase
301 	 * over the same interval.  The system ticks get accounted here;
302 	 * the user ticks get saved up in paca->user_time_scaled to be
303 	 * used by account_process_tick.
304 	 */
305 	stime_scaled = stime;
306 	utime_scaled = utime;
307 	if (deltascaled != stime + utime) {
308 		if (utime) {
309 			stime_scaled = deltascaled * stime / (stime + utime);
310 			utime_scaled = deltascaled - stime_scaled;
311 		} else {
312 			stime_scaled = deltascaled;
313 		}
314 	}
315 	acct->utime_scaled += utime_scaled;
316 #endif
317 
318 	return stime_scaled;
319 }
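/*
 * Apportionment example with made-up numbers: if the timebase shows
 * stime = 100 and utime = 100 ticks since the last snapshot, but the
 * SPURR advanced by deltascaled = 300 over the same interval, then
 * stime_scaled = 300 * 100 / 200 = 150 and utime_scaled = 300 - 150 = 150.
 */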
320 
321 static unsigned long vtime_delta(struct task_struct *tsk,
322 				 unsigned long *stime_scaled,
323 				 unsigned long *steal_time)
324 {
325 	unsigned long now, stime;
326 	struct cpu_accounting_data *acct = get_accounting(tsk);
327 
328 	WARN_ON_ONCE(!irqs_disabled());
329 
330 	now = mftb();
331 	stime = now - acct->starttime;
332 	acct->starttime = now;
333 
334 	*stime_scaled = vtime_delta_scaled(acct, now, stime);
335 
336 	*steal_time = calculate_stolen_time(now);
337 
338 	return stime;
339 }
340 
341 void vtime_account_system(struct task_struct *tsk)
342 {
343 	unsigned long stime, stime_scaled, steal_time;
344 	struct cpu_accounting_data *acct = get_accounting(tsk);
345 
346 	stime = vtime_delta(tsk, &stime_scaled, &steal_time);
347 
348 	stime -= min(stime, steal_time);
349 	acct->steal_time += steal_time;
350 
351 	if ((tsk->flags & PF_VCPU) && !irq_count()) {
352 		acct->gtime += stime;
353 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
354 		acct->utime_scaled += stime_scaled;
355 #endif
356 	} else {
357 		if (hardirq_count())
358 			acct->hardirq_time += stime;
359 		else if (in_serving_softirq())
360 			acct->softirq_time += stime;
361 		else
362 			acct->stime += stime;
363 
364 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
365 		acct->stime_scaled += stime_scaled;
366 #endif
367 	}
368 }
369 EXPORT_SYMBOL_GPL(vtime_account_system);
370 
371 void vtime_account_idle(struct task_struct *tsk)
372 {
373 	unsigned long stime, stime_scaled, steal_time;
374 	struct cpu_accounting_data *acct = get_accounting(tsk);
375 
376 	stime = vtime_delta(tsk, &stime_scaled, &steal_time);
377 	acct->idle_time += stime + steal_time;
378 }
379 
380 static void vtime_flush_scaled(struct task_struct *tsk,
381 			       struct cpu_accounting_data *acct)
382 {
383 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
384 	if (acct->utime_scaled)
385 		tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);
386 	if (acct->stime_scaled)
387 		tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);
388 
389 	acct->utime_scaled = 0;
390 	acct->utime_sspurr = 0;
391 	acct->stime_scaled = 0;
392 #endif
393 }
394 
395 /*
396  * Account the whole cputime accumulated in the paca.
397  * Must be called with interrupts disabled.
398  * Assumes that vtime_account_system/idle() has been called
399  * recently (i.e. since the last entry from usermode) so that
400  * get_paca()->user_time_scaled is up to date.
401  */
402 void vtime_flush(struct task_struct *tsk)
403 {
404 	struct cpu_accounting_data *acct = get_accounting(tsk);
405 
406 	if (acct->utime)
407 		account_user_time(tsk, cputime_to_nsecs(acct->utime));
408 
409 	if (acct->gtime)
410 		account_guest_time(tsk, cputime_to_nsecs(acct->gtime));
411 
412 	if (IS_ENABLED(CONFIG_PPC_SPLPAR) && acct->steal_time) {
413 		account_steal_time(cputime_to_nsecs(acct->steal_time));
414 		acct->steal_time = 0;
415 	}
416 
417 	if (acct->idle_time)
418 		account_idle_time(cputime_to_nsecs(acct->idle_time));
419 
420 	if (acct->stime)
421 		account_system_index_time(tsk, cputime_to_nsecs(acct->stime),
422 					  CPUTIME_SYSTEM);
423 
424 	if (acct->hardirq_time)
425 		account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time),
426 					  CPUTIME_IRQ);
427 	if (acct->softirq_time)
428 		account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time),
429 					  CPUTIME_SOFTIRQ);
430 
431 	vtime_flush_scaled(tsk, acct);
432 
433 	acct->utime = 0;
434 	acct->gtime = 0;
435 	acct->idle_time = 0;
436 	acct->stime = 0;
437 	acct->hardirq_time = 0;
438 	acct->softirq_time = 0;
439 }
440 
441 #else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
442 #define calc_cputime_factors()
443 #endif
444 
445 void __delay(unsigned long loops)
446 {
447 	unsigned long start;
448 	int diff;
449 
450 	spin_begin();
451 	if (__USE_RTC()) {
452 		start = get_rtcl();
453 		do {
454 			/* the RTCL register wraps at 1000000000 */
455 			diff = get_rtcl() - start;
456 			if (diff < 0)
457 				diff += 1000000000;
458 			spin_cpu_relax();
459 		} while (diff < loops);
460 	} else if (tb_invalid) {
461 		/*
462 		 * TB is in error state and isn't ticking anymore.
463 		 * HMI handler was unable to recover from TB error.
464 		 * Return immediately, so that kernel won't get stuck here.
465 		 */
466 		spin_cpu_relax();
467 	} else {
468 		start = get_tbl();
469 		while (get_tbl() - start < loops)
470 			spin_cpu_relax();
471 	}
472 	spin_end();
473 }
474 EXPORT_SYMBOL(__delay);
475 
476 void udelay(unsigned long usecs)
477 {
478 	__delay(tb_ticks_per_usec * usecs);
479 }
480 EXPORT_SYMBOL(udelay);
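/*
 * Example, assuming tb_ticks_per_usec has been set to 512 (a 512 MHz
 * timebase): udelay(10) calls __delay(5120), which spins until the
 * timebase has advanced by 5120 ticks, i.e. roughly 10 microseconds.
 */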
481 
482 #ifdef CONFIG_SMP
483 unsigned long profile_pc(struct pt_regs *regs)
484 {
485 	unsigned long pc = instruction_pointer(regs);
486 
487 	if (in_lock_functions(pc))
488 		return regs->link;
489 
490 	return pc;
491 }
492 EXPORT_SYMBOL(profile_pc);
493 #endif
494 
495 #ifdef CONFIG_IRQ_WORK
496 
497 /*
498  * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
499  */
500 #ifdef CONFIG_PPC64
501 static inline unsigned long test_irq_work_pending(void)
502 {
503 	unsigned long x;
504 
505 	asm volatile("lbz %0,%1(13)"
506 		: "=r" (x)
507 		: "i" (offsetof(struct paca_struct, irq_work_pending)));
508 	return x;
509 }
510 
511 static inline void set_irq_work_pending_flag(void)
512 {
513 	asm volatile("stb %0,%1(13)" : :
514 		"r" (1),
515 		"i" (offsetof(struct paca_struct, irq_work_pending)));
516 }
517 
518 static inline void clear_irq_work_pending(void)
519 {
520 	asm volatile("stb %0,%1(13)" : :
521 		"r" (0),
522 		"i" (offsetof(struct paca_struct, irq_work_pending)));
523 }
524 
525 #else /* 32-bit */
526 
527 DEFINE_PER_CPU(u8, irq_work_pending);
528 
529 #define set_irq_work_pending_flag()	__this_cpu_write(irq_work_pending, 1)
530 #define test_irq_work_pending()		__this_cpu_read(irq_work_pending)
531 #define clear_irq_work_pending()	__this_cpu_write(irq_work_pending, 0)
532 
533 #endif /* 32 vs 64 bit */
534 
535 void arch_irq_work_raise(void)
536 {
537 	/*
538 	 * 64-bit code that uses irq soft-mask can just cause an immediate
539 	 * interrupt here that gets soft masked, if this is called under
540 	 * local_irq_disable(). It might be possible to prevent that happening
541 	 * by noticing interrupts are disabled and setting decrementer pending
542 	 * to be replayed when irqs are enabled. The problem there is that
543 	 * tracing can call irq_work_raise, including in code that does low
544 	 * level manipulations of irq soft-mask state (e.g., trace_hardirqs_on)
545 	 * which could get tangled up if we're messing with the same state
546 	 * here.
547 	 */
548 	preempt_disable();
549 	set_irq_work_pending_flag();
550 	set_dec(1);
551 	preempt_enable();
552 }
553 
554 #else  /* CONFIG_IRQ_WORK */
555 
556 #define test_irq_work_pending()	0
557 #define clear_irq_work_pending()
558 
559 #endif /* CONFIG_IRQ_WORK */
560 
561 /*
562  * timer_interrupt - gets called when the decrementer overflows,
563  * with interrupts disabled.
564  */
565 void timer_interrupt(struct pt_regs *regs)
566 {
567 	struct clock_event_device *evt = this_cpu_ptr(&decrementers);
568 	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
569 	struct pt_regs *old_regs;
570 	u64 now;
571 
572 	/* Some implementations of hotplug will get timer interrupts while
573 	 * offline; just ignore them. We also need to set
574 	 * decrementers_next_tb to the maximum so that __check_irq_replay
575 	 * doesn't replay the timer interrupt on return, otherwise we'd trap
576 	 * here infinitely :(
577 	 */
578 	if (unlikely(!cpu_online(smp_processor_id()))) {
579 		*next_tb = ~(u64)0;
580 		set_dec(decrementer_max);
581 		return;
582 	}
583 
584 	/* Ensure a positive value is written to the decrementer, or else
585 	 * some CPUs will continue to take decrementer exceptions. When the
586 	 * PPC_WATCHDOG (decrementer based) is configured, keep this at most
587 	 * 31 bits, which is about 4 seconds on most systems, which gives
588 	 * the watchdog a chance of catching timer interrupt hard lockups.
589 	 */
590 	if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
591 		set_dec(0x7fffffff);
592 	else
593 		set_dec(decrementer_max);
594 
595 	/* Conditionally hard-enable interrupts now that the DEC has been
596 	 * bumped to its maximum value
597 	 */
598 	may_hard_irq_enable();
599 
600 
601 #if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
602 	if (atomic_read(&ppc_n_lost_interrupts) != 0)
603 		do_IRQ(regs);
604 #endif
605 
606 	old_regs = set_irq_regs(regs);
607 	irq_enter();
608 	trace_timer_interrupt_entry(regs);
609 
610 	if (test_irq_work_pending()) {
611 		clear_irq_work_pending();
612 		irq_work_run();
613 	}
614 
615 	now = get_tb_or_rtc();
616 	if (now >= *next_tb) {
617 		*next_tb = ~(u64)0;
618 		if (evt->event_handler)
619 			evt->event_handler(evt);
620 		__this_cpu_inc(irq_stat.timer_irqs_event);
621 	} else {
622 		now = *next_tb - now;
623 		if (now <= decrementer_max)
624 			set_dec(now);
625 		/* We may have raced with new irq work */
626 		if (test_irq_work_pending())
627 			set_dec(1);
628 		__this_cpu_inc(irq_stat.timer_irqs_others);
629 	}
630 
631 	trace_timer_interrupt_exit(regs);
632 	irq_exit();
633 	set_irq_regs(old_regs);
634 }
635 EXPORT_SYMBOL(timer_interrupt);
636 
637 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
638 void timer_broadcast_interrupt(void)
639 {
640 	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
641 
642 	*next_tb = ~(u64)0;
643 	tick_receive_broadcast();
644 	__this_cpu_inc(irq_stat.broadcast_irqs_event);
645 }
646 #endif
647 
648 /*
649  * Hypervisor decrementer interrupts shouldn't occur but are sometimes
650  * left pending on exit from a KVM guest.  We don't need to do anything
651  * to clear them, as they are edge-triggered.
652  */
653 void hdec_interrupt(struct pt_regs *regs)
654 {
655 }
656 
657 #ifdef CONFIG_SUSPEND
658 static void generic_suspend_disable_irqs(void)
659 {
660 	/* Disable the decrementer, so that it doesn't interfere
661 	 * with suspending.
662 	 */
663 
664 	set_dec(decrementer_max);
665 	local_irq_disable();
666 	set_dec(decrementer_max);
667 }
668 
669 static void generic_suspend_enable_irqs(void)
670 {
671 	local_irq_enable();
672 }
673 
674 /* Overrides the weak version in kernel/power/main.c */
675 void arch_suspend_disable_irqs(void)
676 {
677 	if (ppc_md.suspend_disable_irqs)
678 		ppc_md.suspend_disable_irqs();
679 	generic_suspend_disable_irqs();
680 }
681 
682 /* Overrides the weak version in kernel/power/main.c */
683 void arch_suspend_enable_irqs(void)
684 {
685 	generic_suspend_enable_irqs();
686 	if (ppc_md.suspend_enable_irqs)
687 		ppc_md.suspend_enable_irqs();
688 }
689 #endif
690 
691 unsigned long long tb_to_ns(unsigned long long ticks)
692 {
693 	return mulhdu(ticks, tb_to_ns_scale) << tb_to_ns_shift;
694 }
695 EXPORT_SYMBOL_GPL(tb_to_ns);
696 
697 /*
698  * Scheduler clock - returns current time in nanosec units.
699  *
700  * Note: mulhdu(a, b) (multiply high double unsigned) returns
701  * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
702  * are 64-bit unsigned numbers.
703  */
704 notrace unsigned long long sched_clock(void)
705 {
706 	if (__USE_RTC())
707 		return get_rtc();
708 	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
709 }
710 
711 
712 #ifdef CONFIG_PPC_PSERIES
713 
714 /*
715  * Running clock - attempts to give a view of time passing for a virtualised
716  * kernel.
717  * Uses the VTB register if available, otherwise the next best guess.
718  */
719 unsigned long long running_clock(void)
720 {
721 	/*
722 	 * Don't read the VTB as a host since KVM does not switch the host
723 	 * timebase into the VTB when it takes a guest off the CPU; reading the
724 	 * VTB would result in reading the 'last switched out' guest VTB.
725 	 *
726 	 * Host kernels are often compiled with CONFIG_PPC_PSERIES enabled, so it
727 	 * would be unsafe to rely only on the #ifdef above.
728 	 */
729 	if (firmware_has_feature(FW_FEATURE_LPAR) &&
730 	    cpu_has_feature(CPU_FTR_ARCH_207S))
731 		return mulhdu(get_vtb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
732 
733 	/*
734 	 * This is the next best approximation without a VTB.
735 	 * On a host running bare metal there should never be any stolen
736 	 * time, and on a host which doesn't do any virtualisation TB *should*
737 	 * equal VTB, so it makes no difference anyway.
738 	 */
739 	return local_clock() - kcpustat_this_cpu->cpustat[CPUTIME_STEAL];
740 }
741 #endif
742 
743 static int __init get_freq(char *name, int cells, unsigned long *val)
744 {
745 	struct device_node *cpu;
746 	const __be32 *fp;
747 	int found = 0;
748 
749 	/* The cpu node should have timebase and clock frequency properties */
750 	cpu = of_find_node_by_type(NULL, "cpu");
751 
752 	if (cpu) {
753 		fp = of_get_property(cpu, name, NULL);
754 		if (fp) {
755 			found = 1;
756 			*val = of_read_ulong(fp, cells);
757 		}
758 
759 		of_node_put(cpu);
760 	}
761 
762 	return found;
763 }
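/*
 * Hypothetical example: a cpu node such as
 *
 *	cpu@0 {
 *		device_type = "cpu";
 *		timebase-frequency = <512000000>;
 *		clock-frequency = <2000000000>;
 *	};
 *
 * makes get_freq("timebase-frequency", 1, &ppc_tb_freq) return 1 and set
 * ppc_tb_freq to 512000000.  The two-cell "ibm,extended-..." properties
 * are tried first and allow values that do not fit in 32 bits.
 */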
764 
765 static void start_cpu_decrementer(void)
766 {
767 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
768 	unsigned int tcr;
769 
770 	/* Clear any pending timer interrupts */
771 	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
772 
773 	tcr = mfspr(SPRN_TCR);
774 	/*
775 	 * The watchdog may have already been enabled by u-boot. So leave
776 	 * TCR[WP] (Watchdog Period) alone.
777 	 */
778 	tcr &= TCR_WP_MASK;	/* Clear all bits except for TCR[WP] */
779 	tcr |= TCR_DIE;		/* Enable decrementer */
780 	mtspr(SPRN_TCR, tcr);
781 #endif
782 }
783 
784 void __init generic_calibrate_decr(void)
785 {
786 	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */
787 
788 	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
789 	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {
790 
791 		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
792 				"(not found)\n");
793 	}
794 
795 	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */
796 
797 	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
798 	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {
799 
800 		printk(KERN_ERR "WARNING: Estimating processor frequency "
801 				"(not found)\n");
802 	}
803 }
804 
805 int update_persistent_clock64(struct timespec64 now)
806 {
807 	struct rtc_time tm;
808 
809 	if (!ppc_md.set_rtc_time)
810 		return -ENODEV;
811 
812 	rtc_time64_to_tm(now.tv_sec + 1 + timezone_offset, &tm);
813 
814 	return ppc_md.set_rtc_time(&tm);
815 }
816 
817 static void __read_persistent_clock(struct timespec64 *ts)
818 {
819 	struct rtc_time tm;
820 	static int first = 1;
821 
822 	ts->tv_nsec = 0;
823 	/* XXX this is a little fragile but will work okay in the short term */
824 	if (first) {
825 		first = 0;
826 		if (ppc_md.time_init)
827 			timezone_offset = ppc_md.time_init();
828 
829 		/* get_boot_time() isn't guaranteed to be safe to call late */
830 		if (ppc_md.get_boot_time) {
831 			ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
832 			return;
833 		}
834 	}
835 	if (!ppc_md.get_rtc_time) {
836 		ts->tv_sec = 0;
837 		return;
838 	}
839 	ppc_md.get_rtc_time(&tm);
840 
841 	ts->tv_sec = rtc_tm_to_time64(&tm);
842 }
843 
844 void read_persistent_clock64(struct timespec64 *ts)
845 {
846 	__read_persistent_clock(ts);
847 
848 	/* Sanitize it in case real time clock is set below EPOCH */
849 	if (ts->tv_sec < 0) {
850 		ts->tv_sec = 0;
851 		ts->tv_nsec = 0;
852 	}
853 
854 }
855 
856 /* clocksource code */
857 static notrace u64 rtc_read(struct clocksource *cs)
858 {
859 	return (u64)get_rtc();
860 }
861 
862 static notrace u64 timebase_read(struct clocksource *cs)
863 {
864 	return (u64)get_tb();
865 }
866 
867 
868 void update_vsyscall(struct timekeeper *tk)
869 {
870 	struct timespec xt;
871 	struct clocksource *clock = tk->tkr_mono.clock;
872 	u32 mult = tk->tkr_mono.mult;
873 	u32 shift = tk->tkr_mono.shift;
874 	u64 cycle_last = tk->tkr_mono.cycle_last;
875 	u64 new_tb_to_xs, new_stamp_xsec;
876 	u64 frac_sec;
877 
878 	if (clock != &clocksource_timebase)
879 		return;
880 
881 	xt.tv_sec = tk->xtime_sec;
882 	xt.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
883 
884 	/* Make userspace gettimeofday spin until we're done. */
885 	++vdso_data->tb_update_count;
886 	smp_mb();
887 
888 	/*
889 	 * This computes ((2^20 / 1e9) * mult) >> shift as a
890 	 * 0.64 fixed-point fraction.
891 	 * The computation in the else clause below won't overflow
892 	 * (as long as the timebase frequency is >= 1.049 MHz)
893 	 * but loses precision because we lose the low bits of the constant
894 	 * in the shift.  Note that 19342813113834067 ~= 2^(20+64) / 1e9.
895 	 * For a shift of 24 the error is about 0.5e-9, or about 0.5ns
896 	 * over a second.  (Shift values are usually 22, 23 or 24.)
897 	 * For high frequency clocks such as the 512MHz timebase clock
898 	 * on POWER[6789], the mult value is small (e.g. 32768000)
899 	 * and so we can shift the constant by 16 initially
900 	 * (295147905179 ~= 2^(20+64-16) / 1e9) and then do the
901 	 * remaining shifts after the multiplication, which gives a
902 	 * more accurate result (e.g. with mult = 32768000, shift = 24,
903 	 * the error is only about 1.2e-12, or 0.7ns over 10 minutes).
904 	 */
905 	if (mult <= 62500000 && clock->shift >= 16)
906 		new_tb_to_xs = ((u64) mult * 295147905179ULL) >> (clock->shift - 16);
907 	else
908 		new_tb_to_xs = (u64) mult * (19342813113834067ULL >> clock->shift);
909 
910 	/*
911 	 * Compute the fractional second in units of 2^-32 seconds.
912 	 * The fractional second is tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift
913 	 * in nanoseconds, so multiplying that by 2^32 / 1e9 gives
914 	 * it in units of 2^-32 seconds.
915 	 * We assume shift <= 32 because clocks_calc_mult_shift()
916 	 * generates shift values in the range 0 - 32.
917 	 */
918 	frac_sec = tk->tkr_mono.xtime_nsec << (32 - shift);
919 	do_div(frac_sec, NSEC_PER_SEC);
920 
921 	/*
922 	 * Work out new stamp_xsec value for any legacy users of systemcfg.
923 	 * stamp_xsec is in units of 2^-20 seconds.
924 	 */
925 	new_stamp_xsec = frac_sec >> 12;
926 	new_stamp_xsec += tk->xtime_sec * XSEC_PER_SEC;
927 
928 	/*
929 	 * tb_update_count is used to allow the userspace gettimeofday code
930 	 * to assure itself that it sees a consistent view of the tb_to_xs and
931 	 * stamp_xsec variables.  It reads the tb_update_count, then reads
932 	 * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
933 	 * the two values of tb_update_count match and are even then the
934 	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
935 	 * loops back and reads them again until this criterion is met.
936 	 */
937 	vdso_data->tb_orig_stamp = cycle_last;
938 	vdso_data->stamp_xsec = new_stamp_xsec;
939 	vdso_data->tb_to_xs = new_tb_to_xs;
940 	vdso_data->wtom_clock_sec = tk->wall_to_monotonic.tv_sec;
941 	vdso_data->wtom_clock_nsec = tk->wall_to_monotonic.tv_nsec;
942 	vdso_data->stamp_xtime = xt;
943 	vdso_data->stamp_sec_fraction = frac_sec;
944 	vdso_data->hrtimer_res = hrtimer_resolution;
945 	smp_wmb();
946 	++(vdso_data->tb_update_count);
947 }
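/*
 * A minimal sketch of the reader side of the protocol described above.
 * The real reader is the powerpc vDSO gettimeofday code; the loop below
 * is purely illustrative:
 *
 *	do {
 *		seq = vdso_data->tb_update_count;
 *		smp_rmb();
 *		... read tb_orig_stamp, tb_to_xs, stamp_xsec ...
 *		smp_rmb();
 *	} while (seq != vdso_data->tb_update_count || (seq & 1));
 */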
948 
949 void update_vsyscall_tz(void)
950 {
951 	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
952 	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
953 }
954 
955 static void __init clocksource_init(void)
956 {
957 	struct clocksource *clock;
958 
959 	if (__USE_RTC())
960 		clock = &clocksource_rtc;
961 	else
962 		clock = &clocksource_timebase;
963 
964 	if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
965 		printk(KERN_ERR "clocksource: %s is already registered\n",
966 		       clock->name);
967 		return;
968 	}
969 
970 	printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
971 	       clock->name, clock->mult, clock->shift);
972 }
973 
974 static int decrementer_set_next_event(unsigned long evt,
975 				      struct clock_event_device *dev)
976 {
977 	__this_cpu_write(decrementers_next_tb, get_tb_or_rtc() + evt);
978 	set_dec(evt);
979 
980 	/* We may have raced with new irq work */
981 	if (test_irq_work_pending())
982 		set_dec(1);
983 
984 	return 0;
985 }
986 
987 static int decrementer_shutdown(struct clock_event_device *dev)
988 {
989 	decrementer_set_next_event(decrementer_max, dev);
990 	return 0;
991 }
992 
993 static void register_decrementer_clockevent(int cpu)
994 {
995 	struct clock_event_device *dec = &per_cpu(decrementers, cpu);
996 
997 	*dec = decrementer_clockevent;
998 	dec->cpumask = cpumask_of(cpu);
999 
1000 	clockevents_config_and_register(dec, ppc_tb_freq, 2, decrementer_max);
1001 
1002 	printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
1003 		    dec->name, dec->mult, dec->shift, cpu);
1004 
1005 	/* Set values for KVM, see kvm_emulate_dec() */
1006 	decrementer_clockevent.mult = dec->mult;
1007 	decrementer_clockevent.shift = dec->shift;
1008 }
1009 
1010 static void enable_large_decrementer(void)
1011 {
1012 	if (!cpu_has_feature(CPU_FTR_ARCH_300))
1013 		return;
1014 
1015 	if (decrementer_max <= DECREMENTER_DEFAULT_MAX)
1016 		return;
1017 
1018 	/*
1019 	 * If we're running as the hypervisor we need to enable the LD manually
1020 	 * otherwise firmware should have done it for us.
1021 	 */
1022 	if (cpu_has_feature(CPU_FTR_HVMODE))
1023 		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_LD);
1024 }
1025 
1026 static void __init set_decrementer_max(void)
1027 {
1028 	struct device_node *cpu;
1029 	u32 bits = 32;
1030 
1031 	/* Prior to ISAv3 the decrementer is always 32 bit */
1032 	if (!cpu_has_feature(CPU_FTR_ARCH_300))
1033 		return;
1034 
1035 	cpu = of_find_node_by_type(NULL, "cpu");
1036 
1037 	if (of_property_read_u32(cpu, "ibm,dec-bits", &bits) == 0) {
1038 		if (bits > 64 || bits < 32) {
1039 			pr_warn("time_init: firmware supplied invalid ibm,dec-bits");
1040 			bits = 32;
1041 		}
1042 
1043 		/* calculate the signed maximum given this many bits */
1044 		decrementer_max = (1ul << (bits - 1)) - 1;
1045 	}
1046 
1047 	of_node_put(cpu);
1048 
1049 	pr_info("time_init: %u bit decrementer (max: %llx)\n",
1050 		bits, decrementer_max);
1051 }
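/*
 * For example, a firmware-supplied "ibm,dec-bits" of 56 yields
 * decrementer_max = (1ul << 55) - 1 = 0x7fffffffffffff, the largest
 * positive value a 56-bit signed decrementer can hold.
 */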
1052 
1053 static void __init init_decrementer_clockevent(void)
1054 {
1055 	register_decrementer_clockevent(smp_processor_id());
1056 }
1057 
1058 void secondary_cpu_time_init(void)
1059 {
1060 	/* Enable and test the large decrementer for this cpu */
1061 	enable_large_decrementer();
1062 
1063 	/* Start the decrementer on CPUs that have manual control
1064 	 * such as BookE
1065 	 */
1066 	start_cpu_decrementer();
1067 
1068 	/* FIXME: Should make unrelated change to move snapshot_timebase
1069 	 * call here! */
1070 	register_decrementer_clockevent(smp_processor_id());
1071 }
1072 
1073 /* This function is only called on the boot processor */
1074 void __init time_init(void)
1075 {
1076 	struct div_result res;
1077 	u64 scale;
1078 	unsigned shift;
1079 
1080 	if (__USE_RTC()) {
1081 		/* 601 processor: dec counts down by 128 every 128ns */
1082 		ppc_tb_freq = 1000000000;
1083 	} else {
1084 		/* Normal PowerPC with timebase register */
1085 		ppc_md.calibrate_decr();
1086 		printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
1087 		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
1088 		printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
1089 		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
1090 	}
1091 
1092 	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
1093 	tb_ticks_per_sec = ppc_tb_freq;
1094 	tb_ticks_per_usec = ppc_tb_freq / 1000000;
1095 	calc_cputime_factors();
1096 
1097 	/*
1098 	 * Compute scale factor for sched_clock.
1099 	 * The calibrate_decr() function has set tb_ticks_per_sec,
1100 	 * which is the timebase frequency.
1101 	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
1102 	 * the 128-bit result as a 64.64 fixed-point number.
1103 	 * We then shift that number right until it is less than 1.0,
1104 	 * giving us the scale factor and shift count to use in
1105 	 * sched_clock().
1106 	 */
1107 	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
1108 	scale = res.result_low;
1109 	for (shift = 0; res.result_high != 0; ++shift) {
1110 		scale = (scale >> 1) | (res.result_high << 63);
1111 		res.result_high >>= 1;
1112 	}
1113 	tb_to_ns_scale = scale;
1114 	tb_to_ns_shift = shift;
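	/*
	 * For illustration, with a 512 MHz timebase: 1e9 / 512e6 = 1.953125,
	 * so one right shift leaves tb_to_ns_scale ~= 0.9765625 * 2^64 with
	 * tb_to_ns_shift = 1, and sched_clock() then returns about
	 * ticks * 1.953125 nanoseconds.
	 */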
1115 	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
1116 	boot_tb = get_tb_or_rtc();
1117 
1118 	/* If platform provided a timezone (pmac), we correct the time */
1119 	if (timezone_offset) {
1120 		sys_tz.tz_minuteswest = -timezone_offset / 60;
1121 		sys_tz.tz_dsttime = 0;
1122 	}
1123 
1124 	vdso_data->tb_update_count = 0;
1125 	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
1126 
1127 	/* initialise and enable the large decrementer (if we have one) */
1128 	set_decrementer_max();
1129 	enable_large_decrementer();
1130 
1131 	/* Start the decrementer on CPUs that have manual control
1132 	 * such as BookE
1133 	 */
1134 	start_cpu_decrementer();
1135 
1136 	/* Register the clocksource */
1137 	clocksource_init();
1138 
1139 	init_decrementer_clockevent();
1140 	tick_setup_hrtimer_broadcast();
1141 
1142 #ifdef CONFIG_COMMON_CLK
1143 	of_clk_init(NULL);
1144 #endif
1145 }
1146 
1147 /*
1148  * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
1149  * result.
1150  */
1151 void div128_by_32(u64 dividend_high, u64 dividend_low,
1152 		  unsigned divisor, struct div_result *dr)
1153 {
1154 	unsigned long a, b, c, d;
1155 	unsigned long w, x, y, z;
1156 	u64 ra, rb, rc;
1157 
1158 	a = dividend_high >> 32;
1159 	b = dividend_high & 0xffffffff;
1160 	c = dividend_low >> 32;
1161 	d = dividend_low & 0xffffffff;
1162 
1163 	w = a / divisor;
1164 	ra = ((u64)(a - (w * divisor)) << 32) + b;
1165 
1166 	rb = ((u64) do_div(ra, divisor) << 32) + c;
1167 	x = ra;
1168 
1169 	rc = ((u64) do_div(rb, divisor) << 32) + d;
1170 	y = rb;
1171 
1172 	do_div(rc, divisor);
1173 	z = rc;
1174 
1175 	dr->result_high = ((u64)w << 32) + x;
1176 	dr->result_low  = ((u64)y << 32) + z;
1177 
1178 }
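/*
 * The routine above is schoolbook long division in base 2^32: a, b, c, d
 * are the four 32-bit "digits" of the dividend and w, x, y, z the digits
 * of the quotient, with remainders carried between steps in ra, rb and rc.
 * For example, div128_by_32(1, 0, 10, &dr) computes 2^64 / 10, leaving
 * dr->result_high = 0 and dr->result_low = 0x1999999999999999 (the final
 * remainder, 6, is discarded).
 */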
1179 
1180 /* We don't need to calibrate delay, we use the CPU timebase for that */
1181 void calibrate_delay(void)
1182 {
1183 	/* Some generic code (such as spinlock debug) uses loops_per_jiffy
1184 	 * as the number of __delay(1) in a jiffy, so make it so
1185 	 */
1186 	loops_per_jiffy = tb_ticks_per_jiffy;
1187 }
1188 
1189 #if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
1190 static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
1191 {
1192 	ppc_md.get_rtc_time(tm);
1193 	return 0;
1194 }
1195 
1196 static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
1197 {
1198 	if (!ppc_md.set_rtc_time)
1199 		return -EOPNOTSUPP;
1200 
1201 	if (ppc_md.set_rtc_time(tm) < 0)
1202 		return -EOPNOTSUPP;
1203 
1204 	return 0;
1205 }
1206 
1207 static const struct rtc_class_ops rtc_generic_ops = {
1208 	.read_time = rtc_generic_get_time,
1209 	.set_time = rtc_generic_set_time,
1210 };
1211 
1212 static int __init rtc_init(void)
1213 {
1214 	struct platform_device *pdev;
1215 
1216 	if (!ppc_md.get_rtc_time)
1217 		return -ENODEV;
1218 
1219 	pdev = platform_device_register_data(NULL, "rtc-generic", -1,
1220 					     &rtc_generic_ops,
1221 					     sizeof(rtc_generic_ops));
1222 
1223 	return PTR_ERR_OR_ZERO(pdev);
1224 }
1225 
1226 device_initcall(rtc_init);
1227 #endif
1228