1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/drivers/clocksource/arm_arch_timer.c
4  *
5  *  Copyright (C) 2011 ARM Ltd.
6  *  All Rights Reserved
7  */
8 
9 #define pr_fmt(fmt) 	"arch_timer: " fmt
10 
11 #include <linux/init.h>
12 #include <linux/kernel.h>
13 #include <linux/device.h>
14 #include <linux/smp.h>
15 #include <linux/cpu.h>
16 #include <linux/cpu_pm.h>
17 #include <linux/clockchips.h>
18 #include <linux/clocksource.h>
19 #include <linux/clocksource_ids.h>
20 #include <linux/interrupt.h>
21 #include <linux/of_irq.h>
22 #include <linux/of_address.h>
23 #include <linux/io.h>
24 #include <linux/slab.h>
25 #include <linux/sched/clock.h>
26 #include <linux/sched_clock.h>
27 #include <linux/acpi.h>
28 #include <linux/arm-smccc.h>
29 #include <linux/ptp_kvm.h>
30 
31 #include <asm/arch_timer.h>
32 #include <asm/virt.h>
33 
34 #include <clocksource/arm_arch_timer.h>
35 
36 #define CNTTIDR		0x08
37 #define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4))
38 
39 #define CNTACR(n)	(0x40 + ((n) * 4))
40 #define CNTACR_RPCT	BIT(0)
41 #define CNTACR_RVCT	BIT(1)
42 #define CNTACR_RFRQ	BIT(2)
43 #define CNTACR_RVOFF	BIT(3)
44 #define CNTACR_RWVT	BIT(4)
45 #define CNTACR_RWPT	BIT(5)
46 
47 #define CNTVCT_LO	0x08
48 #define CNTVCT_HI	0x0c
49 #define CNTFRQ		0x10
50 #define CNTP_TVAL	0x28
51 #define CNTP_CTL	0x2c
52 #define CNTV_TVAL	0x38
53 #define CNTV_CTL	0x3c
54 
55 static unsigned arch_timers_present __initdata;
56 
57 static void __iomem *arch_counter_base;
58 
59 struct arch_timer {
60 	void __iomem *base;
61 	struct clock_event_device evt;
62 };
63 
64 #define to_arch_timer(e) container_of(e, struct arch_timer, evt)
65 
66 static u32 arch_timer_rate;
67 static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI];
68 
69 static struct clock_event_device __percpu *arch_timer_evt;
70 
71 static enum arch_timer_ppi_nr arch_timer_uses_ppi = ARCH_TIMER_VIRT_PPI;
72 static bool arch_timer_c3stop;
73 static bool arch_timer_mem_use_virtual;
74 static bool arch_counter_suspend_stop;
75 #ifdef CONFIG_GENERIC_GETTIMEOFDAY
76 static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_ARCHTIMER;
77 #else
78 static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_NONE;
79 #endif /* CONFIG_GENERIC_GETTIMEOFDAY */
80 
81 static cpumask_t evtstrm_available = CPU_MASK_NONE;
82 static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
83 
84 static int __init early_evtstrm_cfg(char *buf)
85 {
86 	return strtobool(buf, &evtstrm_enable);
87 }
88 early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
89 
90 /*
91  * Architected system timer support.
92  */
93 
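/*
 * Low-level register accessors: MMIO timer frames are accessed through the
 * per-frame CNTP/CNTV control and timer-value registers defined above, while
 * CP15 (system register) timers go through the arch-specific accessors.
 */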
94 static __always_inline
95 void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
96 			  struct clock_event_device *clk)
97 {
98 	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
99 		struct arch_timer *timer = to_arch_timer(clk);
100 		switch (reg) {
101 		case ARCH_TIMER_REG_CTRL:
102 			writel_relaxed(val, timer->base + CNTP_CTL);
103 			break;
104 		case ARCH_TIMER_REG_TVAL:
105 			writel_relaxed(val, timer->base + CNTP_TVAL);
106 			break;
107 		}
108 	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
109 		struct arch_timer *timer = to_arch_timer(clk);
110 		switch (reg) {
111 		case ARCH_TIMER_REG_CTRL:
112 			writel_relaxed(val, timer->base + CNTV_CTL);
113 			break;
114 		case ARCH_TIMER_REG_TVAL:
115 			writel_relaxed(val, timer->base + CNTV_TVAL);
116 			break;
117 		}
118 	} else {
119 		arch_timer_reg_write_cp15(access, reg, val);
120 	}
121 }
122 
123 static __always_inline
124 u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
125 			struct clock_event_device *clk)
126 {
127 	u32 val;
128 
129 	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
130 		struct arch_timer *timer = to_arch_timer(clk);
131 		switch (reg) {
132 		case ARCH_TIMER_REG_CTRL:
133 			val = readl_relaxed(timer->base + CNTP_CTL);
134 			break;
135 		case ARCH_TIMER_REG_TVAL:
136 			val = readl_relaxed(timer->base + CNTP_TVAL);
137 			break;
138 		}
139 	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
140 		struct arch_timer *timer = to_arch_timer(clk);
141 		switch (reg) {
142 		case ARCH_TIMER_REG_CTRL:
143 			val = readl_relaxed(timer->base + CNTV_CTL);
144 			break;
145 		case ARCH_TIMER_REG_TVAL:
146 			val = readl_relaxed(timer->base + CNTV_TVAL);
147 			break;
148 		}
149 	} else {
150 		val = arch_timer_reg_read_cp15(access, reg);
151 	}
152 
153 	return val;
154 }
155 
156 static notrace u64 arch_counter_get_cntpct_stable(void)
157 {
158 	return __arch_counter_get_cntpct_stable();
159 }
160 
161 static notrace u64 arch_counter_get_cntpct(void)
162 {
163 	return __arch_counter_get_cntpct();
164 }
165 
166 static notrace u64 arch_counter_get_cntvct_stable(void)
167 {
168 	return __arch_counter_get_cntvct_stable();
169 }
170 
171 static notrace u64 arch_counter_get_cntvct(void)
172 {
173 	return __arch_counter_get_cntvct();
174 }
175 
176 /*
177  * Default to cp15 based access because arm64 uses this function for
178  * sched_clock() before DT is probed and the cp15 method is guaranteed
179  * to exist on arm64. arm doesn't use this before DT is probed so even
180  * if we don't have the cp15 accessors we won't have a problem.
181  */
182 u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
183 EXPORT_SYMBOL_GPL(arch_timer_read_counter);
184 
185 static u64 arch_counter_read(struct clocksource *cs)
186 {
187 	return arch_timer_read_counter();
188 }
189 
190 static u64 arch_counter_read_cc(const struct cyclecounter *cc)
191 {
192 	return arch_timer_read_counter();
193 }
194 
195 static struct clocksource clocksource_counter = {
196 	.name	= "arch_sys_counter",
197 	.id	= CSID_ARM_ARCH_COUNTER,
198 	.rating	= 400,
199 	.read	= arch_counter_read,
200 	.mask	= CLOCKSOURCE_MASK(56),
201 	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
202 };
203 
204 static struct cyclecounter cyclecounter __ro_after_init = {
205 	.read	= arch_counter_read_cc,
206 	.mask	= CLOCKSOURCE_MASK(56),
207 };
208 
209 struct ate_acpi_oem_info {
210 	char oem_id[ACPI_OEM_ID_SIZE + 1];
211 	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
212 	u32 oem_revision;
213 };
214 
215 #ifdef CONFIG_FSL_ERRATUM_A008585
216 /*
217  * The number of retries is an arbitrary value well beyond the highest number
218  * of iterations the loop has been observed to take.
219  */
220 #define __fsl_a008585_read_reg(reg) ({			\
221 	u64 _old, _new;					\
222 	int _retries = 200;				\
223 							\
224 	do {						\
225 		_old = read_sysreg(reg);		\
226 		_new = read_sysreg(reg);		\
227 		_retries--;				\
228 	} while (unlikely(_old != _new) && _retries);	\
229 							\
230 	WARN_ON_ONCE(!_retries);			\
231 	_new;						\
232 })
233 
234 static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
235 {
236 	return __fsl_a008585_read_reg(cntp_tval_el0);
237 }
238 
239 static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
240 {
241 	return __fsl_a008585_read_reg(cntv_tval_el0);
242 }
243 
244 static u64 notrace fsl_a008585_read_cntpct_el0(void)
245 {
246 	return __fsl_a008585_read_reg(cntpct_el0);
247 }
248 
249 static u64 notrace fsl_a008585_read_cntvct_el0(void)
250 {
251 	return __fsl_a008585_read_reg(cntvct_el0);
252 }
253 #endif
254 
255 #ifdef CONFIG_HISILICON_ERRATUM_161010101
256 /*
257  * The only way to confirm the value is correct is to verify that the second
258  * read is larger than the first by less than 32, so clear the lower 5 bits
259  * to check whether the difference is 32 or more.
260  * Theoretically the erratum should not occur more than twice in succession
261  * when reading the system counter, but some interrupts may lead to more than
262  * two consecutive read errors and trigger the warning, so set the number of
263  * retries far beyond the number of iterations the loop has been observed to
264  * take.
265  */
266 #define __hisi_161010101_read_reg(reg) ({				\
267 	u64 _old, _new;						\
268 	int _retries = 50;					\
269 								\
270 	do {							\
271 		_old = read_sysreg(reg);			\
272 		_new = read_sysreg(reg);			\
273 		_retries--;					\
274 	} while (unlikely((_new - _old) >> 5) && _retries);	\
275 								\
276 	WARN_ON_ONCE(!_retries);				\
277 	_new;							\
278 })
279 
280 static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
281 {
282 	return __hisi_161010101_read_reg(cntp_tval_el0);
283 }
284 
285 static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
286 {
287 	return __hisi_161010101_read_reg(cntv_tval_el0);
288 }
289 
290 static u64 notrace hisi_161010101_read_cntpct_el0(void)
291 {
292 	return __hisi_161010101_read_reg(cntpct_el0);
293 }
294 
295 static u64 notrace hisi_161010101_read_cntvct_el0(void)
296 {
297 	return __hisi_161010101_read_reg(cntvct_el0);
298 }
299 
300 static struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
301 	/*
302 	 * Note that trailing spaces are required to properly match
303 	 * the OEM table information.
304 	 */
305 	{
306 		.oem_id		= "HISI  ",
307 		.oem_table_id	= "HIP05   ",
308 		.oem_revision	= 0,
309 	},
310 	{
311 		.oem_id		= "HISI  ",
312 		.oem_table_id	= "HIP06   ",
313 		.oem_revision	= 0,
314 	},
315 	{
316 		.oem_id		= "HISI  ",
317 		.oem_table_id	= "HIP07   ",
318 		.oem_revision	= 0,
319 	},
320 	{ /* Sentinel indicating the end of the OEM array */ },
321 };
322 #endif
323 
324 #ifdef CONFIG_ARM64_ERRATUM_858921
325 static u64 notrace arm64_858921_read_cntpct_el0(void)
326 {
327 	u64 old, new;
328 
329 	old = read_sysreg(cntpct_el0);
330 	new = read_sysreg(cntpct_el0);
331 	return (((old ^ new) >> 32) & 1) ? old : new;
332 }
333 
334 static u64 notrace arm64_858921_read_cntvct_el0(void)
335 {
336 	u64 old, new;
337 
338 	old = read_sysreg(cntvct_el0);
339 	new = read_sysreg(cntvct_el0);
340 	return (((old ^ new) >> 32) & 1) ? old : new;
341 }
342 #endif
343 
344 #ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
345 /*
346  * The low bits of the counter registers are indeterminate while bit 10 or
347  * greater is rolling over. Since the counter value can jump both backward
348  * (7ff -> 000 -> 800) and forward (7ff -> fff -> 800), ignore register values
349  * with all ones or all zeros in the low bits. Bound the loop by the maximum
350  * number of CPU cycles in 3 consecutive 24 MHz counter periods.
351  */
352 #define __sun50i_a64_read_reg(reg) ({					\
353 	u64 _val;							\
354 	int _retries = 150;						\
355 									\
356 	do {								\
357 		_val = read_sysreg(reg);				\
358 		_retries--;						\
359 	} while (((_val + 1) & GENMASK(8, 0)) <= 1 && _retries);	\
360 									\
361 	WARN_ON_ONCE(!_retries);					\
362 	_val;								\
363 })
364 
365 static u64 notrace sun50i_a64_read_cntpct_el0(void)
366 {
367 	return __sun50i_a64_read_reg(cntpct_el0);
368 }
369 
370 static u64 notrace sun50i_a64_read_cntvct_el0(void)
371 {
372 	return __sun50i_a64_read_reg(cntvct_el0);
373 }
374 
375 static u32 notrace sun50i_a64_read_cntp_tval_el0(void)
376 {
377 	return read_sysreg(cntp_cval_el0) - sun50i_a64_read_cntpct_el0();
378 }
379 
380 static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
381 {
382 	return read_sysreg(cntv_cval_el0) - sun50i_a64_read_cntvct_el0();
383 }
384 #endif
385 
386 #ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
387 DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
388 EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
389 
390 static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);
391 
392 static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
393 						struct clock_event_device *clk)
394 {
395 	unsigned long ctrl;
396 	u64 cval;
397 
398 	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
399 	ctrl |= ARCH_TIMER_CTRL_ENABLE;
400 	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
401 
402 	if (access == ARCH_TIMER_PHYS_ACCESS) {
403 		cval = evt + arch_counter_get_cntpct_stable();
404 		write_sysreg(cval, cntp_cval_el0);
405 	} else {
406 		cval = evt + arch_counter_get_cntvct_stable();
407 		write_sysreg(cval, cntv_cval_el0);
408 	}
409 
410 	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
411 }
412 
413 static __maybe_unused int erratum_set_next_event_tval_virt(unsigned long evt,
414 					    struct clock_event_device *clk)
415 {
416 	erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
417 	return 0;
418 }
419 
420 static __maybe_unused int erratum_set_next_event_tval_phys(unsigned long evt,
421 					    struct clock_event_device *clk)
422 {
423 	erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
424 	return 0;
425 }
426 
427 static const struct arch_timer_erratum_workaround ool_workarounds[] = {
428 #ifdef CONFIG_FSL_ERRATUM_A008585
429 	{
430 		.match_type = ate_match_dt,
431 		.id = "fsl,erratum-a008585",
432 		.desc = "Freescale erratum a008585",
433 		.read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
434 		.read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
435 		.read_cntpct_el0 = fsl_a008585_read_cntpct_el0,
436 		.read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
437 		.set_next_event_phys = erratum_set_next_event_tval_phys,
438 		.set_next_event_virt = erratum_set_next_event_tval_virt,
439 	},
440 #endif
441 #ifdef CONFIG_HISILICON_ERRATUM_161010101
442 	{
443 		.match_type = ate_match_dt,
444 		.id = "hisilicon,erratum-161010101",
445 		.desc = "HiSilicon erratum 161010101",
446 		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
447 		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
448 		.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
449 		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
450 		.set_next_event_phys = erratum_set_next_event_tval_phys,
451 		.set_next_event_virt = erratum_set_next_event_tval_virt,
452 	},
453 	{
454 		.match_type = ate_match_acpi_oem_info,
455 		.id = hisi_161010101_oem_info,
456 		.desc = "HiSilicon erratum 161010101",
457 		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
458 		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
459 		.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
460 		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
461 		.set_next_event_phys = erratum_set_next_event_tval_phys,
462 		.set_next_event_virt = erratum_set_next_event_tval_virt,
463 	},
464 #endif
465 #ifdef CONFIG_ARM64_ERRATUM_858921
466 	{
467 		.match_type = ate_match_local_cap_id,
468 		.id = (void *)ARM64_WORKAROUND_858921,
469 		.desc = "ARM erratum 858921",
470 		.read_cntpct_el0 = arm64_858921_read_cntpct_el0,
471 		.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
472 	},
473 #endif
474 #ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
475 	{
476 		.match_type = ate_match_dt,
477 		.id = "allwinner,erratum-unknown1",
478 		.desc = "Allwinner erratum UNKNOWN1",
479 		.read_cntp_tval_el0 = sun50i_a64_read_cntp_tval_el0,
480 		.read_cntv_tval_el0 = sun50i_a64_read_cntv_tval_el0,
481 		.read_cntpct_el0 = sun50i_a64_read_cntpct_el0,
482 		.read_cntvct_el0 = sun50i_a64_read_cntvct_el0,
483 		.set_next_event_phys = erratum_set_next_event_tval_phys,
484 		.set_next_event_virt = erratum_set_next_event_tval_virt,
485 	},
486 #endif
487 #ifdef CONFIG_ARM64_ERRATUM_1418040
488 	{
489 		.match_type = ate_match_local_cap_id,
490 		.id = (void *)ARM64_WORKAROUND_1418040,
491 		.desc = "ARM erratum 1418040",
492 		.disable_compat_vdso = true,
493 	},
494 #endif
495 };
496 
497 typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
498 			       const void *);
499 
500 static
501 bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa,
502 				 const void *arg)
503 {
504 	const struct device_node *np = arg;
505 
506 	return of_property_read_bool(np, wa->id);
507 }
508 
509 static
510 bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa,
511 					const void *arg)
512 {
513 	return this_cpu_has_cap((uintptr_t)wa->id);
514 }
515 
516 
517 static
518 bool arch_timer_check_acpi_oem_erratum(const struct arch_timer_erratum_workaround *wa,
519 				       const void *arg)
520 {
521 	static const struct ate_acpi_oem_info empty_oem_info = {};
522 	const struct ate_acpi_oem_info *info = wa->id;
523 	const struct acpi_table_header *table = arg;
524 
525 	/* Iterate over the ACPI OEM info array, looking for a match */
526 	while (memcmp(info, &empty_oem_info, sizeof(*info))) {
527 		if (!memcmp(info->oem_id, table->oem_id, ACPI_OEM_ID_SIZE) &&
528 		    !memcmp(info->oem_table_id, table->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
529 		    info->oem_revision == table->oem_revision)
530 			return true;
531 
532 		info++;
533 	}
534 
535 	return false;
536 }
537 
538 static const struct arch_timer_erratum_workaround *
539 arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
540 			  ate_match_fn_t match_fn,
541 			  void *arg)
542 {
543 	int i;
544 
545 	for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
546 		if (ool_workarounds[i].match_type != type)
547 			continue;
548 
549 		if (match_fn(&ool_workarounds[i], arg))
550 			return &ool_workarounds[i];
551 	}
552 
553 	return NULL;
554 }
555 
556 static
557 void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa,
558 				  bool local)
559 {
560 	int i;
561 
562 	if (local) {
563 		__this_cpu_write(timer_unstable_counter_workaround, wa);
564 	} else {
565 		for_each_possible_cpu(i)
566 			per_cpu(timer_unstable_counter_workaround, i) = wa;
567 	}
568 
569 	if (wa->read_cntvct_el0 || wa->read_cntpct_el0)
570 		atomic_set(&timer_unstable_counter_workaround_in_use, 1);
571 
572 	/*
573 	 * Don't use the vdso fastpath if errata require using the
574 	 * out-of-line counter accessor. We may change our mind pretty
575 	 * late in the game (with a per-CPU erratum, for example), so
576 	 * change both the default value and the vdso itself.
577 	 */
578 	if (wa->read_cntvct_el0) {
579 		clocksource_counter.vdso_clock_mode = VDSO_CLOCKMODE_NONE;
580 		vdso_default = VDSO_CLOCKMODE_NONE;
581 	} else if (wa->disable_compat_vdso && vdso_default != VDSO_CLOCKMODE_NONE) {
582 		vdso_default = VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT;
583 		clocksource_counter.vdso_clock_mode = vdso_default;
584 	}
585 }
586 
587 static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
588 					    void *arg)
589 {
590 	const struct arch_timer_erratum_workaround *wa, *__wa;
591 	ate_match_fn_t match_fn = NULL;
592 	bool local = false;
593 
594 	switch (type) {
595 	case ate_match_dt:
596 		match_fn = arch_timer_check_dt_erratum;
597 		break;
598 	case ate_match_local_cap_id:
599 		match_fn = arch_timer_check_local_cap_erratum;
600 		local = true;
601 		break;
602 	case ate_match_acpi_oem_info:
603 		match_fn = arch_timer_check_acpi_oem_erratum;
604 		break;
605 	default:
606 		WARN_ON(1);
607 		return;
608 	}
609 
610 	wa = arch_timer_iterate_errata(type, match_fn, arg);
611 	if (!wa)
612 		return;
613 
614 	__wa = __this_cpu_read(timer_unstable_counter_workaround);
615 	if (__wa && wa != __wa)
616 		pr_warn("Can't enable workaround for %s (clashes with %s)\n",
617 			wa->desc, __wa->desc);
618 
619 	if (__wa)
620 		return;
621 
622 	arch_timer_enable_workaround(wa, local);
623 	pr_info("Enabling %s workaround for %s\n",
624 		local ? "local" : "global", wa->desc);
625 }
626 
627 static bool arch_timer_this_cpu_has_cntvct_wa(void)
628 {
629 	return has_erratum_handler(read_cntvct_el0);
630 }
631 
632 static bool arch_timer_counter_has_wa(void)
633 {
634 	return atomic_read(&timer_unstable_counter_workaround_in_use);
635 }
636 #else
637 #define arch_timer_check_ool_workaround(t,a)		do { } while(0)
638 #define arch_timer_this_cpu_has_cntvct_wa()		({false;})
639 #define arch_timer_counter_has_wa()			({false;})
640 #endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
641 
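/*
 * Shared interrupt handler: if the timer has fired (ISTATUS set), mask the
 * timer interrupt and invoke the clockevent handler; otherwise return IRQ_NONE.
 */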
642 static __always_inline irqreturn_t timer_handler(const int access,
643 					struct clock_event_device *evt)
644 {
645 	unsigned long ctrl;
646 
647 	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
648 	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
649 		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
650 		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
651 		evt->event_handler(evt);
652 		return IRQ_HANDLED;
653 	}
654 
655 	return IRQ_NONE;
656 }
657 
658 static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
659 {
660 	struct clock_event_device *evt = dev_id;
661 
662 	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
663 }
664 
665 static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
666 {
667 	struct clock_event_device *evt = dev_id;
668 
669 	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
670 }
671 
672 static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
673 {
674 	struct clock_event_device *evt = dev_id;
675 
676 	return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
677 }
678 
679 static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
680 {
681 	struct clock_event_device *evt = dev_id;
682 
683 	return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
684 }
685 
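/* Stop the timer by clearing the enable bit in the control register. */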
686 static __always_inline int timer_shutdown(const int access,
687 					  struct clock_event_device *clk)
688 {
689 	unsigned long ctrl;
690 
691 	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
692 	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
693 	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
694 
695 	return 0;
696 }
697 
698 static int arch_timer_shutdown_virt(struct clock_event_device *clk)
699 {
700 	return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
701 }
702 
703 static int arch_timer_shutdown_phys(struct clock_event_device *clk)
704 {
705 	return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
706 }
707 
708 static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
709 {
710 	return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
711 }
712 
713 static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
714 {
715 	return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
716 }
717 
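/*
 * Program the next event by writing the downcounting timer value (TVAL) and
 * re-enabling the timer with its interrupt unmasked.
 */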
718 static __always_inline void set_next_event(const int access, unsigned long evt,
719 					   struct clock_event_device *clk)
720 {
721 	unsigned long ctrl;
722 	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
723 	ctrl |= ARCH_TIMER_CTRL_ENABLE;
724 	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
725 	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
726 	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
727 }
728 
729 static int arch_timer_set_next_event_virt(unsigned long evt,
730 					  struct clock_event_device *clk)
731 {
732 	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
733 	return 0;
734 }
735 
736 static int arch_timer_set_next_event_phys(unsigned long evt,
737 					  struct clock_event_device *clk)
738 {
739 	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
740 	return 0;
741 }
742 
743 static int arch_timer_set_next_event_virt_mem(unsigned long evt,
744 					      struct clock_event_device *clk)
745 {
746 	set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
747 	return 0;
748 }
749 
750 static int arch_timer_set_next_event_phys_mem(unsigned long evt,
751 					      struct clock_event_device *clk)
752 {
753 	set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
754 	return 0;
755 }
756 
757 static void __arch_timer_setup(unsigned type,
758 			       struct clock_event_device *clk)
759 {
760 	clk->features = CLOCK_EVT_FEAT_ONESHOT;
761 
762 	if (type == ARCH_TIMER_TYPE_CP15) {
763 		typeof(clk->set_next_event) sne;
764 
765 		arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
766 
767 		if (arch_timer_c3stop)
768 			clk->features |= CLOCK_EVT_FEAT_C3STOP;
769 		clk->name = "arch_sys_timer";
770 		clk->rating = 450;
771 		clk->cpumask = cpumask_of(smp_processor_id());
772 		clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
773 		switch (arch_timer_uses_ppi) {
774 		case ARCH_TIMER_VIRT_PPI:
775 			clk->set_state_shutdown = arch_timer_shutdown_virt;
776 			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
777 			sne = erratum_handler(set_next_event_virt);
778 			break;
779 		case ARCH_TIMER_PHYS_SECURE_PPI:
780 		case ARCH_TIMER_PHYS_NONSECURE_PPI:
781 		case ARCH_TIMER_HYP_PPI:
782 			clk->set_state_shutdown = arch_timer_shutdown_phys;
783 			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
784 			sne = erratum_handler(set_next_event_phys);
785 			break;
786 		default:
787 			BUG();
788 		}
789 
790 		clk->set_next_event = sne;
791 	} else {
792 		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
793 		clk->name = "arch_mem_timer";
794 		clk->rating = 400;
795 		clk->cpumask = cpu_possible_mask;
796 		if (arch_timer_mem_use_virtual) {
797 			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
798 			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
799 			clk->set_next_event =
800 				arch_timer_set_next_event_virt_mem;
801 		} else {
802 			clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
803 			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
804 			clk->set_next_event =
805 				arch_timer_set_next_event_phys_mem;
806 		}
807 	}
808 
809 	clk->set_state_shutdown(clk);
810 
811 	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
812 }
813 
814 static void arch_timer_evtstrm_enable(int divider)
815 {
816 	u32 cntkctl = arch_timer_get_cntkctl();
817 
818 	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
819 	/* Set the divider and enable virtual event stream */
820 	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
821 			| ARCH_TIMER_VIRT_EVT_EN;
822 	arch_timer_set_cntkctl(cntkctl);
823 	arch_timer_set_evtstrm_feature();
824 	cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
825 }
826 
827 static void arch_timer_configure_evtstream(void)
828 {
829 	int evt_stream_div, lsb;
830 
831 	/*
832 	 * As the event stream can at most be generated at half the frequency
833 	 * of the counter, use half the frequency when computing the divider.
834 	 */
835 	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ / 2;
836 
837 	/*
838 	 * Find the closest power of two to the divisor. If the adjacent bit
839 	 * of lsb (last set bit, starts from 0) is set, then we use (lsb + 1).
840 	 */
841 	lsb = fls(evt_stream_div) - 1;
842 	if (lsb > 0 && (evt_stream_div & BIT(lsb - 1)))
843 		lsb++;
844 
845 	/* enable event stream */
846 	arch_timer_evtstrm_enable(max(0, min(lsb, 15)));
847 }
848 
849 static void arch_counter_set_user_access(void)
850 {
851 	u32 cntkctl = arch_timer_get_cntkctl();
852 
853 	/* Disable user access to the timers and both counters */
854 	/* Also disable virtual event stream */
855 	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
856 			| ARCH_TIMER_USR_VT_ACCESS_EN
857 		        | ARCH_TIMER_USR_VCT_ACCESS_EN
858 			| ARCH_TIMER_VIRT_EVT_EN
859 			| ARCH_TIMER_USR_PCT_ACCESS_EN);
860 
861 	/*
862 	 * Enable user access to the virtual counter if it doesn't
863 	 * need to be worked around. The vdso may already have been
864 	 * disabled, though.
865 	 */
866 	if (arch_timer_this_cpu_has_cntvct_wa())
867 		pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id());
868 	else
869 		cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
870 
871 	arch_timer_set_cntkctl(cntkctl);
872 }
873 
874 static bool arch_timer_has_nonsecure_ppi(void)
875 {
876 	return (arch_timer_uses_ppi == ARCH_TIMER_PHYS_SECURE_PPI &&
877 		arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
878 }
879 
880 static u32 check_ppi_trigger(int irq)
881 {
882 	u32 flags = irq_get_trigger_type(irq);
883 
884 	if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
885 		pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
886 		pr_warn("WARNING: Please fix your firmware\n");
887 		flags = IRQF_TRIGGER_LOW;
888 	}
889 
890 	return flags;
891 }
892 
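/*
 * CPU hotplug "starting" callback: set up the per-CPU clockevent, enable the
 * timer PPI(s) with the trigger type reported by the firmware, configure user
 * access to the counter and, if requested, the event stream.
 */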
893 static int arch_timer_starting_cpu(unsigned int cpu)
894 {
895 	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
896 	u32 flags;
897 
898 	__arch_timer_setup(ARCH_TIMER_TYPE_CP15, clk);
899 
900 	flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
901 	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);
902 
903 	if (arch_timer_has_nonsecure_ppi()) {
904 		flags = check_ppi_trigger(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
905 		enable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
906 				  flags);
907 	}
908 
909 	arch_counter_set_user_access();
910 	if (evtstrm_enable)
911 		arch_timer_configure_evtstream();
912 
913 	return 0;
914 }
915 
916 static int validate_timer_rate(void)
917 {
918 	if (!arch_timer_rate)
919 		return -EINVAL;
920 
921 	/* Arch timer frequency < 1MHz can cause trouble */
922 	WARN_ON(arch_timer_rate < 1000000);
923 
924 	return 0;
925 }
926 
927 /*
928  * For historical reasons, when probing with DT we use whichever (non-zero)
929  * rate was probed first, and don't verify that others match. If the first node
930  * probed has a clock-frequency property, this overrides the HW register.
931  */
932 static void arch_timer_of_configure_rate(u32 rate, struct device_node *np)
933 {
934 	/* Who has more than one independent system counter? */
935 	if (arch_timer_rate)
936 		return;
937 
938 	if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate))
939 		arch_timer_rate = rate;
940 
941 	/* Check the timer frequency. */
942 	if (validate_timer_rate())
943 		pr_warn("frequency not available\n");
944 }
945 
946 static void arch_timer_banner(unsigned type)
947 {
948 	pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
949 		type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "",
950 		type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ?
951 			" and " : "",
952 		type & ARCH_TIMER_TYPE_MEM ? "mmio" : "",
953 		(unsigned long)arch_timer_rate / 1000000,
954 		(unsigned long)(arch_timer_rate / 10000) % 100,
955 		type & ARCH_TIMER_TYPE_CP15 ?
956 			(arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys" :
957 			"",
958 		type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? "/" : "",
959 		type & ARCH_TIMER_TYPE_MEM ?
960 			arch_timer_mem_use_virtual ? "virt" : "phys" :
961 			"");
962 }
963 
964 u32 arch_timer_get_rate(void)
965 {
966 	return arch_timer_rate;
967 }
968 
969 bool arch_timer_evtstrm_available(void)
970 {
971 	/*
972 	 * We might get called from a preemptible context. This is fine
973 	 * because availability of the event stream should be always the same
974 	 * for a preemptible context and context where we might resume a task.
975 	 */
976 	return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available);
977 }
978 
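/*
 * The MMIO counter is read as two 32-bit halves; re-read the high word until
 * it is stable to guard against a low-word rollover between the accesses.
 */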
979 static u64 arch_counter_get_cntvct_mem(void)
980 {
981 	u32 vct_lo, vct_hi, tmp_hi;
982 
983 	do {
984 		vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
985 		vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
986 		tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
987 	} while (vct_hi != tmp_hi);
988 
989 	return ((u64) vct_hi << 32) | vct_lo;
990 }
991 
992 static struct arch_timer_kvm_info arch_timer_kvm_info;
993 
994 struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
995 {
996 	return &arch_timer_kvm_info;
997 }
998 
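/*
 * Select the counter read function (virtual vs. physical, with or without an
 * erratum workaround) and register it as clocksource, KVM timecounter and
 * sched_clock source.
 */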
999 static void __init arch_counter_register(unsigned type)
1000 {
1001 	u64 start_count;
1002 
1003 	/* Register the CP15 based counter if we have one */
1004 	if (type & ARCH_TIMER_TYPE_CP15) {
1005 		u64 (*rd)(void);
1006 
1007 		if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
1008 		    arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
1009 			if (arch_timer_counter_has_wa())
1010 				rd = arch_counter_get_cntvct_stable;
1011 			else
1012 				rd = arch_counter_get_cntvct;
1013 		} else {
1014 			if (arch_timer_counter_has_wa())
1015 				rd = arch_counter_get_cntpct_stable;
1016 			else
1017 				rd = arch_counter_get_cntpct;
1018 		}
1019 
1020 		arch_timer_read_counter = rd;
1021 		clocksource_counter.vdso_clock_mode = vdso_default;
1022 	} else {
1023 		arch_timer_read_counter = arch_counter_get_cntvct_mem;
1024 	}
1025 
1026 	if (!arch_counter_suspend_stop)
1027 		clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
1028 	start_count = arch_timer_read_counter();
1029 	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
1030 	cyclecounter.mult = clocksource_counter.mult;
1031 	cyclecounter.shift = clocksource_counter.shift;
1032 	timecounter_init(&arch_timer_kvm_info.timecounter,
1033 			 &cyclecounter, start_count);
1034 
1035 	/* 56 bits minimum, so we assume worst case rollover */
1036 	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
1037 }
1038 
1039 static void arch_timer_stop(struct clock_event_device *clk)
1040 {
1041 	pr_debug("disable IRQ%d cpu #%d\n", clk->irq, smp_processor_id());
1042 
1043 	disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
1044 	if (arch_timer_has_nonsecure_ppi())
1045 		disable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
1046 
1047 	clk->set_state_shutdown(clk);
1048 }
1049 
1050 static int arch_timer_dying_cpu(unsigned int cpu)
1051 {
1052 	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
1053 
1054 	cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
1055 
1056 	arch_timer_stop(clk);
1057 	return 0;
1058 }
1059 
1060 #ifdef CONFIG_CPU_PM
1061 static DEFINE_PER_CPU(unsigned long, saved_cntkctl);
1062 static int arch_timer_cpu_pm_notify(struct notifier_block *self,
1063 				    unsigned long action, void *hcpu)
1064 {
1065 	if (action == CPU_PM_ENTER) {
1066 		__this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl());
1067 
1068 		cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
1069 	} else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) {
1070 		arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));
1071 
1072 		if (arch_timer_have_evtstrm_feature())
1073 			cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
1074 	}
1075 	return NOTIFY_OK;
1076 }
1077 
1078 static struct notifier_block arch_timer_cpu_pm_notifier = {
1079 	.notifier_call = arch_timer_cpu_pm_notify,
1080 };
1081 
1082 static int __init arch_timer_cpu_pm_init(void)
1083 {
1084 	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
1085 }
1086 
1087 static void __init arch_timer_cpu_pm_deinit(void)
1088 {
1089 	WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
1090 }
1091 
1092 #else
1093 static int __init arch_timer_cpu_pm_init(void)
1094 {
1095 	return 0;
1096 }
1097 
1098 static void __init arch_timer_cpu_pm_deinit(void)
1099 {
1100 }
1101 #endif
1102 
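/*
 * Request the per-CPU timer interrupt(s), register the CPU PM notifier and
 * the hotplug state that brings the timer up on each CPU.
 */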
1103 static int __init arch_timer_register(void)
1104 {
1105 	int err;
1106 	int ppi;
1107 
1108 	arch_timer_evt = alloc_percpu(struct clock_event_device);
1109 	if (!arch_timer_evt) {
1110 		err = -ENOMEM;
1111 		goto out;
1112 	}
1113 
1114 	ppi = arch_timer_ppi[arch_timer_uses_ppi];
1115 	switch (arch_timer_uses_ppi) {
1116 	case ARCH_TIMER_VIRT_PPI:
1117 		err = request_percpu_irq(ppi, arch_timer_handler_virt,
1118 					 "arch_timer", arch_timer_evt);
1119 		break;
1120 	case ARCH_TIMER_PHYS_SECURE_PPI:
1121 	case ARCH_TIMER_PHYS_NONSECURE_PPI:
1122 		err = request_percpu_irq(ppi, arch_timer_handler_phys,
1123 					 "arch_timer", arch_timer_evt);
1124 		if (!err && arch_timer_has_nonsecure_ppi()) {
1125 			ppi = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
1126 			err = request_percpu_irq(ppi, arch_timer_handler_phys,
1127 						 "arch_timer", arch_timer_evt);
1128 			if (err)
1129 				free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_SECURE_PPI],
1130 						arch_timer_evt);
1131 		}
1132 		break;
1133 	case ARCH_TIMER_HYP_PPI:
1134 		err = request_percpu_irq(ppi, arch_timer_handler_phys,
1135 					 "arch_timer", arch_timer_evt);
1136 		break;
1137 	default:
1138 		BUG();
1139 	}
1140 
1141 	if (err) {
1142 		pr_err("can't register interrupt %d (%d)\n", ppi, err);
1143 		goto out_free;
1144 	}
1145 
1146 	err = arch_timer_cpu_pm_init();
1147 	if (err)
1148 		goto out_unreg_notify;
1149 
1150 	/* Register and immediately configure the timer on the boot CPU */
1151 	err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
1152 				"clockevents/arm/arch_timer:starting",
1153 				arch_timer_starting_cpu, arch_timer_dying_cpu);
1154 	if (err)
1155 		goto out_unreg_cpupm;
1156 	return 0;
1157 
1158 out_unreg_cpupm:
1159 	arch_timer_cpu_pm_deinit();
1160 
1161 out_unreg_notify:
1162 	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
1163 	if (arch_timer_has_nonsecure_ppi())
1164 		free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
1165 				arch_timer_evt);
1166 
1167 out_free:
1168 	free_percpu(arch_timer_evt);
1169 out:
1170 	return err;
1171 }
1172 
1173 static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
1174 {
1175 	int ret;
1176 	irq_handler_t func;
1177 	struct arch_timer *t;
1178 
1179 	t = kzalloc(sizeof(*t), GFP_KERNEL);
1180 	if (!t)
1181 		return -ENOMEM;
1182 
1183 	t->base = base;
1184 	t->evt.irq = irq;
1185 	__arch_timer_setup(ARCH_TIMER_TYPE_MEM, &t->evt);
1186 
1187 	if (arch_timer_mem_use_virtual)
1188 		func = arch_timer_handler_virt_mem;
1189 	else
1190 		func = arch_timer_handler_phys_mem;
1191 
1192 	ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
1193 	if (ret) {
1194 		pr_err("Failed to request mem timer irq\n");
1195 		kfree(t);
1196 	}
1197 
1198 	return ret;
1199 }
1200 
1201 static const struct of_device_id arch_timer_of_match[] __initconst = {
1202 	{ .compatible   = "arm,armv7-timer",    },
1203 	{ .compatible   = "arm,armv8-timer",    },
1204 	{},
1205 };
1206 
1207 static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
1208 	{ .compatible   = "arm,armv7-timer-mem", },
1209 	{},
1210 };
1211 
1212 static bool __init arch_timer_needs_of_probing(void)
1213 {
1214 	struct device_node *dn;
1215 	bool needs_probing = false;
1216 	unsigned int mask = ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM;
1217 
1218 	/* We have two timers, and both device-tree nodes are probed. */
1219 	if ((arch_timers_present & mask) == mask)
1220 		return false;
1221 
1222 	/*
1223 	 * Only one type of timer is probed,
1224 	 * check if we have another type of timer node in device-tree.
1225 	 */
1226 	if (arch_timers_present & ARCH_TIMER_TYPE_CP15)
1227 		dn = of_find_matching_node(NULL, arch_timer_mem_of_match);
1228 	else
1229 		dn = of_find_matching_node(NULL, arch_timer_of_match);
1230 
1231 	if (dn && of_device_is_available(dn))
1232 		needs_probing = true;
1233 
1234 	of_node_put(dn);
1235 
1236 	return needs_probing;
1237 }
1238 
1239 static int __init arch_timer_common_init(void)
1240 {
1241 	arch_timer_banner(arch_timers_present);
1242 	arch_counter_register(arch_timers_present);
1243 	return arch_timer_arch_init();
1244 }
1245 
1246 /**
1247  * arch_timer_select_ppi() - Select suitable PPI for the current system.
1248  *
1249  * If HYP mode is available, we know that the physical timer
1250  * has been configured to be accessible from PL1. Use it, so
1251  * that a guest can use the virtual timer instead.
1252  *
1253  * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
1254  * accesses to CNTP_*_EL1 registers are silently redirected to
1255  * their CNTHP_*_EL2 counterparts, and use a different PPI
1256  * number.
1257  *
1258  * If no interrupt provided for virtual timer, we'll have to
1259  * stick to the physical timer. It'd better be accessible...
1260  * For arm64 we never use the secure interrupt.
1261  *
1262  * Return: a suitable PPI type for the current system.
1263  */
1264 static enum arch_timer_ppi_nr __init arch_timer_select_ppi(void)
1265 {
1266 	if (is_kernel_in_hyp_mode())
1267 		return ARCH_TIMER_HYP_PPI;
1268 
1269 	if (!is_hyp_mode_available() && arch_timer_ppi[ARCH_TIMER_VIRT_PPI])
1270 		return ARCH_TIMER_VIRT_PPI;
1271 
1272 	if (IS_ENABLED(CONFIG_ARM64))
1273 		return ARCH_TIMER_PHYS_NONSECURE_PPI;
1274 
1275 	return ARCH_TIMER_PHYS_SECURE_PPI;
1276 }
1277 
1278 static void __init arch_timer_populate_kvm_info(void)
1279 {
1280 	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
1281 	if (is_kernel_in_hyp_mode())
1282 		arch_timer_kvm_info.physical_irq = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
1283 }
1284 
1285 static int __init arch_timer_of_init(struct device_node *np)
1286 {
1287 	int i, ret;
1288 	u32 rate;
1289 
1290 	if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
1291 		pr_warn("multiple nodes in dt, skipping\n");
1292 		return 0;
1293 	}
1294 
1295 	arch_timers_present |= ARCH_TIMER_TYPE_CP15;
1296 	for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++)
1297 		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
1298 
1299 	arch_timer_populate_kvm_info();
1300 
1301 	rate = arch_timer_get_cntfrq();
1302 	arch_timer_of_configure_rate(rate, np);
1303 
1304 	arch_timer_c3stop = !of_property_read_bool(np, "always-on");
1305 
1306 	/* Check for globally applicable workarounds */
1307 	arch_timer_check_ool_workaround(ate_match_dt, np);
1308 
1309 	/*
1310 	 * If we cannot rely on firmware initializing the timer registers then
1311 	 * we should use the physical timers instead.
1312 	 */
1313 	if (IS_ENABLED(CONFIG_ARM) &&
1314 	    of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
1315 		arch_timer_uses_ppi = ARCH_TIMER_PHYS_SECURE_PPI;
1316 	else
1317 		arch_timer_uses_ppi = arch_timer_select_ppi();
1318 
1319 	if (!arch_timer_ppi[arch_timer_uses_ppi]) {
1320 		pr_err("No interrupt available, giving up\n");
1321 		return -EINVAL;
1322 	}
1323 
1324 	/* On some systems, the counter stops ticking when in suspend. */
1325 	arch_counter_suspend_stop = of_property_read_bool(np,
1326 							 "arm,no-tick-in-suspend");
1327 
1328 	ret = arch_timer_register();
1329 	if (ret)
1330 		return ret;
1331 
1332 	if (arch_timer_needs_of_probing())
1333 		return 0;
1334 
1335 	return arch_timer_common_init();
1336 }
1337 TIMER_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
1338 TIMER_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
1339 
1340 static u32 __init
1341 arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
1342 {
1343 	void __iomem *base;
1344 	u32 rate;
1345 
1346 	base = ioremap(frame->cntbase, frame->size);
1347 	if (!base) {
1348 		pr_err("Unable to map frame @ %pa\n", &frame->cntbase);
1349 		return 0;
1350 	}
1351 
1352 	rate = readl_relaxed(base + CNTFRQ);
1353 
1354 	iounmap(base);
1355 
1356 	return rate;
1357 }
1358 
1359 static struct arch_timer_mem_frame * __init
1360 arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem)
1361 {
1362 	struct arch_timer_mem_frame *frame, *best_frame = NULL;
1363 	void __iomem *cntctlbase;
1364 	u32 cnttidr;
1365 	int i;
1366 
1367 	cntctlbase = ioremap(timer_mem->cntctlbase, timer_mem->size);
1368 	if (!cntctlbase) {
1369 		pr_err("Can't map CNTCTLBase @ %pa\n",
1370 			&timer_mem->cntctlbase);
1371 		return NULL;
1372 	}
1373 
1374 	cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
1375 
1376 	/*
1377 	 * Try to find a virtual capable frame. Otherwise fall back to a
1378 	 * physical capable frame.
1379 	 */
1380 	for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
1381 		u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
1382 			     CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
1383 
1384 		frame = &timer_mem->frame[i];
1385 		if (!frame->valid)
1386 			continue;
1387 
1388 		/* Try enabling everything, and see what sticks */
1389 		writel_relaxed(cntacr, cntctlbase + CNTACR(i));
1390 		cntacr = readl_relaxed(cntctlbase + CNTACR(i));
1391 
1392 		if ((cnttidr & CNTTIDR_VIRT(i)) &&
1393 		    !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
1394 			best_frame = frame;
1395 			arch_timer_mem_use_virtual = true;
1396 			break;
1397 		}
1398 
1399 		if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
1400 			continue;
1401 
1402 		best_frame = frame;
1403 	}
1404 
1405 	iounmap(cntctlbase);
1406 
1407 	return best_frame;
1408 }
1409 
1410 static int __init
1411 arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)
1412 {
1413 	void __iomem *base;
1414 	int ret, irq = 0;
1415 
1416 	if (arch_timer_mem_use_virtual)
1417 		irq = frame->virt_irq;
1418 	else
1419 		irq = frame->phys_irq;
1420 
1421 	if (!irq) {
1422 		pr_err("Frame missing %s irq.\n",
1423 		       arch_timer_mem_use_virtual ? "virt" : "phys");
1424 		return -EINVAL;
1425 	}
1426 
1427 	if (!request_mem_region(frame->cntbase, frame->size,
1428 				"arch_mem_timer"))
1429 		return -EBUSY;
1430 
1431 	base = ioremap(frame->cntbase, frame->size);
1432 	if (!base) {
1433 		pr_err("Can't map frame's registers\n");
1434 		return -ENXIO;
1435 	}
1436 
1437 	ret = arch_timer_mem_register(base, irq);
1438 	if (ret) {
1439 		iounmap(base);
1440 		return ret;
1441 	}
1442 
1443 	arch_counter_base = base;
1444 	arch_timers_present |= ARCH_TIMER_TYPE_MEM;
1445 
1446 	return 0;
1447 }
1448 
1449 static int __init arch_timer_mem_of_init(struct device_node *np)
1450 {
1451 	struct arch_timer_mem *timer_mem;
1452 	struct arch_timer_mem_frame *frame;
1453 	struct device_node *frame_node;
1454 	struct resource res;
1455 	int ret = -EINVAL;
1456 	u32 rate;
1457 
1458 	timer_mem = kzalloc(sizeof(*timer_mem), GFP_KERNEL);
1459 	if (!timer_mem)
1460 		return -ENOMEM;
1461 
1462 	if (of_address_to_resource(np, 0, &res))
1463 		goto out;
1464 	timer_mem->cntctlbase = res.start;
1465 	timer_mem->size = resource_size(&res);
1466 
1467 	for_each_available_child_of_node(np, frame_node) {
1468 		u32 n;
1469 		struct arch_timer_mem_frame *frame;
1470 
1471 		if (of_property_read_u32(frame_node, "frame-number", &n)) {
1472 			pr_err(FW_BUG "Missing frame-number.\n");
1473 			of_node_put(frame_node);
1474 			goto out;
1475 		}
1476 		if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
1477 			pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n",
1478 			       ARCH_TIMER_MEM_MAX_FRAMES - 1);
1479 			of_node_put(frame_node);
1480 			goto out;
1481 		}
1482 		frame = &timer_mem->frame[n];
1483 
1484 		if (frame->valid) {
1485 			pr_err(FW_BUG "Duplicated frame-number.\n");
1486 			of_node_put(frame_node);
1487 			goto out;
1488 		}
1489 
1490 		if (of_address_to_resource(frame_node, 0, &res)) {
1491 			of_node_put(frame_node);
1492 			goto out;
1493 		}
1494 		frame->cntbase = res.start;
1495 		frame->size = resource_size(&res);
1496 
1497 		frame->virt_irq = irq_of_parse_and_map(frame_node,
1498 						       ARCH_TIMER_VIRT_SPI);
1499 		frame->phys_irq = irq_of_parse_and_map(frame_node,
1500 						       ARCH_TIMER_PHYS_SPI);
1501 
1502 		frame->valid = true;
1503 	}
1504 
1505 	frame = arch_timer_mem_find_best_frame(timer_mem);
1506 	if (!frame) {
1507 		pr_err("Unable to find a suitable frame in timer @ %pa\n",
1508 			&timer_mem->cntctlbase);
1509 		ret = -EINVAL;
1510 		goto out;
1511 	}
1512 
1513 	rate = arch_timer_mem_frame_get_cntfrq(frame);
1514 	arch_timer_of_configure_rate(rate, np);
1515 
1516 	ret = arch_timer_mem_frame_register(frame);
1517 	if (!ret && !arch_timer_needs_of_probing())
1518 		ret = arch_timer_common_init();
1519 out:
1520 	kfree(timer_mem);
1521 	return ret;
1522 }
1523 TIMER_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
1524 		       arch_timer_mem_of_init);
1525 
1526 #ifdef CONFIG_ACPI_GTDT
1527 static int __init
1528 arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem)
1529 {
1530 	struct arch_timer_mem_frame *frame;
1531 	u32 rate;
1532 	int i;
1533 
1534 	for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
1535 		frame = &timer_mem->frame[i];
1536 
1537 		if (!frame->valid)
1538 			continue;
1539 
1540 		rate = arch_timer_mem_frame_get_cntfrq(frame);
1541 		if (rate == arch_timer_rate)
1542 			continue;
1543 
1544 		pr_err(FW_BUG "CNTFRQ mismatch: frame @ %pa: (0x%08lx), CPU: (0x%08lx)\n",
1545 			&frame->cntbase,
1546 			(unsigned long)rate, (unsigned long)arch_timer_rate);
1547 
1548 		return -EINVAL;
1549 	}
1550 
1551 	return 0;
1552 }
1553 
1554 static int __init arch_timer_mem_acpi_init(int platform_timer_count)
1555 {
1556 	struct arch_timer_mem *timers, *timer;
1557 	struct arch_timer_mem_frame *frame, *best_frame = NULL;
1558 	int timer_count, i, ret = 0;
1559 
1560 	timers = kcalloc(platform_timer_count, sizeof(*timers),
1561 			    GFP_KERNEL);
1562 	if (!timers)
1563 		return -ENOMEM;
1564 
1565 	ret = acpi_arch_timer_mem_init(timers, &timer_count);
1566 	if (ret || !timer_count)
1567 		goto out;
1568 
1569 	/*
1570 	 * While unlikely, it's theoretically possible that none of the frames
1571 	 * in a timer exposes the combination of features we want.
1572 	 */
1573 	for (i = 0; i < timer_count; i++) {
1574 		timer = &timers[i];
1575 
1576 		frame = arch_timer_mem_find_best_frame(timer);
1577 		if (!best_frame)
1578 			best_frame = frame;
1579 
1580 		ret = arch_timer_mem_verify_cntfrq(timer);
1581 		if (ret) {
1582 			pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n");
1583 			goto out;
1584 		}
1585 
1586 		if (!best_frame) /* implies !frame */
1587 			/*
1588 			 * Only complain about missing suitable frames if we
1589 			 * haven't already found one in a previous iteration.
1590 			 */
1591 			pr_err("Unable to find a suitable frame in timer @ %pa\n",
1592 				&timer->cntctlbase);
1593 	}
1594 
1595 	if (best_frame)
1596 		ret = arch_timer_mem_frame_register(best_frame);
1597 out:
1598 	kfree(timers);
1599 	return ret;
1600 }
1601 
1602 /* Initialize per-processor generic timer and memory-mapped timer (if present) */
1603 static int __init arch_timer_acpi_init(struct acpi_table_header *table)
1604 {
1605 	int ret, platform_timer_count;
1606 
1607 	if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
1608 		pr_warn("already initialized, skipping\n");
1609 		return -EINVAL;
1610 	}
1611 
1612 	arch_timers_present |= ARCH_TIMER_TYPE_CP15;
1613 
1614 	ret = acpi_gtdt_init(table, &platform_timer_count);
1615 	if (ret)
1616 		return ret;
1617 
1618 	arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI] =
1619 		acpi_gtdt_map_ppi(ARCH_TIMER_PHYS_NONSECURE_PPI);
1620 
1621 	arch_timer_ppi[ARCH_TIMER_VIRT_PPI] =
1622 		acpi_gtdt_map_ppi(ARCH_TIMER_VIRT_PPI);
1623 
1624 	arch_timer_ppi[ARCH_TIMER_HYP_PPI] =
1625 		acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI);
1626 
1627 	arch_timer_populate_kvm_info();
1628 
1629 	/*
1630 	 * When probing via ACPI, we have no mechanism to override the sysreg
1631 	 * CNTFRQ value. This *must* be correct.
1632 	 */
1633 	arch_timer_rate = arch_timer_get_cntfrq();
1634 	ret = validate_timer_rate();
1635 	if (ret) {
1636 		pr_err(FW_BUG "frequency not available.\n");
1637 		return ret;
1638 	}
1639 
1640 	arch_timer_uses_ppi = arch_timer_select_ppi();
1641 	if (!arch_timer_ppi[arch_timer_uses_ppi]) {
1642 		pr_err("No interrupt available, giving up\n");
1643 		return -EINVAL;
1644 	}
1645 
1646 	/* Always-on capability */
1647 	arch_timer_c3stop = acpi_gtdt_c3stop(arch_timer_uses_ppi);
1648 
1649 	/* Check for globally applicable workarounds */
1650 	arch_timer_check_ool_workaround(ate_match_acpi_oem_info, table);
1651 
1652 	ret = arch_timer_register();
1653 	if (ret)
1654 		return ret;
1655 
1656 	if (platform_timer_count &&
1657 	    arch_timer_mem_acpi_init(platform_timer_count))
1658 		pr_err("Failed to initialize memory-mapped timer.\n");
1659 
1660 	return arch_timer_common_init();
1661 }
1662 TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
1663 #endif
1664 
1665 int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *ts,
1666 				 struct clocksource **cs)
1667 {
1668 	struct arm_smccc_res hvc_res;
1669 	u32 ptp_counter;
1670 	ktime_t ktime;
1671 
1672 	if (!IS_ENABLED(CONFIG_HAVE_ARM_SMCCC_DISCOVERY))
1673 		return -EOPNOTSUPP;
1674 
1675 	if (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI)
1676 		ptp_counter = KVM_PTP_VIRT_COUNTER;
1677 	else
1678 		ptp_counter = KVM_PTP_PHYS_COUNTER;
1679 
1680 	arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID,
1681 			     ptp_counter, &hvc_res);
1682 
1683 	if ((int)(hvc_res.a0) < 0)
1684 		return -EOPNOTSUPP;
1685 
1686 	ktime = (u64)hvc_res.a0 << 32 | hvc_res.a1;
1687 	*ts = ktime_to_timespec64(ktime);
1688 	if (cycle)
1689 		*cycle = (u64)hvc_res.a2 << 32 | hvc_res.a3;
1690 	if (cs)
1691 		*cs = &clocksource_counter;
1692 
1693 	return 0;
1694 }
1695 EXPORT_SYMBOL_GPL(kvm_arch_ptp_get_crosststamp);
1696