// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * All RISC-V systems have a timer attached to every hart. These timers can
 * be read from the "time" and "timeh" CSRs, and can use the SBI to set up
 * events.
 */
10 #include <linux/clocksource.h>
11 #include <linux/clockchips.h>
12 #include <linux/cpu.h>
13 #include <linux/delay.h>
14 #include <linux/irq.h>
15 #include <linux/sched_clock.h>
16 #include <asm/smp.h>
17 #include <asm/sbi.h>
18
/*
 * Program the next clockevent to fire @delta timer cycles from now.
 *
 * Re-enables the supervisor timer interrupt first (the interrupt handler
 * masks it on entry), then asks the SBI to program the compare value.
 * Always succeeds, so it returns 0.
 */
static int riscv_clock_next_event(unsigned long delta,
		struct clock_event_device *ce)
{
	csr_set(sie, SIE_STIE);
	sbi_set_timer(get_cycles64() + delta);
	return 0;
}
26
/*
 * Per-hart clockevent device. Each hart has its own timer, so each CPU
 * registers its own device; only one-shot mode is supported since the
 * hardware has no periodic mode.
 */
static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = {
	.name = "riscv_timer_clockevent",
	.features = CLOCK_EVT_FEAT_ONESHOT,
	.rating = 100,
	.set_next_event = riscv_clock_next_event,
};
33
/*
 * It is guaranteed that all the timers across all the harts are synchronized
 * within one tick of each other, so while this could technically go
 * backwards when hopping between CPUs, practically it won't happen.
 */
static unsigned long long riscv_clocksource_rdtime(struct clocksource *cs)
{
	/* Raw 64-bit cycle count from the "time" CSR(s). */
	return get_cycles64();
}
43
/*
 * Scheduler clock: the raw cycle counter, registered with
 * sched_clock_register() below. Marked notrace so ftrace does not
 * instrument a function used by the tracing/scheduling core itself.
 */
static u64 notrace riscv_sched_clock(void)
{
	return get_cycles64();
}
48
/*
 * System-wide clocksource backed by the hart timers. Full 64-bit mask and
 * CONTINUOUS flag: the counter runs even when CPUs are idle.
 */
static struct clocksource riscv_clocksource = {
	.name = "riscv_clocksource",
	.rating = 300,
	.mask = CLOCKSOURCE_MASK(64),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
	.read = riscv_clocksource_rdtime,
};
56
/*
 * CPU-hotplug "starting" callback, run on the incoming CPU: register this
 * hart's clockevent device and unmask its supervisor timer interrupt.
 *
 * min_delta is 100 cycles, max_delta 0x7fffffff cycles (presumably kept
 * within 31 bits to avoid overflow in delta handling — TODO confirm).
 */
static int riscv_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *ce = per_cpu_ptr(&riscv_clock_event, cpu);

	ce->cpumask = cpumask_of(cpu);
	clockevents_config_and_register(ce, riscv_timebase, 100, 0x7fffffff);

	csr_set(sie, SIE_STIE);
	return 0;
}
67
/*
 * CPU-hotplug "dying" callback: mask the supervisor timer interrupt on a
 * CPU that is going offline so no further timer events are delivered.
 */
static int riscv_timer_dying_cpu(unsigned int cpu)
{
	csr_clear(sie, SIE_STIE);
	return 0;
}
73
/* called directly from the low-level interrupt handler */
void riscv_timer_interrupt(void)
{
	struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event);

	/*
	 * Mask the timer interrupt before dispatch; it is re-enabled by
	 * riscv_clock_next_event() when the next event is programmed.
	 */
	csr_clear(sie, SIE_STIE);
	evdev->event_handler(evdev);
}
82
riscv_timer_init_dt(struct device_node * n)83 static int __init riscv_timer_init_dt(struct device_node *n)
84 {
85 int cpuid, hartid, error;
86
87 hartid = riscv_of_processor_hartid(n);
88 if (hartid < 0) {
89 pr_warn("Not valid hartid for node [%pOF] error = [%d]\n",
90 n, hartid);
91 return hartid;
92 }
93
94 cpuid = riscv_hartid_to_cpuid(hartid);
95 if (cpuid < 0) {
96 pr_warn("Invalid cpuid for hartid [%d]\n", hartid);
97 return cpuid;
98 }
99
100 if (cpuid != smp_processor_id())
101 return 0;
102
103 pr_info("%s: Registering clocksource cpuid [%d] hartid [%d]\n",
104 __func__, cpuid, hartid);
105 error = clocksource_register_hz(&riscv_clocksource, riscv_timebase);
106 if (error) {
107 pr_err("RISCV timer register failed [%d] for cpu = [%d]\n",
108 error, cpuid);
109 return error;
110 }
111
112 sched_clock_register(riscv_sched_clock, 64, riscv_timebase);
113
114 error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
115 "clockevents/riscv/timer:starting",
116 riscv_timer_starting_cpu, riscv_timer_dying_cpu);
117 if (error)
118 pr_err("cpu hp setup state failed for RISCV timer [%d]\n",
119 error);
120 return error;
121 }
122
123 TIMER_OF_DECLARE(riscv_timer, "riscv", riscv_timer_init_dt);
124