// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/random.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/inst.h>
#include "kvmcpu.h"
#include "trace.h"
#include "kvm_compat.h"

/**
 * ktime_to_tick() - Scale a ktime_t to a 64-bit stable timer tick count.
 * @vcpu: Virtual CPU.
 * @now: Host ktime to convert.
 *
 * Caches the dynamic nanosecond bias in vcpu->arch.timer_dyn_bias.
 */
static u64 ktime_to_tick(struct kvm_vcpu *vcpu, ktime_t now)
{
	s64 now_ns, periods;
	u64 delta;

	now_ns = ktime_to_ns(now);
	delta = now_ns + vcpu->arch.timer_dyn_bias;

	if (delta >= vcpu->arch.timer_period) {
		/* If delta is out of safe range the bias needs adjusting */
		periods = div64_s64(now_ns, vcpu->arch.timer_period);
		vcpu->arch.timer_dyn_bias = -periods * vcpu->arch.timer_period;
		/* Recalculate delta with new bias */
		delta = now_ns + vcpu->arch.timer_dyn_bias;
	}

	/*
	 * We have ensured that delta < timer_period, so the
	 * multiplication below stays within the u64 range.
	 */
	return div_u64(delta * vcpu->arch.timer_mhz, MNSEC_PER_SEC);
}

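/*
 * Worked example for the scaling above (a sketch; it assumes MNSEC_PER_SEC
 * is NSEC_PER_SEC >> 20, mirroring the timer_hz >> 20 scaling done in
 * kvm_init_timer(), and the 100 MHz frequency is hypothetical): with
 * timer_mhz = 100000000 >> 20 = 95 and MNSEC_PER_SEC = 953, a delta of
 * 1000000 ns converts to 1000000 * 95 / 953 = 99685 ticks, close to the
 * exact 100000. Pre-scaling both constants by 2^20 keeps the intermediate
 * product within 64 bits.
 */
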
/**
 * kvm_resume_hrtimer() - Resume hrtimer, updating expiry.
 * @vcpu: Virtual CPU.
 * @now: ktime at point of resume.
 * @stable_timer: stable timer at point of resume.
 *
 * Resumes the timer and updates the timer expiry based on @now and
 * @stable_timer.
 */
static void kvm_resume_hrtimer(struct kvm_vcpu *vcpu, ktime_t now, u64 stable_timer)
{
	u64 delta;
	ktime_t expire;

	/*
	 * Convert the remaining stable timer ticks into nanoseconds
	 * relative to @now to compute the hrtimer expiry.
	 */
	delta = div_u64(stable_timer * MNSEC_PER_SEC, vcpu->arch.timer_mhz);
	expire = ktime_add_ns(now, delta);

	/* Update hrtimer to use new timeout */
	hrtimer_cancel(&vcpu->arch.swtimer);
	hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
}

/**
 * kvm_init_timer() - Initialise stable timer.
 * @vcpu: Virtual CPU.
 * @timer_hz: Frequency of timer.
 *
 * Initialise the timer to the specified frequency, zero it, and record the
 * bias between the host clock and the zeroed guest timer.
 */
void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long timer_hz)
{
	ktime_t now;
	unsigned long ticks;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/* Pre-scale the frequency by 2^20 to match the MNSEC_PER_SEC scaling */
	vcpu->arch.timer_mhz = timer_hz >> 20;
	/* Nanoseconds for the timer to count through its full range */
	vcpu->arch.timer_period = div_u64((u64)MNSEC_PER_SEC * IOCSR_TIMER_MASK, vcpu->arch.timer_mhz);
	vcpu->arch.timer_dyn_bias = 0;

	/* Starting at 0 */
	ticks = 0;
	now = ktime_get();
	vcpu->arch.timer_bias = ticks - ktime_to_tick(vcpu, now);
	vcpu->arch.timer_bias &= IOCSR_TIMER_MASK;

	kvm_write_sw_gcsr(csr, KVM_CSR_TVAL, ticks);
}

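/*
 * Note: timer_bias is not consumed in this file; from context it appears
 * to offset ktime_to_tick() results elsewhere so that the guest's stable
 * timer reads as starting from zero at vCPU initialisation.
 */
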
/**
 * kvm_count_timeout() - Push timer forward on timeout.
 * @vcpu: Virtual CPU.
 *
 * Handle an hrtimer event by pushing the hrtimer forward one period.
 *
 * Returns: The hrtimer_restart value to return to the hrtimer subsystem.
 */
enum hrtimer_restart kvm_count_timeout(struct kvm_vcpu *vcpu)
{
	unsigned long timer_cfg;

	/* Add the timer period to the current expiry time */
	timer_cfg = kvm_read_sw_gcsr(vcpu->arch.csr, KVM_CSR_TCFG);
	if (timer_cfg & KVM_TCFG_PERIOD) {
		/* Convert the period from timer ticks to nanoseconds */
		hrtimer_add_expires_ns(&vcpu->arch.swtimer,
				div_u64((timer_cfg & KVM_TCFG_VAL) * MNSEC_PER_SEC,
					vcpu->arch.timer_mhz));
		return HRTIMER_RESTART;
	}

	return HRTIMER_NORESTART;
}

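/*
 * Example with the hypothetical 100 MHz timer sketched above: a periodic
 * TCFG.VAL of 95000 ticks advances the expiry by 95000 * 953 / 95 =
 * 953000 ns per firing, approximating the true 950 us period. Advancing
 * the stored expiry (rather than restarting from "now") keeps the
 * periodic interrupt from drifting by callback latency.
 */
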
/**
 * kvm_restore_timer() - Restore timer state.
 * @vcpu: Virtual CPU.
 *
 * Restore soft timer state from saved context.
 */
void kvm_restore_timer(struct kvm_vcpu *vcpu)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;
	ktime_t saved_ktime, now;
	u64 stable_timer, new_timertick = 0;
	u64 delta = 0;
	int expired = 0;
	unsigned long timer_cfg;

	/* Set guest stable timer cfg csr */
	timer_cfg = kvm_read_sw_gcsr(csr, KVM_CSR_TCFG);
	kvm_restore_hw_gcsr(csr, KVM_CSR_ESTAT);
	if (!(timer_cfg & KVM_TCFG_EN)) {
		/* Timer disabled: restore the saved CSRs verbatim */
		kvm_restore_hw_gcsr(csr, KVM_CSR_TCFG);
		kvm_restore_hw_gcsr(csr, KVM_CSR_TVAL);
		return;
	}

	now = ktime_get();
	saved_ktime = vcpu->arch.stable_ktime_saved;
	stable_timer = kvm_read_sw_gcsr(csr, KVM_CSR_TVAL);

	/* Check whether the timer expired while the vCPU was scheduled out */
	delta = ktime_to_tick(vcpu, ktime_sub(now, saved_ktime));
	if (delta >= stable_timer)
		expired = 1;

	if (expired) {
		if (timer_cfg & KVM_TCFG_PERIOD) {
			/* Periodic timer: position within the current period */
			new_timertick = (delta - stable_timer) % (timer_cfg & KVM_TCFG_VAL);
		} else {
			/* One-shot timer: leave a minimal non-zero value */
			new_timertick = 1;
		}
	} else {
		new_timertick = stable_timer - delta;
	}

	new_timertick &= KVM_TCFG_VAL;
	kvm_write_gcsr_timercfg(timer_cfg);
	kvm_write_gcsr_timertick(new_timertick);
	if (expired)
		_kvm_queue_irq(vcpu, LARCH_INT_TIMER);
}

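/*
 * Restore example (hypothetical numbers, with the 100 MHz timer from the
 * earlier sketch): suppose the vCPU was descheduled for delta = 150000
 * ticks while stable_timer = 100000 ticks remained and TCFG.VAL = 95000
 * in periodic mode. The timer expired while the vCPU was out, so
 * new_timertick = (150000 - 100000) % 95000 = 50000, and one timer
 * interrupt is queued to cover the missed expiry.
 */
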
/**
 * kvm_acquire_timer() - Switch to hard timer state.
 * @vcpu: Virtual CPU.
 *
 * Restore hard timer state on top of existing soft timer state if possible.
 *
 * Since the hard timer won't remain active across preemption, preemption
 * should be disabled by the caller.
 */
void kvm_acquire_timer(struct kvm_vcpu *vcpu)
{
	unsigned long flags, guestcfg;

	guestcfg = kvm_read_csr_gcfg();
	if (!(guestcfg & KVM_GCFG_TIT))
		return;

	/* enable guest access to hard timer */
	kvm_write_csr_gcfg(guestcfg & ~KVM_GCFG_TIT);

	/*
	 * Freeze the soft-timer; the hard timer now provides the guest's
	 * view of the stable timer. We do this with interrupts disabled
	 * to avoid latency.
	 */
	local_irq_save(flags);
	hrtimer_cancel(&vcpu->arch.swtimer);
	local_irq_restore(flags);
}
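
/*
 * Note: clearing KVM_GCFG_TIT here grants the guest direct use of the
 * hardware stable timer; kvm_save_timer() below sets the bit again to
 * revoke that access once timer state has migrated back to the soft
 * timer.
 */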

/**
 * _kvm_save_timer() - Switch to software emulation of guest timer.
 * @vcpu: Virtual CPU.
 * @stable_timer: Output pointer for the final stable timer value.
 *
 * Save guest timer state and switch to software emulation of guest
 * timer. The hard timer must already be in use, so preemption should be
 * disabled.
 */
static ktime_t _kvm_save_timer(struct kvm_vcpu *vcpu, u64 *stable_timer)
{
	u64 end_stable_timer;
	ktime_t before_time;

	before_time = ktime_get();

	/*
	 * Record a final stable timer which we will transfer to the
	 * soft-timer.
	 */
	end_stable_timer = kvm_read_gcsr_timertick();
	*stable_timer = end_stable_timer;

	kvm_resume_hrtimer(vcpu, before_time, end_stable_timer);
	return before_time;
}

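/*
 * The hrtimer is re-armed here, at save time, so that an expiry while the
 * vCPU is scheduled out still fires on the host (the swtimer callback
 * itself lives outside this file); kvm_restore_timer() then reconciles
 * the guest's view of the timer on the next entry.
 */
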
/**
 * kvm_save_timer() - Save guest timer state.
 * @vcpu: Virtual CPU.
 *
 * Save guest timer state and switch to soft guest timer if hard timer was in
 * use.
 */
void kvm_save_timer(struct kvm_vcpu *vcpu)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;
	unsigned long guestcfg;
	u64 stable_timer = 0;
	ktime_t save_ktime;

	preempt_disable();
	guestcfg = kvm_read_csr_gcfg();
	if (!(guestcfg & KVM_GCFG_TIT)) {
		/* disable guest use of hard timer */
		kvm_write_csr_gcfg(guestcfg | KVM_GCFG_TIT);

		/* save hard timer state */
		kvm_save_hw_gcsr(csr, KVM_CSR_TCFG);
		if (kvm_read_sw_gcsr(csr, KVM_CSR_TCFG) & KVM_TCFG_EN) {
			save_ktime = _kvm_save_timer(vcpu, &stable_timer);
			kvm_write_sw_gcsr(csr, KVM_CSR_TVAL, stable_timer);
			vcpu->arch.stable_ktime_saved = save_ktime;
			/* Treat an all-ones TVAL as an already-expired counter */
			if (stable_timer == IOCSR_TIMER_MASK)
				_kvm_queue_irq(vcpu, LARCH_INT_TIMER);
		} else {
			kvm_save_hw_gcsr(csr, KVM_CSR_TVAL);
		}
	}

	/* save timer-related state to VCPU context */
	kvm_save_hw_gcsr(csr, KVM_CSR_ESTAT);
	preempt_enable();
}

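/*
 * Typical lifecycle, as a sketch (the vcpu_load/vcpu_put hooks that call
 * these helpers live outside this file, so the ordering below is an
 * assumption from context):
 *
 *	kvm_init_timer(vcpu, timer_hz);	// once, at vCPU setup
 *	...
 *	kvm_acquire_timer(vcpu);	// entry: guest drives hard timer
 *	kvm_restore_timer(vcpu);	// entry: reload guest timer state
 *	...				// run guest
 *	kvm_save_timer(vcpu);		// exit: back to soft timer
 */
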
void kvm_reset_timer(struct kvm_vcpu *vcpu)
{
	kvm_write_gcsr_timercfg(0);
	kvm_write_sw_gcsr(vcpu->arch.csr, KVM_CSR_TCFG, 0);
	hrtimer_cancel(&vcpu->arch.swtimer);
}