// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/uaccess.h>

#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>

#include "trace.h"

static struct timecounter *timecounter;
static unsigned int host_vtimer_irq;
static unsigned int host_ptimer_irq;
static u32 host_vtimer_irq_flags;
static u32 host_ptimer_irq_flags;

static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);

static const struct kvm_irq_level default_ptimer_irq = {
	.irq	= 30,
	.level	= 1,
};

static const struct kvm_irq_level default_vtimer_irq = {
	.irq	= 27,
	.level	= 1,
};

static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx);
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx);
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);
static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
				struct arch_timer_context *timer,
				enum kvm_arch_timer_regs treg,
				u64 val);
static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg);

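/*
 * Accessors for the per-context emulated timer state. The CTL/CVAL
 * values and the virtual counter offset are backed by the vcpu's
 * sysreg file; these helpers pick the right register depending on
 * whether the context is the virtual or the physical EL1 timer.
 */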
u32 timer_get_ctl(struct arch_timer_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		return __vcpu_sys_reg(vcpu, CNTV_CTL_EL0);
	case TIMER_PTIMER:
		return __vcpu_sys_reg(vcpu, CNTP_CTL_EL0);
	default:
		WARN_ON(1);
		return 0;
	}
}

u64 timer_get_cval(struct arch_timer_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		return __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0);
	case TIMER_PTIMER:
		return __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
	default:
		WARN_ON(1);
		return 0;
	}
}

static u64 timer_get_offset(struct arch_timer_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		if (likely(!kvm_vm_is_protected(vcpu->kvm)))
			return __vcpu_sys_reg(vcpu, CNTVOFF_EL2);
		fallthrough;
	default:
		return 0;
	}
}

static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		__vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl;
		break;
	case TIMER_PTIMER:
		__vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl;
		break;
	default:
		WARN_ON(1);
	}
}

static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		__vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = cval;
		break;
	case TIMER_PTIMER:
		__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval;
		break;
	default:
		WARN_ON(1);
	}
}

static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		__vcpu_sys_reg(vcpu, CNTVOFF_EL2) = offset;
		break;
	default:
		WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
	}
}

u64 kvm_phys_timer_read(void)
{
	return timecounter->cc->read(timecounter->cc);
}

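/*
 * Decide which timers are backed by hardware and which are emulated.
 * With VHE both EL1 timers can be handed directly to the guest;
 * without VHE only the virtual timer is direct and the physical timer
 * is emulated with an hrtimer.
 */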
static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
{
	if (has_vhe()) {
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = vcpu_ptimer(vcpu);
		map->emul_ptimer = NULL;
	} else {
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = NULL;
		map->emul_ptimer = vcpu_ptimer(vcpu);
	}

	trace_kvm_get_timer_map(vcpu->vcpu_id, map);
}

static inline bool userspace_irqchip(struct kvm *kvm)
{
	return static_branch_unlikely(&userspace_irqchip_in_use) &&
		unlikely(!irqchip_in_kernel(kvm));
}

static void soft_timer_start(struct hrtimer *hrt, u64 ns)
{
	hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
		      HRTIMER_MODE_ABS_HARD);
}

static void soft_timer_cancel(struct hrtimer *hrt)
{
	hrtimer_cancel(hrt);
}

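/*
 * Host handler for the per-cpu EL1 timer interrupts. It runs when a
 * hardware timer owned by the currently loaded vcpu fires; the event
 * is reflected into the guest as a virtual interrupt, or, with a
 * userspace irqchip and no GIC active state, the interrupt is masked
 * on the host until the guest output level drops again.
 */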
static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
	struct arch_timer_context *ctx;
	struct timer_map map;

	/*
	 * We may see a timer interrupt after vcpu_put() has been called which
	 * sets the CPU's vcpu pointer to NULL, because even though the timer
	 * has been disabled in timer_save_state(), the hardware interrupt
	 * signal may not have been retired from the interrupt controller yet.
	 */
	if (!vcpu)
		return IRQ_HANDLED;

	get_timer_map(vcpu, &map);

	if (irq == host_vtimer_irq)
		ctx = map.direct_vtimer;
	else
		ctx = map.direct_ptimer;

	if (kvm_timer_should_fire(ctx))
		kvm_timer_update_irq(vcpu, true, ctx);

	if (userspace_irqchip(vcpu->kvm) &&
	    !static_branch_unlikely(&has_gic_active_state))
		disable_percpu_irq(host_vtimer_irq);

	return IRQ_HANDLED;
}

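/*
 * Return the number of nanoseconds until the guest-visible counter
 * (physical counter minus the per-context offset) reaches @val, or 0
 * if that point has already been passed.
 */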
static u64 kvm_counter_compute_delta(struct arch_timer_context *timer_ctx,
				     u64 val)
{
	u64 now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);

	if (now < val) {
		u64 ns;

		ns = cyclecounter_cyc2ns(timecounter->cc,
					 val - now,
					 timecounter->mask,
					 &timecounter->frac);
		return ns;
	}

	return 0;
}

static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
{
	return kvm_counter_compute_delta(timer_ctx, timer_get_cval(timer_ctx));
}

static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
{
	WARN_ON(timer_ctx && timer_ctx->loaded);
	return timer_ctx &&
		((timer_get_ctl(timer_ctx) &
		  (ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE);
}

static bool vcpu_has_wfit_active(struct kvm_vcpu *vcpu)
{
	return (cpus_have_final_cap(ARM64_HAS_WFXT) &&
		vcpu_get_flag(vcpu, IN_WFIT));
}

static u64 wfit_delay_ns(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *ctx = vcpu_vtimer(vcpu);
	u64 val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));

	return kvm_counter_compute_delta(ctx, val);
}

/*
 * Returns the earliest expiration time in ns among the guest timers.
 * Note that it will return 0 if none of the timers can fire.
 */
static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
{
	u64 min_delta = ULLONG_MAX;
	int i;

	for (i = 0; i < NR_KVM_TIMERS; i++) {
		struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i];

		WARN(ctx->loaded, "timer %d loaded\n", i);
		if (kvm_timer_irq_can_fire(ctx))
			min_delta = min(min_delta, kvm_timer_compute_delta(ctx));
	}

	if (vcpu_has_wfit_active(vcpu))
		min_delta = min(min_delta, wfit_delay_ns(vcpu));

	/* If none of the timers can fire, then return 0 */
	if (min_delta == ULLONG_MAX)
		return 0;

	return min_delta;
}

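/*
 * Expiry handler for the background hrtimer armed while the vcpu is
 * blocked in WFI/WFE: wake the vcpu up so it can observe the pending
 * guest timer.
 */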
static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
{
	struct arch_timer_cpu *timer;
	struct kvm_vcpu *vcpu;
	u64 ns;

	timer = container_of(hrt, struct arch_timer_cpu, bg_timer);
	vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If we should have slept longer, restart it.
	 */
	ns = kvm_timer_earliest_exp(vcpu);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_vcpu_wake_up(vcpu);
	return HRTIMER_NORESTART;
}

static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt)
{
	struct arch_timer_context *ctx;
	struct kvm_vcpu *vcpu;
	u64 ns;

	ctx = container_of(hrt, struct arch_timer_context, hrtimer);
	vcpu = ctx->vcpu;

	trace_kvm_timer_hrtimer_expire(ctx);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If not ready, schedule for a later time.
	 */
	ns = kvm_timer_compute_delta(ctx);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_timer_update_irq(vcpu, true, ctx);
	return HRTIMER_NORESTART;
}

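/*
 * Compute the current output level of a timer. For a loaded,
 * hardware-backed context the condition is read straight from the CPU
 * control register; otherwise it is derived from the saved CTL/CVAL
 * state and the current guest counter value.
 */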
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
{
	enum kvm_arch_timers index;
	u64 cval, now;

	if (!timer_ctx)
		return false;

	index = arch_timer_ctx_index(timer_ctx);

	if (timer_ctx->loaded) {
		u32 cnt_ctl = 0;

		switch (index) {
		case TIMER_VTIMER:
			cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL);
			break;
		case TIMER_PTIMER:
			cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL);
			break;
		case NR_KVM_TIMERS:
			/* GCC is braindead */
			cnt_ctl = 0;
			break;
		}

		return (cnt_ctl & ARCH_TIMER_CTRL_ENABLE) &&
		       (cnt_ctl & ARCH_TIMER_CTRL_IT_STAT) &&
		       !(cnt_ctl & ARCH_TIMER_CTRL_IT_MASK);
	}

	if (!kvm_timer_irq_can_fire(timer_ctx))
		return false;

	cval = timer_get_cval(timer_ctx);
	now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);

	return cval <= now;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return vcpu_has_wfit_active(vcpu) && wfit_delay_ns(vcpu) == 0;
}

/*
 * Reflect the timer output level into the kvm_run structure
 */
void kvm_timer_update_run(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the device bitmap with the timer states */
	regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
				    KVM_ARM_DEV_EL1_PTIMER);
	if (kvm_timer_should_fire(vtimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
	if (kvm_timer_should_fire(ptimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
}

static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx)
{
	int ret;

	timer_ctx->irq.level = new_level;
	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
				   timer_ctx->irq.level);

	if (!userspace_irqchip(vcpu->kvm)) {
		ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					  timer_ctx->irq.irq,
					  timer_ctx->irq.level,
					  timer_ctx);
		WARN_ON(ret);
	}
}

/* Only called for a fully emulated timer */
static void timer_emulate(struct arch_timer_context *ctx)
{
	bool should_fire = kvm_timer_should_fire(ctx);

	trace_kvm_timer_emulate(ctx, should_fire);

	if (should_fire != ctx->irq.level) {
		kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
		return;
	}

	/*
	 * If the timer can fire now, we don't need to have a soft timer
	 * scheduled for the future. If the timer cannot fire at all,
	 * then we also don't need a soft timer.
	 */
	if (!kvm_timer_irq_can_fire(ctx)) {
		soft_timer_cancel(&ctx->hrtimer);
		return;
	}

	soft_timer_start(&ctx->hrtimer, kvm_timer_compute_delta(ctx));
}

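/*
 * Save the state of a directly mapped timer into the vcpu's sysreg
 * file and disable the hardware timer, so it can no longer fire on the
 * host while this vcpu is not loaded.
 */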
static void timer_save_state(struct arch_timer_context *ctx)
{
	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
	enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
	unsigned long flags;

	if (!timer->enabled)
		return;

	local_irq_save(flags);

	if (!ctx->loaded)
		goto out;

	switch (index) {
	case TIMER_VTIMER:
		timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTV_CTL));
		timer_set_cval(ctx, read_sysreg_el0(SYS_CNTV_CVAL));

		/* Disable the timer */
		write_sysreg_el0(0, SYS_CNTV_CTL);
		isb();

		break;
	case TIMER_PTIMER:
		timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL));
		timer_set_cval(ctx, read_sysreg_el0(SYS_CNTP_CVAL));

		/* Disable the timer */
		write_sysreg_el0(0, SYS_CNTP_CTL);
		isb();

		break;
	case NR_KVM_TIMERS:
		BUG();
	}

	trace_kvm_timer_save_state(ctx);

	ctx->loaded = false;
out:
	local_irq_restore(flags);
}

/*
 * Schedule the background timer before calling kvm_vcpu_block, so that this
 * thread is removed from its waitqueue and made runnable when there's a timer
 * interrupt to handle.
 */
static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	get_timer_map(vcpu, &map);

	/*
	 * If no timers are capable of raising interrupts (disabled or
	 * masked), then there's no more work for us to do.
	 */
	if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
	    !kvm_timer_irq_can_fire(map.direct_ptimer) &&
	    !kvm_timer_irq_can_fire(map.emul_ptimer) &&
	    !vcpu_has_wfit_active(vcpu))
		return;

	/*
	 * At least one guest timer will expire. Schedule a background timer.
	 * Set the earliest expiration time among the guest timers.
	 */
	soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
}

static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	soft_timer_cancel(&timer->bg_timer);
}

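/*
 * Load a directly mapped timer from the vcpu's sysreg file back into
 * the hardware registers. CVAL is written and synchronized before CTL
 * so the timer is never enabled against a stale compare value.
 */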
static void timer_restore_state(struct arch_timer_context *ctx)
{
	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
	enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
	unsigned long flags;

	if (!timer->enabled)
		return;

	local_irq_save(flags);

	if (ctx->loaded)
		goto out;

	switch (index) {
	case TIMER_VTIMER:
		write_sysreg_el0(timer_get_cval(ctx), SYS_CNTV_CVAL);
		isb();
		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL);
		break;
	case TIMER_PTIMER:
		write_sysreg_el0(timer_get_cval(ctx), SYS_CNTP_CVAL);
		isb();
		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL);
		break;
	case NR_KVM_TIMERS:
		BUG();
	}

	trace_kvm_timer_restore_state(ctx);

	ctx->loaded = true;
out:
	local_irq_restore(flags);
}

static void set_cntvoff(u64 cntvoff)
{
	kvm_call_hyp(__kvm_timer_set_cntvoff, cntvoff);
}

static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, bool active)
{
	int r;
	r = irq_set_irqchip_state(ctx->host_timer_irq, IRQCHIP_STATE_ACTIVE, active);
	WARN_ON(r);
}

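/*
 * With a GIC that provides an active state, mark the physical timer
 * interrupt active whenever the virtual line is high (or the mapped
 * interrupt is still active in the vgic), so the host does not keep
 * taking the level-triggered interrupt while it is pending in the
 * guest.
 */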
static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
{
	struct kvm_vcpu *vcpu = ctx->vcpu;
	bool phys_active = false;

	/*
	 * Update the timer output so that it is likely to match the
	 * state we're about to restore. If the timer expires between
	 * this point and the register restoration, we'll take the
	 * interrupt anyway.
	 */
	kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx);

	if (irqchip_in_kernel(vcpu->kvm))
		phys_active = kvm_vgic_map_is_active(vcpu, ctx->irq.irq);

	phys_active |= ctx->irq.level;

	set_timer_irq_phys_active(ctx, phys_active);
}

static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	/*
	 * Update the timer output so that it is likely to match the
	 * state we're about to restore. If the timer expires between
	 * this point and the register restoration, we'll take the
	 * interrupt anyway.
	 */
	kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer);

	/*
	 * When using a userspace irqchip with the architected timers and a
	 * host interrupt controller that doesn't support an active state, we
	 * must still prevent continuously exiting from the guest, and
	 * therefore mask the physical interrupt by disabling it on the host
	 * interrupt controller when the virtual level is high, such that the
	 * guest can make forward progress. Once we detect the output level
	 * being de-asserted, we unmask the interrupt again so that we exit
	 * from the guest when the timer fires.
	 */
	if (vtimer->irq.level)
		disable_percpu_irq(host_vtimer_irq);
	else
		enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}

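/*
 * vcpu load: synchronize the interrupt state with the irqchip, program
 * CNTVOFF_EL2, cancel the background timer, restore the directly
 * mapped timers into hardware, and kick the emulated physical timer
 * (non-VHE only) so its hrtimer and output level are up to date.
 */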
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	if (unlikely(!timer->enabled))
		return;

	get_timer_map(vcpu, &map);

	if (static_branch_likely(&has_gic_active_state)) {
		kvm_timer_vcpu_load_gic(map.direct_vtimer);
		if (map.direct_ptimer)
			kvm_timer_vcpu_load_gic(map.direct_ptimer);
	} else {
		kvm_timer_vcpu_load_nogic(vcpu);
	}

	set_cntvoff(timer_get_offset(map.direct_vtimer));

	kvm_timer_unblocking(vcpu);

	timer_restore_state(map.direct_vtimer);
	if (map.direct_ptimer)
		timer_restore_state(map.direct_ptimer);

	if (map.emul_ptimer)
		timer_emulate(map.emul_ptimer);
}

bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool vlevel, plevel;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
	plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;

	return kvm_timer_should_fire(vtimer) != vlevel ||
	       kvm_timer_should_fire(ptimer) != plevel;
}

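/*
 * vcpu put: save and disable the directly mapped timers, cancel the
 * emulated timer, arm the background timer if the vcpu is about to
 * block, and reset CNTVOFF_EL2 for the host.
 */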
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	if (unlikely(!timer->enabled))
		return;

	get_timer_map(vcpu, &map);

	timer_save_state(map.direct_vtimer);
	if (map.direct_ptimer)
		timer_save_state(map.direct_ptimer);

	/*
	 * Cancel soft timer emulation, because the only case where we
	 * need it after a vcpu_put is in the context of a sleeping VCPU, and
	 * in that case we already factor in the deadline for the physical
	 * timer when scheduling the bg_timer.
	 *
	 * In any case, we re-schedule the hrtimer for the physical timer when
	 * coming back to the VCPU thread in kvm_timer_vcpu_load().
	 */
	if (map.emul_ptimer)
		soft_timer_cancel(&map.emul_ptimer->hrtimer);

	if (rcuwait_active(wait))
		kvm_timer_blocking(vcpu);

	/*
	 * The kernel may decide to run userspace after calling vcpu_put, so
	 * we reset cntvoff to 0 to ensure a consistent read between user
	 * accesses to the virtual counter and kernel access to the physical
	 * counter in the non-VHE case. For VHE, the virtual counter uses a
	 * fixed virtual offset of zero, so there is no need to zero
	 * CNTVOFF_EL2.
	 */
	set_cntvoff(0);
}

/*
 * With a userspace irqchip we have to check if the guest de-asserted the
 * timer and if so, unmask the timer irq signal on the host interrupt
 * controller to ensure that we see future timer signals.
 */
static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	if (!kvm_timer_should_fire(vtimer)) {
		kvm_timer_update_irq(vcpu, false, vtimer);
		if (static_branch_likely(&has_gic_active_state))
			set_timer_irq_phys_active(vtimer, false);
		else
			enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
	}
}

void kvm_timer_sync_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	if (unlikely(!timer->enabled))
		return;

	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
		unmask_vtimer_irq_user(vcpu);
}

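/*
 * vcpu reset: both EL1 timers are reset to disabled and unmasked, the
 * virtual interrupt lines are lowered, and any mapped interrupt state
 * is cleared in the vgic.
 */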
int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	get_timer_map(vcpu, &map);

	/*
	 * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
	 * and to 0 for ARMv7. We provide an implementation that always
	 * resets the timer to be disabled and unmasked and is compliant with
	 * the ARMv7 architecture.
	 */
	timer_set_ctl(vcpu_vtimer(vcpu), 0);
	timer_set_ctl(vcpu_ptimer(vcpu), 0);

	if (timer->enabled) {
		kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu));
		kvm_timer_update_irq(vcpu, false, vcpu_ptimer(vcpu));

		if (irqchip_in_kernel(vcpu->kvm)) {
			kvm_vgic_reset_mapped_irq(vcpu, map.direct_vtimer->irq.irq);
			if (map.direct_ptimer)
				kvm_vgic_reset_mapped_irq(vcpu, map.direct_ptimer->irq.irq);
		}
	}

	if (map.emul_ptimer)
		soft_timer_cancel(&map.emul_ptimer->hrtimer);

	return 0;
}

/* Make the updates of cntvoff for all vtimer contexts atomic */
static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
{
	unsigned long i;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tmp;

	if (unlikely(kvm_vm_is_protected(vcpu->kvm)))
		cntvoff = 0;

	mutex_lock(&kvm->lock);
	kvm_for_each_vcpu(i, tmp, kvm)
		timer_set_offset(vcpu_vtimer(tmp), cntvoff);

	/*
	 * When called from the vcpu create path, the CPU being created is not
	 * included in the loop above, so we just set it here as well.
	 */
	timer_set_offset(vcpu_vtimer(vcpu), cntvoff);
	mutex_unlock(&kvm->lock);
}

void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

	vtimer->vcpu = vcpu;
	ptimer->vcpu = vcpu;

	/* Synchronize cntvoff across all vtimers of a VM. */
	update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
	timer_set_offset(ptimer, 0);

	hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	timer->bg_timer.function = kvm_bg_timer_expire;

	hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	vtimer->hrtimer.function = kvm_hrtimer_expire;
	ptimer->hrtimer.function = kvm_hrtimer_expire;

	vtimer->irq.irq = default_vtimer_irq.irq;
	ptimer->irq.irq = default_ptimer_irq.irq;

	vtimer->host_timer_irq = host_vtimer_irq;
	ptimer->host_timer_irq = host_ptimer_irq;

	vtimer->host_timer_irq_flags = host_vtimer_irq_flags;
	ptimer->host_timer_irq_flags = host_ptimer_irq_flags;
}

static void kvm_timer_init_interrupt(void *info)
{
	enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
	enable_percpu_irq(host_ptimer_irq, host_ptimer_irq_flags);
}

int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
	struct arch_timer_context *timer;

	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_TIMER_CNT:
		timer = vcpu_vtimer(vcpu);
		update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
		break;
	case KVM_REG_ARM_TIMER_CVAL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;
	case KVM_REG_ARM_PTIMER_CTL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_PTIMER_CVAL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;

	default:
		return -1;
	}

	return 0;
}

static u64 read_timer_ctl(struct arch_timer_context *timer)
{
	/*
	 * Set ISTATUS bit if it's expired.
	 * Note that according to ARMv8 ARM Issue A.k, ISTATUS bit is
	 * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit
	 * regardless of ENABLE bit for our implementation convenience.
	 */
	u32 ctl = timer_get_ctl(timer);

	if (!kvm_timer_compute_delta(timer))
		ctl |= ARCH_TIMER_CTRL_IT_STAT;

	return ctl;
}

u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CTL);
	case KVM_REG_ARM_TIMER_CNT:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CNT);
	case KVM_REG_ARM_TIMER_CVAL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CVAL);
	case KVM_REG_ARM_PTIMER_CTL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CTL);
	case KVM_REG_ARM_PTIMER_CNT:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CNT);
	case KVM_REG_ARM_PTIMER_CVAL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CVAL);
	}
	return (u64)-1;
}

static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg)
{
	u64 val;

	switch (treg) {
	case TIMER_REG_TVAL:
		val = timer_get_cval(timer) - kvm_phys_timer_read() + timer_get_offset(timer);
		val = lower_32_bits(val);
		break;

	case TIMER_REG_CTL:
		val = read_timer_ctl(timer);
		break;

	case TIMER_REG_CVAL:
		val = timer_get_cval(timer);
		break;

	case TIMER_REG_CNT:
		val = kvm_phys_timer_read() - timer_get_offset(timer);
		break;

	default:
		BUG();
	}

	return val;
}

u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
			      enum kvm_arch_timers tmr,
			      enum kvm_arch_timer_regs treg)
{
	u64 val;

	preempt_disable();
	kvm_timer_vcpu_put(vcpu);

	val = kvm_arm_timer_read(vcpu, vcpu_get_timer(vcpu, tmr), treg);

	kvm_timer_vcpu_load(vcpu);
	preempt_enable();

	return val;
}

static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
				struct arch_timer_context *timer,
				enum kvm_arch_timer_regs treg,
				u64 val)
{
	switch (treg) {
	case TIMER_REG_TVAL:
		timer_set_cval(timer, kvm_phys_timer_read() - timer_get_offset(timer) + (s32)val);
		break;

	case TIMER_REG_CTL:
		timer_set_ctl(timer, val & ~ARCH_TIMER_CTRL_IT_STAT);
		break;

	case TIMER_REG_CVAL:
		timer_set_cval(timer, val);
		break;

	default:
		BUG();
	}
}

void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
				enum kvm_arch_timers tmr,
				enum kvm_arch_timer_regs treg,
				u64 val)
{
	preempt_disable();
	kvm_timer_vcpu_put(vcpu);

	kvm_arm_timer_write(vcpu, vcpu_get_timer(vcpu, tmr), treg, val);

	kvm_timer_vcpu_load(vcpu);
	preempt_enable();
}

static int kvm_timer_starting_cpu(unsigned int cpu)
{
	kvm_timer_init_interrupt(NULL);
	return 0;
}

static int kvm_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(host_vtimer_irq);
	return 0;
}

static int timer_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);

	return 0;
}

static int timer_irq_set_irqchip_state(struct irq_data *d,
				       enum irqchip_irq_state which, bool val)
{
	if (which != IRQCHIP_STATE_ACTIVE || !irqd_is_forwarded_to_vcpu(d))
		return irq_chip_set_parent_state(d, which, val);

	if (val)
		irq_chip_mask_parent(d);
	else
		irq_chip_unmask_parent(d);

	return 0;
}

static void timer_irq_eoi(struct irq_data *d)
{
	if (!irqd_is_forwarded_to_vcpu(d))
		irq_chip_eoi_parent(d);
}

static void timer_irq_ack(struct irq_data *d)
{
	d = d->parent_data;
	if (d->chip->irq_ack)
		d->chip->irq_ack(d);
}

static struct irq_chip timer_chip = {
	.name			= "KVM",
	.irq_ack		= timer_irq_ack,
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= timer_irq_eoi,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_set_vcpu_affinity	= timer_irq_set_vcpu_affinity,
	.irq_set_irqchip_state	= timer_irq_set_irqchip_state,
};

static int timer_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs, void *arg)
{
	irq_hw_number_t hwirq = (uintptr_t)arg;

	return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
					     &timer_chip, NULL);
}

static void timer_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs)
{
}

static const struct irq_domain_ops timer_domain_ops = {
	.alloc	= timer_irq_domain_alloc,
	.free	= timer_irq_domain_free,
};

static struct irq_ops arch_timer_irq_ops = {
	.get_input_level = kvm_arch_timer_get_input_level,
};

static void kvm_irq_fixup_flags(unsigned int virq, u32 *flags)
{
	*flags = irq_get_trigger_type(virq);
	if (*flags != IRQF_TRIGGER_HIGH && *flags != IRQF_TRIGGER_LOW) {
		kvm_err("Invalid trigger for timer IRQ%d, assuming level low\n",
			virq);
		*flags = IRQF_TRIGGER_LOW;
	}
}

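/*
 * Discover the host timer interrupts and their trigger flags. If the
 * GIC cannot deactivate the timer interrupts on the guest's behalf,
 * interpose a small "kvm-timer" irq domain so that setting the active
 * state is translated into masking/unmasking the parent interrupt, and
 * let the vgic resample the line in software.
 */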
static int kvm_irq_init(struct arch_timer_kvm_info *info)
{
	struct irq_domain *domain = NULL;

	if (info->virtual_irq <= 0) {
		kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
			info->virtual_irq);
		return -ENODEV;
	}

	host_vtimer_irq = info->virtual_irq;
	kvm_irq_fixup_flags(host_vtimer_irq, &host_vtimer_irq_flags);

	if (kvm_vgic_global_state.no_hw_deactivation) {
		struct fwnode_handle *fwnode;
		struct irq_data *data;

		fwnode = irq_domain_alloc_named_fwnode("kvm-timer");
		if (!fwnode)
			return -ENOMEM;

		/* Assume both vtimer and ptimer in the same parent */
		data = irq_get_irq_data(host_vtimer_irq);
		domain = irq_domain_create_hierarchy(data->domain, 0,
						     NR_KVM_TIMERS, fwnode,
						     &timer_domain_ops, NULL);
		if (!domain) {
			irq_domain_free_fwnode(fwnode);
			return -ENOMEM;
		}

		arch_timer_irq_ops.flags |= VGIC_IRQ_SW_RESAMPLE;
		WARN_ON(irq_domain_push_irq(domain, host_vtimer_irq,
					    (void *)TIMER_VTIMER));
	}

	if (info->physical_irq > 0) {
		host_ptimer_irq = info->physical_irq;
		kvm_irq_fixup_flags(host_ptimer_irq, &host_ptimer_irq_flags);

		if (domain)
			WARN_ON(irq_domain_push_irq(domain, host_ptimer_irq,
						    (void *)TIMER_PTIMER));
	}

	return 0;
}

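/*
 * System-wide timer setup, called once at KVM init time: grab the
 * timecounter from the arch timer driver, request the per-cpu vtimer
 * (and ptimer, when present) interrupts, route them to the running
 * vcpu pointer, and register the CPU hotplug callbacks that enable and
 * disable the per-cpu interrupts.
 */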
int kvm_timer_hyp_init(bool has_gic)
{
	struct arch_timer_kvm_info *info;
	int err;

	info = arch_timer_get_kvm_info();
	timecounter = &info->timecounter;

	if (!timecounter->cc) {
		kvm_err("kvm_arch_timer: uninitialized timecounter\n");
		return -ENODEV;
	}

	err = kvm_irq_init(info);
	if (err)
		return err;

	/* First, do the virtual EL1 timer irq */

	err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
				 "kvm guest vtimer", kvm_get_running_vcpus());
	if (err) {
		kvm_err("kvm_arch_timer: can't request vtimer interrupt %d (%d)\n",
			host_vtimer_irq, err);
		return err;
	}

	if (has_gic) {
		err = irq_set_vcpu_affinity(host_vtimer_irq,
					    kvm_get_running_vcpus());
		if (err) {
			kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
			goto out_free_irq;
		}

		static_branch_enable(&has_gic_active_state);
	}

	kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);

	/* Now let's do the physical EL1 timer irq */

	if (info->physical_irq > 0) {
		err = request_percpu_irq(host_ptimer_irq, kvm_arch_timer_handler,
					 "kvm guest ptimer", kvm_get_running_vcpus());
		if (err) {
			kvm_err("kvm_arch_timer: can't request ptimer interrupt %d (%d)\n",
				host_ptimer_irq, err);
			return err;
		}

		if (has_gic) {
			err = irq_set_vcpu_affinity(host_ptimer_irq,
						    kvm_get_running_vcpus());
			if (err) {
				kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
				goto out_free_irq;
			}
		}

		kvm_debug("physical timer IRQ%d\n", host_ptimer_irq);
	} else if (has_vhe()) {
		kvm_err("kvm_arch_timer: invalid physical timer IRQ: %d\n",
			info->physical_irq);
		err = -ENODEV;
		goto out_free_irq;
	}

	cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
			  "kvm/arm/timer:starting", kvm_timer_starting_cpu,
			  kvm_timer_dying_cpu);
	return 0;
out_free_irq:
	free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
	return err;
}

void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	soft_timer_cancel(&timer->bg_timer);
}

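/*
 * Check that the configured timer PPIs are not claimed by any other
 * in-kernel device and that every vcpu agrees on the same vtimer and
 * ptimer interrupt numbers.
 */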
static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
{
	int vtimer_irq, ptimer_irq, ret;
	unsigned long i;

	vtimer_irq = vcpu_vtimer(vcpu)->irq.irq;
	ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu));
	if (ret)
		return false;

	ptimer_irq = vcpu_ptimer(vcpu)->irq.irq;
	ret = kvm_vgic_set_owner(vcpu, ptimer_irq, vcpu_ptimer(vcpu));
	if (ret)
		return false;

	kvm_for_each_vcpu(i, vcpu, vcpu->kvm) {
		if (vcpu_vtimer(vcpu)->irq.irq != vtimer_irq ||
		    vcpu_ptimer(vcpu)->irq.irq != ptimer_irq)
			return false;
	}

	return true;
}

bool kvm_arch_timer_get_input_level(int vintid)
{
	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
	struct arch_timer_context *timer;

	if (WARN(!vcpu, "No vcpu context!\n"))
		return false;

	if (vintid == vcpu_vtimer(vcpu)->irq.irq)
		timer = vcpu_vtimer(vcpu);
	else if (vintid == vcpu_ptimer(vcpu)->irq.irq)
		timer = vcpu_ptimer(vcpu);
	else
		BUG();

	return kvm_timer_should_fire(timer);
}

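/*
 * Final per-vcpu enablement, called once the vgic (if any) has been
 * initialized: validate the configured PPIs and map the directly used
 * timer interrupts onto their hardware counterparts.
 */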
int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;
	int ret;

	if (timer->enabled)
		return 0;

	/* Without a VGIC we do not map virtual IRQs to physical IRQs */
	if (!irqchip_in_kernel(vcpu->kvm))
		goto no_vgic;

	/*
	 * At this stage, we have the guarantee that the vgic is both
	 * available and initialized.
	 */
	if (!timer_irqs_are_valid(vcpu)) {
		kvm_debug("incorrectly configured timer irqs\n");
		return -EINVAL;
	}

	get_timer_map(vcpu, &map);

	ret = kvm_vgic_map_phys_irq(vcpu,
				    map.direct_vtimer->host_timer_irq,
				    map.direct_vtimer->irq.irq,
				    &arch_timer_irq_ops);
	if (ret)
		return ret;

	if (map.direct_ptimer) {
		ret = kvm_vgic_map_phys_irq(vcpu,
					    map.direct_ptimer->host_timer_irq,
					    map.direct_ptimer->irq.irq,
					    &arch_timer_irq_ops);
	}

	if (ret)
		return ret;

no_vgic:
	timer->enabled = 1;
	return 0;
}

/*
 * On a VHE system, we only need to configure the EL2 timer trap register
 * once, not for every world switch.
 * The host kernel runs at EL2 with HCR_EL2.TGE == 1, so these bits have
 * no effect on host kernel execution.
 */
void kvm_timer_init_vhe(void)
{
	/* When HCR_EL2.E2H == 1, EL1PCEN and EL1PCTEN are shifted by 10 */
	u32 cnthctl_shift = 10;
	u64 val;

	/*
	 * VHE systems allow the guest direct access to the EL1 physical
	 * timer/counter.
	 */
	val = read_sysreg(cnthctl_el2);
	val |= (CNTHCTL_EL1PCEN << cnthctl_shift);
	val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
	write_sysreg(val, cnthctl_el2);
}

static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu_vtimer(vcpu)->irq.irq = vtimer_irq;
		vcpu_ptimer(vcpu)->irq.irq = ptimer_irq;
	}
}

int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	int __user *uaddr = (int __user *)(long)attr->addr;
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	int irq;

	if (!irqchip_in_kernel(vcpu->kvm))
		return -EINVAL;

	if (get_user(irq, uaddr))
		return -EFAULT;

	if (!(irq_is_ppi(irq)))
		return -EINVAL;

	if (vcpu->arch.timer_cpu.enabled)
		return -EBUSY;

	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
		set_timer_irqs(vcpu->kvm, irq, ptimer->irq.irq);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		set_timer_irqs(vcpu->kvm, vtimer->irq.irq, irq);
		break;
	default:
		return -ENXIO;
	}

	return 0;
}

int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	int __user *uaddr = (int __user *)(long)attr->addr;
	struct arch_timer_context *timer;
	int irq;

	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
		timer = vcpu_vtimer(vcpu);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		timer = vcpu_ptimer(vcpu);
		break;
	default:
		return -ENXIO;
	}

	irq = timer->irq.irq;
	return put_user(irq, uaddr);
}

int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		return 0;
	}

	return -ENXIO;
}