// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/uaccess.h>

#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>

#include "trace.h"

static struct timecounter *timecounter;
static unsigned int host_vtimer_irq;
static unsigned int host_ptimer_irq;
static u32 host_vtimer_irq_flags;
static u32 host_ptimer_irq_flags;

static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);

static const struct kvm_irq_level default_ptimer_irq = {
	.irq	= 30,
	.level	= 1,
};

static const struct kvm_irq_level default_vtimer_irq = {
	.irq	= 27,
	.level	= 1,
};

static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx);
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx);
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);
static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
				struct arch_timer_context *timer,
				enum kvm_arch_timer_regs treg,
				u64 val);
static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg);
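
/*
 * Accessors for the per-context timer registers. The guest's view of
 * CNT{V,P}_CTL_EL0, CNT{V,P}_CVAL_EL0 and CNTVOFF_EL2 lives in the vcpu
 * sysreg file; these helpers pick the right entry depending on whether the
 * context describes the virtual or the physical timer.
 */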
u32 timer_get_ctl(struct arch_timer_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		return __vcpu_sys_reg(vcpu, CNTV_CTL_EL0);
	case TIMER_PTIMER:
		return __vcpu_sys_reg(vcpu, CNTP_CTL_EL0);
	default:
		WARN_ON(1);
		return 0;
	}
}

u64 timer_get_cval(struct arch_timer_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		return __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0);
	case TIMER_PTIMER:
		return __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
	default:
		WARN_ON(1);
		return 0;
	}
}

static u64 timer_get_offset(struct arch_timer_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		if (likely(!kvm_vm_is_protected(vcpu->kvm)))
			return __vcpu_sys_reg(vcpu, CNTVOFF_EL2);
		fallthrough;
	default:
		return 0;
	}
}

static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		__vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl;
		break;
	case TIMER_PTIMER:
		__vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl;
		break;
	default:
		WARN_ON(1);
	}
}

static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		__vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = cval;
		break;
	case TIMER_PTIMER:
		__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval;
		break;
	default:
		WARN_ON(1);
	}
}

static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		__vcpu_sys_reg(vcpu, CNTVOFF_EL2) = offset;
		break;
	default:
		WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
	}
}

u64 kvm_phys_timer_read(void)
{
	return timecounter->cc->read(timecounter->cc);
}
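
/*
 * Work out which timers this vcpu handles directly in hardware and which
 * one is emulated with a hrtimer: with VHE both EL1 timers can be given to
 * the guest, while without VHE only the virtual timer is direct and the
 * physical timer is emulated.
 */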
static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
{
	if (has_vhe()) {
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = vcpu_ptimer(vcpu);
		map->emul_ptimer = NULL;
	} else {
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = NULL;
		map->emul_ptimer = vcpu_ptimer(vcpu);
	}

	trace_kvm_get_timer_map(vcpu->vcpu_id, map);
}

static inline bool userspace_irqchip(struct kvm *kvm)
{
	return static_branch_unlikely(&userspace_irqchip_in_use) &&
		unlikely(!irqchip_in_kernel(kvm));
}

static void soft_timer_start(struct hrtimer *hrt, u64 ns)
{
	hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
		      HRTIMER_MODE_ABS_HARD);
}

static void soft_timer_cancel(struct hrtimer *hrt)
{
	hrtimer_cancel(hrt);
}

static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
	struct arch_timer_context *ctx;
	struct timer_map map;

	/*
	 * We may see a timer interrupt after vcpu_put() has been called which
	 * sets the CPU's vcpu pointer to NULL, because even though the timer
	 * has been disabled in timer_save_state(), the hardware interrupt
	 * signal may not have been retired from the interrupt controller yet.
	 */
	if (!vcpu)
		return IRQ_HANDLED;

	get_timer_map(vcpu, &map);

	if (irq == host_vtimer_irq)
		ctx = map.direct_vtimer;
	else
		ctx = map.direct_ptimer;

	if (kvm_timer_should_fire(ctx))
		kvm_timer_update_irq(vcpu, true, ctx);

	if (userspace_irqchip(vcpu->kvm) &&
	    !static_branch_unlikely(&has_gic_active_state))
		disable_percpu_irq(host_vtimer_irq);

	return IRQ_HANDLED;
}
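
/*
 * Return the number of host nanoseconds until the timer's compare value is
 * reached, or 0 if it has already expired.
 */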
static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
{
	u64 cval, now;

	cval = timer_get_cval(timer_ctx);
	now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);

	if (now < cval) {
		u64 ns;

		ns = cyclecounter_cyc2ns(timecounter->cc,
					 cval - now,
					 timecounter->mask,
					 &timecounter->frac);
		return ns;
	}

	return 0;
}
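
/*
 * A timer can only raise an interrupt if it is enabled and not masked.
 * This must only be called on a context whose state is not currently
 * loaded on the CPU, hence the WARN_ON below.
 */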
static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
{
	WARN_ON(timer_ctx && timer_ctx->loaded);
	return timer_ctx &&
		((timer_get_ctl(timer_ctx) &
		  (ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE);
}

/*
 * Returns the earliest expiration time in ns among the guest timers.
 * Note that it will return 0 if none of the timers can fire.
 */
static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
{
	u64 min_delta = ULLONG_MAX;
	int i;

	for (i = 0; i < NR_KVM_TIMERS; i++) {
		struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i];

		WARN(ctx->loaded, "timer %d loaded\n", i);
		if (kvm_timer_irq_can_fire(ctx))
			min_delta = min(min_delta, kvm_timer_compute_delta(ctx));
	}

	/* If none of the timers can fire, return 0 */
	if (min_delta == ULLONG_MAX)
		return 0;

	return min_delta;
}

static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
{
	struct arch_timer_cpu *timer;
	struct kvm_vcpu *vcpu;
	u64 ns;

	timer = container_of(hrt, struct arch_timer_cpu, bg_timer);
	vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If we should have slept longer, restart it.
	 */
	ns = kvm_timer_earliest_exp(vcpu);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_vcpu_wake_up(vcpu);
	return HRTIMER_NORESTART;
}

static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt)
{
	struct arch_timer_context *ctx;
	struct kvm_vcpu *vcpu;
	u64 ns;

	ctx = container_of(hrt, struct arch_timer_context, hrtimer);
	vcpu = ctx->vcpu;

	trace_kvm_timer_hrtimer_expire(ctx);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If not ready, schedule for a later time.
	 */
	ns = kvm_timer_compute_delta(ctx);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_timer_update_irq(vcpu, true, ctx);
	return HRTIMER_NORESTART;
}
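
/*
 * Evaluate the timer output level: if the context is currently loaded on
 * the CPU, read the control register straight from hardware; otherwise
 * compare the saved CVAL against the (offset-adjusted) counter.
 */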
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
{
	enum kvm_arch_timers index;
	u64 cval, now;

	if (!timer_ctx)
		return false;

	index = arch_timer_ctx_index(timer_ctx);

	if (timer_ctx->loaded) {
		u32 cnt_ctl = 0;

		switch (index) {
		case TIMER_VTIMER:
			cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL);
			break;
		case TIMER_PTIMER:
			cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL);
			break;
		case NR_KVM_TIMERS:
			/* GCC is braindead */
			cnt_ctl = 0;
			break;
		}

		return  (cnt_ctl & ARCH_TIMER_CTRL_ENABLE) &&
			(cnt_ctl & ARCH_TIMER_CTRL_IT_STAT) &&
		       !(cnt_ctl & ARCH_TIMER_CTRL_IT_MASK);
	}

	if (!kvm_timer_irq_can_fire(timer_ctx))
		return false;

	cval = timer_get_cval(timer_ctx);
	now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);

	return cval <= now;
}

bool kvm_timer_is_pending(struct kvm_vcpu *vcpu)
{
	struct timer_map map;

	get_timer_map(vcpu, &map);

	return kvm_timer_should_fire(map.direct_vtimer) ||
	       kvm_timer_should_fire(map.direct_ptimer) ||
	       kvm_timer_should_fire(map.emul_ptimer);
}

/*
 * Reflect the timer output level into the kvm_run structure
 */
void kvm_timer_update_run(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the device bitmap with the timer states */
	regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
				    KVM_ARM_DEV_EL1_PTIMER);
	if (kvm_timer_should_fire(vtimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
	if (kvm_timer_should_fire(ptimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
}

static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx)
{
	int ret;

	timer_ctx->irq.level = new_level;
	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
				   timer_ctx->irq.level);

	if (!userspace_irqchip(vcpu->kvm)) {
		ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					  timer_ctx->irq.irq,
					  timer_ctx->irq.level,
					  timer_ctx);
		WARN_ON(ret);
	}
}

/* Only called for a fully emulated timer */
static void timer_emulate(struct arch_timer_context *ctx)
{
	bool should_fire = kvm_timer_should_fire(ctx);

	trace_kvm_timer_emulate(ctx, should_fire);

	if (should_fire != ctx->irq.level) {
		kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
		return;
	}

	/*
	 * If the timer can fire now, we don't need to have a soft timer
	 * scheduled for the future. If the timer cannot fire at all,
	 * then we also don't need a soft timer.
	 */
	if (!kvm_timer_irq_can_fire(ctx)) {
		soft_timer_cancel(&ctx->hrtimer);
		return;
	}

	soft_timer_start(&ctx->hrtimer, kvm_timer_compute_delta(ctx));
}
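
/*
 * Save the in-hardware state of a directly mapped timer back into the vcpu
 * context and disable the hardware timer, with interrupts off so the state
 * cannot change under our feet.
 */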
static void timer_save_state(struct arch_timer_context *ctx)
{
	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
	enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
	unsigned long flags;

	if (!timer->enabled)
		return;

	local_irq_save(flags);

	if (!ctx->loaded)
		goto out;

	switch (index) {
	case TIMER_VTIMER:
		timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTV_CTL));
		timer_set_cval(ctx, read_sysreg_el0(SYS_CNTV_CVAL));

		/* Disable the timer */
		write_sysreg_el0(0, SYS_CNTV_CTL);
		isb();

		break;
	case TIMER_PTIMER:
		timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL));
		timer_set_cval(ctx, read_sysreg_el0(SYS_CNTP_CVAL));

		/* Disable the timer */
		write_sysreg_el0(0, SYS_CNTP_CTL);
		isb();

		break;
	case NR_KVM_TIMERS:
		BUG();
	}

	trace_kvm_timer_save_state(ctx);

	ctx->loaded = false;
out:
	local_irq_restore(flags);
}

/*
 * Schedule the background timer before calling kvm_vcpu_block, so that this
 * thread is removed from its waitqueue and made runnable when there's a timer
 * interrupt to handle.
 */
static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	get_timer_map(vcpu, &map);

	/*
	 * If no timers are capable of raising interrupts (disabled or
	 * masked), then there's no more work for us to do.
	 */
	if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
	    !kvm_timer_irq_can_fire(map.direct_ptimer) &&
	    !kvm_timer_irq_can_fire(map.emul_ptimer))
		return;

	/*
	 * At least one guest timer will expire. Schedule a background timer
	 * for the earliest expiration time among the guest timers.
	 */
	soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
}

static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	soft_timer_cancel(&timer->bg_timer);
}
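
/*
 * Mirror image of timer_save_state(): write the saved CVAL and CTL values
 * back into the hardware timer for a directly mapped context.
 */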
static void timer_restore_state(struct arch_timer_context *ctx)
{
	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
	enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
	unsigned long flags;

	if (!timer->enabled)
		return;

	local_irq_save(flags);

	if (ctx->loaded)
		goto out;

	switch (index) {
	case TIMER_VTIMER:
		write_sysreg_el0(timer_get_cval(ctx), SYS_CNTV_CVAL);
		isb();
		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL);
		break;
	case TIMER_PTIMER:
		write_sysreg_el0(timer_get_cval(ctx), SYS_CNTP_CVAL);
		isb();
		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL);
		break;
	case NR_KVM_TIMERS:
		BUG();
	}

	trace_kvm_timer_restore_state(ctx);

	ctx->loaded = true;
out:
	local_irq_restore(flags);
}

static void set_cntvoff(u64 cntvoff)
{
	kvm_call_hyp(__kvm_timer_set_cntvoff, cntvoff);
}

static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, bool active)
{
	int r;

	r = irq_set_irqchip_state(ctx->host_timer_irq, IRQCHIP_STATE_ACTIVE, active);
	WARN_ON(r);
}

static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
{
	struct kvm_vcpu *vcpu = ctx->vcpu;
	bool phys_active = false;

	/*
	 * Update the timer output so that it is likely to match the
	 * state we're about to restore. If the timer expires between
	 * this point and the register restoration, we'll take the
	 * interrupt anyway.
	 */
	kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx);

	if (irqchip_in_kernel(vcpu->kvm))
		phys_active = kvm_vgic_map_is_active(vcpu, ctx->irq.irq);

	phys_active |= ctx->irq.level;

	set_timer_irq_phys_active(ctx, phys_active);
}

static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	/*
	 * Update the timer output so that it is likely to match the
	 * state we're about to restore. If the timer expires between
	 * this point and the register restoration, we'll take the
	 * interrupt anyway.
	 */
	kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer);

	/*
	 * When using a userspace irqchip with the architected timers and a
	 * host interrupt controller that doesn't support an active state, we
	 * must still prevent continuously exiting from the guest, and
	 * therefore mask the physical interrupt by disabling it on the host
	 * interrupt controller when the virtual level is high, such that the
	 * guest can make forward progress. Once we detect the output level
	 * being de-asserted, we unmask the interrupt again so that we exit
	 * from the guest when the timer fires.
	 */
	if (vtimer->irq.level)
		disable_percpu_irq(host_vtimer_irq);
	else
		enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}
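
/*
 * Called on vcpu_load(): sync the interrupt state with the GIC (or mask the
 * host IRQ when using a userspace irqchip), program CNTVOFF_EL2, cancel any
 * background timer, move the direct timers back into hardware and re-arm
 * the emulated physical timer if there is one.
 */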
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	if (unlikely(!timer->enabled))
		return;

	get_timer_map(vcpu, &map);

	if (static_branch_likely(&has_gic_active_state)) {
		kvm_timer_vcpu_load_gic(map.direct_vtimer);
		if (map.direct_ptimer)
			kvm_timer_vcpu_load_gic(map.direct_ptimer);
	} else {
		kvm_timer_vcpu_load_nogic(vcpu);
	}

	set_cntvoff(timer_get_offset(map.direct_vtimer));

	kvm_timer_unblocking(vcpu);

	timer_restore_state(map.direct_vtimer);
	if (map.direct_ptimer)
		timer_restore_state(map.direct_ptimer);

	if (map.emul_ptimer)
		timer_emulate(map.emul_ptimer);
}

bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool vlevel, plevel;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
	plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;

	return kvm_timer_should_fire(vtimer) != vlevel ||
	       kvm_timer_should_fire(ptimer) != plevel;
}

void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	if (unlikely(!timer->enabled))
		return;

	get_timer_map(vcpu, &map);

	timer_save_state(map.direct_vtimer);
	if (map.direct_ptimer)
		timer_save_state(map.direct_ptimer);

	/*
	 * Cancel soft timer emulation, because the only case where we
	 * need it after a vcpu_put is in the context of a sleeping VCPU, and
	 * in that case we already factor in the deadline for the physical
	 * timer when scheduling the bg_timer.
	 *
	 * In any case, we re-schedule the hrtimer for the physical timer when
	 * coming back to the VCPU thread in kvm_timer_vcpu_load().
	 */
	if (map.emul_ptimer)
		soft_timer_cancel(&map.emul_ptimer->hrtimer);

	if (rcuwait_active(wait))
		kvm_timer_blocking(vcpu);

	/*
	 * The kernel may decide to run userspace after calling vcpu_put, so
	 * we reset cntvoff to 0 to ensure a consistent read between user
	 * accesses to the virtual counter and kernel accesses to the physical
	 * counter in the non-VHE case. For VHE, the virtual counter uses a
	 * fixed virtual offset of zero, so there is no need to zero
	 * CNTVOFF_EL2.
	 */
	set_cntvoff(0);
}

/*
 * With a userspace irqchip we have to check if the guest de-asserted the
 * timer and if so, unmask the timer irq signal on the host interrupt
 * controller to ensure that we see future timer signals.
 */
static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	if (!kvm_timer_should_fire(vtimer)) {
		kvm_timer_update_irq(vcpu, false, vtimer);
		if (static_branch_likely(&has_gic_active_state))
			set_timer_irq_phys_active(vtimer, false);
		else
			enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
	}
}

void kvm_timer_sync_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	if (unlikely(!timer->enabled))
		return;

	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
		unmask_vtimer_irq_user(vcpu);
}
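
/*
 * Reset the timers to a known state: both EL1 timers disabled and unmasked,
 * output lines lowered, and any mapped interrupts reset in the vgic.
 */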
int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	get_timer_map(vcpu, &map);

	/*
	 * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
	 * and to 0 for ARMv7. We provide an implementation that always
	 * resets the timer to be disabled and unmasked and is compliant with
	 * the ARMv7 architecture.
	 */
	timer_set_ctl(vcpu_vtimer(vcpu), 0);
	timer_set_ctl(vcpu_ptimer(vcpu), 0);

	if (timer->enabled) {
		kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu));
		kvm_timer_update_irq(vcpu, false, vcpu_ptimer(vcpu));

		if (irqchip_in_kernel(vcpu->kvm)) {
			kvm_vgic_reset_mapped_irq(vcpu, map.direct_vtimer->irq.irq);
			if (map.direct_ptimer)
				kvm_vgic_reset_mapped_irq(vcpu, map.direct_ptimer->irq.irq);
		}
	}

	if (map.emul_ptimer)
		soft_timer_cancel(&map.emul_ptimer->hrtimer);

	return 0;
}

/* Make the updates of cntvoff for all vtimer contexts atomic */
static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
{
	int i;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tmp;

	if (unlikely(kvm_vm_is_protected(vcpu->kvm)))
		cntvoff = 0;

	mutex_lock(&kvm->lock);
	kvm_for_each_vcpu(i, tmp, kvm)
		timer_set_offset(vcpu_vtimer(tmp), cntvoff);

	/*
	 * When called from the vcpu create path, the CPU being created is not
	 * included in the loop above, so we just set it here as well.
	 */
	timer_set_offset(vcpu_vtimer(vcpu), cntvoff);
	mutex_unlock(&kvm->lock);
}
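
/*
 * Initialize the per-vcpu timer state at vcpu creation time: set up the
 * back-pointers, default PPI numbers, host IRQ mapping and the hrtimers
 * used for background wakeups and timer emulation.
 */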
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

	vtimer->vcpu = vcpu;
	ptimer->vcpu = vcpu;

	/* Synchronize cntvoff across all vtimers of a VM. */
	update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
	timer_set_offset(ptimer, 0);

	hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	timer->bg_timer.function = kvm_bg_timer_expire;

	hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	vtimer->hrtimer.function = kvm_hrtimer_expire;
	ptimer->hrtimer.function = kvm_hrtimer_expire;

	vtimer->irq.irq = default_vtimer_irq.irq;
	ptimer->irq.irq = default_ptimer_irq.irq;

	vtimer->host_timer_irq = host_vtimer_irq;
	ptimer->host_timer_irq = host_ptimer_irq;

	vtimer->host_timer_irq_flags = host_vtimer_irq_flags;
	ptimer->host_timer_irq_flags = host_ptimer_irq_flags;
}

static void kvm_timer_init_interrupt(void *info)
{
	enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
	enable_percpu_irq(host_ptimer_irq, host_ptimer_irq_flags);
}
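
/*
 * Userspace register accessor (KVM_SET_ONE_REG path) for the timer
 * registers. Writing KVM_REG_ARM_TIMER_CNT adjusts CNTVOFF so that the
 * guest observes the requested virtual counter value.
 */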
int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
	struct arch_timer_context *timer;

	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_TIMER_CNT:
		timer = vcpu_vtimer(vcpu);
		update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
		break;
	case KVM_REG_ARM_TIMER_CVAL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;
	case KVM_REG_ARM_PTIMER_CTL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_PTIMER_CVAL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;

	default:
		return -1;
	}

	return 0;
}

static u64 read_timer_ctl(struct arch_timer_context *timer)
{
	/*
	 * Set ISTATUS bit if it's expired.
	 * Note that according to ARMv8 ARM Issue A.k, ISTATUS bit is
	 * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit
	 * regardless of ENABLE bit for our implementation convenience.
	 */
	u32 ctl = timer_get_ctl(timer);

	if (!kvm_timer_compute_delta(timer))
		ctl |= ARCH_TIMER_CTRL_IT_STAT;

	return ctl;
}

u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CTL);
	case KVM_REG_ARM_TIMER_CNT:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CNT);
	case KVM_REG_ARM_TIMER_CVAL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CVAL);
	case KVM_REG_ARM_PTIMER_CTL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CTL);
	case KVM_REG_ARM_PTIMER_CNT:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CNT);
	case KVM_REG_ARM_PTIMER_CVAL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CVAL);
	}
	return (u64)-1;
}

static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg)
{
	u64 val;

	switch (treg) {
	case TIMER_REG_TVAL:
		val = timer_get_cval(timer) - kvm_phys_timer_read() + timer_get_offset(timer);
		val = lower_32_bits(val);
		break;

	case TIMER_REG_CTL:
		val = read_timer_ctl(timer);
		break;

	case TIMER_REG_CVAL:
		val = timer_get_cval(timer);
		break;

	case TIMER_REG_CNT:
		val = kvm_phys_timer_read() - timer_get_offset(timer);
		break;

	default:
		BUG();
	}

	return val;
}

u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
			      enum kvm_arch_timers tmr,
			      enum kvm_arch_timer_regs treg)
{
	u64 val;

	preempt_disable();
	kvm_timer_vcpu_put(vcpu);

	val = kvm_arm_timer_read(vcpu, vcpu_get_timer(vcpu, tmr), treg);

	kvm_timer_vcpu_load(vcpu);
	preempt_enable();

	return val;
}

static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
				struct arch_timer_context *timer,
				enum kvm_arch_timer_regs treg,
				u64 val)
{
	switch (treg) {
	case TIMER_REG_TVAL:
		timer_set_cval(timer, kvm_phys_timer_read() - timer_get_offset(timer) + (s32)val);
		break;

	case TIMER_REG_CTL:
		timer_set_ctl(timer, val & ~ARCH_TIMER_CTRL_IT_STAT);
		break;

	case TIMER_REG_CVAL:
		timer_set_cval(timer, val);
		break;

	default:
		BUG();
	}
}

void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
				enum kvm_arch_timers tmr,
				enum kvm_arch_timer_regs treg,
				u64 val)
{
	preempt_disable();
	kvm_timer_vcpu_put(vcpu);

	kvm_arm_timer_write(vcpu, vcpu_get_timer(vcpu, tmr), treg, val);

	kvm_timer_vcpu_load(vcpu);
	preempt_enable();
}

static int kvm_timer_starting_cpu(unsigned int cpu)
{
	kvm_timer_init_interrupt(NULL);
	return 0;
}

static int kvm_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(host_vtimer_irq);
	return 0;
}
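
/*
 * irq_chip callbacks used when the timer interrupts are pushed into their
 * own hierarchical domain (see kvm_irq_init() below). While an interrupt is
 * forwarded to a vcpu, active-state changes are handled here by masking or
 * unmasking the parent interrupt instead of relying on the GIC, which is
 * what allows software resampling of the timer line on hosts without
 * hardware deactivation.
 */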
static int timer_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);

	return 0;
}

static int timer_irq_set_irqchip_state(struct irq_data *d,
				       enum irqchip_irq_state which, bool val)
{
	if (which != IRQCHIP_STATE_ACTIVE || !irqd_is_forwarded_to_vcpu(d))
		return irq_chip_set_parent_state(d, which, val);

	if (val)
		irq_chip_mask_parent(d);
	else
		irq_chip_unmask_parent(d);

	return 0;
}

static void timer_irq_eoi(struct irq_data *d)
{
	if (!irqd_is_forwarded_to_vcpu(d))
		irq_chip_eoi_parent(d);
}

static void timer_irq_ack(struct irq_data *d)
{
	d = d->parent_data;
	if (d->chip->irq_ack)
		d->chip->irq_ack(d);
}

static struct irq_chip timer_chip = {
	.name			= "KVM",
	.irq_ack		= timer_irq_ack,
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= timer_irq_eoi,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_set_vcpu_affinity	= timer_irq_set_vcpu_affinity,
	.irq_set_irqchip_state	= timer_irq_set_irqchip_state,
};

static int timer_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs, void *arg)
{
	irq_hw_number_t hwirq = (uintptr_t)arg;

	return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
					     &timer_chip, NULL);
}

static void timer_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs)
{
}

static const struct irq_domain_ops timer_domain_ops = {
	.alloc	= timer_irq_domain_alloc,
	.free	= timer_irq_domain_free,
};

static struct irq_ops arch_timer_irq_ops = {
	.get_input_level = kvm_arch_timer_get_input_level,
};

static void kvm_irq_fixup_flags(unsigned int virq, u32 *flags)
{
	*flags = irq_get_trigger_type(virq);
	if (*flags != IRQF_TRIGGER_HIGH && *flags != IRQF_TRIGGER_LOW) {
		kvm_err("Invalid trigger for timer IRQ%d, assuming level low\n",
			virq);
		*flags = IRQF_TRIGGER_LOW;
	}
}
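
/*
 * Discover the host timer interrupts and their trigger type. If the GIC
 * cannot deactivate the hardware interrupt on the guest's behalf
 * (no_hw_deactivation), push the timer IRQs into the hierarchical domain
 * defined above so that the active state can be emulated in software.
 */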
static int kvm_irq_init(struct arch_timer_kvm_info *info)
{
	struct irq_domain *domain = NULL;

	if (info->virtual_irq <= 0) {
		kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
			info->virtual_irq);
		return -ENODEV;
	}

	host_vtimer_irq = info->virtual_irq;
	kvm_irq_fixup_flags(host_vtimer_irq, &host_vtimer_irq_flags);

	if (kvm_vgic_global_state.no_hw_deactivation) {
		struct fwnode_handle *fwnode;
		struct irq_data *data;

		fwnode = irq_domain_alloc_named_fwnode("kvm-timer");
		if (!fwnode)
			return -ENOMEM;

		/* Assume both vtimer and ptimer in the same parent */
		data = irq_get_irq_data(host_vtimer_irq);
		domain = irq_domain_create_hierarchy(data->domain, 0,
						     NR_KVM_TIMERS, fwnode,
						     &timer_domain_ops, NULL);
		if (!domain) {
			irq_domain_free_fwnode(fwnode);
			return -ENOMEM;
		}

		arch_timer_irq_ops.flags |= VGIC_IRQ_SW_RESAMPLE;
		WARN_ON(irq_domain_push_irq(domain, host_vtimer_irq,
					    (void *)TIMER_VTIMER));
	}

	if (info->physical_irq > 0) {
		host_ptimer_irq = info->physical_irq;
		kvm_irq_fixup_flags(host_ptimer_irq, &host_ptimer_irq_flags);

		if (domain)
			WARN_ON(irq_domain_push_irq(domain, host_ptimer_irq,
						    (void *)TIMER_PTIMER));
	}

	return 0;
}
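
/*
 * One-time init at KVM startup: take the timecounter from the arch timer
 * driver, request the per-cpu vtimer (and, where present, ptimer) IRQs and
 * route them to the running vcpu pointer, then register the CPU hotplug
 * callbacks that enable/disable the percpu interrupts.
 */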
int kvm_timer_hyp_init(bool has_gic)
{
	struct arch_timer_kvm_info *info;
	int err;

	info = arch_timer_get_kvm_info();
	timecounter = &info->timecounter;

	if (!timecounter->cc) {
		kvm_err("kvm_arch_timer: uninitialized timecounter\n");
		return -ENODEV;
	}

	err = kvm_irq_init(info);
	if (err)
		return err;

	/* First, do the virtual EL1 timer irq */

	err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
				 "kvm guest vtimer", kvm_get_running_vcpus());
	if (err) {
		kvm_err("kvm_arch_timer: can't request vtimer interrupt %d (%d)\n",
			host_vtimer_irq, err);
		return err;
	}

	if (has_gic) {
		err = irq_set_vcpu_affinity(host_vtimer_irq,
					    kvm_get_running_vcpus());
		if (err) {
			kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
			goto out_free_irq;
		}

		static_branch_enable(&has_gic_active_state);
	}

	kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);

	/* Now let's do the physical EL1 timer irq */

	if (info->physical_irq > 0) {
		err = request_percpu_irq(host_ptimer_irq, kvm_arch_timer_handler,
					 "kvm guest ptimer", kvm_get_running_vcpus());
		if (err) {
			kvm_err("kvm_arch_timer: can't request ptimer interrupt %d (%d)\n",
				host_ptimer_irq, err);
			return err;
		}

		if (has_gic) {
			err = irq_set_vcpu_affinity(host_ptimer_irq,
						    kvm_get_running_vcpus());
			if (err) {
				kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
				goto out_free_irq;
			}
		}

		kvm_debug("physical timer IRQ%d\n", host_ptimer_irq);
	} else if (has_vhe()) {
		kvm_err("kvm_arch_timer: invalid physical timer IRQ: %d\n",
			info->physical_irq);
		err = -ENODEV;
		goto out_free_irq;
	}

	cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
			  "kvm/arm/timer:starting", kvm_timer_starting_cpu,
			  kvm_timer_dying_cpu);
	return 0;
out_free_irq:
	free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
	return err;
}

void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	soft_timer_cancel(&timer->bg_timer);
}
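
/*
 * Claim ownership of the timer PPIs in the vgic and check that every vcpu
 * agrees on the same vtimer/ptimer interrupt numbers.
 */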
static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
{
	int vtimer_irq, ptimer_irq;
	int i, ret;

	vtimer_irq = vcpu_vtimer(vcpu)->irq.irq;
	ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu));
	if (ret)
		return false;

	ptimer_irq = vcpu_ptimer(vcpu)->irq.irq;
	ret = kvm_vgic_set_owner(vcpu, ptimer_irq, vcpu_ptimer(vcpu));
	if (ret)
		return false;

	kvm_for_each_vcpu(i, vcpu, vcpu->kvm) {
		if (vcpu_vtimer(vcpu)->irq.irq != vtimer_irq ||
		    vcpu_ptimer(vcpu)->irq.irq != ptimer_irq)
			return false;
	}

	return true;
}

bool kvm_arch_timer_get_input_level(int vintid)
{
	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
	struct arch_timer_context *timer;

	if (vintid == vcpu_vtimer(vcpu)->irq.irq)
		timer = vcpu_vtimer(vcpu);
	else if (vintid == vcpu_ptimer(vcpu)->irq.irq)
		timer = vcpu_ptimer(vcpu);
	else
		BUG();

	return kvm_timer_should_fire(timer);
}
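
/*
 * Enable the timers for a vcpu at first run. With an in-kernel irqchip this
 * also maps the virtual timer interrupts to their physical counterparts so
 * the GIC can deactivate them on the guest's behalf.
 */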
int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;
	int ret;

	if (timer->enabled)
		return 0;

	/* Without a VGIC we do not map virtual IRQs to physical IRQs */
	if (!irqchip_in_kernel(vcpu->kvm))
		goto no_vgic;

	/*
	 * At this stage, we have the guarantee that the vgic is both
	 * available and initialized.
	 */
	if (!timer_irqs_are_valid(vcpu)) {
		kvm_debug("incorrectly configured timer irqs\n");
		return -EINVAL;
	}

	get_timer_map(vcpu, &map);

	ret = kvm_vgic_map_phys_irq(vcpu,
				    map.direct_vtimer->host_timer_irq,
				    map.direct_vtimer->irq.irq,
				    &arch_timer_irq_ops);
	if (ret)
		return ret;

	if (map.direct_ptimer) {
		ret = kvm_vgic_map_phys_irq(vcpu,
					    map.direct_ptimer->host_timer_irq,
					    map.direct_ptimer->irq.irq,
					    &arch_timer_irq_ops);
	}

	if (ret)
		return ret;

no_vgic:
	timer->enabled = 1;
	return 0;
}

/*
 * On a VHE system, we only need to configure the EL2 timer trap register
 * once, not on every world switch.
 * The host kernel runs at EL2 with HCR_EL2.TGE == 1, so these bits have no
 * effect on host kernel execution.
 */
void kvm_timer_init_vhe(void)
{
	/* When HCR_EL2.E2H == 1, EL1PCEN and EL1PCTEN are shifted by 10 */
	u32 cnthctl_shift = 10;
	u64 val;

	/*
	 * VHE systems allow the guest direct access to the EL1 physical
	 * timer/counter.
	 */
	val = read_sysreg(cnthctl_el2);
	val |= (CNTHCTL_EL1PCEN << cnthctl_shift);
	val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
	write_sysreg(val, cnthctl_el2);
}

static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu_vtimer(vcpu)->irq.irq = vtimer_irq;
		vcpu_ptimer(vcpu)->irq.irq = ptimer_irq;
	}
}

int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	int __user *uaddr = (int __user *)(long)attr->addr;
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	int irq;

	if (!irqchip_in_kernel(vcpu->kvm))
		return -EINVAL;

	if (get_user(irq, uaddr))
		return -EFAULT;

	if (!(irq_is_ppi(irq)))
		return -EINVAL;

	if (vcpu->arch.timer_cpu.enabled)
		return -EBUSY;

	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
		set_timer_irqs(vcpu->kvm, irq, ptimer->irq.irq);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		set_timer_irqs(vcpu->kvm, vtimer->irq.irq, irq);
		break;
	default:
		return -ENXIO;
	}

	return 0;
}

int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	int __user *uaddr = (int __user *)(long)attr->addr;
	struct arch_timer_context *timer;
	int irq;

	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
		timer = vcpu_vtimer(vcpu);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		timer = vcpu_ptimer(vcpu);
		break;
	default:
		return -ENXIO;
	}

	irq = timer->irq.irq;
	return put_user(irq, uaddr);
}

int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		return 0;
	}

	return -ENXIO;
}