/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/anon_inodes.h>
#include <linux/spinlock.h>

#include <asm/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/time.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xics.h"

#if 1
#define XICS_DBG(fmt...) do { } while (0)
#else
#define XICS_DBG(fmt...) trace_printk(fmt)
#endif

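/*
 * ENABLE_REALMODE selects whether the real-mode hcall handlers are
 * used when the hardware supports them; DEBUG_REALMODE enables extra
 * debugging of that path (see kvmppc_xics_create below).
 */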
#define ENABLE_REALMODE	true
#define DEBUG_REALMODE	false

/*
 * LOCKING
 * =======
 *
 * Each ICS has a spin lock protecting the information about the IRQ
 * sources and avoiding simultaneous deliveries of the same interrupt.
 *
 * ICP operations are done via a single compare & swap transaction
 * (most ICP state fits in the union kvmppc_icp_state)
 */

/*
 * TODO
 * ====
 *
 * - To speed up resends, keep a bitmap of "resend" set bits in the
 *   ICS
 *
 * - Speed up server# -> ICP lookup (array ? hash table ?)
 *
 * - Make ICS lockless as well, or at least a per-interrupt lock or hashed
 *   locks array to improve scalability
 */

/* -- ICS routines -- */

static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq);

/*
 * Return value ideally indicates how the interrupt was handled, but no
 * callers look at it (given that we don't implement KVM_IRQ_LINE_STATUS),
 * so just return 0.
 */
static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u16 src;

	XICS_DBG("ics deliver %#x (level: %d)\n", irq, level);

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("ics_deliver_irq: IRQ 0x%06x not found !\n", irq);
		return -EINVAL;
	}
	state = &ics->irq_state[src];
	if (!state->exists)
		return -EINVAL;

	/*
	 * We set state->asserted locklessly. This should be fine as
	 * we are the only setter, thus concurrent access is undefined
	 * to begin with.
	 */
	if (level == 1 || level == KVM_INTERRUPT_SET_LEVEL)
		state->asserted = 1;
	else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
		state->asserted = 0;
		return 0;
	}

	/* Attempt delivery */
	icp_deliver_irq(xics, NULL, irq);

	return 0;
}

static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
			     struct kvmppc_icp *icp)
{
	int i;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];

		if (!state->resend)
			continue;

		XICS_DBG("resend %#x prio %#x\n", state->number,
			 state->priority);

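		/*
		 * icp_deliver_irq() takes the ICS lock itself, so drop it
		 * (and re-enable interrupts) around the delivery attempt.
		 */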
		arch_spin_unlock(&ics->lock);
		local_irq_restore(flags);
		icp_deliver_irq(xics, icp, state->number);
		local_irq_save(flags);
		arch_spin_lock(&ics->lock);
	}

	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);
}

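/*
 * Update the routing (server/priority) of an interrupt source.
 * Returns true if the caller should attempt a delivery because a
 * masked-pending or rejected interrupt has become eligible again.
 */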
static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
		       struct ics_irq_state *state,
		       u32 server, u32 priority, u32 saved_priority)
{
	bool deliver;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	state->server = server;
	state->priority = priority;
	state->saved_priority = saved_priority;
	deliver = false;
	if ((state->masked_pending || state->resend) && priority != MASKED) {
		state->masked_pending = 0;
		deliver = true;
	}

	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return deliver;
}

int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, server);
	if (!icp)
		return -EINVAL;

	XICS_DBG("set_xive %#x server %#x prio %#x MP:%d RS:%d\n",
		 irq, server, priority,
		 state->masked_pending, state->resend);

	if (write_xive(xics, ics, state, server, priority, priority))
		icp_deliver_irq(xics, icp, irq);

	return 0;
}

int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;
	unsigned long flags;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	*server = state->server;
	*priority = state->priority;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return 0;
}

int kvmppc_xics_int_on(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, state->server);
	if (!icp)
		return -EINVAL;

	if (write_xive(xics, ics, state, state->server, state->saved_priority,
		       state->saved_priority))
		icp_deliver_irq(xics, icp, irq);

	return 0;
}

int kvmppc_xics_int_off(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	write_xive(xics, ics, state, state->server, MASKED, state->priority);

	return 0;
}

/* -- ICP routines, including hcalls -- */

static inline bool icp_try_update(struct kvmppc_icp *icp,
				  union kvmppc_icp_state old,
				  union kvmppc_icp_state new,
				  bool change_self)
{
	bool success;

	/* Calculate new output value */
	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	/* Attempt atomic update */
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	XICS_DBG("UPD [%04x] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 icp->server_num,
		 old.cppr, old.mfrr, old.pending_pri, old.xisr,
		 old.need_resend, old.out_ee);
	XICS_DBG("UPD - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 new.cppr, new.mfrr, new.pending_pri, new.xisr,
		 new.need_resend, new.out_ee);
	/*
	 * Check for output state update
	 *
	 * Note that this is racy since another processor could be updating
	 * the state already. This is why we never clear the interrupt output
	 * here, we only ever set it. The clear only happens prior to doing
	 * an update and only by the processor itself. Currently we do it
	 * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
	 *
	 * We also do not try to figure out whether the EE state has changed,
	 * we unconditionally set it if the new state calls for it. The reason
	 * for that is that we opportunistically remove the pending interrupt
	 * flag when raising CPPR, so we need to set it back here if an
	 * interrupt is still pending.
	 */
	if (new.out_ee) {
		kvmppc_book3s_queue_irqprio(icp->vcpu,
					    BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
		if (!change_self)
			kvmppc_fast_vcpu_kick(icp->vcpu);
	}
 bail:
	return success;
}

static void icp_check_resend(struct kvmppc_xics *xics,
			     struct kvmppc_icp *icp)
{
	u32 icsid;

	/* Order this load with the test for need_resend in the caller */
	smp_rmb();
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!test_and_clear_bit(icsid, icp->resend_map))
			continue;
		if (!ics)
			continue;
		ics_check_resend(xics, ics, icp);
	}
}

static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
			       u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	XICS_DBG("try deliver %#x(P:%#x) to server %#x\n", irq, priority,
		 icp->server_num);

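	/*
	 * Lock-free update loop: snapshot the ICP state, compute the
	 * new state, and retry if the cmpxchg in icp_try_update()
	 * observes a concurrent change.
	 */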
	do {
		old_state = new_state = READ_ONCE(icp->state);

		*reject = 0;

		/* See if we can deliver */
		success = new_state.cppr > priority &&
			new_state.mfrr > priority &&
			new_state.pending_pri > priority;

		/*
		 * If we can, check for a rejection and perform the
		 * delivery
		 */
		if (success) {
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			/*
			 * If we failed to deliver we set need_resend
			 * so a subsequent CPPR state change causes us
			 * to try a new delivery.
			 */
			new_state.need_resend = true;
		}

	} while (!icp_try_update(icp, old_state, new_state, false));

	return success;
}

static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;
	unsigned long flags;

	/*
	 * This is used both for initial delivery of an interrupt and
	 * for subsequent rejection.
	 *
	 * Rejection can be racy vs. resends. We have evaluated the
	 * rejection in an atomic ICP transaction which is now complete,
	 * so potentially the ICP can already accept the interrupt again.
	 *
	 * So we need to retry the delivery. Essentially the reject path
	 * boils down to a failed delivery. Always.
	 *
	 * Now the interrupt could also have moved to a different target,
	 * thus we may need to re-do the ICP lookup as well
	 */

 again:
	/* Get the ICS state and lock it */
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		XICS_DBG("icp_deliver_irq: IRQ 0x%06x not found !\n", new_irq);
		return;
	}
	state = &ics->irq_state[src];

	/* Get a lock on the ICS */
	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	/* Get our server */
	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			pr_warn("icp_deliver_irq: IRQ 0x%06x server 0x%x not found !\n",
				new_irq, state->server);
			goto out;
		}
	}

	/* Clear the resend bit of that interrupt */
	state->resend = 0;

	/*
	 * If masked, bail out
	 *
	 * Note: PAPR doesn't mention anything about masked pending
	 * when doing a resend, only when doing a delivery.
	 *
	 * However that would have the effect of losing a masked
	 * interrupt that was rejected and isn't consistent with
	 * the whole masked_pending business which is about not
	 * losing interrupts that occur while masked.
	 *
	 * I don't differentiate normal deliveries and resends, this
	 * implementation will differ from PAPR and not lose such
	 * interrupts.
	 */
	if (state->priority == MASKED) {
		XICS_DBG("irq %#x masked pending\n", new_irq);
		state->masked_pending = 1;
		goto out;
	}

	/*
	 * Try the delivery, this will set the need_resend flag
	 * in the ICP as part of the atomic transaction if the
	 * delivery is not possible.
	 *
	 * Note that if successful, the new delivery might have itself
	 * rejected an interrupt that was "delivered" before we took the
	 * ics spin lock.
	 *
	 * In this case we do the whole sequence all over again for the
	 * new guy. We cannot assume that the rejected interrupt is less
	 * favored than the new one, and thus doesn't need to be delivered,
	 * because by the time we exit icp_try_to_deliver() the target
	 * processor may well have already consumed & completed it, and thus
	 * the rejected interrupt might actually be already acceptable.
	 */
	if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		/*
		 * Delivery was successful, did we reject somebody else ?
		 */
		if (reject && reject != XICS_IPI) {
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			new_irq = reject;
			goto again;
		}
	} else {
		/*
		 * We failed to deliver the interrupt, so we need to set
		 * the resend map bit and mark the ICS state as needing
		 * a resend.
		 */
		set_bit(ics->icsid, icp->resend_map);
		state->resend = 1;

		/*
		 * If the need_resend flag got cleared in the ICP some time
		 * between icp_try_to_deliver() atomic update and now, then
		 * we know it might have missed the resend_map bit. So we
		 * retry
		 */
		smp_mb();
		if (!icp->state.need_resend) {
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			goto again;
		}
	}
 out:
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);
}

static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			  u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

	/*
	 * This handles several related states in one operation:
	 *
	 * ICP State: Down_CPPR
	 *
	 * Load CPPR with new value and if the XISR is 0
	 * then check for resends:
	 *
	 * ICP State: Resend
	 *
	 * If MFRR is more favored than CPPR, check for IPIs
	 * and notify ICS of a potential resend. This is done
	 * asynchronously (when used in real mode, we will have
	 * to exit here).
	 *
	 * We do not handle the complete Check_IPI as documented
	 * here. In the PAPR, this state will be used for both
	 * Set_MFRR and Down_CPPR. However, we know that we aren't
	 * changing the MFRR state here so we don't need to handle
	 * the case of an MFRR causing a reject of a pending irq,
	 * this will have been handled when the MFRR was set in the
	 * first place.
	 *
	 * Thus we don't have to handle rejects, only resends.
	 *
	 * When implementing real mode for HV KVM, resend will lead to
	 * a H_TOO_HARD return and the whole transaction will be handled
	 * in virtual mode.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Down_CPPR */
		new_state.cppr = new_cppr;

		/*
		 * Cut down Resend / Check_IPI / IPI
		 *
		 * The logic is that we cannot have a pending interrupt
		 * trumped by an IPI at this point (see above), so we
		 * know that either the pending interrupt is already an
		 * IPI (in which case we don't care to override it) or
		 * it's either more favored than us or non-existent
		 */
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			WARN_ON(new_state.xisr != XICS_IPI &&
				new_state.xisr != 0);
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		/* Latch/clear resend bit */
		resend = new_state.need_resend;
		new_state.need_resend = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Now handle resend checks. Those are asynchronous to the ICP
	 * state update in HW (ie bus transactions) so we can handle them
	 * separately here too
	 */
	if (resend)
		icp_check_resend(xics, icp);
}

static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	/* First, remove EE from the processor */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the
	 * current CPPR, then clear the XISR & set CPPR to the
	 * pending priority
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

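		/* XIRR layout: CPPR in bits 31..24, XISR in bits 23..0 */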
		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr);

	return xirr;
}

static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
				 unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp;
	u32 reject;
	bool resend;
	bool local;

	XICS_DBG("h_ipi vcpu %d to server %lu mfrr %#lx\n",
		 vcpu->vcpu_id, server, mfrr);

	icp = vcpu->arch.icp;
	local = icp->server_num == server;
	if (!local) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}

	/*
	 * ICP state: Set_MFRR
	 *
	 * If the CPPR is more favored than the new MFRR, then
	 * nothing needs to be rejected as there can be no XISR to
	 * reject. If the MFRR is being made less favored then
	 * there might be a previously-rejected interrupt needing
	 * to be resent.
	 *
	 * ICP state: Check_IPI
	 *
	 * If the CPPR is less favored, then we might be replacing
	 * an interrupt, and thus need to possibly reject it.
	 *
	 * ICP State: IPI
	 *
	 * Besides rejecting any pending interrupts, we also
	 * update XISR and pending_pri to mark IPI as pending.
	 *
	 * PAPR does not describe this state, but if the MFRR is being
	 * made less favored than its earlier value, there might be
	 * a previously-rejected interrupt needing to be resent.
	 * Ideally, we would want to resend only if
	 *	prio(pending_interrupt) < mfrr &&
	 *	prio(pending_interrupt) < cppr
	 * where pending interrupt is the one that was rejected. But
	 * we don't have that state, so we simply trigger a resend
	 * whenever the MFRR is made less favored.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Set_MFRR */
		new_state.mfrr = mfrr;

		/* Check_IPI */
		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri) {
				reject = new_state.xisr;
				new_state.pending_pri = mfrr;
				new_state.xisr = XICS_IPI;
			}
		}

		if (mfrr > old_state.mfrr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, local));

	/* Handle reject */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject);

	/* Handle resend */
	if (resend)
		icp_check_resend(xics, icp);

	return H_SUCCESS;
}

static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	union kvmppc_icp_state state;
	struct kvmppc_icp *icp;

	icp = vcpu->arch.icp;
	if (icp->server_num != server) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}
	state = READ_ONCE(icp->state);
	kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
	kvmppc_set_gpr(vcpu, 5, state.mfrr);
	return H_SUCCESS;
}

static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr);

	/*
	 * ICP State: Set_CPPR
	 *
	 * We can safely compare the new value with the current
	 * value outside of the transaction as the CPPR is only
	 * ever changed by the processor on itself
	 */
	if (cppr > icp->state.cppr)
		icp_down_cppr(xics, icp, cppr);
	else if (cppr == icp->state.cppr)
		return;

	/*
	 * ICP State: Up_CPPR
	 *
	 * The processor is raising its priority, this can result
	 * in a rejection of a pending interrupt:
	 *
	 * ICP State: Reject_Current
	 *
	 * We can remove EE from the current processor, the update
	 * transaction will set it again if needed
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Check for rejects. They are handled by doing a new delivery
	 * attempt (see comments in icp_deliver_irq).
	 */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject);
}

static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u32 irq = xirr & 0x00ffffff;
	u16 src;

	XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr);

	/*
	 * ICP State: EOI
	 *
	 * Note: If EOI is incorrectly used by SW to lower the CPPR
	 * value (ie more favored), we do not check for rejection of
	 * a pending interrupt, this is a SW error and PAPR specifies
	 * that we don't have to deal with it.
	 *
	 * The sending of an EOI to the ICS is handled after the
	 * CPPR update
	 *
	 * ICP State: Down_CPPR, which we handle
	 * in a separate function as it's shared with H_CPPR.
	 */
	icp_down_cppr(xics, icp, xirr >> 24);

	/* IPIs have no EOI */
	if (irq == XICS_IPI)
		return H_SUCCESS;

	/*
	 * EOI handling: If the interrupt is still asserted, we need to
	 * resend it. We can take a lockless "peek" at the ICS state here.
	 *
	 * "Message" interrupts will never have "asserted" set
	 */
	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("h_eoi: IRQ 0x%06x not found !\n", irq);
		return H_PARAMETER;
	}
	state = &ics->irq_state[src];

	/* Still asserted, resend it */
	if (state->asserted)
		icp_deliver_irq(xics, icp, irq);

	kvm_notify_acked_irq(vcpu->kvm, 0, irq);

	return H_SUCCESS;
}

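/*
 * Complete, in virtual mode, the work that the real-mode hcall
 * handler deferred: vcpu kicks, resend checks, redelivery of
 * rejected interrupts and EOI notifications.
 */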
static noinline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;

	XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n",
		 hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt);

	if (icp->rm_action & XICS_RM_KICK_VCPU) {
		icp->n_rm_kick_vcpu++;
		kvmppc_fast_vcpu_kick(icp->rm_kick_target);
	}
	if (icp->rm_action & XICS_RM_CHECK_RESEND) {
		icp->n_rm_check_resend++;
		icp_check_resend(xics, icp->rm_resend_icp);
	}
	if (icp->rm_action & XICS_RM_REJECT) {
		icp->n_rm_reject++;
		icp_deliver_irq(xics, icp, icp->rm_reject);
	}
	if (icp->rm_action & XICS_RM_NOTIFY_EOI) {
		icp->n_rm_notify_eoi++;
		kvm_notify_acked_irq(vcpu->kvm, 0, icp->rm_eoied_irq);
	}

	icp->rm_action = 0;

	return H_SUCCESS;
}

int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	unsigned long res;
	int rc = H_SUCCESS;

	/* Check if we have an ICP */
	if (!xics || !vcpu->arch.icp)
		return H_HARDWARE;

	/* These requests don't have real-mode implementations at present */
	switch (req) {
	case H_XIRR_X:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		kvmppc_set_gpr(vcpu, 5, get_tb());
		return rc;
	case H_IPOLL:
		rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
		return rc;
	}

	/* Check for real mode returning too hard */
	if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
		return kvmppc_xics_rm_complete(vcpu, req);

	switch (req) {
	case H_XIRR:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		break;
	case H_CPPR:
		kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_EOI:
		rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_IPI:
		rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
				  kvmppc_get_gpr(vcpu, 5));
		break;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_hcall);


/* -- Initialisation code etc. -- */

static int xics_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xics *xics = m->private;
	struct kvm *kvm = xics->kvm;
	struct kvm_vcpu *vcpu;
	int icsid, i;
	unsigned long flags;
	unsigned long t_rm_kick_vcpu, t_rm_check_resend;
	unsigned long t_rm_reject, t_rm_notify_eoi;
	unsigned long t_reject, t_check_resend;

	if (!kvm)
		return 0;

	t_rm_kick_vcpu = 0;
	t_rm_notify_eoi = 0;
	t_rm_check_resend = 0;
	t_rm_reject = 0;
	t_check_resend = 0;
	t_reject = 0;

	seq_printf(m, "=========\nICP state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_icp *icp = vcpu->arch.icp;
		union kvmppc_icp_state state;

		if (!icp)
			continue;

		state.raw = READ_ONCE(icp->state.raw);
		seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
			   icp->server_num, state.xisr,
			   state.pending_pri, state.cppr, state.mfrr,
			   state.out_ee, state.need_resend);
		t_rm_kick_vcpu += icp->n_rm_kick_vcpu;
		t_rm_notify_eoi += icp->n_rm_notify_eoi;
		t_rm_check_resend += icp->n_rm_check_resend;
		t_rm_reject += icp->n_rm_reject;
		t_check_resend += icp->n_check_resend;
		t_reject += icp->n_reject;
	}

	seq_printf(m, "ICP Guest->Host totals: kick_vcpu=%lu check_resend=%lu reject=%lu notify_eoi=%lu\n",
		   t_rm_kick_vcpu, t_rm_check_resend,
		   t_rm_reject, t_rm_notify_eoi);
	seq_printf(m, "ICP Real Mode totals: check_resend=%lu resend=%lu\n",
		   t_check_resend, t_reject);

	for (icsid = 0; icsid <= KVMPPC_XICS_MAX_ICS_ID; icsid++) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!ics)
			continue;

		seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n",
			   icsid);

		local_irq_save(flags);
		arch_spin_lock(&ics->lock);

		for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
			struct ics_irq_state *irq = &ics->irq_state[i];

			seq_printf(m, "irq 0x%06x: server %#x prio %#x save prio %#x asserted %d resend %d masked pending %d\n",
				   irq->number, irq->server, irq->priority,
				   irq->saved_priority, irq->asserted,
				   irq->resend, irq->masked_pending);
		}
		arch_spin_unlock(&ics->lock);
		local_irq_restore(flags);
	}
	return 0;
}

static int xics_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, xics_debug_show, inode->i_private);
}

static const struct file_operations xics_debug_fops = {
	.open = xics_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void xics_debugfs_init(struct kvmppc_xics *xics)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "kvm-xics-%p", xics);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}

	xics->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
					   xics, &xics_debug_fops);

	pr_debug("%s: created %s\n", __func__, name);
	kfree(name);
}

static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
					struct kvmppc_xics *xics, int irq)
{
	struct kvmppc_ics *ics;
	int i, icsid;

	icsid = irq >> KVMPPC_XICS_ICS_SHIFT;

	mutex_lock(&kvm->lock);

	/* ICS already exists - somebody else got here first */
	if (xics->ics[icsid])
		goto out;

	/* Create the ICS */
	ics = kzalloc(sizeof(struct kvmppc_ics), GFP_KERNEL);
	if (!ics)
		goto out;

	ics->icsid = icsid;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		ics->irq_state[i].number = (icsid << KVMPPC_XICS_ICS_SHIFT) | i;
		ics->irq_state[i].priority = MASKED;
		ics->irq_state[i].saved_priority = MASKED;
	}
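	/* Publish the fully-initialised ICS before installing the pointer */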
	smp_wmb();
	xics->ics[icsid] = ics;

	if (icsid > xics->max_icsid)
		xics->max_icsid = icsid;

 out:
	mutex_unlock(&kvm->lock);
	return xics->ics[icsid];
}

int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
{
	struct kvmppc_icp *icp;

	if (!vcpu->kvm->arch.xics)
		return -ENODEV;

	if (kvmppc_xics_find_server(vcpu->kvm, server_num))
		return -EEXIST;

	icp = kzalloc(sizeof(struct kvmppc_icp), GFP_KERNEL);
	if (!icp)
		return -ENOMEM;

	icp->vcpu = vcpu;
	icp->server_num = server_num;
	icp->state.mfrr = MASKED;
	icp->state.pending_pri = MASKED;
	vcpu->arch.icp = icp;

	XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id);

	return 0;
}

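/*
 * Pack the ICP state into the KVM_REG_PPC_ICP_* one-reg layout used
 * by userspace (together with kvmppc_xics_set_icp) for save/restore.
 */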
u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	union kvmppc_icp_state state;

	if (!icp)
		return 0;
	state = icp->state;
	return ((u64)state.cppr << KVM_REG_PPC_ICP_CPPR_SHIFT) |
		((u64)state.xisr << KVM_REG_PPC_ICP_XISR_SHIFT) |
		((u64)state.mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT) |
		((u64)state.pending_pri << KVM_REG_PPC_ICP_PPRI_SHIFT);
}

int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_ics *ics;
	u8 cppr, mfrr, pending_pri;
	u32 xisr;
	u16 src;
	bool resend;

	if (!icp || !xics)
		return -ENOENT;

	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
		KVM_REG_PPC_ICP_XISR_MASK;
	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
	pending_pri = icpval >> KVM_REG_PPC_ICP_PPRI_SHIFT;

	/* Require the new state to be internally consistent */
	if (xisr == 0) {
		if (pending_pri != 0xff)
			return -EINVAL;
	} else if (xisr == XICS_IPI) {
		if (pending_pri != mfrr || pending_pri >= cppr)
			return -EINVAL;
	} else {
		if (pending_pri >= mfrr || pending_pri >= cppr)
			return -EINVAL;
		ics = kvmppc_xics_find_ics(xics, xisr, &src);
		if (!ics)
			return -EINVAL;
	}

	new_state.raw = 0;
	new_state.cppr = cppr;
	new_state.xisr = xisr;
	new_state.mfrr = mfrr;
	new_state.pending_pri = pending_pri;

	/*
	 * Deassert the CPU interrupt request.
	 * icp_try_update will reassert it if necessary.
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

	/*
	 * Note that if we displace an interrupt from old_state.xisr,
	 * we don't mark it as rejected. We expect userspace to set
	 * the state of the interrupt sources to be consistent with
	 * the ICP states (either before or afterwards, which doesn't
	 * matter). We do handle resends due to CPPR becoming less
	 * favoured because that is necessary to end up with a
	 * consistent state in the situation where userspace restores
	 * the ICS states before the ICP states.
	 */
	do {
		old_state = READ_ONCE(icp->state);

		if (new_state.mfrr <= old_state.mfrr) {
			resend = false;
			new_state.need_resend = old_state.need_resend;
		} else {
			resend = old_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, false));

	if (resend)
		icp_check_resend(xics, icp);

	return 0;
}

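/*
 * Read one interrupt source's state out to userspace
 * (KVM_DEV_XICS_GRP_SOURCES).
 */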
static int xics_get_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	int ret;
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val, prio;
	unsigned long flags;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return -ENOENT;

	irqp = &ics->irq_state[idx];
	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	ret = -ENOENT;
	if (irqp->exists) {
		val = irqp->server;
		prio = irqp->priority;
		if (prio == MASKED) {
			val |= KVM_XICS_MASKED;
			prio = irqp->saved_priority;
		}
		val |= prio << KVM_XICS_PRIORITY_SHIFT;
		if (irqp->asserted)
			val |= KVM_XICS_LEVEL_SENSITIVE | KVM_XICS_PENDING;
		else if (irqp->masked_pending || irqp->resend)
			val |= KVM_XICS_PENDING;
		ret = 0;
	}
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	if (!ret && put_user(val, ubufp))
		ret = -EFAULT;

	return ret;
}

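/*
 * Write one interrupt source's state from userspace, creating the
 * backing ICS on first use.
 */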
static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val;
	u8 prio;
	u32 server;
	unsigned long flags;

	if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
		return -ENOENT;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics) {
		ics = kvmppc_xics_create_ics(xics->kvm, xics, irq);
		if (!ics)
			return -ENOMEM;
	}
	irqp = &ics->irq_state[idx];
	if (get_user(val, ubufp))
		return -EFAULT;

	server = val & KVM_XICS_DESTINATION_MASK;
	prio = val >> KVM_XICS_PRIORITY_SHIFT;
	if (prio != MASKED &&
	    kvmppc_xics_find_server(xics->kvm, server) == NULL)
		return -EINVAL;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	irqp->server = server;
	irqp->saved_priority = prio;
	if (val & KVM_XICS_MASKED)
		prio = MASKED;
	irqp->priority = prio;
	irqp->resend = 0;
	irqp->masked_pending = 0;
	irqp->asserted = 0;
	if ((val & KVM_XICS_PENDING) && (val & KVM_XICS_LEVEL_SENSITIVE))
		irqp->asserted = 1;
	irqp->exists = 1;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	if (val & KVM_XICS_PENDING)
		icp_deliver_irq(xics, NULL, irqp->number);

	return 0;
}

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status)
{
	struct kvmppc_xics *xics = kvm->arch.xics;

	return ics_deliver_irq(xics, irq, level);
}

int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level, bool line_status)
{
	if (!level)
		return -1;
	return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
			   level, line_status);
}

static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_set_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_get_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
		    attr->attr < KVMPPC_XICS_NR_IRQS)
			return 0;
		break;
	}
	return -ENXIO;
}

static void kvmppc_xics_free(struct kvm_device *dev)
{
	struct kvmppc_xics *xics = dev->private;
	int i;
	struct kvm *kvm = xics->kvm;

	debugfs_remove(xics->dentry);

	if (kvm)
		kvm->arch.xics = NULL;

	for (i = 0; i <= xics->max_icsid; i++)
		kfree(xics->ics[i]);
	kfree(xics);
	kfree(dev);
}

static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
{
	struct kvmppc_xics *xics;
	struct kvm *kvm = dev->kvm;
	int ret = 0;

	xics = kzalloc(sizeof(*xics), GFP_KERNEL);
	if (!xics)
		return -ENOMEM;

	dev->private = xics;
	xics->dev = dev;
	xics->kvm = kvm;

	/* Already there ? */
	mutex_lock(&kvm->lock);
	if (kvm->arch.xics)
		ret = -EEXIST;
	else
		kvm->arch.xics = xics;
	mutex_unlock(&kvm->lock);

	if (ret) {
		kfree(xics);
		return ret;
	}

	xics_debugfs_init(xics);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		/* Enable real mode support */
		xics->real_mode = ENABLE_REALMODE;
		xics->real_mode_dbg = DEBUG_REALMODE;
	}
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

	return 0;
}

struct kvm_device_ops kvm_xics_ops = {
	.name = "kvm-xics",
	.create = kvmppc_xics_create,
	.destroy = kvmppc_xics_free,
	.set_attr = xics_set_attr,
	.get_attr = xics_get_attr,
	.has_attr = xics_has_attr,
};

int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 xcpu)
{
	struct kvmppc_xics *xics = dev->private;
	int r = -EBUSY;

	if (dev->ops != &kvm_xics_ops)
		return -EPERM;
	if (xics->kvm != vcpu->kvm)
		return -EPERM;
	if (vcpu->arch.irq_type)
		return -EBUSY;

	r = kvmppc_xics_create_icp(vcpu, xcpu);
	if (!r)
		vcpu->arch.irq_type = KVMPPC_IRQ_XICS;

	return r;
}

void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.icp)
		return;
	kfree(vcpu->arch.icp);
	vcpu->arch.icp = NULL;
	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
}

static int xics_set_irq(struct kvm_kernel_irq_routing_entry *e,
			struct kvm *kvm, int irq_source_id, int level,
			bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
}

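/*
 * The generic irq routing code sees a single flat "irqchip" here:
 * GSI numbers map 1:1 to pins.
 */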
int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi)
{
	entries->gsi = gsi;
	entries->type = KVM_IRQ_ROUTING_IRQCHIP;
	entries->set = xics_set_irq;
	entries->irqchip.irqchip = 0;
	entries->irqchip.pin = gsi;
	return 1;
}

int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	return pin;
}