/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Interrupt delivery
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <asm/page.h>
#include <asm/cacheflush.h>

#include <linux/kvm_host.h>

#include "interrupt.h"

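/*
 * Each interrupt priority is tracked as a single bit in
 * vcpu->arch.pending_exceptions; queueing or dequeueing an interrupt
 * simply sets or clears that bit.
 */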
void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
{
	clear_bit(priority, &vcpu->arch.pending_exceptions);
}

void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	/*
	 * Set the Cause bits to reflect the pending timer interrupt;
	 * the EXC code will be set when the interrupt is actually
	 * delivered.
	 */
	kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));

	/* Queue up an INT exception for the core */
	kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
	kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
			      struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	/*
	 * Set the Cause bits to reflect the pending IO interrupt;
	 * the EXC code will be set when the interrupt is actually
	 * delivered.
	 */
	switch (intr) {
	case 2:
		kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
		/* Queue up an INT exception for the core */
		kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IO);
		break;

	case 3:
		kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
		kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
		break;

	case 4:
		kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
		kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
		break;

	default:
		break;
	}
}

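/*
 * Mirror of kvm_mips_queue_io_int_cb(): negative interrupt numbers
 * (-2/-3/-4) clear the Cause.IP bit and pending-exception bit that the
 * corresponding positive number set.
 */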
void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
				struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	switch (intr) {
	case -2:
		kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
		kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
		break;

	case -3:
		kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
		kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
		break;

	case -4:
		kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
		kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
		break;

	default:
		break;
	}
}

/* Deliver the interrupt of the corresponding priority, if possible. */
int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
			    uint32_t cause)
{
	int allowed = 0;
	uint32_t exccode;

	struct kvm_vcpu_arch *arch = &vcpu->arch;
	struct mips_coproc *cop0 = vcpu->arch.cop0;

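	/*
	 * An interrupt may only be delivered if the guest has interrupts
	 * enabled (Status.IE), is not already handling an exception or
	 * error (Status.EXL and Status.ERL both clear), and has unmasked
	 * the corresponding interrupt line in Status.IM.
	 */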
	switch (priority) {
	case MIPS_EXC_INT_TIMER:
		if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
		    && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
		    && (kvm_read_c0_guest_status(cop0) & IE_IRQ5)) {
			allowed = 1;
			exccode = T_INT;
		}
		break;

	case MIPS_EXC_INT_IO:
		if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
		    && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
		    && (kvm_read_c0_guest_status(cop0) & IE_IRQ0)) {
			allowed = 1;
			exccode = T_INT;
		}
		break;

	case MIPS_EXC_INT_IPI_1:
		if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
		    && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
		    && (kvm_read_c0_guest_status(cop0) & IE_IRQ1)) {
			allowed = 1;
			exccode = T_INT;
		}
		break;

	case MIPS_EXC_INT_IPI_2:
		if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
		    && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
		    && (kvm_read_c0_guest_status(cop0) & IE_IRQ2)) {
			allowed = 1;
			exccode = T_INT;
		}
		break;

	default:
		break;
	}

	/* Are we allowed to deliver the interrupt ??? */
	if (allowed) {
		if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
			/* save old pc */
			kvm_write_c0_guest_epc(cop0, arch->pc);
			kvm_set_c0_guest_status(cop0, ST0_EXL);

			if (cause & CAUSEF_BD)
				kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
			else
				kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

			kvm_debug("Delivering INT @ pc %#lx\n", arch->pc);

		} else
			kvm_err("Trying to deliver interrupt when EXL is already set\n");

		kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE,
					  (exccode << CAUSEB_EXCCODE));

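		/*
		 * Cause.IV selects the dedicated interrupt vector at offset
		 * 0x200 from the exception base; otherwise the general
		 * exception vector at offset 0x180 is used.
		 */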
		/* XXXSL Set PC to the interrupt exception entry point */
		if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV)
			arch->pc = KVM_GUEST_KSEG0 + 0x200;
		else
			arch->pc = KVM_GUEST_KSEG0 + 0x180;

		clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

	return allowed;
}

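/*
 * There is no additional per-interrupt state to tear down here, so a clear
 * request is always reported as handled.
 */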
int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
			  uint32_t cause)
{
	return 1;
}

void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr;
	unsigned int priority;

	if (!(*pending) && !(*pending_clr))
		return;

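	/*
	 * First pass: walk the "clear" bitmap in priority order and let the
	 * platform callback acknowledge each clear request.
	 */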
	priority = __ffs(*pending_clr);
	while (priority <= MIPS_EXC_MAX) {
		if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) {
			if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE)
				break;
		}

		priority = find_next_bit(pending_clr,
					 BITS_PER_BYTE * sizeof(*pending_clr),
					 priority + 1);
	}

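	/*
	 * Second pass: walk the pending bitmap in priority order and deliver
	 * each interrupt that the guest is currently able to take.
	 */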
	priority = __ffs(*pending);
	while (priority <= MIPS_EXC_MAX) {
		if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) {
			if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE)
				break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}
}

int kvm_mips_pending_timer(struct kvm_vcpu *vcpu)
{
	return test_bit(MIPS_EXC_INT_TIMER, &vcpu->arch.pending_exceptions);
}