/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Interrupt delivery
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/memblock.h>
#include <asm/page.h>
#include <asm/cacheflush.h>

#include <linux/kvm_host.h>

#include "interrupt.h"

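/* Mark the interrupt of the given priority as pending for this vCPU. */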
void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

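/* Clear the pending bit for the interrupt of the given priority. */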
void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
	clear_bit(priority, &vcpu->arch.pending_exceptions);
}

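/* Callback to assert (queue) the guest timer interrupt. */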
void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	/*
	 * Set the Cause bits to reflect the pending timer interrupt;
	 * the EXC code will be set when we are actually
	 * delivering the interrupt:
	 */
	kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));

	/* Queue up an INT exception for the core */
	kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

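/* Callback to deassert (dequeue) the guest timer interrupt. */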
void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
	kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

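/*
 * Callback to assert a device interrupt for the guest: irq numbers 2, 3 and 4
 * map to guest Cause bits IRQ0, IRQ1 and IRQ2 (I/O, IPI 1 and IPI 2).
 */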
void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
			      struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	/*
	 * Set the Cause bits to reflect the pending IO interrupt;
	 * the EXC code will be set when we are actually
	 * delivering the interrupt:
	 */
	switch (intr) {
	case 2:
		kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
		/* Queue up an INT exception for the core */
		kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IO);
		break;

	case 3:
		kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
		kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
		break;

	case 4:
		kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
		kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
		break;

	default:
		break;
	}
}

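/*
 * Callback to deassert a device interrupt: negative irq numbers (-2, -3, -4)
 * select the same sources as the positive numbers used when queueing.
 */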
void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
				struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	switch (intr) {
	case -2:
		kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
		kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
		break;

	case -3:
		kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
		kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
		break;

	case -4:
		kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
		kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
		break;

	default:
		break;
	}
}

/* Deliver the interrupt of the corresponding priority, if possible. */
int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
			    u32 cause)
{
	int allowed = 0;
	u32 exccode;

	struct kvm_vcpu_arch *arch = &vcpu->arch;
	struct mips_coproc *cop0 = vcpu->arch.cop0;

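	/*
	 * The interrupt is deliverable only when guest interrupts are
	 * globally enabled (Status.IE), the guest is not already handling
	 * an exception (Status.EXL/ERL clear), and the interrupt mask bit
	 * for this particular source is set.
	 */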
	switch (priority) {
	case MIPS_EXC_INT_TIMER:
		if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
		    && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
		    && (kvm_read_c0_guest_status(cop0) & IE_IRQ5)) {
			allowed = 1;
			exccode = EXCCODE_INT;
		}
		break;

	case MIPS_EXC_INT_IO:
		if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
		    && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
		    && (kvm_read_c0_guest_status(cop0) & IE_IRQ0)) {
			allowed = 1;
			exccode = EXCCODE_INT;
		}
		break;

	case MIPS_EXC_INT_IPI_1:
		if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
		    && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
		    && (kvm_read_c0_guest_status(cop0) & IE_IRQ1)) {
			allowed = 1;
			exccode = EXCCODE_INT;
		}
		break;

	case MIPS_EXC_INT_IPI_2:
		if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
		    && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
		    && (kvm_read_c0_guest_status(cop0) & IE_IRQ2)) {
			allowed = 1;
			exccode = EXCCODE_INT;
		}
		break;

	default:
		break;
	}

	/* Are we allowed to deliver the interrupt? */
	if (allowed) {
		if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
			/* save old pc */
			kvm_write_c0_guest_epc(cop0, arch->pc);
			kvm_set_c0_guest_status(cop0, ST0_EXL);

			if (cause & CAUSEF_BD)
				kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
			else
				kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

			kvm_debug("Delivering INT @ pc %#lx\n", arch->pc);
		} else {
			kvm_err("Trying to deliver interrupt when EXL is already set\n");
		}

		kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE,
					  (exccode << CAUSEB_EXCCODE));

		/* XXXSL Set PC to the interrupt exception entry point */
		arch->pc = kvm_mips_guest_exception_base(vcpu);
		if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV)
			arch->pc += 0x200;
		else
			arch->pc += 0x180;

		clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

	return allowed;
}

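/*
 * Callback invoked for each priority queued for clearing; nothing more needs
 * to be done here, so simply report the request as handled.
 */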
int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
			  u32 cause)
{
	return 1;
}

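/*
 * Walk the vCPU's pending exception bitmaps: first process interrupts queued
 * for clearing, then deliver interrupts that are still pending, scanning each
 * bitmap from the lowest-numbered priority upwards.
 */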
void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr;
	unsigned int priority;

	if (!(*pending) && !(*pending_clr))
		return;

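	/* Process interrupts queued for clearing first. */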
	priority = __ffs(*pending_clr);
	while (priority <= MIPS_EXC_MAX) {
		if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) {
			if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE)
				break;
		}

		priority = find_next_bit(pending_clr,
					 BITS_PER_BYTE * sizeof(*pending_clr),
					 priority + 1);
	}

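	/* Then deliver the interrupts that are still pending. */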
	priority = __ffs(*pending);
	while (priority <= MIPS_EXC_MAX) {
		if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) {
			if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE)
				break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}
}

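/* Return whether a guest timer interrupt is currently pending. */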
int kvm_mips_pending_timer(struct kvm_vcpu *vcpu)
{
	return test_bit(MIPS_EXC_INT_TIMER, &vcpu->arch.pending_exceptions);
}