/*
 * 8259 interrupt controller emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *   Port from Qemu.
 */
#include <linux/mm.h>
#include <linux/bitops.h>
#include "irq.h"

#include <linux/kvm_host.h>

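/*
 * Serialize access to the shared PIC state.
 */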
static void pic_lock(struct kvm_pic *s)
{
	spin_lock(&s->lock);
}

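/*
 * Drop the PIC lock, then deliver any acks that were queued while it
 * was held and kick vcpu 0 if a wakeup was requested. Both callbacks
 * run only after the spinlock has been released.
 */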
static void pic_unlock(struct kvm_pic *s)
{
	struct kvm *kvm = s->kvm;
	unsigned acks = s->pending_acks;
	bool wakeup = s->wakeup_needed;
	struct kvm_vcpu *vcpu;

	s->pending_acks = 0;
	s->wakeup_needed = false;

	spin_unlock(&s->lock);

	while (acks) {
		kvm_notify_acked_irq(kvm, __ffs(acks));
		acks &= acks - 1;
	}

	if (wakeup) {
		vcpu = s->kvm->vcpus[0];
		if (vcpu)
			kvm_vcpu_kick(vcpu);
	}
}

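/*
 * Clear the in-service bit for 'irq' and record the acknowledgement
 * in isr_ack.
 */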
static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
{
	s->isr &= ~(1 << irq);
	s->isr_ack |= (1 << irq);
}

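/*
 * Forget all recorded ISR acknowledgements on both PICs.
 */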
void kvm_pic_clear_isr_ack(struct kvm *kvm)
{
	struct kvm_pic *s = pic_irqchip(kvm);
	s->pics[0].isr_ack = 0xff;
	s->pics[1].isr_ack = 0xff;
}

/*
 * set irq level. If an edge is detected, then the IRR is set to 1
 */
static inline void pic_set_irq1(struct kvm_kpic_state *s, int irq, int level)
{
	int mask;
	mask = 1 << irq;
	if (s->elcr & mask)	/* level triggered */
		if (level) {
			s->irr |= mask;
			s->last_irr |= mask;
		} else {
			s->irr &= ~mask;
			s->last_irr &= ~mask;
		}
	else	/* edge triggered */
		if (level) {
			if ((s->last_irr & mask) == 0)
				s->irr |= mask;
			s->last_irr |= mask;
		} else
			s->last_irr &= ~mask;
}

/*
 * return the highest priority found in mask (highest = smallest
 * number). Return 8 if no irq
 */
static inline int get_priority(struct kvm_kpic_state *s, int mask)
{
	int priority;
	if (mask == 0)
		return 8;
	priority = 0;
	while ((mask & (1 << ((priority + s->priority_add) & 7))) == 0)
		priority++;
	return priority;
}

/*
 * return the pic wanted interrupt. return -1 if none
 */
static int pic_get_irq(struct kvm_kpic_state *s)
{
	int mask, cur_priority, priority;

	mask = s->irr & ~s->imr;
	priority = get_priority(s, mask);
	if (priority == 8)
		return -1;
	/*
	 * compute current priority. If special fully nested mode on the
	 * master, the IRQ coming from the slave is not taken into account
	 * for the priority computation.
	 */
	mask = s->isr;
	if (s->special_fully_nested_mode && s == &s->pics_state->pics[0])
		mask &= ~(1 << 2);
	cur_priority = get_priority(s, mask);
	if (priority < cur_priority)
		/*
		 * higher priority found: an irq should be generated
		 */
		return (priority + s->priority_add) & 7;
	else
		return -1;
}

/*
 * raise irq to CPU if necessary. must be called every time the active
 * irq may change
 */
static void pic_update_irq(struct kvm_pic *s)
{
	int irq2, irq;

	irq2 = pic_get_irq(&s->pics[1]);
	if (irq2 >= 0) {
		/*
		 * if an irq is requested by the slave pic, signal the master PIC
		 */
		pic_set_irq1(&s->pics[0], 2, 1);
		pic_set_irq1(&s->pics[0], 2, 0);
	}
	irq = pic_get_irq(&s->pics[0]);
	if (irq >= 0)
		s->irq_request(s->irq_request_opaque, 1);
	else
		s->irq_request(s->irq_request_opaque, 0);
}

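/*
 * Re-evaluate the PIC output with the PIC lock held.
 */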
void kvm_pic_update_irq(struct kvm_pic *s)
{
	pic_lock(s);
	pic_update_irq(s);
	pic_unlock(s);
}

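/*
 * Set the level of one of the 16 PIC input pins and propagate the
 * result to the interrupt request line.
 */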
void kvm_pic_set_irq(void *opaque, int irq, int level)
{
	struct kvm_pic *s = opaque;

	pic_lock(s);
	if (irq >= 0 && irq < PIC_NUM_PINS) {
		pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
		pic_update_irq(s);
	}
	pic_unlock(s);
}

/*
 * acknowledge interrupt 'irq'
 */
static inline void pic_intack(struct kvm_kpic_state *s, int irq)
{
	s->isr |= 1 << irq;
	if (s->auto_eoi) {
		if (s->rotate_on_auto_eoi)
			s->priority_add = (irq + 1) & 7;
		pic_clear_isr(s, irq);
	}
	/*
	 * We don't clear a level sensitive interrupt here
	 */
	if (!(s->elcr & (1 << irq)))
		s->irr &= ~(1 << irq);
}

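/*
 * Interrupt acknowledge cycle: return the vector of the highest
 * priority pending interrupt, or the spurious vector (irq_base + 7 of
 * the relevant controller) if nothing is pending.
 */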
int kvm_pic_read_irq(struct kvm *kvm)
{
	int irq, irq2, intno;
	struct kvm_pic *s = pic_irqchip(kvm);

	pic_lock(s);
	irq = pic_get_irq(&s->pics[0]);
	if (irq >= 0) {
		pic_intack(&s->pics[0], irq);
		if (irq == 2) {
			irq2 = pic_get_irq(&s->pics[1]);
			if (irq2 >= 0)
				pic_intack(&s->pics[1], irq2);
			else
				/*
				 * spurious IRQ on slave controller
				 */
				irq2 = 7;
			intno = s->pics[1].irq_base + irq2;
			irq = irq2 + 8;
		} else
			intno = s->pics[0].irq_base + irq;
	} else {
		/*
		 * spurious IRQ on host controller
		 */
		irq = 7;
		intno = s->pics[0].irq_base + irq;
	}
	pic_update_irq(s);
	pic_unlock(s);
	kvm_notify_acked_irq(kvm, irq);

	return intno;
}

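/*
 * Reset one PIC to its power-on state. Interrupts that were pending
 * or in service are queued in pending_acks (when vcpu 0 accepts PIC
 * interrupts) so their ack notifiers still run at unlock time.
 */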
void kvm_pic_reset(struct kvm_kpic_state *s)
{
	int irq, irqbase, n;
	struct kvm *kvm = s->pics_state->irq_request_opaque;
	struct kvm_vcpu *vcpu0 = kvm->vcpus[0];

	if (s == &s->pics_state->pics[0])
		irqbase = 0;
	else
		irqbase = 8;

	for (irq = 0; irq < PIC_NUM_PINS/2; irq++) {
		if (vcpu0 && kvm_apic_accept_pic_intr(vcpu0))
			if (s->irr & (1 << irq) || s->isr & (1 << irq)) {
				n = irq + irqbase;
				s->pics_state->pending_acks |= 1 << n;
			}
	}
	s->last_irr = 0;
	s->irr = 0;
	s->imr = 0;
	s->isr = 0;
	s->isr_ack = 0xff;
	s->priority_add = 0;
	s->irq_base = 0;
	s->read_reg_select = 0;
	s->poll = 0;
	s->special_mask = 0;
	s->init_state = 0;
	s->auto_eoi = 0;
	s->rotate_on_auto_eoi = 0;
	s->special_fully_nested_mode = 0;
	s->init4 = 0;
}

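/*
 * Handle a write to a PIC command/data port: ICW1-ICW4 initialization,
 * OCW2/OCW3 commands, and IMR updates.
 */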
static void pic_ioport_write(void *opaque, u32 addr, u32 val)
{
	struct kvm_kpic_state *s = opaque;
	int priority, cmd, irq;

	addr &= 1;
	if (addr == 0) {
		if (val & 0x10) {
			kvm_pic_reset(s);	/* init */
			/*
			 * deassert a pending interrupt
			 */
			s->pics_state->irq_request(s->pics_state->
						   irq_request_opaque, 0);
			s->init_state = 1;
			s->init4 = val & 1;
			if (val & 0x02)
				printk(KERN_ERR "single mode not supported\n");
			if (val & 0x08)
				printk(KERN_ERR
				       "level sensitive irq not supported\n");
		} else if (val & 0x08) {
			if (val & 0x04)
				s->poll = 1;
			if (val & 0x02)
				s->read_reg_select = val & 1;
			if (val & 0x40)
				s->special_mask = (val >> 5) & 1;
		} else {
			cmd = val >> 5;
			switch (cmd) {
			case 0:
			case 4:
				s->rotate_on_auto_eoi = cmd >> 2;
				break;
			case 1:	/* end of interrupt */
			case 5:
				priority = get_priority(s, s->isr);
				if (priority != 8) {
					irq = (priority + s->priority_add) & 7;
					pic_clear_isr(s, irq);
					if (cmd == 5)
						s->priority_add = (irq + 1) & 7;
					pic_update_irq(s->pics_state);
				}
				break;
			case 3:
				irq = val & 7;
				pic_clear_isr(s, irq);
				pic_update_irq(s->pics_state);
				break;
			case 6:
				s->priority_add = (val + 1) & 7;
				pic_update_irq(s->pics_state);
				break;
			case 7:
				irq = val & 7;
				s->priority_add = (irq + 1) & 7;
				pic_clear_isr(s, irq);
				pic_update_irq(s->pics_state);
				break;
			default:
				break;	/* no operation */
			}
		}
	} else
		switch (s->init_state) {
		case 0:		/* normal mode */
			s->imr = val;
			pic_update_irq(s->pics_state);
			break;
		case 1:
			s->irq_base = val & 0xf8;
			s->init_state = 2;
			break;
		case 2:
			if (s->init4)
				s->init_state = 3;
			else
				s->init_state = 0;
			break;
		case 3:
			s->special_fully_nested_mode = (val >> 4) & 1;
			s->auto_eoi = (val >> 1) & 1;
			s->init_state = 0;
			break;
		}
}

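/*
 * Poll-mode read: acknowledge and return the highest priority pending
 * interrupt, or 0x07 if there is none. A poll of the slave (addr1 bit
 * 7 set) also clears the cascade IRQ 2 on the master.
 */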
static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1)
{
	int ret;

	ret = pic_get_irq(s);
	if (ret >= 0) {
		if (addr1 >> 7) {
			s->pics_state->pics[0].isr &= ~(1 << 2);
			s->pics_state->pics[0].irr &= ~(1 << 2);
		}
		s->irr &= ~(1 << ret);
		pic_clear_isr(s, ret);
		if (addr1 >> 7 || ret != 2)
			pic_update_irq(s->pics_state);
	} else {
		ret = 0x07;
		pic_update_irq(s->pics_state);
	}

	return ret;
}

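/*
 * Read a PIC register: the poll result if polling was requested via
 * OCW3, otherwise IRR or ISR from the command port (per the read
 * register select) or IMR from the data port.
 */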
static u32 pic_ioport_read(void *opaque, u32 addr1)
{
	struct kvm_kpic_state *s = opaque;
	unsigned int addr;
	int ret;

	addr = addr1;
	addr &= 1;
	if (s->poll) {
		ret = pic_poll_read(s, addr1);
		s->poll = 0;
	} else
		if (addr == 0)
			if (s->read_reg_select)
				ret = s->isr;
			else
				ret = s->irr;
		else
			ret = s->imr;
	return ret;
}

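/* Write the edge/level control register (ELCR). */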
static void elcr_ioport_write(void *opaque, u32 addr, u32 val)
{
	struct kvm_kpic_state *s = opaque;
	s->elcr = val & s->elcr_mask;
}

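/* Read back the edge/level control register (ELCR). */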
static u32 elcr_ioport_read(void *opaque, u32 addr1)
{
	struct kvm_kpic_state *s = opaque;
	return s->elcr;
}

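/*
 * The PIC claims the master (0x20-0x21), slave (0xa0-0xa1) and ELCR
 * (0x4d0-0x4d1) I/O ports.
 */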
static int picdev_in_range(struct kvm_io_device *this, gpa_t addr,
			   int len, int is_write)
{
	switch (addr) {
	case 0x20:
	case 0x21:
	case 0xa0:
	case 0xa1:
	case 0x4d0:
	case 0x4d1:
		return 1;
	default:
		return 0;
	}
}

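/*
 * kvm_io_device write handler: dispatch single-byte writes to the
 * addressed PIC or to the ELCR.
 */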
static void picdev_write(struct kvm_io_device *this,
			 gpa_t addr, int len, const void *val)
{
	struct kvm_pic *s = this->private;
	unsigned char data = *(unsigned char *)val;

	if (len != 1) {
		if (printk_ratelimit())
			printk(KERN_ERR "PIC: non byte write\n");
		return;
	}
	pic_lock(s);
	switch (addr) {
	case 0x20:
	case 0x21:
	case 0xa0:
	case 0xa1:
		pic_ioport_write(&s->pics[addr >> 7], addr, data);
		break;
	case 0x4d0:
	case 0x4d1:
		elcr_ioport_write(&s->pics[addr & 1], addr, data);
		break;
	}
	pic_unlock(s);
}

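/*
 * kvm_io_device read handler: dispatch single-byte reads to the
 * addressed PIC or to the ELCR.
 */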
static void picdev_read(struct kvm_io_device *this,
			gpa_t addr, int len, void *val)
{
	struct kvm_pic *s = this->private;
	unsigned char data = 0;

	if (len != 1) {
		if (printk_ratelimit())
			printk(KERN_ERR "PIC: non byte read\n");
		return;
	}
	pic_lock(s);
	switch (addr) {
	case 0x20:
	case 0x21:
	case 0xa0:
	case 0xa1:
		data = pic_ioport_read(&s->pics[addr >> 7], addr);
		break;
	case 0x4d0:
	case 0x4d1:
		data = elcr_ioport_read(&s->pics[addr & 1], addr);
		break;
	}
	*(unsigned char *)val = data;
	pic_unlock(s);
}

/*
 * callback invoked when the PIC0 irq status changes
 */
static void pic_irq_request(void *opaque, int level)
{
	struct kvm *kvm = opaque;
	struct kvm_vcpu *vcpu = kvm->vcpus[0];
	struct kvm_pic *s = pic_irqchip(kvm);
	int irq = pic_get_irq(&s->pics[0]);

	s->output = level;
	if (vcpu && level && (s->pics[0].isr_ack & (1 << irq))) {
		s->pics[0].isr_ack &= ~(1 << irq);
		s->wakeup_needed = true;
	}
}

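/*
 * Allocate and initialize the virtual master/slave PIC pair and
 * register it on the PIO bus. Returns NULL if allocation fails.
 */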
struct kvm_pic *kvm_create_pic(struct kvm *kvm)
{
	struct kvm_pic *s;
	s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL);
	if (!s)
		return NULL;
	spin_lock_init(&s->lock);
	s->kvm = kvm;
	s->pics[0].elcr_mask = 0xf8;
	s->pics[1].elcr_mask = 0xde;
	s->irq_request = pic_irq_request;
	s->irq_request_opaque = kvm;
	s->pics[0].pics_state = s;
	s->pics[1].pics_state = s;

	/*
	 * Initialize PIO device
	 */
	s->dev.read = picdev_read;
	s->dev.write = picdev_write;
	s->dev.in_range = picdev_in_range;
	s->dev.private = s;
	kvm_io_bus_register_dev(&kvm->pio_bus, &s->dev);
	return s;
}