#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
int intr_remapping_enabled;

struct irq_2_iommu {
	struct intel_iommu *iommu;
	u16 irte_index;
	u16 sub_handle;
	u8  irte_mask;
};

#ifdef CONFIG_SPARSE_IRQ
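/* Allocate an irq_2_iommu mapping structure on the node of the given CPU. */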
static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu)
{
	struct irq_2_iommu *iommu;
	int node;

	node = cpu_to_node(cpu);

	iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
	printk(KERN_DEBUG "alloc irq_2_iommu on cpu %d node %d\n", cpu, node);

	return iommu;
}

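/* Look up the irq_2_iommu mapping hanging off the IRQ descriptor, if any. */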
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);

	if (WARN_ON_ONCE(!desc))
		return NULL;

	return desc->irq_2_iommu;
}

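/*
 * Return the irq_2_iommu mapping for an IRQ, allocating the IRQ descriptor
 * and the mapping on the given CPU's node if they do not exist yet.
 */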
static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc;
	struct irq_2_iommu *irq_iommu;

	/*
	 * alloc irq desc if not allocated already.
	 */
	desc = irq_to_desc_alloc_cpu(irq, cpu);
	if (!desc) {
		printk(KERN_INFO "can not get irq_desc for %d\n", irq);
		return NULL;
	}

	irq_iommu = desc->irq_2_iommu;

	if (!irq_iommu)
		desc->irq_2_iommu = get_one_free_irq_2_iommu(cpu);

	return desc->irq_2_iommu;
}

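/* Allocate the mapping on the boot CPU's node by default. */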
static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	return irq_2_iommu_alloc_cpu(irq, boot_cpu_id);
}

#else /* !CONFIG_SPARSE_IRQ */

static struct irq_2_iommu irq_2_iommuX[NR_IRQS];

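/* Without sparse IRQs the mappings live in a static per-IRQ array. */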
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	if (irq < nr_irqs)
		return &irq_2_iommuX[irq];

	return NULL;
}
static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	return irq_2_iommu(irq);
}
#endif

static DEFINE_SPINLOCK(irq_2_ir_lock);

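/* Return the mapping only if it has actually been bound to an IOMMU. */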
static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
	struct irq_2_iommu *irq_iommu;

	irq_iommu = irq_2_iommu(irq);

	if (!irq_iommu)
		return NULL;

	if (!irq_iommu->iommu)
		return NULL;

	return irq_iommu;
}

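/* Nonzero if the IRQ currently goes through an interrupt-remapping table entry. */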
int irq_remapped(int irq)
{
	return valid_irq_2_iommu(irq) != NULL;
}

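/* Copy the IRTE backing this IRQ into *entry; returns -1 if there is none. */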
int get_irte(int irq, struct irte *entry)
{
	int index;
	struct irq_2_iommu *irq_iommu;

	if (!entry)
		return -1;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	spin_unlock(&irq_2_ir_lock);
	return 0;
}

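/*
 * Allocate 'count' contiguous IRTEs for an IRQ. 'count' is rounded up to a
 * power of two and the corresponding mask is recorded so the whole block can
 * later be flushed and freed together. Returns the starting index on success,
 * -1 on failure.
 */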
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu;
	u16 index, start_index;
	unsigned int mask = 0;
	int i;

	if (!count)
		return -1;

#ifndef CONFIG_SPARSE_IRQ
	/* protect irq_2_iommu_alloc later */
	if (irq >= nr_irqs)
		return -1;
#endif

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	spin_lock(&irq_2_ir_lock);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			spin_unlock(&irq_2_ir_lock);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_iommu = irq_2_iommu_alloc(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	spin_unlock(&irq_2_ir_lock);

	return index;
}

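/* Queue a selective Interrupt Entry Cache invalidation and wait for it. */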
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}

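/* Return the base IRTE index for an IRQ and report its sub_handle. */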
int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	int index;
	struct irq_2_iommu *irq_iommu;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	spin_unlock(&irq_2_ir_lock);
	return index;
}

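/* Bind an IRQ to an already-allocated IRTE block at (index, subhandle). */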
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu;

	spin_lock(&irq_2_ir_lock);

	irq_iommu = irq_2_iommu_alloc(irq);

	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	spin_unlock(&irq_2_ir_lock);

	return 0;
}

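/* Drop the IRQ's association with its IRTE without touching the table itself. */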
int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
	struct irq_2_iommu *irq_iommu;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	spin_unlock(&irq_2_ir_lock);

	return 0;
}

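/* Rewrite the IRTE for an IRQ and invalidate the interrupt entry cache. */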
int modify_irte(int irq, struct irte *irte_modified)
{
	int rc;
	int index;
	struct irte *irte;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	spin_unlock(&irq_2_ir_lock);

	return rc;
}

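/* Invalidate the interrupt entry cache for the IRTE block backing this IRQ. */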
int flush_irte(int irq)
{
	int rc;
	int index;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
	spin_unlock(&irq_2_ir_lock);

	return rc;
}

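/* Find the interrupt-remapping hardware unit that covers a given IO-APIC. */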
struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic)
			return ir_ioapic[i].iommu;
	return NULL;
}

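/* Find the interrupt-remapping hardware unit that covers a PCI device. */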
struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}

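/*
 * Release the IRTE block for an IRQ. Only the owner of the block (the IRQ
 * with sub_handle 0) clears the table entries and flushes the entry cache;
 * sub-handle users just drop their reference.
 */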
int free_irte(int irq)
{
	int rc = 0;
	int index, i;
	struct irte *irte;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	if (!irq_iommu->sub_handle) {
		/* clear both 64-bit halves of every IRTE in the block */
		for (i = 0; i < (1 << irq_iommu->irte_mask); i++) {
			set_64bit((unsigned long *)&irte[i].low, 0);
			set_64bit((unsigned long *)&irte[i].high, 0);
		}
		rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
	}

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	spin_unlock(&irq_2_ir_lock);

	return rc;
}

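/*
 * Program the interrupt-remapping table address into the IOMMU, flush the
 * interrupt entry cache globally, then enable interrupt remapping.
 */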
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 cmd, sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	cmd = iommu->gcmd | DMA_GCMD_SIRTP;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	cmd = iommu->gcmd | DMA_GCMD_IRE;
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}


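/* Allocate the interrupt-remapping table for an IOMMU and program it. */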
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_KERNEL);

	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);

	if (!pages) {
		printk(KERN_ERR "failed to allocate pages of order %d\n",
		       INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);

	iommu_set_intr_remapping(iommu, mode);
	return 0;
}

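/*
 * Enable interrupt remapping on every DRHD that supports it: verify EIM
 * support when requested, enable queued invalidation everywhere, then set up
 * the remapping tables. Returns 0 on success, -1 on failure.
 */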
int __init enable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;

	/*
	 * check for the Interrupt-remapping support
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD,"
			       " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			return -1;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_enable_qi(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued"
			       " invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			return -1;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (setup_intr_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	if (!setup)
		goto error;

	intr_remapping_enabled = 1;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}

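/*
 * Walk the device scope entries of a DRHD and record which IO-APICs are
 * covered by this interrupt-remapping hardware unit.
 */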
static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
				 struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base"
			       " 0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_ioapic[ir_ioapic_num].iommu = iommu;
			ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
			ir_ioapic_num++;
		}
		start += scope->length;
	}

	return 0;
}

/*
 * Finds the association between IO-APICs and their interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	int ir_supported = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}
	}

	if (ir_supported && ir_ioapic_num != nr_ioapics) {
		printk(KERN_WARNING
		       "Not all IO-APICs listed under remapping hardware\n");
		return -1;
	}

	return ir_supported;
}