/*
 * Copyright (C) 2007-2011 Freescale Semiconductor, Inc.
 *
 * Author: Tony Li <tony.li@freescale.com>
 *	   Jason Jin <Jason.jin@freescale.com>
 *
 * The hwirq alloc and free code is reused from sysdev/mpic_msi.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2 of the
 * License.
 *
 */
#include <linux/irq.h>
#include <linux/bootmem.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <sysdev/fsl_soc.h>
#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/mpic.h>
#include <asm/fsl_hcalls.h>

#include "fsl_msi.h"
#include "fsl_pci.h"

#define MSIIR_OFFSET_MASK	0xfffff
#define MSIIR_IBS_SHIFT		0
#define MSIIR_SRS_SHIFT		5
#define MSIIR1_IBS_SHIFT	4
#define MSIIR1_SRS_SHIFT	0
#define MSI_SRS_MASK		0xf
#define MSI_IBS_MASK		0x1f

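/*
 * Compose a hardware MSI number from the MSIR register index (SRS field)
 * and the bit index within that register (IBS field), using the per-bank
 * shifts set up in fsl_of_msi_probe().  Illustrative example, assuming the
 * MSIIR layout (srs_shift = 5, ibs_shift = 0):
 *
 *	msi_hwirq(msi, 2, 3) == (2 << 5) | 3 == 0x43
 */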
#define msi_hwirq(msi, msir_index, intr_index) \
		((msir_index) << (msi)->srs_shift | \
		 ((intr_index) << (msi)->ibs_shift))

static LIST_HEAD(msi_head);

struct fsl_msi_feature {
	u32 fsl_pic_ip;
	u32 msiir_offset; /* Offset of MSIIR, relative to start of MSIR bank */
};

struct fsl_msi_cascade_data {
	struct fsl_msi *msi_data;
	int index;
	int virq;
};

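/* Read a 32-bit big-endian MSI register at byte offset 'reg' from 'base'. */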
static inline u32 fsl_msi_read(u32 __iomem *base, unsigned int reg)
{
	return in_be32(base + (reg >> 2));
}

/*
 * We do not actually need this: the MSIR register has already been read
 * in the cascade handler, so this MSI interrupt has been acked.
 */
static void fsl_msi_end_irq(struct irq_data *d)
{
}

static void fsl_msi_print_chip(struct irq_data *irqd, struct seq_file *p)
{
	struct fsl_msi *msi_data = irqd->domain->host_data;
	irq_hw_number_t hwirq = irqd_to_hwirq(irqd);
	int cascade_virq, srs;

	srs = (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK;
	cascade_virq = msi_data->cascade_array[srs]->virq;

	seq_printf(p, " fsl-msi-%d", cascade_virq);
}


static struct irq_chip fsl_msi_chip = {
	.irq_mask	= mask_msi_irq,
	.irq_unmask	= unmask_msi_irq,
	.irq_ack	= fsl_msi_end_irq,
	.irq_print_chip = fsl_msi_print_chip,
};

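/*
 * irq_domain map() callback: attach the MSI chip and the per-bank fsl_msi
 * data to a newly created virq and handle it as an edge interrupt.
 */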
static int fsl_msi_host_map(struct irq_domain *h, unsigned int virq,
				irq_hw_number_t hw)
{
	struct fsl_msi *msi_data = h->host_data;
	struct irq_chip *chip = &fsl_msi_chip;

	irq_set_status_flags(virq, IRQ_TYPE_EDGE_FALLING);

	irq_set_chip_data(virq, msi_data);
	irq_set_chip_and_handler(virq, chip, handle_edge_irq);

	return 0;
}

static const struct irq_domain_ops fsl_msi_host_ops = {
	.map = fsl_msi_host_map,
};

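/*
 * Create the hwirq bitmap for this MSI bank.  Every hwirq starts out
 * reserved; fsl_msi_setup_hwirq() later releases the ones that are
 * actually backed by an MSIR register.
 */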
static int fsl_msi_init_allocator(struct fsl_msi *msi_data)
{
	int rc, hwirq;

	rc = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS_MAX,
			      msi_data->irqhost->of_node);
	if (rc)
		return rc;

	/*
	 * Reserve all the hwirqs
	 * The available hwirqs will be released in fsl_msi_setup_hwirq()
	 */
	for (hwirq = 0; hwirq < NR_MSI_IRQS_MAX; hwirq++)
		msi_bitmap_reserve_hwirq(&msi_data->bitmap, hwirq);

	return 0;
}

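/*
 * Undo fsl_setup_msi_irqs() for a device: unbind each MSI descriptor from
 * its virq, dispose of the mapping and return the hwirq to the bitmap.
 */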
static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct msi_desc *entry;
	struct fsl_msi *msi_data;
	irq_hw_number_t hwirq;

	list_for_each_entry(entry, &pdev->msi_list, list) {
		if (entry->irq == NO_IRQ)
			continue;
		hwirq = virq_to_hw(entry->irq);
		msi_data = irq_get_chip_data(entry->irq);
		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
		msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
	}

	return;
}

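/*
 * Build the MSI message for a hwirq: the address is the physical address
 * of the MSIIR register (taken from the "msi-address-64" property if
 * present, otherwise fsl_pci_immrbar_base() plus msiir_offset), and the
 * data is the hwirq itself, i.e. the SRS/IBS encoding from msi_hwirq().
 */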
static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq,
				struct msi_msg *msg,
				struct fsl_msi *fsl_msi_data)
{
	struct fsl_msi *msi_data = fsl_msi_data;
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	u64 address; /* Physical address of the MSIIR */
	int len;
	const __be64 *reg;

	/* If the msi-address-64 property exists, then use it */
	reg = of_get_property(hose->dn, "msi-address-64", &len);
	if (reg && (len == sizeof(u64)))
		address = be64_to_cpup(reg);
	else
		address = fsl_pci_immrbar_base(hose) + msi_data->msiir_offset;

	msg->address_lo = lower_32_bits(address);
	msg->address_hi = upper_32_bits(address);

	msg->data = hwirq;

	pr_debug("%s: allocated srs: %d, ibs: %d\n", __func__,
		 (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK,
		 (hwirq >> msi_data->ibs_shift) & MSI_IBS_MASK);
}

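/*
 * ppc_md.setup_msi_irqs hook: for each MSI descriptor of the device,
 * allocate a hwirq from a matching MSI bank (honouring an "fsl,msi"
 * phandle on the PCI node, if any), map it to a virq and program the
 * device with the composed MSI message.
 */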
static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct device_node *np;
	phandle phandle = 0;
	int rc, hwirq = -ENOMEM;
	unsigned int virq;
	struct msi_desc *entry;
	struct msi_msg msg;
	struct fsl_msi *msi_data;

	if (type == PCI_CAP_ID_MSIX)
		pr_debug("fslmsi: MSI-X untested, trying anyway.\n");

	/*
	 * If the PCI node has an fsl,msi property, then we need to use it
	 * to find the specific MSI.
	 */
	np = of_parse_phandle(hose->dn, "fsl,msi", 0);
	if (np) {
		if (of_device_is_compatible(np, "fsl,mpic-msi") ||
		    of_device_is_compatible(np, "fsl,vmpic-msi") ||
		    of_device_is_compatible(np, "fsl,vmpic-msi-v4.3"))
			phandle = np->phandle;
		else {
			dev_err(&pdev->dev,
				"node %s has an invalid fsl,msi phandle %u\n",
				hose->dn->full_name, np->phandle);
			return -EINVAL;
		}
	}

	list_for_each_entry(entry, &pdev->msi_list, list) {
		/*
		 * Loop over all the MSI devices until we find one that has an
		 * available interrupt.
		 */
		list_for_each_entry(msi_data, &msi_head, list) {
			/*
			 * If the PCI node has an fsl,msi property, then we
			 * restrict our search to the corresponding MSI node.
			 * The simplest way is to skip over MSI nodes with the
			 * wrong phandle. Under the Freescale hypervisor, this
			 * has the additional benefit of skipping over MSI
			 * nodes that are not mapped in the PAMU.
			 */
			if (phandle && (phandle != msi_data->phandle))
				continue;

			hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
			if (hwirq >= 0)
				break;
		}

		if (hwirq < 0) {
			rc = hwirq;
			dev_err(&pdev->dev, "could not allocate MSI interrupt\n");
			goto out_free;
		}

		virq = irq_create_mapping(msi_data->irqhost, hwirq);

		if (virq == NO_IRQ) {
			dev_err(&pdev->dev, "failed to map hwirq %i\n", hwirq);
			msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
			rc = -ENOSPC;
			goto out_free;
		}
		/* chip_data is msi_data via host->hostdata in host->map() */
		irq_set_msi_desc(virq, entry);

		fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data);
		write_msi_msg(virq, &msg);
	}
	return 0;

out_free:
	/* Freed by the caller of this function */
	return rc;
}

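/*
 * Interrupt handler for one MSIR cascade interrupt.  Read the MSIR register
 * (directly, or via hypercall under the Freescale hypervisor) and invoke
 * the handler of every pending MSI, one per set bit.
 */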
static irqreturn_t fsl_msi_cascade(int irq, void *data)
{
	unsigned int cascade_irq;
	struct fsl_msi *msi_data;
	int msir_index = -1;
	u32 msir_value = 0;
	u32 intr_index;
	u32 have_shift = 0;
	struct fsl_msi_cascade_data *cascade_data = data;
	irqreturn_t ret = IRQ_NONE;

	msi_data = cascade_data->msi_data;

	msir_index = cascade_data->index;

	if (msir_index >= NR_MSI_REG_MAX)
		cascade_irq = NO_IRQ;

	switch (msi_data->feature & FSL_PIC_IP_MASK) {
	case FSL_PIC_IP_MPIC:
		msir_value = fsl_msi_read(msi_data->msi_regs,
			msir_index * 0x10);
		break;
	case FSL_PIC_IP_IPIC:
		msir_value = fsl_msi_read(msi_data->msi_regs, msir_index * 0x4);
		break;
#ifdef CONFIG_EPAPR_PARAVIRT
	case FSL_PIC_IP_VMPIC: {
		unsigned int ret;
		ret = fh_vmpic_get_msir(virq_to_hw(irq), &msir_value);
		if (ret) {
			pr_err("fsl-msi: fh_vmpic_get_msir() failed for "
			       "irq %u (ret=%u)\n", irq, ret);
			msir_value = 0;
		}
		break;
	}
#endif
	}

	while (msir_value) {
		intr_index = ffs(msir_value) - 1;

		cascade_irq = irq_linear_revmap(msi_data->irqhost,
				msi_hwirq(msi_data, msir_index,
					  intr_index + have_shift));
		if (cascade_irq != NO_IRQ) {
			generic_handle_irq(cascade_irq);
			ret = IRQ_HANDLED;
		}
		have_shift += intr_index + 1;
		msir_value = msir_value >> (intr_index + 1);
	}

	return ret;
}

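/*
 * Tear down an MSI bank: free the cascade interrupts and their data,
 * release the hwirq bitmap and unmap the registers.  Also used as the
 * error path of fsl_of_msi_probe().
 */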
static int fsl_of_msi_remove(struct platform_device *ofdev)
{
	struct fsl_msi *msi = platform_get_drvdata(ofdev);
	int virq, i;

	if (msi->list.prev != NULL)
		list_del(&msi->list);
	for (i = 0; i < NR_MSI_REG_MAX; i++) {
		if (msi->cascade_array[i]) {
			virq = msi->cascade_array[i]->virq;

			BUG_ON(virq == NO_IRQ);

			free_irq(virq, msi->cascade_array[i]);
			kfree(msi->cascade_array[i]);
			irq_dispose_mapping(virq);
		}
	}
	if (msi->bitmap.bitmap)
		msi_bitmap_free(&msi->bitmap);
	if ((msi->feature & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC)
		iounmap(msi->msi_regs);
	kfree(msi);

	return 0;
}

static struct lock_class_key fsl_msi_irq_class;

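/*
 * Wire up one MSIR register: map and request its cascade interrupt, record
 * the cascade data under 'irq_index', and release the corresponding block
 * of hwirqs so fsl_setup_msi_irqs() can hand them out.
 */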
static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
			       int offset, int irq_index)
{
	struct fsl_msi_cascade_data *cascade_data = NULL;
	int virt_msir, i, ret;

	virt_msir = irq_of_parse_and_map(dev->dev.of_node, irq_index);
	if (virt_msir == NO_IRQ) {
		dev_err(&dev->dev, "%s: Cannot translate IRQ index %d\n",
			__func__, irq_index);
		return 0;
	}

	cascade_data = kzalloc(sizeof(struct fsl_msi_cascade_data), GFP_KERNEL);
	if (!cascade_data) {
		dev_err(&dev->dev, "No memory for MSI cascade data\n");
		return -ENOMEM;
	}
	irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class);
	cascade_data->index = offset;
	cascade_data->msi_data = msi;
	cascade_data->virq = virt_msir;
	msi->cascade_array[irq_index] = cascade_data;

	ret = request_irq(virt_msir, fsl_msi_cascade, IRQF_NO_THREAD,
			  "fsl-msi-cascade", cascade_data);
	if (ret) {
		dev_err(&dev->dev, "failed to request_irq(%d), ret = %d\n",
			virt_msir, ret);
		return ret;
	}

	/* Release the hwirqs corresponding to this MSI register */
	for (i = 0; i < IRQS_PER_MSI_REG; i++)
		msi_bitmap_free_hwirqs(&msi->bitmap,
				       msi_hwirq(msi, offset, i), 1);

	return 0;
}

static const struct of_device_id fsl_of_msi_ids[];
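/*
 * Probe one MSI bank described in the device tree: map its registers
 * (unless running under the hypervisor), work out the MSIIR offset and
 * SRS/IBS shifts for the bank, set up the hwirq allocator and the cascade
 * interrupts, and install the ppc_md MSI hooks on first use.
 *
 * Illustrative sketch only (not taken from a specific board dts) - a bank
 * might be described by a node roughly like:
 *
 *	msi@41600 {
 *		compatible = "fsl,mpic-msi";
 *		reg = <0x41600 0x80>;
 *		msi-available-ranges = <0 0x100>;
 *	};
 */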
static int fsl_of_msi_probe(struct platform_device *dev)
{
	const struct of_device_id *match;
	struct fsl_msi *msi;
	struct resource res, msiir;
	int err, i, j, irq_index, count;
	const u32 *p;
	const struct fsl_msi_feature *features;
	int len;
	u32 offset;

	match = of_match_device(fsl_of_msi_ids, &dev->dev);
	if (!match)
		return -EINVAL;
	features = match->data;

	printk(KERN_DEBUG "Setting up Freescale MSI support\n");

	msi = kzalloc(sizeof(struct fsl_msi), GFP_KERNEL);
	if (!msi) {
		dev_err(&dev->dev, "No memory for MSI structure\n");
		return -ENOMEM;
	}
	platform_set_drvdata(dev, msi);

	msi->irqhost = irq_domain_add_linear(dev->dev.of_node,
				      NR_MSI_IRQS_MAX, &fsl_msi_host_ops, msi);

	if (msi->irqhost == NULL) {
		dev_err(&dev->dev, "No memory for MSI irqhost\n");
		err = -ENOMEM;
		goto error_out;
	}

	/*
	 * Under the Freescale hypervisor, the msi nodes don't have a 'reg'
	 * property.  Instead, we use hypercalls to access the MSI.
	 */
	if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC) {
		err = of_address_to_resource(dev->dev.of_node, 0, &res);
		if (err) {
			dev_err(&dev->dev, "invalid resource for node %s\n",
				dev->dev.of_node->full_name);
			goto error_out;
		}

		msi->msi_regs = ioremap(res.start, resource_size(&res));
		if (!msi->msi_regs) {
			err = -ENOMEM;
			dev_err(&dev->dev, "could not map node %s\n",
				dev->dev.of_node->full_name);
			goto error_out;
		}
		msi->msiir_offset =
			features->msiir_offset + (res.start & 0xfffff);

		/*
		 * First read the MSIIR/MSIIR1 offset from the dts;
		 * on failure fall back to the hardcoded MSIIR offset.
		 */
		if (of_address_to_resource(dev->dev.of_node, 1, &msiir))
			msi->msiir_offset = features->msiir_offset +
					    (res.start & MSIIR_OFFSET_MASK);
		else
			msi->msiir_offset = msiir.start & MSIIR_OFFSET_MASK;
	}

	msi->feature = features->fsl_pic_ip;

	/*
	 * Remember the phandle, so that we can match with any PCI nodes
	 * that have an "fsl,msi" property.
	 */
	msi->phandle = dev->dev.of_node->phandle;

	err = fsl_msi_init_allocator(msi);
	if (err) {
		dev_err(&dev->dev, "Error allocating MSI bitmap\n");
		goto error_out;
	}

	p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len);

	if (of_device_is_compatible(dev->dev.of_node, "fsl,mpic-msi-v4.3") ||
	    of_device_is_compatible(dev->dev.of_node, "fsl,vmpic-msi-v4.3")) {
		msi->srs_shift = MSIIR1_SRS_SHIFT;
		msi->ibs_shift = MSIIR1_IBS_SHIFT;
		if (p)
			dev_warn(&dev->dev, "%s: does not support msi-available-ranges property\n",
				__func__);

		for (irq_index = 0; irq_index < NR_MSI_REG_MSIIR1;
		     irq_index++) {
			err = fsl_msi_setup_hwirq(msi, dev,
						  irq_index, irq_index);
			if (err)
				goto error_out;
		}
	} else {
		static const u32 all_avail[] =
			{ 0, NR_MSI_REG_MSIIR * IRQS_PER_MSI_REG };

		msi->srs_shift = MSIIR_SRS_SHIFT;
		msi->ibs_shift = MSIIR_IBS_SHIFT;

		if (p && len % (2 * sizeof(u32)) != 0) {
			dev_err(&dev->dev, "%s: Malformed msi-available-ranges property\n",
				__func__);
			err = -EINVAL;
			goto error_out;
		}

		if (!p) {
			p = all_avail;
			len = sizeof(all_avail);
		}

		for (irq_index = 0, i = 0; i < len / (2 * sizeof(u32)); i++) {
			if (p[i * 2] % IRQS_PER_MSI_REG ||
			    p[i * 2 + 1] % IRQS_PER_MSI_REG) {
				pr_warn("%s: %s: msi available range of %u at %u is not IRQ-aligned\n",
				       __func__, dev->dev.of_node->full_name,
				       p[i * 2 + 1], p[i * 2]);
				err = -EINVAL;
				goto error_out;
			}

			offset = p[i * 2] / IRQS_PER_MSI_REG;
			count = p[i * 2 + 1] / IRQS_PER_MSI_REG;

			for (j = 0; j < count; j++, irq_index++) {
				err = fsl_msi_setup_hwirq(msi, dev, offset + j,
							  irq_index);
				if (err)
					goto error_out;
			}
		}
	}

	list_add_tail(&msi->list, &msi_head);

	/* Setting ppc_md.setup_msi_irqs more than once does no harm */
	if (!ppc_md.setup_msi_irqs) {
		ppc_md.setup_msi_irqs = fsl_setup_msi_irqs;
		ppc_md.teardown_msi_irqs = fsl_teardown_msi_irqs;
	} else if (ppc_md.setup_msi_irqs != fsl_setup_msi_irqs) {
		dev_err(&dev->dev, "Different MSI driver already installed!\n");
		err = -ENODEV;
		goto error_out;
	}
	return 0;
error_out:
	fsl_of_msi_remove(dev);
	return err;
}

static const struct fsl_msi_feature mpic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_MPIC,
	.msiir_offset = 0x140,
};

static const struct fsl_msi_feature ipic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_IPIC,
	.msiir_offset = 0x38,
};

static const struct fsl_msi_feature vmpic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_VMPIC,
	.msiir_offset = 0,
};

static const struct of_device_id fsl_of_msi_ids[] = {
	{
		.compatible = "fsl,mpic-msi",
		.data = &mpic_msi_feature,
	},
	{
		.compatible = "fsl,mpic-msi-v4.3",
		.data = &mpic_msi_feature,
	},
	{
		.compatible = "fsl,ipic-msi",
		.data = &ipic_msi_feature,
	},
#ifdef CONFIG_EPAPR_PARAVIRT
	{
		.compatible = "fsl,vmpic-msi",
		.data = &vmpic_msi_feature,
	},
	{
		.compatible = "fsl,vmpic-msi-v4.3",
		.data = &vmpic_msi_feature,
	},
#endif
	{}
};

static struct platform_driver fsl_of_msi_driver = {
	.driver = {
		.name = "fsl-msi",
		.owner = THIS_MODULE,
		.of_match_table = fsl_of_msi_ids,
	},
	.probe = fsl_of_msi_probe,
	.remove = fsl_of_msi_remove,
};

static __init int fsl_of_msi_init(void)
{
	return platform_driver_register(&fsl_of_msi_driver);
}

subsys_initcall(fsl_of_msi_init);