/*
 * Copyright (C) 2007-2011 Freescale Semiconductor, Inc.
 *
 * Author: Tony Li <tony.li@freescale.com>
 *	   Jason Jin <Jason.jin@freescale.com>
 *
 * The hwirq allocation and free code is reused from sysdev/mpic_msi.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2 of the
 * License.
 *
 */
#include <linux/irq.h>
#include <linux/bootmem.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/mpic.h>
#include <asm/fsl_hcalls.h>

#include "fsl_msi.h"
#include "fsl_pci.h"

static LIST_HEAD(msi_head);

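/*
 * Per-interrupt-controller-type parameters, selected through the match
 * data in fsl_of_msi_ids[] below.
 */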
struct fsl_msi_feature {
	u32 fsl_pic_ip;
	u32 msiir_offset; /* Offset of MSIIR, relative to start of MSIR bank */
};

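/*
 * Per-cascade-interrupt context: the MSI bank this cascade belongs to and
 * the index of the MSIR register it services.
 */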
struct fsl_msi_cascade_data {
	struct fsl_msi *msi_data;
	int index;
};

static inline u32 fsl_msi_read(u32 __iomem *base, unsigned int reg)
{
	return in_be32(base + (reg >> 2));
}

/*
 * No explicit ack is needed here: the MSIR register has already been read
 * in the cascade handler, which acks this MSI interrupt.
 */
static void fsl_msi_end_irq(struct irq_data *d)
{
}

static struct irq_chip fsl_msi_chip = {
	.irq_mask	= mask_msi_irq,
	.irq_unmask	= unmask_msi_irq,
	.irq_ack	= fsl_msi_end_irq,
	.name		= "FSL-MSI",
};

static int fsl_msi_host_map(struct irq_domain *h, unsigned int virq,
			    irq_hw_number_t hw)
{
	struct fsl_msi *msi_data = h->host_data;
	struct irq_chip *chip = &fsl_msi_chip;

	irq_set_status_flags(virq, IRQ_TYPE_EDGE_FALLING);

	irq_set_chip_data(virq, msi_data);
	irq_set_chip_and_handler(virq, chip, handle_edge_irq);

	return 0;
}

static const struct irq_domain_ops fsl_msi_host_ops = {
	.map = fsl_msi_host_map,
};

static int fsl_msi_init_allocator(struct fsl_msi *msi_data)
{
	int rc;

	rc = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS,
			      msi_data->irqhost->of_node);
	if (rc)
		return rc;

	rc = msi_bitmap_reserve_dt_hwirqs(&msi_data->bitmap);
	if (rc < 0) {
		msi_bitmap_free(&msi_data->bitmap);
		return rc;
	}

	return 0;
}

static int fsl_msi_check_device(struct pci_dev *pdev, int nvec, int type)
{
	if (type == PCI_CAP_ID_MSIX)
		pr_debug("fslmsi: MSI-X untested, trying anyway.\n");

	return 0;
}

static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct msi_desc *entry;
	struct fsl_msi *msi_data;

	list_for_each_entry(entry, &pdev->msi_list, list) {
		if (entry->irq == NO_IRQ)
			continue;
		msi_data = irq_get_chip_data(entry->irq);
		irq_set_msi_desc(entry->irq, NULL);
		msi_bitmap_free_hwirqs(&msi_data->bitmap,
				       virq_to_hw(entry->irq), 1);
		irq_dispose_mapping(entry->irq);
	}

	return;
}

static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq,
				struct msi_msg *msg,
				struct fsl_msi *fsl_msi_data)
{
	struct fsl_msi *msi_data = fsl_msi_data;
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	u64 address; /* Physical address of the MSIIR */
	int len;
	const __be64 *reg;

	/* If the msi-address-64 property exists, then use it */
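	/*
	 * Hypothetical example of the property (the actual value is
	 * board/hypervisor specific):
	 *
	 *	msi-address-64 = <0x0 0xffe41140>;
	 *
	 * i.e. a single 64-bit value holding the physical address of MSIIR.
	 */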
	reg = of_get_property(hose->dn, "msi-address-64", &len);
	if (reg && (len == sizeof(u64)))
		address = be64_to_cpup(reg);
	else
		address = fsl_pci_immrbar_base(hose) + msi_data->msiir_offset;

	msg->address_lo = lower_32_bits(address);
	msg->address_hi = upper_32_bits(address);

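	/*
	 * The message data is just the hwirq number; the interrupt
	 * controller decodes it into a shared register select
	 * (srs = hwirq / IRQS_PER_MSI_REG) and an interrupt bit select
	 * (ibs = hwirq % IRQS_PER_MSI_REG), as reported by the debug
	 * print below.
	 */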
	msg->data = hwirq;

	pr_debug("%s: allocated srs: %d, ibs: %d\n",
		 __func__, hwirq / IRQS_PER_MSI_REG, hwirq % IRQS_PER_MSI_REG);
}

static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct device_node *np;
	phandle phandle = 0;
	int rc, hwirq = -ENOMEM;
	unsigned int virq;
	struct msi_desc *entry;
	struct msi_msg msg;
	struct fsl_msi *msi_data;

	/*
	 * If the PCI node has an fsl,msi property, then we need to use it
	 * to find the specific MSI.
	 */
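	/*
	 * Illustrative device tree fragment (not from any particular board)
	 * showing a PCI node pointing at its MSI bank:
	 *
	 *	pci0: pcie@ffe09000 {
	 *		...
	 *		fsl,msi = <&msi0>;
	 *	};
	 */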
	np = of_parse_phandle(hose->dn, "fsl,msi", 0);
	if (np) {
		if (of_device_is_compatible(np, "fsl,mpic-msi") ||
		    of_device_is_compatible(np, "fsl,vmpic-msi"))
			phandle = np->phandle;
		else {
			dev_err(&pdev->dev,
				"node %s has an invalid fsl,msi phandle %u\n",
				hose->dn->full_name, np->phandle);
			return -EINVAL;
		}
	}

	list_for_each_entry(entry, &pdev->msi_list, list) {
		/*
		 * Loop over all the MSI devices until we find one that has an
		 * available interrupt.
		 */
		list_for_each_entry(msi_data, &msi_head, list) {
			/*
			 * If the PCI node has an fsl,msi property, then we
			 * restrict our search to the corresponding MSI node.
			 * The simplest way is to skip over MSI nodes with the
			 * wrong phandle. Under the Freescale hypervisor, this
			 * has the additional benefit of skipping over MSI
			 * nodes that are not mapped in the PAMU.
			 */
			if (phandle && (phandle != msi_data->phandle))
				continue;

			hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
			if (hwirq >= 0)
				break;
		}

		if (hwirq < 0) {
			rc = hwirq;
			dev_err(&pdev->dev, "could not allocate MSI interrupt\n");
			goto out_free;
		}

		virq = irq_create_mapping(msi_data->irqhost, hwirq);

		if (virq == NO_IRQ) {
			dev_err(&pdev->dev, "failed to map hwirq %i\n", hwirq);
			msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
			rc = -ENOSPC;
			goto out_free;
		}
		/* chip_data is msi_data via host->hostdata in host->map() */
		irq_set_msi_desc(virq, entry);

		fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data);
		write_msi_msg(virq, &msg);
	}
	return 0;

out_free:
	/* Any IRQs already set up are freed by the caller of this function */
	return rc;
}

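/*
 * Chained handler for an MSIR cascade interrupt.  Read the MSIR register
 * this cascade services (via MMIO on MPIC/IPIC, via hypercall on the
 * hypervisor's VMPIC) and dispatch one virtual interrupt for every bit
 * that is set.
 */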
static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irq_data *idata = irq_desc_get_irq_data(desc);
	unsigned int cascade_irq;
	struct fsl_msi *msi_data;
	int msir_index = -1;
	u32 msir_value = 0;
	u32 intr_index;
	u32 have_shift = 0;
	struct fsl_msi_cascade_data *cascade_data;

	cascade_data = irq_get_handler_data(irq);
	msi_data = cascade_data->msi_data;

	raw_spin_lock(&desc->lock);
	if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) {
		if (chip->irq_mask_ack)
			chip->irq_mask_ack(idata);
		else {
			chip->irq_mask(idata);
			chip->irq_ack(idata);
		}
	}

	if (unlikely(irqd_irq_inprogress(idata)))
		goto unlock;

	msir_index = cascade_data->index;

	if (msir_index >= NR_MSI_REG)
		cascade_irq = NO_IRQ;

	irqd_set_chained_irq_inprogress(idata);
	switch (msi_data->feature & FSL_PIC_IP_MASK) {
	case FSL_PIC_IP_MPIC:
		msir_value = fsl_msi_read(msi_data->msi_regs,
			msir_index * 0x10);
		break;
	case FSL_PIC_IP_IPIC:
		msir_value = fsl_msi_read(msi_data->msi_regs, msir_index * 0x4);
		break;
#ifdef CONFIG_EPAPR_PARAVIRT
	case FSL_PIC_IP_VMPIC: {
		unsigned int ret;
		ret = fh_vmpic_get_msir(virq_to_hw(irq), &msir_value);
		if (ret) {
			pr_err("fsl-msi: fh_vmpic_get_msir() failed for irq %u (ret=%u)\n",
			       irq, ret);
			msir_value = 0;
		}
		break;
	}
#endif
	}

	while (msir_value) {
		intr_index = ffs(msir_value) - 1;

		cascade_irq = irq_linear_revmap(msi_data->irqhost,
				msir_index * IRQS_PER_MSI_REG +
					intr_index + have_shift);
		if (cascade_irq != NO_IRQ)
			generic_handle_irq(cascade_irq);
		have_shift += intr_index + 1;
		msir_value = msir_value >> (intr_index + 1);
	}
	irqd_clr_chained_irq_inprogress(idata);

	switch (msi_data->feature & FSL_PIC_IP_MASK) {
	case FSL_PIC_IP_MPIC:
	case FSL_PIC_IP_VMPIC:
		chip->irq_eoi(idata);
		break;
	case FSL_PIC_IP_IPIC:
		if (!irqd_irq_disabled(idata) && chip->irq_unmask)
			chip->irq_unmask(idata);
		break;
	}
unlock:
	raw_spin_unlock(&desc->lock);
}

static int fsl_of_msi_remove(struct platform_device *ofdev)
{
	struct fsl_msi *msi = platform_get_drvdata(ofdev);
	int virq, i;
	struct fsl_msi_cascade_data *cascade_data;

	if (msi->list.prev != NULL)
		list_del(&msi->list);
	for (i = 0; i < NR_MSI_REG; i++) {
		virq = msi->msi_virqs[i];
		if (virq != NO_IRQ) {
			cascade_data = irq_get_handler_data(virq);
			kfree(cascade_data);
			irq_dispose_mapping(virq);
		}
	}
	if (msi->bitmap.bitmap)
		msi_bitmap_free(&msi->bitmap);
	if ((msi->feature & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC)
		iounmap(msi->msi_regs);
	kfree(msi);

	return 0;
}

static struct lock_class_key fsl_msi_irq_class;

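/*
 * Map one MSIR cascade interrupt from the device tree and install
 * fsl_msi_cascade() as its chained handler.  @offset is the MSIR register
 * index served by this cascade, @irq_index the position of the interrupt
 * specifier in the node's "interrupts" property.
 */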
static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
			       int offset, int irq_index)
{
	struct fsl_msi_cascade_data *cascade_data = NULL;
	int virt_msir;

	virt_msir = irq_of_parse_and_map(dev->dev.of_node, irq_index);
	if (virt_msir == NO_IRQ) {
		dev_err(&dev->dev, "%s: Cannot translate IRQ index %d\n",
			__func__, irq_index);
		return 0;
	}

	cascade_data = kzalloc(sizeof(struct fsl_msi_cascade_data), GFP_KERNEL);
	if (!cascade_data) {
		dev_err(&dev->dev, "No memory for MSI cascade data\n");
		return -ENOMEM;
	}
	irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class);
	msi->msi_virqs[irq_index] = virt_msir;
	cascade_data->index = offset;
	cascade_data->msi_data = msi;
	irq_set_handler_data(virt_msir, cascade_data);
	irq_set_chained_handler(virt_msir, fsl_msi_cascade);

	return 0;
}

static const struct of_device_id fsl_of_msi_ids[];
static int fsl_of_msi_probe(struct platform_device *dev)
{
	const struct of_device_id *match;
	struct fsl_msi *msi;
	struct resource res;
	int err, i, j, irq_index, count;
	int rc;
	const u32 *p;
	const struct fsl_msi_feature *features;
	int len;
	u32 offset;
	static const u32 all_avail[] = { 0, NR_MSI_IRQS };

	match = of_match_device(fsl_of_msi_ids, &dev->dev);
	if (!match)
		return -EINVAL;
	features = match->data;

	printk(KERN_DEBUG "Setting up Freescale MSI support\n");

	msi = kzalloc(sizeof(struct fsl_msi), GFP_KERNEL);
	if (!msi) {
		dev_err(&dev->dev, "No memory for MSI structure\n");
		return -ENOMEM;
	}
	platform_set_drvdata(dev, msi);

	msi->irqhost = irq_domain_add_linear(dev->dev.of_node,
				      NR_MSI_IRQS, &fsl_msi_host_ops, msi);

	if (msi->irqhost == NULL) {
		dev_err(&dev->dev, "No memory for MSI irqhost\n");
		err = -ENOMEM;
		goto error_out;
	}

	/*
	 * Under the Freescale hypervisor, the msi nodes don't have a 'reg'
	 * property. Instead, we use hypercalls to access the MSI.
	 */
	if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC) {
		err = of_address_to_resource(dev->dev.of_node, 0, &res);
		if (err) {
			dev_err(&dev->dev, "invalid resource for node %s\n",
				dev->dev.of_node->full_name);
			goto error_out;
		}

		msi->msi_regs = ioremap(res.start, resource_size(&res));
		if (!msi->msi_regs) {
			err = -ENOMEM;
			dev_err(&dev->dev, "could not map node %s\n",
				dev->dev.of_node->full_name);
			goto error_out;
		}
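		/*
		 * MSIIR lives inside CCSR space: keep only the low 20 bits
		 * of the MSI block's address plus the block-relative MSIIR
		 * offset; fsl_compose_msi_msg() adds the IMMR base back in
		 * to form the full physical address.
		 */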
		msi->msiir_offset =
			features->msiir_offset + (res.start & 0xfffff);
	}

	msi->feature = features->fsl_pic_ip;

	/*
	 * Remember the phandle, so that we can match with any PCI nodes
	 * that have an "fsl,msi" property.
	 */
	msi->phandle = dev->dev.of_node->phandle;

	rc = fsl_msi_init_allocator(msi);
	if (rc) {
		dev_err(&dev->dev, "Error allocating MSI bitmap\n");
		goto error_out;
	}

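	/*
	 * msi-available-ranges is a list of <start count> pairs naming the
	 * MSI hwirqs this node may hand out, e.g. (illustrative values):
	 *
	 *	msi-available-ranges = <0 0x100>;
	 *
	 * Both cells must be multiples of IRQS_PER_MSI_REG, since each MSIR
	 * register covers a fixed block of interrupts.
	 */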
	p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len);
	if (p && len % (2 * sizeof(u32)) != 0) {
		dev_err(&dev->dev, "%s: Malformed msi-available-ranges property\n",
			__func__);
		err = -EINVAL;
		goto error_out;
	}

	if (!p) {
		p = all_avail;
		len = sizeof(all_avail);
	}

	for (irq_index = 0, i = 0; i < len / (2 * sizeof(u32)); i++) {
		if (p[i * 2] % IRQS_PER_MSI_REG ||
		    p[i * 2 + 1] % IRQS_PER_MSI_REG) {
			printk(KERN_WARNING "%s: %s: msi available range of %u at %u is not IRQ-aligned\n",
			       __func__, dev->dev.of_node->full_name,
			       p[i * 2 + 1], p[i * 2]);
			err = -EINVAL;
			goto error_out;
		}

		offset = p[i * 2] / IRQS_PER_MSI_REG;
		count = p[i * 2 + 1] / IRQS_PER_MSI_REG;

		for (j = 0; j < count; j++, irq_index++) {
			err = fsl_msi_setup_hwirq(msi, dev, offset + j, irq_index);
			if (err)
				goto error_out;
		}
	}

	list_add_tail(&msi->list, &msi_head);

	/*
	 * Installing the ppc_md MSI hooks more than once is harmless, as
	 * long as it is always the same handler.
	 */
	if (!ppc_md.setup_msi_irqs) {
		ppc_md.setup_msi_irqs = fsl_setup_msi_irqs;
		ppc_md.teardown_msi_irqs = fsl_teardown_msi_irqs;
		ppc_md.msi_check_device = fsl_msi_check_device;
	} else if (ppc_md.setup_msi_irqs != fsl_setup_msi_irqs) {
		dev_err(&dev->dev, "Different MSI driver already installed!\n");
		err = -ENODEV;
		goto error_out;
	}
	return 0;
error_out:
	fsl_of_msi_remove(dev);
	return err;
}

static const struct fsl_msi_feature mpic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_MPIC,
	.msiir_offset = 0x140,
};

static const struct fsl_msi_feature ipic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_IPIC,
	.msiir_offset = 0x38,
};

static const struct fsl_msi_feature vmpic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_VMPIC,
	.msiir_offset = 0,
};

static const struct of_device_id fsl_of_msi_ids[] = {
	{
		.compatible = "fsl,mpic-msi",
		.data = &mpic_msi_feature,
	},
	{
		.compatible = "fsl,ipic-msi",
		.data = &ipic_msi_feature,
	},
#ifdef CONFIG_EPAPR_PARAVIRT
	{
		.compatible = "fsl,vmpic-msi",
		.data = &vmpic_msi_feature,
	},
#endif
	{}
};

static struct platform_driver fsl_of_msi_driver = {
	.driver = {
		.name = "fsl-msi",
		.owner = THIS_MODULE,
		.of_match_table = fsl_of_msi_ids,
	},
	.probe = fsl_of_msi_probe,
	.remove = fsl_of_msi_remove,
};

static __init int fsl_of_msi_init(void)
{
	return platform_driver_register(&fsl_of_msi_driver);
}

subsys_initcall(fsl_of_msi_init);