/*
 * Copyright 2007, Michael Ellerman, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */


#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/of_platform.h>
#include <linux/debugfs.h>
#include <linux/slab.h>

#include <asm/dcr.h>
#include <asm/machdep.h>
#include <asm/prom.h>


/*
 * MSIC registers, specified as offsets from dcr_base
 */
#define MSIC_CTRL_REG	0x0

/* Base Address registers specify FIFO location in BE memory */
#define MSIC_BASE_ADDR_HI_REG	0x3
#define MSIC_BASE_ADDR_LO_REG	0x4

/* Hold the read/write offsets into the FIFO */
#define MSIC_READ_OFFSET_REG	0x5
#define MSIC_WRITE_OFFSET_REG	0x6


/* MSIC control register flags */
#define MSIC_CTRL_ENABLE		0x0001
#define MSIC_CTRL_FIFO_FULL_ENABLE	0x0002
#define MSIC_CTRL_IRQ_ENABLE		0x0008
#define MSIC_CTRL_FULL_STOP_ENABLE	0x0010

/*
 * The MSIC can be configured to use a FIFO of 32KB, 64KB, 128KB or 256KB.
 * Currently we're using a 64KB FIFO size.
 */
#define MSIC_FIFO_SIZE_SHIFT	16
#define MSIC_FIFO_SIZE_BYTES	(1 << MSIC_FIFO_SIZE_SHIFT)

/*
 * To configure the FIFO size as (1 << n) bytes, we write (n - 15) into bits
 * 8-9 of the MSIC control reg.
 */
#define MSIC_CTRL_FIFO_SIZE	(((MSIC_FIFO_SIZE_SHIFT - 15) << 8) & 0x300)

/*
 * We need to mask the read/write offsets to make sure they stay within
 * the bounds of the FIFO. Also they should always be 16-byte aligned.
 */
#define MSIC_FIFO_SIZE_MASK	((MSIC_FIFO_SIZE_BYTES - 1) & ~0xFu)

/* Each entry in the FIFO is 16 bytes, the first 4 bytes hold the irq # */
#define MSIC_FIFO_ENTRY_SIZE	0x10


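/*
 * Per-MSIC state: the irq_domain used to hand out MSI virqs, the
 * DMA-coherent FIFO the hardware writes MSI entries into, the DCR
 * mapping used to program the MSIC registers, and our current read
 * offset into that FIFO.
 */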
struct axon_msic {
	struct irq_domain *irq_domain;
	__le32 *fifo_virt;
	dma_addr_t fifo_phys;
	dcr_host_t dcr_host;
	u32 read_offset;
#ifdef DEBUG
	u32 __iomem *trigger;
#endif
};

#ifdef DEBUG
void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic);
#else
static inline void axon_msi_debug_setup(struct device_node *dn,
					struct axon_msic *msic) { }
#endif


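/* Write an MSIC register via DCR, with a pr_devel trace of the access */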
static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
{
	pr_devel("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n);

	dcr_write(msic->dcr_host, dcr_n, val);
}

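/*
 * Chained handler for the MSIC's upstream interrupt. Drain FIFO entries
 * between our read offset and the hardware's write offset, dispatching
 * each valid entry as a Linux irq; retry briefly on stale reads caused
 * by in-flight DMA to the FIFO.
 */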
static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct axon_msic *msic = irq_get_handler_data(irq);
	u32 write_offset, msi;
	int idx;
	int retry = 0;

	write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG);
	pr_devel("axon_msi: original write_offset 0x%x\n", write_offset);

	/* write_offset doesn't wrap properly, so we have to mask it */
	write_offset &= MSIC_FIFO_SIZE_MASK;

	while (msic->read_offset != write_offset && retry < 100) {
		idx = msic->read_offset / sizeof(__le32);
		msi = le32_to_cpu(msic->fifo_virt[idx]);
		msi &= 0xFFFF;

		pr_devel("axon_msi: woff %x roff %x msi %x\n",
			 write_offset, msic->read_offset, msi);

		if (msi < nr_irqs && irq_get_chip_data(msi) == msic) {
			generic_handle_irq(msi);
			msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
		} else {
			/*
			 * Reading the MSIC_WRITE_OFFSET_REG does not
			 * reliably flush the outstanding DMA to the
			 * FIFO buffer. Here we were reading stale
			 * data, so we need to retry.
			 */
			udelay(1);
			retry++;
			pr_devel("axon_msi: invalid irq 0x%x!\n", msi);
			continue;
		}

		if (retry) {
			pr_devel("axon_msi: late irq 0x%x, retry %d\n",
				 msi, retry);
			retry = 0;
		}

		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
	}

	if (retry) {
		printk(KERN_WARNING "axon_msi: irq timed out\n");

		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
	}

	chip->irq_eoi(&desc->irq_data);
}

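/*
 * Walk up the device tree from the PCI device until an "msi-translator"
 * phandle is found, and return the axon_msic that phandle points to.
 */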
static struct axon_msic *find_msi_translator(struct pci_dev *dev)
{
	struct irq_domain *irq_domain;
	struct device_node *dn, *tmp;
	const phandle *ph;
	struct axon_msic *msic = NULL;

	dn = of_node_get(pci_device_to_OF_node(dev));
	if (!dn) {
		dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
		return NULL;
	}

	for (; dn; dn = of_get_next_parent(dn)) {
		ph = of_get_property(dn, "msi-translator", NULL);
		if (ph)
			break;
	}

	if (!ph) {
		dev_dbg(&dev->dev,
			"axon_msi: no msi-translator property found\n");
		goto out_error;
	}

	tmp = dn;
	dn = of_find_node_by_phandle(*ph);
	of_node_put(tmp);
	if (!dn) {
		dev_dbg(&dev->dev,
			"axon_msi: msi-translator doesn't point to a node\n");
		goto out_error;
	}

	irq_domain = irq_find_host(dn);
	if (!irq_domain) {
		dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %s\n",
			dn->full_name);
		goto out_error;
	}

	msic = irq_domain->host_data;

out_error:
	of_node_put(dn);

	return msic;
}

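/*
 * Fill in the MSI message address from the first "msi-address-64" (for
 * 64-bit capable MSIs) or "msi-address-32" property found while walking
 * up the device tree from the PCI device.
 */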
static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
{
	struct device_node *dn;
	struct msi_desc *entry;
	int len;
	const u32 *prop;

	dn = of_node_get(pci_device_to_OF_node(dev));
	if (!dn) {
		dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
		return -ENODEV;
	}

	entry = list_first_entry(&dev->msi_list, struct msi_desc, list);

	for (; dn; dn = of_get_next_parent(dn)) {
		if (entry->msi_attrib.is_64) {
			prop = of_get_property(dn, "msi-address-64", &len);
			if (prop)
				break;
		}

		prop = of_get_property(dn, "msi-address-32", &len);
		if (prop)
			break;
	}

	if (!prop) {
		dev_dbg(&dev->dev,
			"axon_msi: no msi-address-(32|64) properties found\n");
		return -ENOENT;
	}

	switch (len) {
	case 8:
		msg->address_hi = prop[0];
		msg->address_lo = prop[1];
		break;
	case 4:
		msg->address_hi = 0;
		msg->address_lo = prop[0];
		break;
	default:
		dev_dbg(&dev->dev,
			"axon_msi: malformed msi-address-(32|64) property\n");
		of_node_put(dn);
		return -EINVAL;
	}

	of_node_put(dn);

	return 0;
}

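/*
 * ppc_md.setup_msi_irqs hook: allocate a virq for each MSI descriptor on
 * the device and write the MSI message, with the data field set to the
 * virq number so the cascade handler can route FIFO entries back to it.
 */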
static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	unsigned int virq, rc;
	struct msi_desc *entry;
	struct msi_msg msg;
	struct axon_msic *msic;

	msic = find_msi_translator(dev);
	if (!msic)
		return -ENODEV;

	rc = setup_msi_msg_address(dev, &msg);
	if (rc)
		return rc;

	list_for_each_entry(entry, &dev->msi_list, list) {
		virq = irq_create_direct_mapping(msic->irq_domain);
		if (virq == NO_IRQ) {
			dev_warn(&dev->dev,
				 "axon_msi: virq allocation failed!\n");
			return -1;
		}
		dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq);

		irq_set_msi_desc(virq, entry);
		msg.data = virq;
		write_msi_msg(virq, &msg);
	}

	return 0;
}

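/* ppc_md.teardown_msi_irqs hook: release every virq set up for the device */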
static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;

	dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n");

	list_for_each_entry(entry, &dev->msi_list, list) {
		if (entry->irq == NO_IRQ)
			continue;

		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
	}
}

static struct irq_chip msic_irq_chip = {
	.irq_mask	= mask_msi_irq,
	.irq_unmask	= unmask_msi_irq,
	.irq_shutdown	= mask_msi_irq,
	.name		= "AXON-MSI",
};

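/* irq_domain .map hook: attach our MSIC and irq_chip to each new virq */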
static int msic_host_map(struct irq_domain *h, unsigned int virq,
			 irq_hw_number_t hw)
{
	irq_set_chip_data(virq, h->host_data);
	irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq);

	return 0;
}

static const struct irq_domain_ops msic_host_ops = {
	.map	= msic_host_map,
};

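/* Quiesce the MSIC on driver shutdown by clearing its enable bits */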
static void axon_msi_shutdown(struct platform_device *device)
{
	struct axon_msic *msic = dev_get_drvdata(&device->dev);
	u32 tmp;

	pr_devel("axon_msi: disabling %s\n",
		 msic->irq_domain->of_node->full_name);
	tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
	tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
	msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
}

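/*
 * Probe: map the MSIC's DCR range, allocate the DMA-coherent FIFO, create
 * a no-map irq_domain, chain the cascade handler onto the MSIC's upstream
 * interrupt, enable the hardware and install the MSI hooks in ppc_md.
 */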
static int axon_msi_probe(struct platform_device *device)
{
	struct device_node *dn = device->dev.of_node;
	struct axon_msic *msic;
	unsigned int virq;
	int dcr_base, dcr_len;

	pr_devel("axon_msi: setting up dn %s\n", dn->full_name);

	msic = kzalloc(sizeof(struct axon_msic), GFP_KERNEL);
	if (!msic) {
		printk(KERN_ERR "axon_msi: couldn't allocate msic for %s\n",
		       dn->full_name);
		goto out;
	}

	dcr_base = dcr_resource_start(dn, 0);
	dcr_len = dcr_resource_len(dn, 0);

	if (dcr_base == 0 || dcr_len == 0) {
		printk(KERN_ERR
		       "axon_msi: couldn't parse dcr properties on %s\n",
		       dn->full_name);
		goto out_free_msic;
	}

	msic->dcr_host = dcr_map(dn, dcr_base, dcr_len);
	if (!DCR_MAP_OK(msic->dcr_host)) {
		printk(KERN_ERR "axon_msi: dcr_map failed for %s\n",
		       dn->full_name);
		goto out_free_msic;
	}

	msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES,
					     &msic->fifo_phys, GFP_KERNEL);
	if (!msic->fifo_virt) {
		printk(KERN_ERR "axon_msi: couldn't allocate fifo for %s\n",
		       dn->full_name);
		goto out_free_msic;
	}

	virq = irq_of_parse_and_map(dn, 0);
	if (virq == NO_IRQ) {
		printk(KERN_ERR "axon_msi: irq parse and map failed for %s\n",
		       dn->full_name);
		goto out_free_fifo;
	}
	memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);

	/* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */
	msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic);
	if (!msic->irq_domain) {
		printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
		       dn->full_name);
		goto out_free_fifo;
	}

	irq_set_handler_data(virq, msic);
	irq_set_chained_handler(virq, axon_msi_cascade);
	pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq);

	/* Enable the MSIC hardware */
	msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, msic->fifo_phys >> 32);
	msic_dcr_write(msic, MSIC_BASE_ADDR_LO_REG,
		       msic->fifo_phys & 0xFFFFFFFF);
	msic_dcr_write(msic, MSIC_CTRL_REG,
		       MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE |
		       MSIC_CTRL_FIFO_SIZE);

	msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG)
				& MSIC_FIFO_SIZE_MASK;

	dev_set_drvdata(&device->dev, msic);

	ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs;
	ppc_md.teardown_msi_irqs = axon_msi_teardown_msi_irqs;

	axon_msi_debug_setup(dn, msic);

	printk(KERN_DEBUG "axon_msi: setup MSIC on %s\n", dn->full_name);

	return 0;

out_free_fifo:
	dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt,
			  msic->fifo_phys);
out_free_msic:
	kfree(msic);
out:

	return -1;
}

static const struct of_device_id axon_msi_device_id[] = {
	{
		.compatible	= "ibm,axon-msic"
	},
	{}
};

static struct platform_driver axon_msi_driver = {
	.probe		= axon_msi_probe,
	.shutdown	= axon_msi_shutdown,
	.driver		= {
		.name		= "axon-msi",
		.owner		= THIS_MODULE,
		.of_match_table	= axon_msi_device_id,
	},
};

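/* Register at subsys_initcall time so the MSIC is up before PCI drivers probe */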
static int __init axon_msi_init(void)
{
	return platform_driver_register(&axon_msi_driver);
}
subsys_initcall(axon_msi_init);


#ifdef DEBUG
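/*
 * Debug-only support: expose a debugfs file that fires a test MSI by
 * writing the supplied value to the MSIC's memory-mapped trigger register.
 */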
static int msic_set(void *data, u64 val)
{
	struct axon_msic *msic = data;
	out_le32(msic->trigger, val);
	return 0;
}

static int msic_get(void *data, u64 *val)
{
	*val = 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_msic, msic_get, msic_set, "%llu\n");

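/* Map the MSIC's trigger register and create the per-node debugfs file */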
void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic)
{
	char name[8];
	u64 addr;

	addr = of_translate_address(dn, of_get_property(dn, "reg", NULL));
	if (addr == OF_BAD_ADDR) {
		pr_devel("axon_msi: couldn't translate reg property\n");
		return;
	}

	msic->trigger = ioremap(addr, 0x4);
	if (!msic->trigger) {
		pr_devel("axon_msi: ioremap failed\n");
		return;
	}

	snprintf(name, sizeof(name), "msic_%d", of_node_to_nid(dn));

	if (!debugfs_create_file(name, 0600, powerpc_debugfs_root,
				 msic, &fops_msic)) {
		pr_devel("axon_msi: debugfs_create_file failed!\n");
		return;
	}
}
#endif /* DEBUG */