1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright 2007, Michael Ellerman, IBM Corporation.
4 */
5
6
7 #include <linux/interrupt.h>
8 #include <linux/irq.h>
9 #include <linux/kernel.h>
10 #include <linux/pci.h>
11 #include <linux/msi.h>
12 #include <linux/export.h>
13 #include <linux/of_platform.h>
14 #include <linux/slab.h>
15
16 #include <asm/debugfs.h>
17 #include <asm/dcr.h>
18 #include <asm/machdep.h>
19 #include <asm/prom.h>
20
21 #include "cell.h"
22
/*
 * MSIC registers, specified as offsets from dcr_base
 */
#define MSIC_CTRL_REG	0x0

/* Base Address registers specify FIFO location in BE memory */
#define MSIC_BASE_ADDR_HI_REG	0x3
#define MSIC_BASE_ADDR_LO_REG	0x4

/* Hold the read/write offsets into the FIFO */
#define MSIC_READ_OFFSET_REG	0x5
#define MSIC_WRITE_OFFSET_REG	0x6


/* MSIC control register flags */
#define MSIC_CTRL_ENABLE		0x0001
#define MSIC_CTRL_FIFO_FULL_ENABLE	0x0002
#define MSIC_CTRL_IRQ_ENABLE		0x0008
#define MSIC_CTRL_FULL_STOP_ENABLE	0x0010

/*
 * The MSIC can be configured to use a FIFO of 32KB, 64KB, 128KB or 256KB.
 * Currently we're using a 64KB FIFO size.
 */
#define MSIC_FIFO_SIZE_SHIFT	16
#define MSIC_FIFO_SIZE_BYTES	(1 << MSIC_FIFO_SIZE_SHIFT)

/*
 * To configure the FIFO size as (1 << n) bytes, we write (n - 15) into bits
 * 8-9 of the MSIC control reg.
 */
#define MSIC_CTRL_FIFO_SIZE	(((MSIC_FIFO_SIZE_SHIFT - 15) << 8) & 0x300)

/*
 * We need to mask the read/write offsets to make sure they stay within
 * the bounds of the FIFO. Also they should always be 16-byte aligned.
 */
#define MSIC_FIFO_SIZE_MASK	((MSIC_FIFO_SIZE_BYTES - 1) & ~0xFu)

/* Each entry in the FIFO is 16 bytes, the first 4 bytes hold the irq # */
#define MSIC_FIFO_ENTRY_SIZE	0x10
64
65
/*
 * Per-MSIC instance state.  One of these exists per "ibm,axon-msic"
 * device node; it owns the DMA-coherent FIFO the hardware writes MSI
 * entries into, and the irq_domain used to hand out MSI virqs.
 */
struct axon_msic {
	struct irq_domain *irq_domain;	/* nomap domain; host_data points back here */
	__le32 *fifo_virt;		/* kernel mapping of the MSI FIFO */
	dma_addr_t fifo_phys;		/* bus address programmed into the BASE_ADDR regs */
	dcr_host_t dcr_host;		/* DCR mapping for the MSIC registers */
	u32 read_offset;		/* our (software) consumer offset into the FIFO */
#ifdef DEBUG
	u32 __iomem *trigger;		/* MMIO trigger register used by debugfs */
#endif
};
76
#ifdef DEBUG
void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic);
#else
/* No-op stub when the debugfs trigger support is compiled out. */
static inline void axon_msi_debug_setup(struct device_node *dn,
					struct axon_msic *msic) { }
#endif
83
84
/*
 * Write an MSIC register via DCR.  @dcr_n is the register offset from
 * dcr_base, @val the value to write.
 */
static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
{
	/* Trace in call order (reg, val); the old message printed them swapped. */
	pr_devel("axon_msi: dcr_write(0x%x, 0x%x)\n", dcr_n, val);

	dcr_write(msic->dcr_host, dcr_n, val);
}
91
/*
 * Chained handler for the MSIC's cascade interrupt.  The hardware DMAs
 * 16-byte entries into the FIFO; the first 4 bytes of each entry carry
 * the MSI data, which axon_msi_setup_msi_irqs() set to the virq, so the
 * value read from the FIFO can be fed straight to generic_handle_irq().
 */
static void axon_msi_cascade(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct axon_msic *msic = irq_desc_get_handler_data(desc);
	u32 write_offset, msi;
	int idx;
	int retry = 0;

	write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG);
	pr_devel("axon_msi: original write_offset 0x%x\n", write_offset);

	/* write_offset doesn't wrap properly, so we have to mask it */
	write_offset &= MSIC_FIFO_SIZE_MASK;

	/*
	 * Consume entries until our read offset catches up with the
	 * hardware's producer offset, giving up after 100 stale reads.
	 */
	while (msic->read_offset != write_offset && retry < 100) {
		idx = msic->read_offset / sizeof(__le32);
		msi = le32_to_cpu(msic->fifo_virt[idx]);
		msi &= 0xFFFF;

		pr_devel("axon_msi: woff %x roff %x msi %x\n",
			 write_offset, msic->read_offset, msi);

		/* Only accept values that decode to a virq owned by this msic. */
		if (msi < nr_irqs && irq_get_chip_data(msi) == msic) {
			generic_handle_irq(msi);
			/*
			 * Poison the consumed slot so a stale re-read is
			 * recognisable (0xffffffff never passes the check
			 * above).
			 */
			msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
		} else {
			/*
			 * Reading the MSIC_WRITE_OFFSET_REG does not
			 * reliably flush the outstanding DMA to the
			 * FIFO buffer. Here we were reading stale
			 * data, so we need to retry.
			 */
			udelay(1);
			retry++;
			pr_devel("axon_msi: invalid irq 0x%x!\n", msi);
			continue;
		}

		if (retry) {
			pr_devel("axon_msi: late irq 0x%x, retry %d\n",
				 msi, retry);
			retry = 0;
		}

		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
	}

	if (retry) {
		/*
		 * Retries exhausted: skip the entry we could never decode
		 * so we don't spin on it forever on the next interrupt.
		 */
		printk(KERN_WARNING "axon_msi: irq timed out\n");

		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
	}

	chip->irq_eoi(&desc->irq_data);
}
149
/*
 * Find the axon_msic serving a PCI device: walk up the device tree
 * until a node with an "msi-translator" phandle is found, then look up
 * the irq_domain registered for the node that phandle points to.
 *
 * Returns the msic, or NULL if the device is not behind an MSIC.
 */
static struct axon_msic *find_msi_translator(struct pci_dev *dev)
{
	struct irq_domain *irq_domain;
	struct device_node *dn, *tmp;
	const phandle *ph;
	struct axon_msic *msic = NULL;

	dn = of_node_get(pci_device_to_OF_node(dev));
	if (!dn) {
		dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
		return NULL;
	}

	/* of_get_next_parent() drops the ref on dn as it walks upwards. */
	for (; dn; dn = of_get_next_parent(dn)) {
		ph = of_get_property(dn, "msi-translator", NULL);
		if (ph)
			break;
	}

	if (!ph) {
		dev_dbg(&dev->dev,
			"axon_msi: no msi-translator property found\n");
		goto out_error;
	}

	/* Trade our ref on the property node for one on the translator node. */
	tmp = dn;
	dn = of_find_node_by_phandle(*ph);
	of_node_put(tmp);
	if (!dn) {
		dev_dbg(&dev->dev,
			"axon_msi: msi-translator doesn't point to a node\n");
		goto out_error;
	}

	irq_domain = irq_find_host(dn);
	if (!irq_domain) {
		dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %pOF\n",
			dn);
		goto out_error;
	}

	/* host_data was set to the msic by axon_msi_probe(). */
	msic = irq_domain->host_data;

out_error:
	of_node_put(dn);

	return msic;
}
198
setup_msi_msg_address(struct pci_dev * dev,struct msi_msg * msg)199 static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
200 {
201 struct device_node *dn;
202 struct msi_desc *entry;
203 int len;
204 const u32 *prop;
205
206 dn = of_node_get(pci_device_to_OF_node(dev));
207 if (!dn) {
208 dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
209 return -ENODEV;
210 }
211
212 entry = first_pci_msi_entry(dev);
213
214 for (; dn; dn = of_get_next_parent(dn)) {
215 if (entry->msi_attrib.is_64) {
216 prop = of_get_property(dn, "msi-address-64", &len);
217 if (prop)
218 break;
219 }
220
221 prop = of_get_property(dn, "msi-address-32", &len);
222 if (prop)
223 break;
224 }
225
226 if (!prop) {
227 dev_dbg(&dev->dev,
228 "axon_msi: no msi-address-(32|64) properties found\n");
229 of_node_put(dn);
230 return -ENOENT;
231 }
232
233 switch (len) {
234 case 8:
235 msg->address_hi = prop[0];
236 msg->address_lo = prop[1];
237 break;
238 case 4:
239 msg->address_hi = 0;
240 msg->address_lo = prop[0];
241 break;
242 default:
243 dev_dbg(&dev->dev,
244 "axon_msi: malformed msi-address-(32|64) property\n");
245 of_node_put(dn);
246 return -EINVAL;
247 }
248
249 of_node_put(dn);
250
251 return 0;
252 }
253
axon_msi_setup_msi_irqs(struct pci_dev * dev,int nvec,int type)254 static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
255 {
256 unsigned int virq, rc;
257 struct msi_desc *entry;
258 struct msi_msg msg;
259 struct axon_msic *msic;
260
261 msic = find_msi_translator(dev);
262 if (!msic)
263 return -ENODEV;
264
265 rc = setup_msi_msg_address(dev, &msg);
266 if (rc)
267 return rc;
268
269 for_each_pci_msi_entry(entry, dev) {
270 virq = irq_create_direct_mapping(msic->irq_domain);
271 if (!virq) {
272 dev_warn(&dev->dev,
273 "axon_msi: virq allocation failed!\n");
274 return -1;
275 }
276 dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq);
277
278 irq_set_msi_desc(virq, entry);
279 msg.data = virq;
280 pci_write_msi_msg(virq, &msg);
281 }
282
283 return 0;
284 }
285
axon_msi_teardown_msi_irqs(struct pci_dev * dev)286 static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
287 {
288 struct msi_desc *entry;
289
290 dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n");
291
292 for_each_pci_msi_entry(entry, dev) {
293 if (!entry->irq)
294 continue;
295
296 irq_set_msi_desc(entry->irq, NULL);
297 irq_dispose_mapping(entry->irq);
298 }
299 }
300
/* irq_chip for MSI virqs: mask/unmask is done at the PCI MSI level. */
static struct irq_chip msic_irq_chip = {
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
	.irq_shutdown	= pci_msi_mask_irq,
	.name		= "AXON-MSI",
};
307
/*
 * irq_domain map hook: stash the owning msic in chip_data (this is what
 * axon_msi_cascade() checks to validate FIFO entries) and attach the
 * MSI irq_chip with a simple-irq flow handler.
 */
static int msic_host_map(struct irq_domain *h, unsigned int virq,
			 irq_hw_number_t hw)
{
	irq_set_chip_data(virq, h->host_data);
	irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq);

	return 0;
}
316
/* Domain ops for the nomap MSI domain created in axon_msi_probe(). */
static const struct irq_domain_ops msic_host_ops = {
	.map	= msic_host_map,
};
320
axon_msi_shutdown(struct platform_device * device)321 static void axon_msi_shutdown(struct platform_device *device)
322 {
323 struct axon_msic *msic = dev_get_drvdata(&device->dev);
324 u32 tmp;
325
326 pr_devel("axon_msi: disabling %pOF\n",
327 irq_domain_get_of_node(msic->irq_domain));
328 tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
329 tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
330 msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
331 }
332
axon_msi_probe(struct platform_device * device)333 static int axon_msi_probe(struct platform_device *device)
334 {
335 struct device_node *dn = device->dev.of_node;
336 struct axon_msic *msic;
337 unsigned int virq;
338 int dcr_base, dcr_len;
339
340 pr_devel("axon_msi: setting up dn %pOF\n", dn);
341
342 msic = kzalloc(sizeof(*msic), GFP_KERNEL);
343 if (!msic) {
344 printk(KERN_ERR "axon_msi: couldn't allocate msic for %pOF\n",
345 dn);
346 goto out;
347 }
348
349 dcr_base = dcr_resource_start(dn, 0);
350 dcr_len = dcr_resource_len(dn, 0);
351
352 if (dcr_base == 0 || dcr_len == 0) {
353 printk(KERN_ERR
354 "axon_msi: couldn't parse dcr properties on %pOF\n",
355 dn);
356 goto out_free_msic;
357 }
358
359 msic->dcr_host = dcr_map(dn, dcr_base, dcr_len);
360 if (!DCR_MAP_OK(msic->dcr_host)) {
361 printk(KERN_ERR "axon_msi: dcr_map failed for %pOF\n",
362 dn);
363 goto out_free_msic;
364 }
365
366 msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES,
367 &msic->fifo_phys, GFP_KERNEL);
368 if (!msic->fifo_virt) {
369 printk(KERN_ERR "axon_msi: couldn't allocate fifo for %pOF\n",
370 dn);
371 goto out_free_msic;
372 }
373
374 virq = irq_of_parse_and_map(dn, 0);
375 if (!virq) {
376 printk(KERN_ERR "axon_msi: irq parse and map failed for %pOF\n",
377 dn);
378 goto out_free_fifo;
379 }
380 memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
381
382 /* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */
383 msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic);
384 if (!msic->irq_domain) {
385 printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %pOF\n",
386 dn);
387 goto out_free_fifo;
388 }
389
390 irq_set_handler_data(virq, msic);
391 irq_set_chained_handler(virq, axon_msi_cascade);
392 pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq);
393
394 /* Enable the MSIC hardware */
395 msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, msic->fifo_phys >> 32);
396 msic_dcr_write(msic, MSIC_BASE_ADDR_LO_REG,
397 msic->fifo_phys & 0xFFFFFFFF);
398 msic_dcr_write(msic, MSIC_CTRL_REG,
399 MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE |
400 MSIC_CTRL_FIFO_SIZE);
401
402 msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG)
403 & MSIC_FIFO_SIZE_MASK;
404
405 dev_set_drvdata(&device->dev, msic);
406
407 cell_pci_controller_ops.setup_msi_irqs = axon_msi_setup_msi_irqs;
408 cell_pci_controller_ops.teardown_msi_irqs = axon_msi_teardown_msi_irqs;
409
410 axon_msi_debug_setup(dn, msic);
411
412 printk(KERN_DEBUG "axon_msi: setup MSIC on %pOF\n", dn);
413
414 return 0;
415
416 out_free_fifo:
417 dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt,
418 msic->fifo_phys);
419 out_free_msic:
420 kfree(msic);
421 out:
422
423 return -1;
424 }
425
/* Device-tree match table: one compatible string for the Axon MSIC. */
static const struct of_device_id axon_msi_device_id[] = {
	{
		.compatible	= "ibm,axon-msic"
	},
	{}
};
432
/* Platform driver; shutdown quiesces the hardware, there is no remove. */
static struct platform_driver axon_msi_driver = {
	.probe		= axon_msi_probe,
	.shutdown	= axon_msi_shutdown,
	.driver		= {
		.name			= "axon-msi",
		.of_match_table		= axon_msi_device_id,
	},
};
441
/* Register early (subsys level) so MSIs are available before PCI drivers. */
static int __init axon_msi_init(void)
{
	return platform_driver_register(&axon_msi_driver);
}
subsys_initcall(axon_msi_init);
447
448
449 #ifdef DEBUG
/* debugfs write handler: poke the MMIO trigger register to fire an MSI. */
static int msic_set(void *data, u64 val)
{
	struct axon_msic *msic = data;
	out_le32(msic->trigger, val);
	return 0;
}
456
/* debugfs read handler: the trigger register is write-only, report 0. */
static int msic_get(void *data, u64 *val)
{
	*val = 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_msic, msic_get, msic_set, "%llu\n");
464
axon_msi_debug_setup(struct device_node * dn,struct axon_msic * msic)465 void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic)
466 {
467 char name[8];
468 u64 addr;
469
470 addr = of_translate_address(dn, of_get_property(dn, "reg", NULL));
471 if (addr == OF_BAD_ADDR) {
472 pr_devel("axon_msi: couldn't translate reg property\n");
473 return;
474 }
475
476 msic->trigger = ioremap(addr, 0x4);
477 if (!msic->trigger) {
478 pr_devel("axon_msi: ioremap failed\n");
479 return;
480 }
481
482 snprintf(name, sizeof(name), "msic_%d", of_node_to_nid(dn));
483
484 debugfs_create_file(name, 0600, powerpc_debugfs_root, msic, &fops_msic);
485 }
486 #endif /* DEBUG */
487