// SPDX-License-Identifier: GPL-2.0

#define pr_fmt(fmt) "mvebu-sei: " fmt

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>

/* Cause register */
#define GICP_SECR(idx)			(0x0  + ((idx) * 0x4))
/* Mask register */
#define GICP_SEMR(idx)			(0x20 + ((idx) * 0x4))
#define GICP_SET_SEI_OFFSET		0x30

#define SEI_IRQ_COUNT_PER_REG		32
#define SEI_IRQ_REG_COUNT		2
#define SEI_IRQ_COUNT			(SEI_IRQ_COUNT_PER_REG * SEI_IRQ_REG_COUNT)
#define SEI_IRQ_REG_IDX(irq_id)		((irq_id) / SEI_IRQ_COUNT_PER_REG)
#define SEI_IRQ_REG_BIT(irq_id)		((irq_id) % SEI_IRQ_COUNT_PER_REG)
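
/*
 * The 64 SEI lines are spread over two 32-bit cause/mask register pairs:
 * SEI_IRQ_REG_IDX() selects the register and SEI_IRQ_REG_BIT() the bit
 * within it. For example, hwirq 35 maps to bit 3 of the second pair,
 * i.e. GICP_SECR(1) at offset 0x4 and GICP_SEMR(1) at offset 0x24.
 */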

struct mvebu_sei_interrupt_range {
	u32 first;
	u32 size;
};

struct mvebu_sei_caps {
	struct mvebu_sei_interrupt_range ap_range;
	struct mvebu_sei_interrupt_range cp_range;
};

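/*
 * Driver state: one root "SEI" domain covering all 64 hwirqs, plus two
 * child hierarchy domains stacked on it, one for wired interrupts coming
 * from the AP itself and one for MSI-triggered interrupts coming from
 * the CPs. The cp_msi_bitmap tracks which CP hwirqs are in use.
 */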
struct mvebu_sei {
	struct device *dev;
	void __iomem *base;
	struct resource *res;
	struct irq_domain *sei_domain;
	struct irq_domain *ap_domain;
	struct irq_domain *cp_domain;
	const struct mvebu_sei_caps *caps;

	/* Lock on MSI allocations/releases */
	struct mutex cp_msi_lock;
	DECLARE_BITMAP(cp_msi_bitmap, SEI_IRQ_COUNT);

	/* Lock on IRQ masking register */
	raw_spinlock_t mask_lock;
};

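/*
 * The cause bits in GICP_SECR are used as write-one-to-clear here (see
 * mvebu_sei_reset() below), so acking an interrupt is a single write of
 * its bit to the corresponding cause register.
 */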
static void mvebu_sei_ack_irq(struct irq_data *d)
{
	struct mvebu_sei *sei = irq_data_get_irq_chip_data(d);
	u32 reg_idx = SEI_IRQ_REG_IDX(d->hwirq);

	writel_relaxed(BIT(SEI_IRQ_REG_BIT(d->hwirq)),
		       sei->base + GICP_SECR(reg_idx));
}

static void mvebu_sei_mask_irq(struct irq_data *d)
{
	struct mvebu_sei *sei = irq_data_get_irq_chip_data(d);
	u32 reg, reg_idx = SEI_IRQ_REG_IDX(d->hwirq);
	unsigned long flags;

	/* 1 disables the interrupt */
	raw_spin_lock_irqsave(&sei->mask_lock, flags);
	reg = readl_relaxed(sei->base + GICP_SEMR(reg_idx));
	reg |= BIT(SEI_IRQ_REG_BIT(d->hwirq));
	writel_relaxed(reg, sei->base + GICP_SEMR(reg_idx));
	raw_spin_unlock_irqrestore(&sei->mask_lock, flags);
}

static void mvebu_sei_unmask_irq(struct irq_data *d)
{
	struct mvebu_sei *sei = irq_data_get_irq_chip_data(d);
	u32 reg, reg_idx = SEI_IRQ_REG_IDX(d->hwirq);
	unsigned long flags;

	/* 0 enables the interrupt */
	raw_spin_lock_irqsave(&sei->mask_lock, flags);
	reg = readl_relaxed(sei->base + GICP_SEMR(reg_idx));
	reg &= ~BIT(SEI_IRQ_REG_BIT(d->hwirq));
	writel_relaxed(reg, sei->base + GICP_SEMR(reg_idx));
	raw_spin_unlock_irqrestore(&sei->mask_lock, flags);
}

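/*
 * All SEI interrupts funnel into a single parent SPI, so per-interrupt
 * affinity cannot be changed at this level; only the parent SPI's
 * affinity is meaningful.
 */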
static int mvebu_sei_set_affinity(struct irq_data *d,
				  const struct cpumask *mask_val,
				  bool force)
{
	return -EINVAL;
}

static int mvebu_sei_set_irqchip_state(struct irq_data *d,
				       enum irqchip_irq_state which,
				       bool state)
{
	/* We can only clear the pending state by acking the interrupt */
	if (which != IRQCHIP_STATE_PENDING || state)
		return -EINVAL;

	mvebu_sei_ack_irq(d);
	return 0;
}

static struct irq_chip mvebu_sei_irq_chip = {
	.name			= "SEI",
	.irq_ack		= mvebu_sei_ack_irq,
	.irq_mask		= mvebu_sei_mask_irq,
	.irq_unmask		= mvebu_sei_unmask_irq,
	.irq_set_affinity	= mvebu_sei_set_affinity,
	.irq_set_irqchip_state	= mvebu_sei_set_irqchip_state,
};

static int mvebu_sei_ap_set_type(struct irq_data *data, unsigned int type)
{
	if ((type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_HIGH)
		return -EINVAL;

	return 0;
}

static struct irq_chip mvebu_sei_ap_irq_chip = {
	.name			= "AP SEI",
	.irq_ack		= irq_chip_ack_parent,
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_set_type		= mvebu_sei_ap_set_type,
};

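/*
 * A CP raises an SEI by writing an interrupt number to the GICP_SET_SEI
 * doorbell register, which is what this MSI message encodes:
 * address = SEI base + GICP_SET_SEI_OFFSET, data = hwirq offset into the
 * CP range. For instance, with a (hypothetical) SEI base of 0xf03f0200,
 * CP hwirq 0 would be triggered by writing 21 (the first CP interrupt on
 * AP806) to 0xf03f0230.
 */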
static void mvebu_sei_cp_compose_msi_msg(struct irq_data *data,
					 struct msi_msg *msg)
{
	struct mvebu_sei *sei = data->chip_data;
	phys_addr_t set = sei->res->start + GICP_SET_SEI_OFFSET;

	msg->data = data->hwirq + sei->caps->cp_range.first;
	msg->address_lo = lower_32_bits(set);
	msg->address_hi = upper_32_bits(set);
}

static int mvebu_sei_cp_set_type(struct irq_data *data, unsigned int type)
{
	if ((type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	return 0;
}

static struct irq_chip mvebu_sei_cp_irq_chip = {
	.name			= "CP SEI",
	.irq_ack		= irq_chip_ack_parent,
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_set_type		= mvebu_sei_cp_set_type,
	.irq_compose_msi_msg	= mvebu_sei_cp_compose_msi_msg,
};

static int mvebu_sei_domain_alloc(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs, void *arg)
{
	struct mvebu_sei *sei = domain->host_data;
	struct irq_fwspec *fwspec = arg;

	/* Not much to do, just setup the irqdata */
	irq_domain_set_hwirq_and_chip(domain, virq, fwspec->param[0],
				      &mvebu_sei_irq_chip, sei);

	return 0;
}

static void mvebu_sei_domain_free(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}

static const struct irq_domain_ops mvebu_sei_domain_ops = {
	.alloc	= mvebu_sei_domain_alloc,
	.free	= mvebu_sei_domain_free,
};

static int mvebu_sei_ap_translate(struct irq_domain *domain,
				  struct irq_fwspec *fwspec,
				  unsigned long *hwirq,
				  unsigned int *type)
{
	*hwirq = fwspec->param[0];
	*type = IRQ_TYPE_LEVEL_HIGH;

	return 0;
}

static int mvebu_sei_ap_alloc(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs, void *arg)
{
	struct mvebu_sei *sei = domain->host_data;
	struct irq_fwspec fwspec;
	unsigned long hwirq;
	unsigned int type;
	int err;

	mvebu_sei_ap_translate(domain, arg, &hwirq, &type);

	fwspec.fwnode = domain->parent->fwnode;
	fwspec.param_count = 1;
	fwspec.param[0] = hwirq + sei->caps->ap_range.first;

	err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (err)
		return err;

	irq_domain_set_info(domain, virq, hwirq,
			    &mvebu_sei_ap_irq_chip, sei,
			    handle_level_irq, NULL, NULL);
	irq_set_probe(virq);

	return 0;
}

static const struct irq_domain_ops mvebu_sei_ap_domain_ops = {
	.translate	= mvebu_sei_ap_translate,
	.alloc		= mvebu_sei_ap_alloc,
	.free		= irq_domain_free_irqs_parent,
};

static void mvebu_sei_cp_release_irq(struct mvebu_sei *sei, unsigned long hwirq)
{
	mutex_lock(&sei->cp_msi_lock);
	clear_bit(hwirq, sei->cp_msi_bitmap);
	mutex_unlock(&sei->cp_msi_lock);
}

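/*
 * CP hwirqs are not fixed by the device tree: each allocation grabs the
 * first free bit in cp_msi_bitmap and maps it onto the parent SEI domain
 * at cp_range.first + hwirq. Only single-interrupt allocations are
 * supported for now.
 */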
static int mvebu_sei_cp_domain_alloc(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs,
				     void *args)
{
	struct mvebu_sei *sei = domain->host_data;
	struct irq_fwspec fwspec;
	unsigned long hwirq;
	int ret;

	/* The software only supports single allocations for now */
	if (nr_irqs != 1)
		return -ENOTSUPP;

	mutex_lock(&sei->cp_msi_lock);
	hwirq = find_first_zero_bit(sei->cp_msi_bitmap,
				    sei->caps->cp_range.size);
	if (hwirq < sei->caps->cp_range.size)
		set_bit(hwirq, sei->cp_msi_bitmap);
	mutex_unlock(&sei->cp_msi_lock);

	if (hwirq == sei->caps->cp_range.size)
		return -ENOSPC;

	fwspec.fwnode = domain->parent->fwnode;
	fwspec.param_count = 1;
	fwspec.param[0] = hwirq + sei->caps->cp_range.first;

	ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (ret)
		goto free_irq;

	irq_domain_set_info(domain, virq, hwirq,
			    &mvebu_sei_cp_irq_chip, sei,
			    handle_edge_irq, NULL, NULL);

	return 0;

free_irq:
	mvebu_sei_cp_release_irq(sei, hwirq);
	return ret;
}

static void mvebu_sei_cp_domain_free(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct mvebu_sei *sei = domain->host_data;
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);

	if (nr_irqs != 1 || d->hwirq >= sei->caps->cp_range.size) {
		dev_err(sei->dev, "Invalid hwirq %lu\n", d->hwirq);
		return;
	}

	mvebu_sei_cp_release_irq(sei, d->hwirq);
	irq_domain_free_irqs_parent(domain, virq, 1);
}

static const struct irq_domain_ops mvebu_sei_cp_domain_ops = {
	.alloc	= mvebu_sei_cp_domain_alloc,
	.free	= mvebu_sei_cp_domain_free,
};

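/*
 * irq_chip exposed through the platform MSI domain stacked on the CP
 * domain. The expected MSI writers are interrupt controllers in the CP
 * dies (e.g. the ICUs) rather than PCI endpoints.
 */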
static struct irq_chip mvebu_sei_msi_irq_chip = {
	.name		= "SEI pMSI",
	.irq_ack	= irq_chip_ack_parent,
	.irq_set_type	= irq_chip_set_type_parent,
};

static struct msi_domain_ops mvebu_sei_msi_ops = {
};

static struct msi_domain_info mvebu_sei_msi_domain_info = {
	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
	.ops	= &mvebu_sei_msi_ops,
	.chip	= &mvebu_sei_msi_irq_chip,
};

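/*
 * Chained handler for the single parent SPI: scan both cause registers
 * and dispatch every pending SEI to its mapping in the root domain. The
 * handler does not clear cause bits itself; acking is left to each
 * interrupt's flow handler.
 */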
static void mvebu_sei_handle_cascade_irq(struct irq_desc *desc)
{
	struct mvebu_sei *sei = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	u32 idx;

	chained_irq_enter(chip, desc);

	for (idx = 0; idx < SEI_IRQ_REG_COUNT; idx++) {
		unsigned long irqmap;
		int bit;

		irqmap = readl_relaxed(sei->base + GICP_SECR(idx));
		for_each_set_bit(bit, &irqmap, SEI_IRQ_COUNT_PER_REG) {
			unsigned long hwirq;
			unsigned int virq;

			hwirq = idx * SEI_IRQ_COUNT_PER_REG + bit;
			virq = irq_find_mapping(sei->sei_domain, hwirq);
			if (likely(virq)) {
				generic_handle_irq(virq);
				continue;
			}

			dev_warn(sei->dev,
				 "Spurious IRQ detected (hwirq %lu)\n", hwirq);
		}
	}

	chained_irq_exit(chip, desc);
}

static void mvebu_sei_reset(struct mvebu_sei *sei)
{
	u32 reg_idx;

	/* Clear IRQ cause registers, mask all interrupts */
	for (reg_idx = 0; reg_idx < SEI_IRQ_REG_COUNT; reg_idx++) {
		writel_relaxed(0xFFFFFFFF, sei->base + GICP_SECR(reg_idx));
		writel_relaxed(0xFFFFFFFF, sei->base + GICP_SEMR(reg_idx));
	}
}

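/*
 * Probe builds the domain hierarchy bottom-up:
 *
 *   parent SPI (GIC)
 *     `-- root SEI domain (linear, ap_range.size + cp_range.size hwirqs)
 *           |-- AP "wired" hierarchy domain (DOMAIN_BUS_WIRED)
 *           `-- CP hierarchy domain (DOMAIN_BUS_GENERIC_MSI) with a
 *               platform MSI domain stacked on top
 *
 * and only installs the chained handler on the parent SPI once
 * everything else is in place.
 */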
static int mvebu_sei_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct irq_domain *plat_domain;
	struct mvebu_sei *sei;
	u32 parent_irq;
	int ret;

	sei = devm_kzalloc(&pdev->dev, sizeof(*sei), GFP_KERNEL);
	if (!sei)
		return -ENOMEM;

	sei->dev = &pdev->dev;

	mutex_init(&sei->cp_msi_lock);
	raw_spin_lock_init(&sei->mask_lock);

	sei->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sei->base = devm_ioremap_resource(sei->dev, sei->res);
	if (IS_ERR(sei->base)) {
		dev_err(sei->dev, "Failed to remap SEI resource\n");
		return PTR_ERR(sei->base);
	}

	/* Retrieve the SEI capabilities with the interrupt ranges */
	sei->caps = of_device_get_match_data(&pdev->dev);
	if (!sei->caps) {
		dev_err(sei->dev,
			"Could not retrieve controller capabilities\n");
		return -EINVAL;
	}

	/*
	 * Reserve the single (top-level) parent SPI IRQ from which all the
	 * interrupts handled by this driver will be signaled.
	 */
	parent_irq = irq_of_parse_and_map(node, 0);
	if (parent_irq <= 0) {
		dev_err(sei->dev, "Failed to retrieve top-level SPI IRQ\n");
		return -ENODEV;
	}

	/* Create the root SEI domain */
	sei->sei_domain = irq_domain_create_linear(of_node_to_fwnode(node),
						   (sei->caps->ap_range.size +
						    sei->caps->cp_range.size),
						   &mvebu_sei_domain_ops,
						   sei);
	if (!sei->sei_domain) {
		dev_err(sei->dev, "Failed to create SEI IRQ domain\n");
		ret = -ENOMEM;
		goto dispose_irq;
	}

	irq_domain_update_bus_token(sei->sei_domain, DOMAIN_BUS_NEXUS);

	/* Create the 'wired' domain */
	sei->ap_domain = irq_domain_create_hierarchy(sei->sei_domain, 0,
						     sei->caps->ap_range.size,
						     of_node_to_fwnode(node),
						     &mvebu_sei_ap_domain_ops,
						     sei);
	if (!sei->ap_domain) {
		dev_err(sei->dev, "Failed to create AP IRQ domain\n");
		ret = -ENOMEM;
		goto remove_sei_domain;
	}

	irq_domain_update_bus_token(sei->ap_domain, DOMAIN_BUS_WIRED);

	/* Create the 'MSI' domain */
	sei->cp_domain = irq_domain_create_hierarchy(sei->sei_domain, 0,
						     sei->caps->cp_range.size,
						     of_node_to_fwnode(node),
						     &mvebu_sei_cp_domain_ops,
						     sei);
	if (!sei->cp_domain) {
		pr_err("Failed to create CPs IRQ domain\n");
		ret = -ENOMEM;
		goto remove_ap_domain;
	}

	irq_domain_update_bus_token(sei->cp_domain, DOMAIN_BUS_GENERIC_MSI);

	plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
						     &mvebu_sei_msi_domain_info,
						     sei->cp_domain);
	if (!plat_domain) {
		pr_err("Failed to create CPs MSI domain\n");
		ret = -ENOMEM;
		goto remove_cp_domain;
	}

	mvebu_sei_reset(sei);

	irq_set_chained_handler_and_data(parent_irq,
					 mvebu_sei_handle_cascade_irq,
					 sei);

	return 0;

remove_cp_domain:
	irq_domain_remove(sei->cp_domain);
remove_ap_domain:
	irq_domain_remove(sei->ap_domain);
remove_sei_domain:
	irq_domain_remove(sei->sei_domain);
dispose_irq:
	irq_dispose_mapping(parent_irq);

	return ret;
}

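/*
 * AP806 splits the 64 SEI lines between the two users: hwirqs 0-20 are
 * wired AP events, hwirqs 21-63 are the MSI doorbells reserved for CPs.
 */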
static struct mvebu_sei_caps mvebu_sei_ap806_caps = {
	.ap_range = {
		.first = 0,
		.size = 21,
	},
	.cp_range = {
		.first = 21,
		.size = 43,
	},
};

static const struct of_device_id mvebu_sei_of_match[] = {
	{
		.compatible = "marvell,ap806-sei",
		.data = &mvebu_sei_ap806_caps,
	},
	{},
};

static struct platform_driver mvebu_sei_driver = {
	.probe = mvebu_sei_probe,
	.driver = {
		.name = "mvebu-sei",
		.of_match_table = mvebu_sei_of_match,
	},
};
builtin_platform_driver(mvebu_sei_driver);