1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright 2016,2017 IBM Corporation.
4 */
5
6 #define pr_fmt(fmt) "xive: " fmt
7
8 #include <linux/types.h>
9 #include <linux/irq.h>
10 #include <linux/smp.h>
11 #include <linux/interrupt.h>
12 #include <linux/init.h>
13 #include <linux/of.h>
14 #include <linux/slab.h>
15 #include <linux/spinlock.h>
16 #include <linux/bitmap.h>
17 #include <linux/cpumask.h>
18 #include <linux/mm.h>
19 #include <linux/delay.h>
20 #include <linux/libfdt.h>
21
22 #include <asm/machdep.h>
23 #include <asm/prom.h>
24 #include <asm/io.h>
25 #include <asm/smp.h>
26 #include <asm/irq.h>
27 #include <asm/errno.h>
28 #include <asm/xive.h>
29 #include <asm/xive-regs.h>
30 #include <asm/hvcall.h>
31 #include <asm/svm.h>
32 #include <asm/ultravisor.h>
33
34 #include "xive-internal.h"
35
/* EQ page size (as a shift), picked from "ibm,xive-eq-sizes" at init time */
static u32 xive_queue_shift;

/*
 * One contiguous range of hardware IRQ numbers (from the
 * "ibm,xive-lisn-ranges" DT property) with an allocation bitmap
 * tracking which numbers of the range are in use.
 */
struct xive_irq_bitmap {
	unsigned long *bitmap;	/* one bit per IRQ of the range */
	unsigned int base;	/* first hw IRQ number of the range */
	unsigned int count;	/* number of IRQs in the range */
	spinlock_t lock;	/* protects bitmap updates */
	struct list_head list;	/* linkage on xive_irq_bitmaps */
};

/* All IRQ ranges handed out by the hypervisor */
static LIST_HEAD(xive_irq_bitmaps);
47
xive_irq_bitmap_add(int base,int count)48 static int xive_irq_bitmap_add(int base, int count)
49 {
50 struct xive_irq_bitmap *xibm;
51
52 xibm = kzalloc(sizeof(*xibm), GFP_KERNEL);
53 if (!xibm)
54 return -ENOMEM;
55
56 spin_lock_init(&xibm->lock);
57 xibm->base = base;
58 xibm->count = count;
59 xibm->bitmap = bitmap_zalloc(xibm->count, GFP_KERNEL);
60 if (!xibm->bitmap) {
61 kfree(xibm);
62 return -ENOMEM;
63 }
64 list_add(&xibm->list, &xive_irq_bitmaps);
65
66 pr_info("Using IRQ range [%x-%x]", xibm->base,
67 xibm->base + xibm->count - 1);
68 return 0;
69 }
70
xive_irq_bitmap_remove_all(void)71 static void xive_irq_bitmap_remove_all(void)
72 {
73 struct xive_irq_bitmap *xibm, *tmp;
74
75 list_for_each_entry_safe(xibm, tmp, &xive_irq_bitmaps, list) {
76 list_del(&xibm->list);
77 bitmap_free(xibm->bitmap);
78 kfree(xibm);
79 }
80 }
81
__xive_irq_bitmap_alloc(struct xive_irq_bitmap * xibm)82 static int __xive_irq_bitmap_alloc(struct xive_irq_bitmap *xibm)
83 {
84 int irq;
85
86 irq = find_first_zero_bit(xibm->bitmap, xibm->count);
87 if (irq != xibm->count) {
88 set_bit(irq, xibm->bitmap);
89 irq += xibm->base;
90 } else {
91 irq = -ENOMEM;
92 }
93
94 return irq;
95 }
96
xive_irq_bitmap_alloc(void)97 static int xive_irq_bitmap_alloc(void)
98 {
99 struct xive_irq_bitmap *xibm;
100 unsigned long flags;
101 int irq = -ENOENT;
102
103 list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
104 spin_lock_irqsave(&xibm->lock, flags);
105 irq = __xive_irq_bitmap_alloc(xibm);
106 spin_unlock_irqrestore(&xibm->lock, flags);
107 if (irq >= 0)
108 break;
109 }
110 return irq;
111 }
112
xive_irq_bitmap_free(int irq)113 static void xive_irq_bitmap_free(int irq)
114 {
115 unsigned long flags;
116 struct xive_irq_bitmap *xibm;
117
118 list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
119 if ((irq >= xibm->base) && (irq < xibm->base + xibm->count)) {
120 spin_lock_irqsave(&xibm->lock, flags);
121 clear_bit(irq - xibm->base, xibm->bitmap);
122 spin_unlock_irqrestore(&xibm->lock, flags);
123 break;
124 }
125 }
126 }
127
128
129 /* Based on the similar routines in RTAS */
plpar_busy_delay_time(long rc)130 static unsigned int plpar_busy_delay_time(long rc)
131 {
132 unsigned int ms = 0;
133
134 if (H_IS_LONG_BUSY(rc)) {
135 ms = get_longbusy_msecs(rc);
136 } else if (rc == H_BUSY) {
137 ms = 10; /* seems appropriate for XIVE hcalls */
138 }
139
140 return ms;
141 }
142
/*
 * Busy-wait for the delay implied by a busy hcall status.  Returns the
 * delay applied in milliseconds, 0 when @rc was not a busy status
 * (i.e. the caller's retry loop should stop).
 *
 * Takes a 'long' so the raw hcall return codes that every caller
 * passes are not truncated; this also matches plpar_busy_delay_time().
 */
static unsigned int plpar_busy_delay(long rc)
{
	unsigned int ms;

	ms = plpar_busy_delay_time(rc);
	if (ms)
		mdelay(ms);

	return ms;
}
153
154 /*
155 * Note: this call has a partition wide scope and can take a while to
156 * complete. If it returns H_LONG_BUSY_* it should be retried
157 * periodically.
158 */
plpar_int_reset(unsigned long flags)159 static long plpar_int_reset(unsigned long flags)
160 {
161 long rc;
162
163 do {
164 rc = plpar_hcall_norets(H_INT_RESET, flags);
165 } while (plpar_busy_delay(rc));
166
167 if (rc)
168 pr_err("H_INT_RESET failed %ld\n", rc);
169
170 return rc;
171 }
172
/*
 * H_INT_GET_SOURCE_INFO: query the ESB characteristics of interrupt
 * source @lisn.  On success fills in the source flags, the EOI and
 * trigger page addresses and the ESB page size (as a shift).
 *
 * Returns 0 on success or the non-zero hcall status on failure; the
 * output parameters are only valid on success.
 */
static long plpar_int_get_source_info(unsigned long flags,
				      unsigned long lisn,
				      unsigned long *src_flags,
				      unsigned long *eoi_page,
				      unsigned long *trig_page,
				      unsigned long *esb_shift)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	/* Retry while the hypervisor reports a busy status */
	do {
		rc = plpar_hcall(H_INT_GET_SOURCE_INFO, retbuf, flags, lisn);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_GET_SOURCE_INFO lisn=%ld failed %ld\n", lisn, rc);
		return rc;
	}

	*src_flags = retbuf[0];
	*eoi_page = retbuf[1];
	*trig_page = retbuf[2];
	*esb_shift = retbuf[3];

	pr_devel("H_INT_GET_SOURCE_INFO flags=%lx eoi=%lx trig=%lx shift=%lx\n",
		 retbuf[0], retbuf[1], retbuf[2], retbuf[3]);

	return 0;
}
202
/* H_INT_SET_SOURCE_CONFIG flags (PAPR bit numbering: bit 0 is the MSB) */
#define XIVE_SRC_SET_EISN (1ull << (63 - 62))
#define XIVE_SRC_MASK (1ull << (63 - 63)) /* unused */
205
plpar_int_set_source_config(unsigned long flags,unsigned long lisn,unsigned long target,unsigned long prio,unsigned long sw_irq)206 static long plpar_int_set_source_config(unsigned long flags,
207 unsigned long lisn,
208 unsigned long target,
209 unsigned long prio,
210 unsigned long sw_irq)
211 {
212 long rc;
213
214
215 pr_devel("H_INT_SET_SOURCE_CONFIG flags=%lx lisn=%lx target=%lx prio=%lx sw_irq=%lx\n",
216 flags, lisn, target, prio, sw_irq);
217
218
219 do {
220 rc = plpar_hcall_norets(H_INT_SET_SOURCE_CONFIG, flags, lisn,
221 target, prio, sw_irq);
222 } while (plpar_busy_delay(rc));
223
224 if (rc) {
225 pr_err("H_INT_SET_SOURCE_CONFIG lisn=%ld target=%lx prio=%lx failed %ld\n",
226 lisn, target, prio, rc);
227 return rc;
228 }
229
230 return 0;
231 }
232
plpar_int_get_source_config(unsigned long flags,unsigned long lisn,unsigned long * target,unsigned long * prio,unsigned long * sw_irq)233 static long plpar_int_get_source_config(unsigned long flags,
234 unsigned long lisn,
235 unsigned long *target,
236 unsigned long *prio,
237 unsigned long *sw_irq)
238 {
239 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
240 long rc;
241
242 pr_devel("H_INT_GET_SOURCE_CONFIG flags=%lx lisn=%lx\n", flags, lisn);
243
244 do {
245 rc = plpar_hcall(H_INT_GET_SOURCE_CONFIG, retbuf, flags, lisn,
246 target, prio, sw_irq);
247 } while (plpar_busy_delay(rc));
248
249 if (rc) {
250 pr_err("H_INT_GET_SOURCE_CONFIG lisn=%ld failed %ld\n",
251 lisn, rc);
252 return rc;
253 }
254
255 *target = retbuf[0];
256 *prio = retbuf[1];
257 *sw_irq = retbuf[2];
258
259 pr_devel("H_INT_GET_SOURCE_CONFIG target=%lx prio=%lx sw_irq=%lx\n",
260 retbuf[0], retbuf[1], retbuf[2]);
261
262 return 0;
263 }
264
plpar_int_get_queue_info(unsigned long flags,unsigned long target,unsigned long priority,unsigned long * esn_page,unsigned long * esn_size)265 static long plpar_int_get_queue_info(unsigned long flags,
266 unsigned long target,
267 unsigned long priority,
268 unsigned long *esn_page,
269 unsigned long *esn_size)
270 {
271 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
272 long rc;
273
274 do {
275 rc = plpar_hcall(H_INT_GET_QUEUE_INFO, retbuf, flags, target,
276 priority);
277 } while (plpar_busy_delay(rc));
278
279 if (rc) {
280 pr_err("H_INT_GET_QUEUE_INFO cpu=%ld prio=%ld failed %ld\n",
281 target, priority, rc);
282 return rc;
283 }
284
285 *esn_page = retbuf[0];
286 *esn_size = retbuf[1];
287
288 pr_devel("H_INT_GET_QUEUE_INFO page=%lx size=%lx\n",
289 retbuf[0], retbuf[1]);
290
291 return 0;
292 }
293
/* H_INT_SET_QUEUE_CONFIG flag (PAPR bit numbering: bit 0 is the MSB) */
#define XIVE_EQ_ALWAYS_NOTIFY (1ull << (63 - 63))
295
plpar_int_set_queue_config(unsigned long flags,unsigned long target,unsigned long priority,unsigned long qpage,unsigned long qsize)296 static long plpar_int_set_queue_config(unsigned long flags,
297 unsigned long target,
298 unsigned long priority,
299 unsigned long qpage,
300 unsigned long qsize)
301 {
302 long rc;
303
304 pr_devel("H_INT_SET_QUEUE_CONFIG flags=%lx target=%lx priority=%lx qpage=%lx qsize=%lx\n",
305 flags, target, priority, qpage, qsize);
306
307 do {
308 rc = plpar_hcall_norets(H_INT_SET_QUEUE_CONFIG, flags, target,
309 priority, qpage, qsize);
310 } while (plpar_busy_delay(rc));
311
312 if (rc) {
313 pr_err("H_INT_SET_QUEUE_CONFIG cpu=%ld prio=%ld qpage=%lx returned %ld\n",
314 target, priority, qpage, rc);
315 return rc;
316 }
317
318 return 0;
319 }
320
plpar_int_sync(unsigned long flags,unsigned long lisn)321 static long plpar_int_sync(unsigned long flags, unsigned long lisn)
322 {
323 long rc;
324
325 do {
326 rc = plpar_hcall_norets(H_INT_SYNC, flags, lisn);
327 } while (plpar_busy_delay(rc));
328
329 if (rc) {
330 pr_err("H_INT_SYNC lisn=%ld returned %ld\n", lisn, rc);
331 return rc;
332 }
333
334 return 0;
335 }
336
/* H_INT_ESB flag: perform a store to the ESB rather than a load */
#define XIVE_ESB_FLAG_STORE (1ull << (63 - 63))
338
plpar_int_esb(unsigned long flags,unsigned long lisn,unsigned long offset,unsigned long in_data,unsigned long * out_data)339 static long plpar_int_esb(unsigned long flags,
340 unsigned long lisn,
341 unsigned long offset,
342 unsigned long in_data,
343 unsigned long *out_data)
344 {
345 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
346 long rc;
347
348 pr_devel("H_INT_ESB flags=%lx lisn=%lx offset=%lx in=%lx\n",
349 flags, lisn, offset, in_data);
350
351 do {
352 rc = plpar_hcall(H_INT_ESB, retbuf, flags, lisn, offset,
353 in_data);
354 } while (plpar_busy_delay(rc));
355
356 if (rc) {
357 pr_err("H_INT_ESB lisn=%ld offset=%ld returned %ld\n",
358 lisn, offset, rc);
359 return rc;
360 }
361
362 *out_data = retbuf[0];
363
364 return 0;
365 }
366
/*
 * ESB accessor used by the XIVE core when the source requires the
 * H_INT_ESB hcall.  Returns the value read (0 for a store), or -1 (all
 * ones) when the hcall fails.
 */
static u64 xive_spapr_esb_rw(u32 lisn, u32 offset, u64 data, bool write)
{
	unsigned long load_val;

	if (plpar_int_esb(write ? XIVE_ESB_FLAG_STORE : 0,
			  lisn, offset, data, &load_val))
		return -1;

	return write ? 0 : load_val;
}
379
/* Source flags returned by H_INT_GET_SOURCE_INFO (bit 0 is the MSB) */
#define XIVE_SRC_H_INT_ESB (1ull << (63 - 60))
#define XIVE_SRC_LSI (1ull << (63 - 61))
#define XIVE_SRC_TRIGGER (1ull << (63 - 62))
#define XIVE_SRC_STORE_EOI (1ull << (63 - 63))
384
/*
 * Query the ESB characteristics of @hw_irq from the hypervisor and
 * fill in @data, mapping the ESB pages unless interrupt management
 * must go through the H_INT_ESB hcall.
 *
 * Returns 0 on success, -EINVAL when the source info hcall fails, and
 * -ENOMEM when an ESB page cannot be mapped.
 */
static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
	long rc;
	unsigned long flags;
	unsigned long eoi_page;
	unsigned long trig_page;
	unsigned long esb_shift;

	memset(data, 0, sizeof(*data));

	rc = plpar_int_get_source_info(0, hw_irq, &flags, &eoi_page, &trig_page,
				       &esb_shift);
	if (rc)
		return -EINVAL;

	/* Translate the hypervisor source flags into XIVE core flags */
	if (flags & XIVE_SRC_H_INT_ESB)
		data->flags |= XIVE_IRQ_FLAG_H_INT_ESB;
	if (flags & XIVE_SRC_STORE_EOI)
		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
	if (flags & XIVE_SRC_LSI)
		data->flags |= XIVE_IRQ_FLAG_LSI;
	data->eoi_page = eoi_page;
	data->esb_shift = esb_shift;
	data->trig_page = trig_page;

	data->hw_irq = hw_irq;

	/*
	 * No chip-id for the sPAPR backend. This has an impact how we
	 * pick a target. See xive_pick_irq_target().
	 */
	data->src_chip = XIVE_INVALID_CHIP_ID;

	/*
	 * When the H_INT_ESB flag is set, the H_INT_ESB hcall should
	 * be used for interrupt management. Skip the remapping of the
	 * ESB pages which are not available.
	 */
	if (data->flags & XIVE_IRQ_FLAG_H_INT_ESB)
		return 0;

	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
	if (!data->eoi_mmio) {
		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}

	/* Full function page supports trigger */
	if (flags & XIVE_SRC_TRIGGER) {
		data->trig_mmio = data->eoi_mmio;
		return 0;
	}

	/* Otherwise the trigger page is separate and mapped on its own */
	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
	if (!data->trig_mmio) {
		iounmap(data->eoi_mmio);
		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}
	return 0;
}
446
/*
 * Route @hw_irq to (@target, @prio) and associate it with Linux irq
 * number @sw_irq.  Returns 0 on success, -ENXIO on hcall failure.
 */
static int xive_spapr_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
	if (plpar_int_set_source_config(XIVE_SRC_SET_EISN, hw_irq, target,
					prio, sw_irq))
		return -ENXIO;

	return 0;
}
456
/*
 * Read back the routing of @hw_irq.  Returns 0 on success with the
 * outputs filled in, -ENXIO on hcall failure (outputs untouched).
 *
 * The outputs are only copied on success: on failure the hcall wrapper
 * leaves its output parameters unset, so copying them would read
 * uninitialized stack values.
 */
static int xive_spapr_get_irq_config(u32 hw_irq, u32 *target, u8 *prio,
				     u32 *sw_irq)
{
	long rc;
	unsigned long h_target;
	unsigned long h_prio;
	unsigned long h_sw_irq;

	rc = plpar_int_get_source_config(0, hw_irq, &h_target, &h_prio,
					 &h_sw_irq);
	if (rc)
		return -ENXIO;

	*target = h_target;
	*prio = h_prio;
	*sw_irq = h_sw_irq;

	return 0;
}
474
/* This can be called multiple time to change a queue configuration */
/*
 * Configure (or, when @order is 0, disable) the event queue of
 * (@target, @prio) with the page @qpage of size 2^@order bytes.
 * q->qpage is only set once the hypervisor has accepted the page, so
 * on failure ownership of @qpage stays with the caller.
 *
 * Returns 0 on success, -EINVAL on a missing queue page, -EIO on
 * hcall failure.
 */
static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
				      __be32 *qpage, u32 order)
{
	s64 rc = 0;
	unsigned long esn_page;
	unsigned long esn_size;
	u64 flags, qpage_phys;

	/* If there's an actual queue page, clean it */
	if (order) {
		if (WARN_ON(!qpage))
			return -EINVAL;
		qpage_phys = __pa(qpage);
	} else {
		qpage_phys = 0;
	}

	/* Initialize the rest of the fields */
	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
	q->idx = 0;
	q->toggle = 0;

	rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size);
	if (rc) {
		pr_err("Error %lld getting queue info CPU %d prio %d\n", rc,
		       target, prio);
		rc = -EIO;
		goto fail;
	}

	/* TODO: add support for the notification page */
	q->eoi_phys = esn_page;

	/* Default is to always notify */
	flags = XIVE_EQ_ALWAYS_NOTIFY;

	/* Configure and enable the queue in HW */
	rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
	if (rc) {
		pr_err("Error %lld setting queue for CPU %d prio %d\n", rc,
		       target, prio);
		rc = -EIO;
	} else {
		q->qpage = qpage;
		/* Secure guests must share the queue page with the hypervisor */
		if (is_secure_guest())
			uv_share_page(PHYS_PFN(qpage_phys),
					1 << xive_alloc_order(order));
	}
fail:
	return rc;
}
527
/*
 * Allocate and configure the event queue page of priority @prio for
 * @cpu.  Returns 0 on success or a negative errno.
 *
 * On configuration failure the freshly allocated queue page is freed
 * here: xive_spapr_configure_queue() only records it in q->qpage on
 * success, so it would otherwise leak.
 */
static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
				  u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	__be32 *qpage;
	int rc;

	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
	if (IS_ERR(qpage))
		return PTR_ERR(qpage);

	rc = xive_spapr_configure_queue(get_hard_smp_processor_id(cpu),
					q, prio, qpage, xive_queue_shift);
	if (rc)
		free_pages((unsigned long)qpage,
			   xive_alloc_order(xive_queue_shift));

	return rc;
}
541
/*
 * Disable the event queue of priority @prio for @cpu in the hypervisor
 * and free its page.  The HW error, if any, is only reported: the
 * local teardown proceeds regardless.
 */
static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
				     u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	unsigned int alloc_order;
	long rc;
	int hw_cpu = get_hard_smp_processor_id(cpu);

	/* A zero page/size resets the queue in HW */
	rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0);
	if (rc)
		pr_err("Error %ld setting queue for CPU %d prio %d\n", rc,
		       hw_cpu, prio);

	alloc_order = xive_alloc_order(xive_queue_shift);
	/* Secure guests shared the page at setup; unshare before freeing */
	if (is_secure_guest())
		uv_unshare_page(PHYS_PFN(__pa(q->qpage)), 1 << alloc_order);
	free_pages((unsigned long)q->qpage, alloc_order);
	q->qpage = NULL;
}
561
/*
 * Backend match hook: claim every controller node.
 */
static bool xive_spapr_match(struct device_node *node)
{
	/* Ignore cascaded controllers for the moment */
	return true;
}
567
568 #ifdef CONFIG_SMP
xive_spapr_get_ipi(unsigned int cpu,struct xive_cpu * xc)569 static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
570 {
571 int irq = xive_irq_bitmap_alloc();
572
573 if (irq < 0) {
574 pr_err("Failed to allocate IPI on CPU %d\n", cpu);
575 return -ENXIO;
576 }
577
578 xc->hw_ipi = irq;
579 return 0;
580 }
581
xive_spapr_put_ipi(unsigned int cpu,struct xive_cpu * xc)582 static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
583 {
584 if (xc->hw_ipi == XIVE_BAD_IRQ)
585 return;
586
587 xive_irq_bitmap_free(xc->hw_ipi);
588 xc->hw_ipi = XIVE_BAD_IRQ;
589 }
590 #endif /* CONFIG_SMP */
591
/*
 * Shutdown hook: reset the XIVE state of the whole partition via
 * H_INT_RESET (partition wide scope, may take a while).
 */
static void xive_spapr_shutdown(void)
{
	plpar_int_reset(0);
}
596
597 /*
598 * Perform an "ack" cycle on the current thread. Grab the pending
599 * active priorities and update the CPPR to the most favored one.
600 */
xive_spapr_update_pending(struct xive_cpu * xc)601 static void xive_spapr_update_pending(struct xive_cpu *xc)
602 {
603 u8 nsr, cppr;
604 u16 ack;
605
606 /*
607 * Perform the "Acknowledge O/S to Register" cycle.
608 *
609 * Let's speedup the access to the TIMA using the raw I/O
610 * accessor as we don't need the synchronisation routine of
611 * the higher level ones
612 */
613 ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_OS_REG));
614
615 /* Synchronize subsequent queue accesses */
616 mb();
617
618 /*
619 * Grab the CPPR and the "NSR" field which indicates the source
620 * of the interrupt (if any)
621 */
622 cppr = ack & 0xff;
623 nsr = ack >> 8;
624
625 if (nsr & TM_QW1_NSR_EO) {
626 if (cppr == 0xff)
627 return;
628 /* Mark the priority pending */
629 xc->pending_prio |= 1 << cppr;
630
631 /*
632 * A new interrupt should never have a CPPR less favored
633 * than our current one.
634 */
635 if (cppr >= xc->cppr)
636 pr_err("CPU %d odd ack CPPR, got %d at %d\n",
637 smp_processor_id(), cppr, xc->cppr);
638
639 /* Update our idea of what the CPPR is */
640 xc->cppr = cppr;
641 }
642 }
643
/*
 * Per-CPU setup hook.  The hypervisor owns the TIMA configuration on
 * sPAPR, so there is nothing to program here.
 */
static void xive_spapr_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	/* Only some debug on the TIMA settings */
	pr_debug("(HW value: %08x %08x %08x)\n",
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD0),
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD1),
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD2));
}
652
/* Per-CPU teardown hook: no per-CPU state to undo on sPAPR */
static void xive_spapr_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	/* Nothing to do */;
}
657
/* Synchronize interrupt source @hw_irq via H_INT_SYNC */
static void xive_spapr_sync_source(u32 hw_irq)
{
	/* Specs are unclear on what this is doing */
	plpar_int_sync(0, hw_irq);
}
663
xive_spapr_debug_show(struct seq_file * m,void * private)664 static int xive_spapr_debug_show(struct seq_file *m, void *private)
665 {
666 struct xive_irq_bitmap *xibm;
667 char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
668
669 if (!buf)
670 return -ENOMEM;
671
672 list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
673 memset(buf, 0, PAGE_SIZE);
674 bitmap_print_to_pagebuf(true, buf, xibm->bitmap, xibm->count);
675 seq_printf(m, "bitmap #%d: %s", xibm->count, buf);
676 }
677 kfree(buf);
678
679 return 0;
680 }
681
/* Backend operations registered with the XIVE core (xive_core_init) */
static const struct xive_ops xive_spapr_ops = {
	.populate_irq_data = xive_spapr_populate_irq_data,
	.configure_irq = xive_spapr_configure_irq,
	.get_irq_config = xive_spapr_get_irq_config,
	.setup_queue = xive_spapr_setup_queue,
	.cleanup_queue = xive_spapr_cleanup_queue,
	.match = xive_spapr_match,
	.shutdown = xive_spapr_shutdown,
	.update_pending = xive_spapr_update_pending,
	.setup_cpu = xive_spapr_setup_cpu,
	.teardown_cpu = xive_spapr_teardown_cpu,
	.sync_source = xive_spapr_sync_source,
	.esb_rw = xive_spapr_esb_rw,
#ifdef CONFIG_SMP
	.get_ipi = xive_spapr_get_ipi,
	.put_ipi = xive_spapr_put_ipi,
	/*
	 * NOTE(review): debug_show is only registered when CONFIG_SMP is
	 * set, although xive_spapr_debug_show() itself is built
	 * unconditionally — confirm this placement is intentional.
	 */
	.debug_show = xive_spapr_debug_show,
#endif /* CONFIG_SMP */
	.name = "spapr",
};
702
703 /*
704 * get max priority from "/ibm,plat-res-int-priorities"
705 */
xive_get_max_prio(u8 * max_prio)706 static bool xive_get_max_prio(u8 *max_prio)
707 {
708 struct device_node *rootdn;
709 const __be32 *reg;
710 u32 len;
711 int prio, found;
712
713 rootdn = of_find_node_by_path("/");
714 if (!rootdn) {
715 pr_err("not root node found !\n");
716 return false;
717 }
718
719 reg = of_get_property(rootdn, "ibm,plat-res-int-priorities", &len);
720 of_node_put(rootdn);
721 if (!reg) {
722 pr_err("Failed to read 'ibm,plat-res-int-priorities' property\n");
723 return false;
724 }
725
726 if (len % (2 * sizeof(u32)) != 0) {
727 pr_err("invalid 'ibm,plat-res-int-priorities' property\n");
728 return false;
729 }
730
731 /* HW supports priorities in the range [0-7] and 0xFF is a
732 * wildcard priority used to mask. We scan the ranges reserved
733 * by the hypervisor to find the lowest priority we can use.
734 */
735 found = 0xFF;
736 for (prio = 0; prio < 8; prio++) {
737 int reserved = 0;
738 int i;
739
740 for (i = 0; i < len / (2 * sizeof(u32)); i++) {
741 int base = be32_to_cpu(reg[2 * i]);
742 int range = be32_to_cpu(reg[2 * i + 1]);
743
744 if (prio >= base && prio < base + range)
745 reserved++;
746 }
747
748 if (!reserved)
749 found = prio;
750 }
751
752 if (found == 0xFF) {
753 pr_err("no valid priority found in 'ibm,plat-res-int-priorities'\n");
754 return false;
755 }
756
757 *max_prio = found;
758 return true;
759 }
760
get_vec5_feature(unsigned int index)761 static const u8 *get_vec5_feature(unsigned int index)
762 {
763 unsigned long root, chosen;
764 int size;
765 const u8 *vec5;
766
767 root = of_get_flat_dt_root();
768 chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
769 if (chosen == -FDT_ERR_NOTFOUND)
770 return NULL;
771
772 vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
773 if (!vec5)
774 return NULL;
775
776 if (size <= index)
777 return NULL;
778
779 return vec5 + index;
780 }
781
xive_spapr_disabled(void)782 static bool __init xive_spapr_disabled(void)
783 {
784 const u8 *vec5_xive;
785
786 vec5_xive = get_vec5_feature(OV5_INDX(OV5_XIVE_SUPPORT));
787 if (vec5_xive) {
788 u8 val;
789
790 val = *vec5_xive & OV5_FEAT(OV5_XIVE_SUPPORT);
791 switch (val) {
792 case OV5_FEAT(OV5_XIVE_EITHER):
793 case OV5_FEAT(OV5_XIVE_LEGACY):
794 break;
795 case OV5_FEAT(OV5_XIVE_EXPLOIT):
796 /* Hypervisor only supports XIVE */
797 if (xive_cmdline_disabled)
798 pr_warn("WARNING: Ignoring cmdline option xive=off\n");
799 return false;
800 default:
801 pr_warn("%s: Unknown xive support option: 0x%x\n",
802 __func__, val);
803 break;
804 }
805 }
806
807 return xive_cmdline_disabled;
808 }
809
/*
 * Probe and initialize the sPAPR XIVE backend: map the OS TIMA, pick
 * the maximum usable priority, seed the IRQ number allocator from the
 * device tree ranges, choose an EQ page size and register the backend
 * with the XIVE core.
 *
 * Returns true when the backend is up, false when XIVE is disabled or
 * any step fails (everything acquired so far is unwound).
 */
bool __init xive_spapr_init(void)
{
	struct device_node *np;
	struct resource r;
	void __iomem *tima;
	struct property *prop;
	u8 max_prio;
	u32 val;
	u32 len;
	const __be32 *reg;
	int i, err;

	if (xive_spapr_disabled())
		return false;

	pr_devel("%s()\n", __func__);
	np = of_find_compatible_node(NULL, NULL, "ibm,power-ivpe");
	if (!np) {
		pr_devel("not found !\n");
		return false;
	}
	pr_devel("Found %s\n", np->full_name);

	/* Resource 1 is the OS ring TIMA */
	if (of_address_to_resource(np, 1, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		goto err_put;
	}
	tima = ioremap(r.start, resource_size(&r));
	if (!tima) {
		pr_err("Failed to map thread mgmnt area\n");
		goto err_put;
	}

	if (!xive_get_max_prio(&max_prio))
		goto err_unmap;

	/* Feed the IRQ number allocator with the ranges given in the DT */
	reg = of_get_property(np, "ibm,xive-lisn-ranges", &len);
	if (!reg) {
		pr_err("Failed to read 'ibm,xive-lisn-ranges' property\n");
		goto err_unmap;
	}

	/* The property is a list of (base, count) u32 pairs */
	if (len % (2 * sizeof(u32)) != 0) {
		pr_err("invalid 'ibm,xive-lisn-ranges' property\n");
		goto err_unmap;
	}

	for (i = 0; i < len / (2 * sizeof(u32)); i++, reg += 2) {
		err = xive_irq_bitmap_add(be32_to_cpu(reg[0]),
					  be32_to_cpu(reg[1]));
		if (err < 0)
			goto err_mem_free;
	}

	/* Iterate the EQ sizes and pick one */
	of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, reg, val) {
		xive_queue_shift = val;
		/* Prefer a queue size matching the kernel page size */
		if (val == PAGE_SHIFT)
			break;
	}

	/* Initialize XIVE core with our backend */
	if (!xive_core_init(np, &xive_spapr_ops, tima, TM_QW1_OS, max_prio))
		goto err_mem_free;

	of_node_put(np);
	pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
	return true;

	/* Unwind in reverse order of acquisition */
err_mem_free:
	xive_irq_bitmap_remove_all();
err_unmap:
	iounmap(tima);
err_put:
	of_node_put(np);
	return false;
}
889
890 machine_arch_initcall(pseries, xive_core_debug_init);
891