/*
 * Copyright 2016,2017 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/mm.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/hvcall.h>

#include "xive-internal.h"

static u32 xive_queue_shift;

struct xive_irq_bitmap {
	unsigned long *bitmap;
	unsigned int base;
	unsigned int count;
	spinlock_t lock;
	struct list_head list;
};

static LIST_HEAD(xive_irq_bitmaps);

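/*
 * Register a LISN range with the IRQ number allocator. One bitmap is
 * created per "ibm,xive-lisn-ranges" entry found in the device tree.
 */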
static int xive_irq_bitmap_add(int base, int count)
{
	struct xive_irq_bitmap *xibm;

	xibm = kzalloc(sizeof(*xibm), GFP_ATOMIC);
	if (!xibm)
		return -ENOMEM;

	spin_lock_init(&xibm->lock);
	xibm->base = base;
	xibm->count = count;
	xibm->bitmap = kzalloc(xibm->count, GFP_KERNEL);
	if (!xibm->bitmap) {
		kfree(xibm);
		return -ENOMEM;
	}
	list_add(&xibm->list, &xive_irq_bitmaps);

	pr_info("Using IRQ range [%x-%x]\n", xibm->base,
		xibm->base + xibm->count - 1);
	return 0;
}

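/* Find and reserve a free bit in the range. Caller must hold xibm->lock. */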
static int __xive_irq_bitmap_alloc(struct xive_irq_bitmap *xibm)
{
	int irq;

	irq = find_first_zero_bit(xibm->bitmap, xibm->count);
	if (irq != xibm->count) {
		set_bit(irq, xibm->bitmap);
		irq += xibm->base;
	} else {
		irq = -ENOMEM;
	}

	return irq;
}

static int xive_irq_bitmap_alloc(void)
{
	struct xive_irq_bitmap *xibm;
	unsigned long flags;
	int irq = -ENOENT;

	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
		spin_lock_irqsave(&xibm->lock, flags);
		irq = __xive_irq_bitmap_alloc(xibm);
		spin_unlock_irqrestore(&xibm->lock, flags);
		if (irq >= 0)
			break;
	}
	return irq;
}

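/* Return a hardware IRQ number to the range bitmap it was allocated from */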
static void xive_irq_bitmap_free(int irq)
{
	unsigned long flags;
	struct xive_irq_bitmap *xibm;

	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
		if ((irq >= xibm->base) && (irq < xibm->base + xibm->count)) {
			spin_lock_irqsave(&xibm->lock, flags);
			clear_bit(irq - xibm->base, xibm->bitmap);
			spin_unlock_irqrestore(&xibm->lock, flags);
			break;
		}
	}
}

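/*
 * Thin wrappers around the H_INT_* hcalls used to query and configure
 * the XIVE interrupt sources and event queues managed by the hypervisor.
 */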
static long plpar_int_get_source_info(unsigned long flags,
				      unsigned long lisn,
				      unsigned long *src_flags,
				      unsigned long *eoi_page,
				      unsigned long *trig_page,
				      unsigned long *esb_shift)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_INT_GET_SOURCE_INFO, retbuf, flags, lisn);
	if (rc) {
		pr_err("H_INT_GET_SOURCE_INFO lisn=%ld failed %ld\n", lisn, rc);
		return rc;
	}

	*src_flags = retbuf[0];
	*eoi_page = retbuf[1];
	*trig_page = retbuf[2];
	*esb_shift = retbuf[3];

	pr_devel("H_INT_GET_SOURCE_INFO flags=%lx eoi=%lx trig=%lx shift=%lx\n",
		 retbuf[0], retbuf[1], retbuf[2], retbuf[3]);

	return 0;
}

#define XIVE_SRC_SET_EISN (1ull << (63 - 62))
#define XIVE_SRC_MASK (1ull << (63 - 63)) /* unused */

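/* Configure the target CPU, priority and EISN (sw_irq) of a source */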
static long plpar_int_set_source_config(unsigned long flags,
					unsigned long lisn,
					unsigned long target,
					unsigned long prio,
					unsigned long sw_irq)
{
	long rc;

	pr_devel("H_INT_SET_SOURCE_CONFIG flags=%lx lisn=%lx target=%lx prio=%lx sw_irq=%lx\n",
		 flags, lisn, target, prio, sw_irq);

	rc = plpar_hcall_norets(H_INT_SET_SOURCE_CONFIG, flags, lisn,
				target, prio, sw_irq);
	if (rc) {
		pr_err("H_INT_SET_SOURCE_CONFIG lisn=%ld target=%lx prio=%lx failed %ld\n",
		       lisn, target, prio, rc);
		return rc;
	}

	return 0;
}

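/* Retrieve the ESN page address and size of a target/priority queue */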
static long plpar_int_get_queue_info(unsigned long flags,
				     unsigned long target,
				     unsigned long priority,
				     unsigned long *esn_page,
				     unsigned long *esn_size)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_INT_GET_QUEUE_INFO, retbuf, flags, target, priority);
	if (rc) {
		pr_err("H_INT_GET_QUEUE_INFO cpu=%ld prio=%ld failed %ld\n",
		       target, priority, rc);
		return rc;
	}

	*esn_page = retbuf[0];
	*esn_size = retbuf[1];

	pr_devel("H_INT_GET_QUEUE_INFO page=%lx size=%lx\n",
		 retbuf[0], retbuf[1]);

	return 0;
}

#define XIVE_EQ_ALWAYS_NOTIFY (1ull << (63 - 63))

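/* Configure an event queue in the hypervisor; called with qpage/qsize of 0 to reset it */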
static long plpar_int_set_queue_config(unsigned long flags,
				       unsigned long target,
				       unsigned long priority,
				       unsigned long qpage,
				       unsigned long qsize)
{
	long rc;

	pr_devel("H_INT_SET_QUEUE_CONFIG flags=%lx target=%lx priority=%lx qpage=%lx qsize=%lx\n",
		 flags, target, priority, qpage, qsize);

	rc = plpar_hcall_norets(H_INT_SET_QUEUE_CONFIG, flags, target,
				priority, qpage, qsize);
	if (rc) {
		pr_err("H_INT_SET_QUEUE_CONFIG cpu=%ld prio=%ld qpage=%lx returned %ld\n",
		       target, priority, qpage, rc);
		return rc;
	}

	return 0;
}

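/* Issue an H_INT_SYNC for the given source (see xive_spapr_sync_source()) */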
static long plpar_int_sync(unsigned long flags, unsigned long lisn)
{
	long rc;

	rc = plpar_hcall_norets(H_INT_SYNC, flags, lisn);
	if (rc) {
		pr_err("H_INT_SYNC lisn=%ld returned %ld\n", lisn, rc);
		return rc;
	}

	return 0;
}

#define XIVE_ESB_FLAG_STORE (1ull << (63 - 63))

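/* Perform an ESB load or store for a source through the H_INT_ESB hcall */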
static long plpar_int_esb(unsigned long flags,
			  unsigned long lisn,
			  unsigned long offset,
			  unsigned long in_data,
			  unsigned long *out_data)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	pr_devel("H_INT_ESB flags=%lx lisn=%lx offset=%lx in=%lx\n",
		 flags, lisn, offset, in_data);

	rc = plpar_hcall(H_INT_ESB, retbuf, flags, lisn, offset, in_data);
	if (rc) {
		pr_err("H_INT_ESB lisn=%ld offset=%ld returned %ld\n",
		       lisn, offset, rc);
		return rc;
	}

	*out_data = retbuf[0];

	return 0;
}

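/* ESB read/write accessor backed by the H_INT_ESB hcall */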
static u64 xive_spapr_esb_rw(u32 lisn, u32 offset, u64 data, bool write)
{
	unsigned long read_data;
	long rc;

	rc = plpar_int_esb(write ? XIVE_ESB_FLAG_STORE : 0,
			   lisn, offset, data, &read_data);
	if (rc)
		return -1;

	return write ? 0 : read_data;
}

#define XIVE_SRC_H_INT_ESB (1ull << (63 - 60))
#define XIVE_SRC_LSI (1ull << (63 - 61))
#define XIVE_SRC_TRIGGER (1ull << (63 - 62))
#define XIVE_SRC_STORE_EOI (1ull << (63 - 63))

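/*
 * Query the hypervisor for the characteristics of a source and map its
 * ESB pages, unless the H_INT_ESB hcall must be used instead.
 */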
static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
	long rc;
	unsigned long flags;
	unsigned long eoi_page;
	unsigned long trig_page;
	unsigned long esb_shift;

	memset(data, 0, sizeof(*data));

	rc = plpar_int_get_source_info(0, hw_irq, &flags, &eoi_page, &trig_page,
				       &esb_shift);
	if (rc)
		return -EINVAL;

	if (flags & XIVE_SRC_H_INT_ESB)
		data->flags |= XIVE_IRQ_FLAG_H_INT_ESB;
	if (flags & XIVE_SRC_STORE_EOI)
		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
	if (flags & XIVE_SRC_LSI)
		data->flags |= XIVE_IRQ_FLAG_LSI;
	data->eoi_page = eoi_page;
	data->esb_shift = esb_shift;
	data->trig_page = trig_page;

	data->hw_irq = hw_irq;

	/*
	 * No chip-id for the sPAPR backend. This has an impact on how we
	 * pick a target. See xive_pick_irq_target().
	 */
	data->src_chip = XIVE_INVALID_CHIP_ID;

	/*
	 * When the H_INT_ESB flag is set, the H_INT_ESB hcall should
	 * be used for interrupt management. Skip mapping the ESB
	 * pages, as they are not available.
	 */
	if (data->flags & XIVE_IRQ_FLAG_H_INT_ESB)
		return 0;

	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
	if (!data->eoi_mmio) {
		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}

	/* Full function page supports trigger */
	if (flags & XIVE_SRC_TRIGGER) {
		data->trig_mmio = data->eoi_mmio;
		return 0;
	}

	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
	if (!data->trig_mmio) {
		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}
	return 0;
}

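/* Route a source to a target CPU/priority and record its Linux IRQ (EISN) */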
static int xive_spapr_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
	long rc;

	rc = plpar_int_set_source_config(XIVE_SRC_SET_EISN, hw_irq, target,
					 prio, sw_irq);

	return rc == 0 ? 0 : -ENXIO;
}

/* This can be called multiple times to change a queue configuration */
static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
				      __be32 *qpage, u32 order)
{
	s64 rc = 0;
	unsigned long esn_page;
	unsigned long esn_size;
	u64 flags, qpage_phys;

	/* If there's an actual queue page, clean it */
	if (order) {
		if (WARN_ON(!qpage))
			return -EINVAL;
		qpage_phys = __pa(qpage);
	} else {
		qpage_phys = 0;
	}

	/* Initialize the rest of the fields */
	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
	q->idx = 0;
	q->toggle = 0;

	rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size);
	if (rc) {
		pr_err("Error %lld getting queue info CPU %d prio %d\n", rc,
		       target, prio);
		rc = -EIO;
		goto fail;
	}

	/* TODO: add support for the notification page */
	q->eoi_phys = esn_page;

	/* Default is to always notify */
	flags = XIVE_EQ_ALWAYS_NOTIFY;

	/* Configure and enable the queue in HW */
	rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
	if (rc) {
		pr_err("Error %lld setting queue for CPU %d prio %d\n", rc,
		       target, prio);
		rc = -EIO;
	} else {
		q->qpage = qpage;
	}
fail:
	return rc;
}

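/* Allocate and configure the event queue of a CPU for the given priority */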
static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
				  u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	__be32 *qpage;

	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
	if (IS_ERR(qpage))
		return PTR_ERR(qpage);

	return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu),
					  q, prio, qpage, xive_queue_shift);
}

static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
				     u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	unsigned int alloc_order;
	long rc;
	int hw_cpu = get_hard_smp_processor_id(cpu);

	rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0);
	if (rc)
		pr_err("Error %ld setting queue for CPU %d prio %d\n", rc,
		       hw_cpu, prio);

	alloc_order = xive_alloc_order(xive_queue_shift);
	free_pages((unsigned long)q->qpage, alloc_order);
	q->qpage = NULL;
}

static bool xive_spapr_match(struct device_node *node)
{
	/* Ignore cascaded controllers for the moment */
	return true;
}

#ifdef CONFIG_SMP
static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	int irq = xive_irq_bitmap_alloc();

	if (irq < 0) {
		pr_err("Failed to allocate IPI on CPU %d\n", cpu);
		return -ENXIO;
	}

	xc->hw_ipi = irq;
	return 0;
}

static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	if (!xc->hw_ipi)
		return;

	xive_irq_bitmap_free(xc->hw_ipi);
	xc->hw_ipi = 0;
}
#endif /* CONFIG_SMP */

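/* Reset the hypervisor interrupt controller (H_INT_RESET) */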
static void xive_spapr_shutdown(void)
{
	long rc;

	rc = plpar_hcall_norets(H_INT_RESET, 0);
	if (rc)
		pr_err("H_INT_RESET failed %ld\n", rc);
}

/*
 * Perform an "ack" cycle on the current thread. Grab the pending
 * active priorities and update the CPPR to the most favored one.
 */
static void xive_spapr_update_pending(struct xive_cpu *xc)
{
	u8 nsr, cppr;
	u16 ack;

	/*
	 * Perform the "Acknowledge O/S to Register" cycle.
	 *
	 * Let's speed up the access to the TIMA using the raw I/O
	 * accessor as we don't need the synchronisation routine of
	 * the higher level ones.
	 */
	ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_OS_REG));

	/* Synchronize subsequent queue accesses */
	mb();

	/*
	 * Grab the CPPR and the "NSR" field which indicates the source
	 * of the interrupt (if any)
	 */
	cppr = ack & 0xff;
	nsr = ack >> 8;

	if (nsr & TM_QW1_NSR_EO) {
		if (cppr == 0xff)
			return;
		/* Mark the priority pending */
		xc->pending_prio |= 1 << cppr;

		/*
		 * A new interrupt should never have a CPPR less favored
		 * than our current one.
		 */
		if (cppr >= xc->cppr)
			pr_err("CPU %d odd ack CPPR, got %d at %d\n",
			       smp_processor_id(), cppr, xc->cppr);

		/* Update our idea of what the CPPR is */
		xc->cppr = cppr;
	}
}

static void xive_spapr_eoi(u32 hw_irq)
{
	/* Not used */;
}

static void xive_spapr_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	/* Only some debug on the TIMA settings */
	pr_debug("(HW value: %08x %08x %08x)\n",
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD0),
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD1),
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD2));
}

static void xive_spapr_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	/* Nothing to do */;
}

static void xive_spapr_sync_source(u32 hw_irq)
{
	/* Specs are unclear on what this is doing */
	plpar_int_sync(0, hw_irq);
}

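/* Backend operations registered with the XIVE core (see xive_core_init()) */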
static const struct xive_ops xive_spapr_ops = {
	.populate_irq_data = xive_spapr_populate_irq_data,
	.configure_irq = xive_spapr_configure_irq,
	.setup_queue = xive_spapr_setup_queue,
	.cleanup_queue = xive_spapr_cleanup_queue,
	.match = xive_spapr_match,
	.shutdown = xive_spapr_shutdown,
	.update_pending = xive_spapr_update_pending,
	.eoi = xive_spapr_eoi,
	.setup_cpu = xive_spapr_setup_cpu,
	.teardown_cpu = xive_spapr_teardown_cpu,
	.sync_source = xive_spapr_sync_source,
	.esb_rw = xive_spapr_esb_rw,
#ifdef CONFIG_SMP
	.get_ipi = xive_spapr_get_ipi,
	.put_ipi = xive_spapr_put_ipi,
#endif /* CONFIG_SMP */
	.name = "spapr",
};

/*
 * Get the maximum priority from "/ibm,plat-res-int-priorities"
 */
static bool xive_get_max_prio(u8 *max_prio)
{
	struct device_node *rootdn;
	const __be32 *reg;
	u32 len;
	int prio, found;

	rootdn = of_find_node_by_path("/");
	if (!rootdn) {
		pr_err("root node not found!\n");
		return false;
	}

	reg = of_get_property(rootdn, "ibm,plat-res-int-priorities", &len);
	if (!reg) {
		pr_err("Failed to read 'ibm,plat-res-int-priorities' property\n");
		return false;
	}

	if (len % (2 * sizeof(u32)) != 0) {
		pr_err("invalid 'ibm,plat-res-int-priorities' property\n");
		return false;
	}

	/* HW supports priorities in the range [0-7] and 0xFF is a
	 * wildcard priority used to mask. We scan the ranges reserved
	 * by the hypervisor to find the lowest priority we can use.
	 */
	found = 0xFF;
	for (prio = 0; prio < 8; prio++) {
		int reserved = 0;
		int i;

		for (i = 0; i < len / (2 * sizeof(u32)); i++) {
			int base = be32_to_cpu(reg[2 * i]);
			int range = be32_to_cpu(reg[2 * i + 1]);

			if (prio >= base && prio < base + range)
				reserved++;
		}

		if (!reserved)
			found = prio;
	}

	if (found == 0xFF) {
		pr_err("no valid priority found in 'ibm,plat-res-int-priorities'\n");
		return false;
	}

	*max_prio = found;
	return true;
}

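/*
 * Probe the "ibm,power-ivpe" device tree node, map the OS TIMA, register
 * the LISN ranges and initialize the XIVE core with the sPAPR backend.
 */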
bool __init xive_spapr_init(void)
{
	struct device_node *np;
	struct resource r;
	void __iomem *tima;
	struct property *prop;
	u8 max_prio;
	u32 val;
	u32 len;
	const __be32 *reg;
	int i;

	if (xive_cmdline_disabled)
		return false;

	pr_devel("%s()\n", __func__);
	np = of_find_compatible_node(NULL, NULL, "ibm,power-ivpe");
	if (!np) {
		pr_devel("not found !\n");
		return false;
	}
	pr_devel("Found %s\n", np->full_name);

	/* Resource 1 is the OS ring TIMA */
	if (of_address_to_resource(np, 1, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		return false;
	}
	tima = ioremap(r.start, resource_size(&r));
	if (!tima) {
		pr_err("Failed to map thread mgmnt area\n");
		return false;
	}

	if (!xive_get_max_prio(&max_prio))
		return false;

	/* Feed the IRQ number allocator with the ranges given in the DT */
	reg = of_get_property(np, "ibm,xive-lisn-ranges", &len);
	if (!reg) {
		pr_err("Failed to read 'ibm,xive-lisn-ranges' property\n");
		return false;
	}

	if (len % (2 * sizeof(u32)) != 0) {
		pr_err("invalid 'ibm,xive-lisn-ranges' property\n");
		return false;
	}

	for (i = 0; i < len / (2 * sizeof(u32)); i++, reg += 2)
		xive_irq_bitmap_add(be32_to_cpu(reg[0]),
				    be32_to_cpu(reg[1]));

	/* Iterate the EQ sizes and pick one */
	of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, reg, val) {
		xive_queue_shift = val;
		if (val == PAGE_SHIFT)
			break;
	}

	/* Initialize XIVE core with our backend */
	if (!xive_core_init(&xive_spapr_ops, tima, TM_QW1_OS, max_prio))
		return false;

	pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
	return true;
}