/*
 * Copyright 2016,2017 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/delay.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/hvcall.h>

#include "xive-internal.h"

static u32 xive_queue_shift;

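/*
 * The hardware IRQ numbers (LISNs) usable by the partition are
 * advertised in the device tree as ranges; each range is tracked
 * with a small bitmap allocator.
 */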
struct xive_irq_bitmap {
	unsigned long		*bitmap;
	unsigned int		base;
	unsigned int		count;
	spinlock_t		lock;
	struct list_head	list;
};

static LIST_HEAD(xive_irq_bitmaps);

static int xive_irq_bitmap_add(int base, int count)
{
	struct xive_irq_bitmap *xibm;

	xibm = kzalloc(sizeof(*xibm), GFP_ATOMIC);
	if (!xibm)
		return -ENOMEM;

	spin_lock_init(&xibm->lock);
	xibm->base = base;
	xibm->count = count;
	xibm->bitmap = kzalloc(xibm->count, GFP_KERNEL);
	if (!xibm->bitmap) {
		kfree(xibm);
		return -ENOMEM;
	}
	list_add(&xibm->list, &xive_irq_bitmaps);

	pr_info("Using IRQ range [%x-%x]\n", xibm->base,
		xibm->base + xibm->count - 1);
	return 0;
}

static int __xive_irq_bitmap_alloc(struct xive_irq_bitmap *xibm)
{
	int irq;

	irq = find_first_zero_bit(xibm->bitmap, xibm->count);
	if (irq != xibm->count) {
		set_bit(irq, xibm->bitmap);
		irq += xibm->base;
	} else {
		irq = -ENOMEM;
	}

	return irq;
}

static int xive_irq_bitmap_alloc(void)
{
	struct xive_irq_bitmap *xibm;
	unsigned long flags;
	int irq = -ENOENT;

	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
		spin_lock_irqsave(&xibm->lock, flags);
		irq = __xive_irq_bitmap_alloc(xibm);
		spin_unlock_irqrestore(&xibm->lock, flags);
		if (irq >= 0)
			break;
	}
	return irq;
}

static void xive_irq_bitmap_free(int irq)
{
	unsigned long flags;
	struct xive_irq_bitmap *xibm;

	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
		if ((irq >= xibm->base) && (irq < xibm->base + xibm->count)) {
			spin_lock_irqsave(&xibm->lock, flags);
			clear_bit(irq - xibm->base, xibm->bitmap);
			spin_unlock_irqrestore(&xibm->lock, flags);
			break;
		}
	}
}

/* Based on the similar routines in RTAS */
static unsigned int plpar_busy_delay_time(long rc)
{
	unsigned int ms = 0;

	if (H_IS_LONG_BUSY(rc)) {
		ms = get_longbusy_msecs(rc);
	} else if (rc == H_BUSY) {
		ms = 10; /* seems appropriate for XIVE hcalls */
	}

	return ms;
}

static unsigned int plpar_busy_delay(int rc)
{
	unsigned int ms;

	ms = plpar_busy_delay_time(rc);
	if (ms)
		mdelay(ms);

	return ms;
}

/*
 * Note: this call has a partition wide scope and can take a while to
 * complete. If it returns H_LONG_BUSY_* it should be retried
 * periodically.
 */
static long plpar_int_reset(unsigned long flags)
{
	long rc;

	do {
		rc = plpar_hcall_norets(H_INT_RESET, flags);
	} while (plpar_busy_delay(rc));

	if (rc)
		pr_err("H_INT_RESET failed %ld\n", rc);

	return rc;
}

static long plpar_int_get_source_info(unsigned long flags,
				      unsigned long lisn,
				      unsigned long *src_flags,
				      unsigned long *eoi_page,
				      unsigned long *trig_page,
				      unsigned long *esb_shift)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	do {
		rc = plpar_hcall(H_INT_GET_SOURCE_INFO, retbuf, flags, lisn);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_GET_SOURCE_INFO lisn=%ld failed %ld\n", lisn, rc);
		return rc;
	}

	*src_flags = retbuf[0];
	*eoi_page  = retbuf[1];
	*trig_page = retbuf[2];
	*esb_shift = retbuf[3];

	pr_devel("H_INT_GET_SOURCE_INFO flags=%lx eoi=%lx trig=%lx shift=%lx\n",
		 retbuf[0], retbuf[1], retbuf[2], retbuf[3]);

	return 0;
}

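/*
 * Flag bits for the hcall interfaces below use IBM (MSB 0) bit
 * numbering, hence the (1ull << (63 - bit)) form.
 */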
#define XIVE_SRC_SET_EISN (1ull << (63 - 62))
#define XIVE_SRC_MASK     (1ull << (63 - 63)) /* unused */

static long plpar_int_set_source_config(unsigned long flags,
					unsigned long lisn,
					unsigned long target,
					unsigned long prio,
					unsigned long sw_irq)
{
	long rc;

	pr_devel("H_INT_SET_SOURCE_CONFIG flags=%lx lisn=%lx target=%lx prio=%lx sw_irq=%lx\n",
		 flags, lisn, target, prio, sw_irq);

	do {
		rc = plpar_hcall_norets(H_INT_SET_SOURCE_CONFIG, flags, lisn,
					target, prio, sw_irq);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_SET_SOURCE_CONFIG lisn=%ld target=%lx prio=%lx failed %ld\n",
		       lisn, target, prio, rc);
		return rc;
	}

	return 0;
}

static long plpar_int_get_queue_info(unsigned long flags,
				     unsigned long target,
				     unsigned long priority,
				     unsigned long *esn_page,
				     unsigned long *esn_size)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	do {
		rc = plpar_hcall(H_INT_GET_QUEUE_INFO, retbuf, flags, target,
				 priority);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_GET_QUEUE_INFO cpu=%ld prio=%ld failed %ld\n",
		       target, priority, rc);
		return rc;
	}

	*esn_page = retbuf[0];
	*esn_size = retbuf[1];

	pr_devel("H_INT_GET_QUEUE_INFO page=%lx size=%lx\n",
		 retbuf[0], retbuf[1]);

	return 0;
}

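/* H_INT_SET_QUEUE_CONFIG flag: request a notification for each queued event */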
#define XIVE_EQ_ALWAYS_NOTIFY (1ull << (63 - 63))

static long plpar_int_set_queue_config(unsigned long flags,
				       unsigned long target,
				       unsigned long priority,
				       unsigned long qpage,
				       unsigned long qsize)
{
	long rc;

	pr_devel("H_INT_SET_QUEUE_CONFIG flags=%lx target=%lx priority=%lx qpage=%lx qsize=%lx\n",
		 flags, target, priority, qpage, qsize);

	do {
		rc = plpar_hcall_norets(H_INT_SET_QUEUE_CONFIG, flags, target,
					priority, qpage, qsize);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_SET_QUEUE_CONFIG cpu=%ld prio=%ld qpage=%lx returned %ld\n",
		       target, priority, qpage, rc);
		return rc;
	}

	return 0;
}

static long plpar_int_sync(unsigned long flags, unsigned long lisn)
{
	long rc;

	do {
		rc = plpar_hcall_norets(H_INT_SYNC, flags, lisn);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_SYNC lisn=%ld returned %ld\n", lisn, rc);
		return rc;
	}

	return 0;
}

#define XIVE_ESB_FLAG_STORE (1ull << (63 - 63))

static long plpar_int_esb(unsigned long flags,
			  unsigned long lisn,
			  unsigned long offset,
			  unsigned long in_data,
			  unsigned long *out_data)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	pr_devel("H_INT_ESB flags=%lx lisn=%lx offset=%lx in=%lx\n",
		 flags, lisn, offset, in_data);

	do {
		rc = plpar_hcall(H_INT_ESB, retbuf, flags, lisn, offset,
				 in_data);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_ESB lisn=%ld offset=%ld returned %ld\n",
		       lisn, offset, rc);
		return rc;
	}

	*out_data = retbuf[0];

	return 0;
}

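/*
 * ESB accessor used when the source requires hcall-based management
 * (XIVE_IRQ_FLAG_H_INT_ESB): loads and stores to the ESB pages are
 * routed through H_INT_ESB instead of MMIO.
 */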
static u64 xive_spapr_esb_rw(u32 lisn, u32 offset, u64 data, bool write)
{
	unsigned long read_data;
	long rc;

	rc = plpar_int_esb(write ? XIVE_ESB_FLAG_STORE : 0,
			   lisn, offset, data, &read_data);
	if (rc)
		return -1;

	return write ? 0 : read_data;
}

#define XIVE_SRC_H_INT_ESB     (1ull << (63 - 60))
#define XIVE_SRC_LSI           (1ull << (63 - 61))
#define XIVE_SRC_TRIGGER       (1ull << (63 - 62))
#define XIVE_SRC_STORE_EOI     (1ull << (63 - 63))

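/*
 * Retrieve the source characteristics from the hypervisor and map
 * the ESB pages when they are directly accessible.
 */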
static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
	long rc;
	unsigned long flags;
	unsigned long eoi_page;
	unsigned long trig_page;
	unsigned long esb_shift;

	memset(data, 0, sizeof(*data));

	rc = plpar_int_get_source_info(0, hw_irq, &flags, &eoi_page, &trig_page,
				       &esb_shift);
	if (rc)
		return -EINVAL;

	if (flags & XIVE_SRC_H_INT_ESB)
		data->flags |= XIVE_IRQ_FLAG_H_INT_ESB;
	if (flags & XIVE_SRC_STORE_EOI)
		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
	if (flags & XIVE_SRC_LSI)
		data->flags |= XIVE_IRQ_FLAG_LSI;
	data->eoi_page  = eoi_page;
	data->esb_shift = esb_shift;
	data->trig_page = trig_page;

	data->hw_irq = hw_irq;

	/*
	 * No chip-id for the sPAPR backend. This has an impact on how we
	 * pick a target. See xive_pick_irq_target().
	 */
	data->src_chip = XIVE_INVALID_CHIP_ID;

	/*
	 * When the H_INT_ESB flag is set, the H_INT_ESB hcall should
	 * be used for interrupt management. Skip the remapping of the
	 * ESB pages which are not available.
	 */
	if (data->flags & XIVE_IRQ_FLAG_H_INT_ESB)
		return 0;

	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
	if (!data->eoi_mmio) {
		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}

	/* Full function page supports trigger */
	if (flags & XIVE_SRC_TRIGGER) {
		data->trig_mmio = data->eoi_mmio;
		return 0;
	}

	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
	if (!data->trig_mmio) {
		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}
	return 0;
}

static int xive_spapr_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
	long rc;

	rc = plpar_int_set_source_config(XIVE_SRC_SET_EISN, hw_irq, target,
					 prio, sw_irq);

	return rc == 0 ? 0 : -ENXIO;
}

/* This can be called multiple times to change a queue configuration */
static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
				      __be32 *qpage, u32 order)
{
	s64 rc = 0;
	unsigned long esn_page;
	unsigned long esn_size;
	u64 flags, qpage_phys;

	/* If there's an actual queue page, clean it */
	if (order) {
		if (WARN_ON(!qpage))
			return -EINVAL;
		qpage_phys = __pa(qpage);
	} else {
		qpage_phys = 0;
	}

	/* Initialize the rest of the fields */
	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
	q->idx = 0;
	q->toggle = 0;

	rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size);
	if (rc) {
		pr_err("Error %lld getting queue info CPU %d prio %d\n", rc,
		       target, prio);
		rc = -EIO;
		goto fail;
	}

	/* TODO: add support for the notification page */
	q->eoi_phys = esn_page;

	/* Default is to always notify */
	flags = XIVE_EQ_ALWAYS_NOTIFY;

	/* Configure and enable the queue in HW */
	rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
	if (rc) {
		pr_err("Error %lld setting queue for CPU %d prio %d\n", rc,
		       target, prio);
		rc = -EIO;
	} else {
		q->qpage = qpage;
	}
fail:
	return rc;
}

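/* Allocate a queue page for this CPU/priority and install it in HW */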
static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
				  u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	__be32 *qpage;

	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
	if (IS_ERR(qpage))
		return PTR_ERR(qpage);

	return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu),
					  q, prio, qpage, xive_queue_shift);
}

static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
				     u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	unsigned int alloc_order;
	long rc;
	int hw_cpu = get_hard_smp_processor_id(cpu);

	rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0);
	if (rc)
		pr_err("Error %ld setting queue for CPU %d prio %d\n", rc,
		       hw_cpu, prio);

	alloc_order = xive_alloc_order(xive_queue_shift);
	free_pages((unsigned long)q->qpage, alloc_order);
	q->qpage = NULL;
}

static bool xive_spapr_match(struct device_node *node)
{
	/* Ignore cascaded controllers for the moment */
	return true;
}

#ifdef CONFIG_SMP
static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	int irq = xive_irq_bitmap_alloc();

	if (irq < 0) {
		pr_err("Failed to allocate IPI on CPU %d\n", cpu);
		return -ENXIO;
	}

	xc->hw_ipi = irq;
	return 0;
}

static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	if (xc->hw_ipi == XIVE_BAD_IRQ)
		return;

	xive_irq_bitmap_free(xc->hw_ipi);
	xc->hw_ipi = XIVE_BAD_IRQ;
}
#endif /* CONFIG_SMP */

static void xive_spapr_shutdown(void)
{
	plpar_int_reset(0);
}

/*
 * Perform an "ack" cycle on the current thread. Grab the pending
 * active priorities and update the CPPR to the most favored one.
 */
static void xive_spapr_update_pending(struct xive_cpu *xc)
{
	u8 nsr, cppr;
	u16 ack;

	/*
	 * Perform the "Acknowledge O/S to Register" cycle.
	 *
	 * Let's speed up the access to the TIMA by using the raw I/O
	 * accessor as we don't need the synchronisation routines of
	 * the higher level ones.
	 */
	ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_OS_REG));

	/* Synchronize subsequent queue accesses */
	mb();

	/*
	 * Grab the CPPR and the "NSR" field which indicates the source
	 * of the interrupt (if any)
	 */
	cppr = ack & 0xff;
	nsr = ack >> 8;

	if (nsr & TM_QW1_NSR_EO) {
		if (cppr == 0xff)
			return;
		/* Mark the priority pending */
		xc->pending_prio |= 1 << cppr;

		/*
		 * A new interrupt should never have a CPPR less favored
		 * than our current one.
		 */
		if (cppr >= xc->cppr)
			pr_err("CPU %d odd ack CPPR, got %d at %d\n",
			       smp_processor_id(), cppr, xc->cppr);

		/* Update our idea of what the CPPR is */
		xc->cppr = cppr;
	}
}

static void xive_spapr_eoi(u32 hw_irq)
{
	/* Not used */;
}

static void xive_spapr_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	/* Only some debug on the TIMA settings */
	pr_debug("(HW value: %08x %08x %08x)\n",
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD0),
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD1),
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD2));
}

static void xive_spapr_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	/* Nothing to do */;
}

static void xive_spapr_sync_source(u32 hw_irq)
{
	/* Specs are unclear on what this is doing */
	plpar_int_sync(0, hw_irq);
}

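/* Backend operations used by the XIVE core for the sPAPR platform */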
static const struct xive_ops xive_spapr_ops = {
	.populate_irq_data	= xive_spapr_populate_irq_data,
	.configure_irq		= xive_spapr_configure_irq,
	.setup_queue		= xive_spapr_setup_queue,
	.cleanup_queue		= xive_spapr_cleanup_queue,
	.match			= xive_spapr_match,
	.shutdown		= xive_spapr_shutdown,
	.update_pending		= xive_spapr_update_pending,
	.eoi			= xive_spapr_eoi,
	.setup_cpu		= xive_spapr_setup_cpu,
	.teardown_cpu		= xive_spapr_teardown_cpu,
	.sync_source		= xive_spapr_sync_source,
	.esb_rw			= xive_spapr_esb_rw,
#ifdef CONFIG_SMP
	.get_ipi		= xive_spapr_get_ipi,
	.put_ipi		= xive_spapr_put_ipi,
#endif /* CONFIG_SMP */
	.name			= "spapr",
};

/*
 * Get the maximum usable priority from "/ibm,plat-res-int-priorities".
 */
static bool xive_get_max_prio(u8 *max_prio)
{
	struct device_node *rootdn;
	const __be32 *reg;
	u32 len;
	int prio, found;

	rootdn = of_find_node_by_path("/");
	if (!rootdn) {
		pr_err("root node not found!\n");
		return false;
	}

	reg = of_get_property(rootdn, "ibm,plat-res-int-priorities", &len);
	of_node_put(rootdn);
	if (!reg) {
		pr_err("Failed to read 'ibm,plat-res-int-priorities' property\n");
		return false;
	}

	if (len % (2 * sizeof(u32)) != 0) {
		pr_err("invalid 'ibm,plat-res-int-priorities' property\n");
		return false;
	}

	/*
	 * HW supports priorities in the range [0-7] and 0xFF is a
	 * wildcard priority used to mask. We scan the ranges reserved
	 * by the hypervisor to find the lowest priority we can use.
	 */
	found = 0xFF;
	for (prio = 0; prio < 8; prio++) {
		int reserved = 0;
		int i;

		for (i = 0; i < len / (2 * sizeof(u32)); i++) {
			int base  = be32_to_cpu(reg[2 * i]);
			int range = be32_to_cpu(reg[2 * i + 1]);

			if (prio >= base && prio < base + range)
				reserved++;
		}

		if (!reserved)
			found = prio;
	}

	if (found == 0xFF) {
		pr_err("no valid priority found in 'ibm,plat-res-int-priorities'\n");
		return false;
	}

	*max_prio = found;
	return true;
}

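/*
 * Probe the "ibm,power-ivpe" node, map the OS thread management area
 * (TIMA) and initialize the XIVE core with the sPAPR backend.
 */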
bool __init xive_spapr_init(void)
{
	struct device_node *np;
	struct resource r;
	void __iomem *tima;
	struct property *prop;
	u8 max_prio;
	u32 val;
	u32 len;
	const __be32 *reg;
	int i;

	if (xive_cmdline_disabled)
		return false;

	pr_devel("%s()\n", __func__);
	np = of_find_compatible_node(NULL, NULL, "ibm,power-ivpe");
	if (!np) {
		pr_devel("not found!\n");
		return false;
	}
	pr_devel("Found %s\n", np->full_name);

	/* Resource 1 is the OS ring TIMA */
	if (of_address_to_resource(np, 1, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		return false;
	}
	tima = ioremap(r.start, resource_size(&r));
	if (!tima) {
		pr_err("Failed to map thread mgmnt area\n");
		return false;
	}

	if (!xive_get_max_prio(&max_prio))
		return false;

	/* Feed the IRQ number allocator with the ranges given in the DT */
	reg = of_get_property(np, "ibm,xive-lisn-ranges", &len);
	if (!reg) {
		pr_err("Failed to read 'ibm,xive-lisn-ranges' property\n");
		return false;
	}

	if (len % (2 * sizeof(u32)) != 0) {
		pr_err("invalid 'ibm,xive-lisn-ranges' property\n");
		return false;
	}

	for (i = 0; i < len / (2 * sizeof(u32)); i++, reg += 2)
		xive_irq_bitmap_add(be32_to_cpu(reg[0]),
				    be32_to_cpu(reg[1]));

	/* Iterate the EQ sizes and pick one */
	of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, reg, val) {
		xive_queue_shift = val;
		if (val == PAGE_SHIFT)
			break;
	}

	/* Initialize XIVE core with our backend */
	if (!xive_core_init(&xive_spapr_ops, tima, TM_QW1_OS, max_prio))
		return false;

	pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
	return true;
}