1 /*
2  * Copyright 2016,2017 IBM Corporation.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version
7  * 2 of the License, or (at your option) any later version.
8  */
9 
10 #define pr_fmt(fmt) "xive: " fmt
11 
12 #include <linux/types.h>
13 #include <linux/threads.h>
14 #include <linux/kernel.h>
15 #include <linux/irq.h>
16 #include <linux/debugfs.h>
17 #include <linux/smp.h>
18 #include <linux/interrupt.h>
19 #include <linux/seq_file.h>
20 #include <linux/init.h>
21 #include <linux/cpu.h>
22 #include <linux/of.h>
23 #include <linux/slab.h>
24 #include <linux/spinlock.h>
25 #include <linux/msi.h>
26 
27 #include <asm/prom.h>
28 #include <asm/io.h>
29 #include <asm/smp.h>
30 #include <asm/machdep.h>
31 #include <asm/irq.h>
32 #include <asm/errno.h>
33 #include <asm/xive.h>
34 #include <asm/xive-regs.h>
35 #include <asm/xmon.h>
36 
37 #include "xive-internal.h"
38 
39 #undef DEBUG_FLUSH
40 #undef DEBUG_ALL
41 
42 #ifdef DEBUG_ALL
43 #define DBG_VERBOSE(fmt, ...)	pr_devel("cpu %d - " fmt, \
44 					 smp_processor_id(), ## __VA_ARGS__)
45 #else
46 #define DBG_VERBOSE(fmt...)	do { } while(0)
47 #endif
48 
49 bool __xive_enabled;
50 EXPORT_SYMBOL_GPL(__xive_enabled);
51 bool xive_cmdline_disabled;
52 
53 /* We use only one priority for now */
54 static u8 xive_irq_priority;
55 
56 /* TIMA exported to KVM */
57 void __iomem *xive_tima;
58 EXPORT_SYMBOL_GPL(xive_tima);
59 u32 xive_tima_offset;
60 
61 /* Backend ops */
62 static const struct xive_ops *xive_ops;
63 
64 /* Our global interrupt domain */
65 static struct irq_domain *xive_irq_domain;
66 
67 #ifdef CONFIG_SMP
68 /* The IPIs all use the same logical irq number */
69 static u32 xive_ipi_irq;
70 #endif
71 
72 /* Xive state for each CPU */
73 static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);
74 
75 /*
76  * A "disabled" interrupt should never fire, to catch problems
77  * we set its logical number to this
78  */
79 #define XIVE_BAD_IRQ		0x7fffffff
80 #define XIVE_MAX_IRQ		(XIVE_BAD_IRQ - 1)
81 
82 /* An invalid CPU target */
83 #define XIVE_INVALID_TARGET	(-1)
84 
85 /*
86  * Read the next entry in a queue, return its content if it's valid
87  * or 0 if there is no new entry.
88  *
89  * The queue pointer is moved forward unless "just_peek" is set
90  */
91 static u32 xive_read_eq(struct xive_q *q, bool just_peek)
92 {
93 	u32 cur;
94 
95 	if (!q->qpage)
96 		return 0;
97 	cur = be32_to_cpup(q->qpage + q->idx);
98 
99 	/* Check valid bit (31) vs current toggle polarity */
100 	if ((cur >> 31) == q->toggle)
101 		return 0;
102 
103 	/* If consuming from the queue ... */
104 	if (!just_peek) {
105 		/* Next entry */
106 		q->idx = (q->idx + 1) & q->msk;
107 
108 		/* Wrap around: flip valid toggle */
109 		if (q->idx == 0)
110 			q->toggle ^= 1;
111 	}
112 	/* Mask out the valid bit (31) */
113 	return cur & 0x7fffffff;
114 }
115 
116 /*
117  * Scans all the queues that may have interrupts in them
118  * (based on "pending_prio") in priority order until an
119  * interrupt is found or all the queues are empty.
120  *
121  * Then updates the CPPR (Current Processor Priority
122  * Register) based on the most favored interrupt found
123  * (0xff if none) and returns what was found (0 if none).
124  *
125  * If just_peek is set, return the most favored pending
126  * interrupt if any but don't update the queue pointers.
127  *
128  * Note: This function can operate generically on any number
129  * of queues (up to 8). The current implementation of the XIVE
130  * driver only uses a single queue however.
131  *
132  * Note2: This will also "flush" the "pending_count" of a queue
133  * into the "count" when that queue is observed to be empty.
134  * This is used to keep track of the number of interrupts
135  * targeting a queue. When an interrupt is moved away from
136  * a queue, we only decrement that queue count once the queue
137  * has been observed empty to avoid races.
138  */
139 static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
140 {
141 	u32 irq = 0;
142 	u8 prio;
143 
144 	/* Find highest pending priority */
145 	while (xc->pending_prio != 0) {
146 		struct xive_q *q;
147 
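		/* The lowest set bit is the numerically lowest, i.e. most favored, priority */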
148 		prio = ffs(xc->pending_prio) - 1;
149 		DBG_VERBOSE("scan_irq: trying prio %d\n", prio);
150 
151 		/* Try to fetch */
152 		irq = xive_read_eq(&xc->queue[prio], just_peek);
153 
154 		/* Found something ? That's it */
155 		if (irq)
156 			break;
157 
158 		/* Clear pending bits */
159 		xc->pending_prio &= ~(1 << prio);
160 
161 		/*
162 		 * Check if the queue count needs adjusting due to
163 		 * interrupts being moved away. See description of
164 		 * xive_dec_target_count()
165 		 */
166 		q = &xc->queue[prio];
167 		if (atomic_read(&q->pending_count)) {
168 			int p = atomic_xchg(&q->pending_count, 0);
169 			if (p) {
170 				WARN_ON(p > atomic_read(&q->count));
171 				atomic_sub(p, &q->count);
172 			}
173 		}
174 	}
175 
176 	/* If nothing was found, set CPPR to 0xff */
177 	if (irq == 0)
178 		prio = 0xff;
179 
180 	/* Update HW CPPR to match if necessary */
181 	if (prio != xc->cppr) {
182 		DBG_VERBOSE("scan_irq: adjusting CPPR to %d\n", prio);
183 		xc->cppr = prio;
184 		out_8(xive_tima + xive_tima_offset + TM_CPPR, prio);
185 	}
186 
187 	return irq;
188 }
189 
190 /*
191  * This is used to perform the magic loads from an ESB
192  * described in xive.h
193  */
194 static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)
195 {
196 	u64 val;
197 
198 	/* Handle HW errata */
199 	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
200 		offset |= offset << 4;
201 
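	/* Some sources need the backend (e.g. an hcall) to do the ESB access for us */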
202 	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
203 		val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0);
204 	else
205 		val = in_be64(xd->eoi_mmio + offset);
206 
207 	return (u8)val;
208 }
209 
210 static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data)
211 {
212 	/* Handle HW errata */
213 	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
214 		offset |= offset << 4;
215 
216 	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
217 		xive_ops->esb_rw(xd->hw_irq, offset, data, 1);
218 	else
219 		out_be64(xd->eoi_mmio + offset, data);
220 }
221 
222 #ifdef CONFIG_XMON
223 static notrace void xive_dump_eq(const char *name, struct xive_q *q)
224 {
225 	u32 i0, i1, idx;
226 
227 	if (!q->qpage)
228 		return;
229 	idx = q->idx;
230 	i0 = be32_to_cpup(q->qpage + idx);
231 	idx = (idx + 1) & q->msk;
232 	i1 = be32_to_cpup(q->qpage + idx);
233 	xmon_printf("  %s Q T=%d %08x %08x ...\n", name,
234 		    q->toggle, i0, i1);
235 }
236 
237 notrace void xmon_xive_do_dump(int cpu)
238 {
239 	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
240 
241 	xmon_printf("XIVE state for CPU %d:\n", cpu);
242 	xmon_printf("  pp=%02x cppr=%02x\n", xc->pending_prio, xc->cppr);
243 	xive_dump_eq("IRQ", &xc->queue[xive_irq_priority]);
244 #ifdef CONFIG_SMP
245 	{
246 		u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);
247 		xmon_printf("  IPI state: %x:%c%c\n", xc->hw_ipi,
248 			val & XIVE_ESB_VAL_P ? 'P' : 'p',
249 			val & XIVE_ESB_VAL_Q ? 'Q' : 'q');
250 	}
251 #endif
252 }
253 #endif /* CONFIG_XMON */
254 
255 static unsigned int xive_get_irq(void)
256 {
257 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
258 	u32 irq;
259 
260 	/*
261 	 * This can be called either as a result of a HW interrupt or
262 	 * as a "replay" because EOI decided there was still something
263 	 * in one of the queues.
264 	 *
265 	 * First we perform an ACK cycle in order to update our mask
266 	 * of pending priorities. This will also have the effect of
267 	 * updating the CPPR to the most favored pending interrupts.
268 	 *
269 	 * In the future, if we have a way to differentiate a first
270 	 * entry (on HW interrupt) from a replay triggered by EOI,
271 	 * we could skip this on replays unless the soft-mask state tells us
272 	 * that a new HW interrupt occurred.
273 	 */
274 	xive_ops->update_pending(xc);
275 
276 	DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio);
277 
278 	/* Scan our queue(s) for interrupts */
279 	irq = xive_scan_interrupts(xc, false);
280 
281 	DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n",
282 	    irq, xc->pending_prio);
283 
284 	/* Return pending interrupt if any */
285 	if (irq == XIVE_BAD_IRQ)
286 		return 0;
287 	return irq;
288 }
289 
290 /*
291  * After EOI'ing an interrupt, we need to re-check the queue
292  * to see if another interrupt is pending since multiple
293  * interrupts can coalesce into a single notification to the
294  * CPU.
295  *
296  * If we find that there is indeed more in there, we call
297  * force_external_irq_replay() to make Linux synthesize an
298  * external interrupt on the next call to local_irq_restore().
299  */
300 static void xive_do_queue_eoi(struct xive_cpu *xc)
301 {
302 	if (xive_scan_interrupts(xc, true) != 0) {
303 		DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio);
304 		force_external_irq_replay();
305 	}
306 }
307 
308 /*
309  * EOI an interrupt at the source. There are several methods
310  * to do this depending on the HW version and source type
311  */
312 void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
313 {
314 	/* If the XIVE supports the new "store EOI" facility, use it */
315 	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
316 		xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0);
317 	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
318 		/*
319 		 * The FW told us to call it. This happens for some
320 		 * interrupt sources that need additional HW whacking
321 		 * beyond the ESB manipulation. For example LPC interrupts
322 		 * on P9 DD1.0 need a latch to be cleared in the LPC bridge
323 		 * itself. The Firmware will take care of it.
324 		 */
325 		if (WARN_ON_ONCE(!xive_ops->eoi))
326 			return;
327 		xive_ops->eoi(hw_irq);
328 	} else {
329 		u8 eoi_val;
330 
331 		/*
332 		 * Otherwise for EOI, we use the special MMIO that does
333 		 * a clear of both P and Q and returns the old Q,
334 		 * except for LSIs where we use the "EOI cycle" special
335 		 * load.
336 		 *
337 		 * This allows us to then do a re-trigger if Q was set
338 		 * rather than synthesizing an interrupt in software
339 		 *
340 		 * For LSIs, using the HW EOI cycle works around a problem
341 		 * on P9 DD1 PHBs where the other ESB accesses don't work
342 		 * properly.
343 		 */
344 		if (xd->flags & XIVE_IRQ_FLAG_LSI)
345 			xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
346 		else {
347 			eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
348 			DBG_VERBOSE("eoi_val=%x\n", eoi_val);
349 
350 			/* Re-trigger if needed */
351 			if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
352 				out_be64(xd->trig_mmio, 0);
353 		}
354 	}
355 }
356 
357 /* irq_chip eoi callback */
358 static void xive_irq_eoi(struct irq_data *d)
359 {
360 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
361 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
362 
363 	DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
364 		    d->irq, irqd_to_hwirq(d), xc->pending_prio);
365 
366 	/*
367 	 * EOI the source if it hasn't been disabled and hasn't
368 	 * been passed-through to a KVM guest
369 	 */
370 	if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d))
371 		xive_do_source_eoi(irqd_to_hwirq(d), xd);
372 
373 	/*
374 	 * Clear saved_p to indicate that it's no longer occupying
375 	 * a queue slot on the target queue
376 	 */
377 	xd->saved_p = false;
378 
379 	/* Check for more work in the queue */
380 	xive_do_queue_eoi(xc);
381 }
382 
383 /*
384  * Helper used to mask and unmask an interrupt source. This
385  * is only called for normal interrupts that do not require
386  * masking/unmasking via firmware.
387  */
388 static void xive_do_source_set_mask(struct xive_irq_data *xd,
389 				    bool mask)
390 {
391 	u64 val;
392 
393 	/*
394 	 * If the interrupt had P set, it may be in a queue.
395 	 *
396 	 * We need to make sure we don't re-enable it until it
397 	 * has been fetched from that queue and EOId. We keep
398 	 * a copy of that P state and use it to restore the
399 	 * ESB accordingly on unmask.
400 	 */
401 	if (mask) {
402 		val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
403 		xd->saved_p = !!(val & XIVE_ESB_VAL_P);
404 	} else if (xd->saved_p)
405 		xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
406 	else
407 		xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
408 }
409 
410 /*
411  * Try to choose "cpu" as a new interrupt target. Increments
412  * the queue accounting for that target if it's not already
413  * full.
414  */
415 static bool xive_try_pick_target(int cpu)
416 {
417 	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
418 	struct xive_q *q = &xc->queue[xive_irq_priority];
419 	int max;
420 
421 	/*
422 	 * Calculate max number of interrupts in that queue.
423 	 *
424 	 * We leave a gap of 1 just in case...
425 	 */
426 	max = (q->msk + 1) - 1;
427 	return !!atomic_add_unless(&q->count, 1, max);
428 }
429 
430 /*
431  * Un-account an interrupt for a target CPU. We don't directly
432  * decrement q->count since the interrupt might still be present
433  * in the queue.
434  *
435  * Instead increment a separate counter "pending_count" which
436  * will be subtracted from "count" later when that CPU observes
437  * the queue to be empty.
438  */
439 static void xive_dec_target_count(int cpu)
440 {
441 	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
442 	struct xive_q *q = &xc->queue[xive_irq_priority];
443 
444 	if (unlikely(WARN_ON(cpu < 0 || !xc))) {
445 		pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc);
446 		return;
447 	}
448 
449 	/*
450 	 * We increment the "pending count" which will be used
451 	 * to decrement the target queue count whenever it's next
452 	 * processed and found empty. This ensures that we don't
453 	 * decrement while we still have the interrupt there
454 	 * occupying a slot.
455 	 */
456 	atomic_inc(&q->pending_count);
457 }
458 
459 /* Find a tentative CPU target in a CPU mask */
460 static int xive_find_target_in_mask(const struct cpumask *mask,
461 				    unsigned int fuzz)
462 {
463 	int cpu, first, num, i;
464 
465 	/* Pick up a starting point CPU in the mask based on fuzz */
466 	num = min_t(int, cpumask_weight(mask), nr_cpu_ids);
467 	first = fuzz % num;
468 
469 	/* Locate it */
470 	cpu = cpumask_first(mask);
471 	for (i = 0; i < first && cpu < nr_cpu_ids; i++)
472 		cpu = cpumask_next(cpu, mask);
473 
474 	/* Sanity check */
475 	if (WARN_ON(cpu >= nr_cpu_ids))
476 		cpu = cpumask_first(cpu_online_mask);
477 
478 	/* Remember first one to handle wrap-around */
479 	first = cpu;
480 
481 	/*
482 	 * Now go through the entire mask until we find a valid
483 	 * target.
484 	 */
485 	do {
486 		/*
487 		 * We re-check online as the fallback case passes us
488 		 * an untested affinity mask
489 		 */
490 		if (cpu_online(cpu) && xive_try_pick_target(cpu))
491 			return cpu;
492 		cpu = cpumask_next(cpu, mask);
493 		/* Wrap around */
494 		if (cpu >= nr_cpu_ids)
495 			cpu = cpumask_first(mask);
496 	} while (cpu != first);
497 
498 	return -1;
499 }
500 
501 /*
502  * Pick a target CPU for an interrupt. This is done at
503  * startup or if the affinity is changed in a way that
504  * invalidates the current target.
505  */
506 static int xive_pick_irq_target(struct irq_data *d,
507 				const struct cpumask *affinity)
508 {
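	/* Rotating starting offset used to spread successive picks across CPUs */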
509 	static unsigned int fuzz;
510 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
511 	cpumask_var_t mask;
512 	int cpu = -1;
513 
514 	/*
515 	 * If we have chip IDs, first we try to build a mask of
516 	 * CPUs on the same chip as the source and find a target in there
517 	 */
518 	if (xd->src_chip != XIVE_INVALID_CHIP_ID &&
519 		zalloc_cpumask_var(&mask, GFP_ATOMIC)) {
520 		/* Build a mask of matching chip IDs */
521 		for_each_cpu_and(cpu, affinity, cpu_online_mask) {
522 			struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
523 			if (xc->chip_id == xd->src_chip)
524 				cpumask_set_cpu(cpu, mask);
525 		}
526 		/* Try to find a target */
527 		if (cpumask_empty(mask))
528 			cpu = -1;
529 		else
530 			cpu = xive_find_target_in_mask(mask, fuzz++);
531 		free_cpumask_var(mask);
532 		if (cpu >= 0)
533 			return cpu;
534 		fuzz--;
535 	}
536 
537 	/* No chip IDs, fallback to using the affinity mask */
538 	return xive_find_target_in_mask(affinity, fuzz++);
539 }
540 
541 static unsigned int xive_irq_startup(struct irq_data *d)
542 {
543 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
544 	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
545 	int target, rc;
546 
547 	pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n",
548 		 d->irq, hw_irq, d);
549 
550 #ifdef CONFIG_PCI_MSI
551 	/*
552 	 * The generic MSI code returns with the interrupt disabled on the
553 	 * card, using the MSI mask bits. Firmware doesn't appear to unmask
554 	 * at that level, so we do it here by hand.
555 	 */
556 	if (irq_data_get_msi_desc(d))
557 		pci_msi_unmask_irq(d);
558 #endif
559 
560 	/* Pick a target */
561 	target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d));
562 	if (target == XIVE_INVALID_TARGET) {
563 		/* Try again breaking affinity */
564 		target = xive_pick_irq_target(d, cpu_online_mask);
565 		if (target == XIVE_INVALID_TARGET)
566 			return -ENXIO;
567 		pr_warn("irq %d started with broken affinity\n", d->irq);
568 	}
569 
570 	/* Sanity check */
571 	if (WARN_ON(target == XIVE_INVALID_TARGET ||
572 		    target >= nr_cpu_ids))
573 		target = smp_processor_id();
574 
575 	xd->target = target;
576 
577 	/*
578 	 * Configure the logical number to be the Linux IRQ number
579 	 * and set the target queue
580 	 */
581 	rc = xive_ops->configure_irq(hw_irq,
582 				     get_hard_smp_processor_id(target),
583 				     xive_irq_priority, d->irq);
584 	if (rc)
585 		return rc;
586 
587 	/* Unmask the ESB */
588 	xive_do_source_set_mask(xd, false);
589 
590 	return 0;
591 }
592 
593 static void xive_irq_shutdown(struct irq_data *d)
594 {
595 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
596 	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
597 
598 	pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n",
599 		 d->irq, hw_irq, d);
600 
601 	if (WARN_ON(xd->target == XIVE_INVALID_TARGET))
602 		return;
603 
604 	/* Mask the interrupt at the source */
605 	xive_do_source_set_mask(xd, true);
606 
607 	/*
608 	 * The above may have set saved_p. We clear it otherwise it
609 	 * will prevent re-enabling later on. It is ok to forget the
610 	 * fact that the interrupt might be in a queue because we are
611 	 * accounting that already in xive_dec_target_count() and will
612 	 * be re-routing it to a new queue with proper accounting when
613 	 * it's started up again
614 	 */
615 	xd->saved_p = false;
616 
617 	/*
618 	 * Mask the interrupt in HW in the IVT/EAS and set the number
619 	 * to be the "bad" IRQ number
620 	 */
621 	xive_ops->configure_irq(hw_irq,
622 				get_hard_smp_processor_id(xd->target),
623 				0xff, XIVE_BAD_IRQ);
624 
625 	xive_dec_target_count(xd->target);
626 	xd->target = XIVE_INVALID_TARGET;
627 }
628 
629 static void xive_irq_unmask(struct irq_data *d)
630 {
631 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
632 
633 	pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd);
634 
635 	/*
636 	 * This is a workaround for PCI LSI problems on P9, for
637 	 * these, we call FW to set the mask. The problems might
638 	 * be fixed by P9 DD2.0, if that is the case, firmware
639 	 * will no longer set that flag.
640 	 */
641 	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
642 		unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
643 		xive_ops->configure_irq(hw_irq,
644 					get_hard_smp_processor_id(xd->target),
645 					xive_irq_priority, d->irq);
646 		return;
647 	}
648 
649 	xive_do_source_set_mask(xd, false);
650 }
651 
652 static void xive_irq_mask(struct irq_data *d)
653 {
654 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
655 
656 	pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd);
657 
658 	/*
659 	 * This is a workaround for PCI LSI problems on P9, for
660 	 * these, we call OPAL to set the mask. The problems might
661 	 * be fixed by P9 DD2.0, if that is the case, firmware
662 	 * will no longer set that flag.
663 	 */
664 	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
665 		unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
666 		xive_ops->configure_irq(hw_irq,
667 					get_hard_smp_processor_id(xd->target),
668 					0xff, d->irq);
669 		return;
670 	}
671 
672 	xive_do_source_set_mask(xd, true);
673 }
674 
675 static int xive_irq_set_affinity(struct irq_data *d,
676 				 const struct cpumask *cpumask,
677 				 bool force)
678 {
679 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
680 	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
681 	u32 target, old_target;
682 	int rc = 0;
683 
684 	pr_devel("xive_irq_set_affinity: irq %d\n", d->irq);
685 
686 	/* Is this valid ? */
687 	if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids)
688 		return -EINVAL;
689 
690 	/* Don't do anything if the interrupt isn't started */
691 	if (!irqd_is_started(d))
692 		return IRQ_SET_MASK_OK;
693 
694 	/*
695 	 * If the existing target is already in the new mask and is
696 	 * online, then do nothing.
697 	 */
698 	if (xd->target != XIVE_INVALID_TARGET &&
699 	    cpu_online(xd->target) &&
700 	    cpumask_test_cpu(xd->target, cpumask))
701 		return IRQ_SET_MASK_OK;
702 
703 	/* Pick a new target */
704 	target = xive_pick_irq_target(d, cpumask);
705 
706 	/* No target found */
707 	if (target == XIVE_INVALID_TARGET)
708 		return -ENXIO;
709 
710 	/* Sanity check */
711 	if (WARN_ON(target >= nr_cpu_ids))
712 		target = smp_processor_id();
713 
714 	old_target = xd->target;
715 
716 	/*
717 	 * Only configure the irq if it's not currently passed-through to
718 	 * a KVM guest
719 	 */
720 	if (!irqd_is_forwarded_to_vcpu(d))
721 		rc = xive_ops->configure_irq(hw_irq,
722 					     get_hard_smp_processor_id(target),
723 					     xive_irq_priority, d->irq);
724 	if (rc < 0) {
725 		pr_err("Error %d reconfiguring irq %d\n", rc, d->irq);
726 		return rc;
727 	}
728 
729 	pr_devel("  target: 0x%x\n", target);
730 	xd->target = target;
731 
732 	/* Give up previous target */
733 	if (old_target != XIVE_INVALID_TARGET)
734 	    xive_dec_target_count(old_target);
735 
736 	return IRQ_SET_MASK_OK;
737 }
738 
739 static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type)
740 {
741 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
742 
743 	/*
744 	 * We only support these. This has really no effect other than setting
745 	 * the corresponding descriptor bits, but those will in turn
746 	 * affect the resend function when re-enabling an edge interrupt.
747 	 *
748 	 * We set the default to edge as explained in map().
749 	 */
750 	if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
751 		flow_type = IRQ_TYPE_EDGE_RISING;
752 
753 	if (flow_type != IRQ_TYPE_EDGE_RISING &&
754 	    flow_type != IRQ_TYPE_LEVEL_LOW)
755 		return -EINVAL;
756 
757 	irqd_set_trigger_type(d, flow_type);
758 
759 	/*
760 	 * Double check it matches what the FW thinks
761 	 *
762 	 * NOTE: We don't know yet if the PAPR interface will provide
763 	 * the LSI vs MSI information apart from the device-tree so
764 	 * this check might have to move into an optional backend call
765 	 * that is specific to the native backend
766 	 */
767 	if ((flow_type == IRQ_TYPE_LEVEL_LOW) !=
768 	    !!(xd->flags & XIVE_IRQ_FLAG_LSI)) {
769 		pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n",
770 			d->irq, (u32)irqd_to_hwirq(d),
771 			(flow_type == IRQ_TYPE_LEVEL_LOW) ? "Level" : "Edge",
772 			(xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge");
773 	}
774 
775 	return IRQ_SET_MASK_OK_NOCOPY;
776 }
777 
778 static int xive_irq_retrigger(struct irq_data *d)
779 {
780 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
781 
782 	/* This should be only for MSIs */
783 	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
784 		return 0;
785 
786 	/*
787 	 * To perform a retrigger, we first set the PQ bits to
788 	 * 11, then perform an EOI.
789 	 */
790 	xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
791 
792 	/*
793 	 * Note: We pass "0" to the hw_irq argument in order to
794 	 * avoid calling into the backend EOI code which we don't
795 	 * want to do in the case of a re-trigger. Backends typically
796 	 * only do EOI for LSIs anyway.
797 	 */
798 	xive_do_source_eoi(0, xd);
799 
800 	return 1;
801 }
802 
803 static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
804 {
805 	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
806 	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
807 	int rc;
808 	u8 pq;
809 
810 	/*
811 	 * We only support this on interrupts that do not require
812 	 * firmware calls for masking and unmasking
813 	 */
814 	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW)
815 		return -EIO;
816 
817 	/*
818 	 * This is called by KVM with state non-NULL for enabling
819 	 * pass-through or NULL for disabling it
820 	 */
821 	if (state) {
822 		irqd_set_forwarded_to_vcpu(d);
823 
824 		/* Set it to PQ=10 state to prevent further sends */
825 		pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
826 
827 		/* No target ? nothing to do */
828 		if (xd->target == XIVE_INVALID_TARGET) {
829 			/*
830 			 * An untargeted interrupt should have been
831 			 * also masked at the source
832 			 */
833 			WARN_ON(pq & 2);
834 
835 			return 0;
836 		}
837 
838 		/*
839 		 * If P was set, adjust state to PQ=11 to indicate
840 		 * that a resend is needed for the interrupt to reach
841 		 * the guest. Also remember the value of P.
842 		 *
843 		 * This also tells us that it's in flight to a host queue
844 		 * or has already been fetched but hasn't been EOIed yet
845 		 * by the host. Thus it's potentially using up a host
846 		 * queue slot. This is important to know because as long
847 		 * as this is the case, we must not hard-unmask it when
848 		 * "returning" that interrupt to the host.
849 		 *
850 		 * This saved_p is cleared by the host EOI, when we know
851 		 * for sure the queue slot is no longer in use.
852 		 */
853 		if (pq & 2) {
854 			pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
855 			xd->saved_p = true;
856 
857 			/*
858 			 * Sync the XIVE source HW to ensure the interrupt
859 			 * has gone through the EAS before we change its
860 			 * target to the guest. That should guarantee us
861 			 * that we *will* eventually get an EOI for it on
862 			 * the host. Otherwise there would be a small window
863 			 * for P to be seen here but the interrupt going
864 			 * to the guest queue.
865 			 */
866 			if (xive_ops->sync_source)
867 				xive_ops->sync_source(hw_irq);
868 		} else
869 			xd->saved_p = false;
870 	} else {
871 		irqd_clr_forwarded_to_vcpu(d);
872 
873 		/* No host target ? hard mask and return */
874 		if (xd->target == XIVE_INVALID_TARGET) {
875 			xive_do_source_set_mask(xd, true);
876 			return 0;
877 		}
878 
879 		/*
880 		 * Sync the XIVE source HW to ensure the interrupt
881 		 * has gone through the EAS before we change its
882 		 * target to the host.
883 		 */
884 		if (xive_ops->sync_source)
885 			xive_ops->sync_source(hw_irq);
886 
887 		/*
888 		 * By convention we are called with the interrupt in
889 		 * a PQ=10 or PQ=11 state, ie, it won't fire and will
890 		 * have latched in Q whether there's a pending HW
891 		 * interrupt or not.
892 		 *
893 		 * First reconfigure the target.
894 		 */
895 		rc = xive_ops->configure_irq(hw_irq,
896 					     get_hard_smp_processor_id(xd->target),
897 					     xive_irq_priority, d->irq);
898 		if (rc)
899 			return rc;
900 
901 		/*
902 		 * Then if saved_p is not set, effectively re-enable the
903 		 * interrupt with an EOI. If it is set, we know there is
904 		 * still a message in a host queue somewhere that will be
905 		 * EOId eventually.
906 		 *
907 		 * Note: We don't check irqd_irq_disabled(). Effectively,
908 		 * we *will* let the irq get through even if masked if the
909 		 * HW is still firing it in order to deal with the whole
910 		 * saved_p business properly. If the interrupt triggers
911 		 * while masked, the generic code will re-mask it anyway.
912 		 */
913 		if (!xd->saved_p)
914 			xive_do_source_eoi(hw_irq, xd);
915 
916 	}
917 	return 0;
918 }
919 
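/* irq_chip callbacks used for normal (non-IPI) XIVE interrupt sources */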
920 static struct irq_chip xive_irq_chip = {
921 	.name = "XIVE-IRQ",
922 	.irq_startup = xive_irq_startup,
923 	.irq_shutdown = xive_irq_shutdown,
924 	.irq_eoi = xive_irq_eoi,
925 	.irq_mask = xive_irq_mask,
926 	.irq_unmask = xive_irq_unmask,
927 	.irq_set_affinity = xive_irq_set_affinity,
928 	.irq_set_type = xive_irq_set_type,
929 	.irq_retrigger = xive_irq_retrigger,
930 	.irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity,
931 };
932 
933 bool is_xive_irq(struct irq_chip *chip)
934 {
935 	return chip == &xive_irq_chip;
936 }
937 EXPORT_SYMBOL_GPL(is_xive_irq);
938 
939 void xive_cleanup_irq_data(struct xive_irq_data *xd)
940 {
941 	if (xd->eoi_mmio) {
942 		iounmap(xd->eoi_mmio);
943 		if (xd->eoi_mmio == xd->trig_mmio)
944 			xd->trig_mmio = NULL;
945 		xd->eoi_mmio = NULL;
946 	}
947 	if (xd->trig_mmio) {
948 		iounmap(xd->trig_mmio);
949 		xd->trig_mmio = NULL;
950 	}
951 }
952 EXPORT_SYMBOL_GPL(xive_cleanup_irq_data);
953 
954 static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
955 {
956 	struct xive_irq_data *xd;
957 	int rc;
958 
959 	xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL);
960 	if (!xd)
961 		return -ENOMEM;
962 	rc = xive_ops->populate_irq_data(hw, xd);
963 	if (rc) {
964 		kfree(xd);
965 		return rc;
966 	}
967 	xd->target = XIVE_INVALID_TARGET;
968 	irq_set_handler_data(virq, xd);
969 
970 	/*
971 	 * Turn OFF by default the interrupt being mapped. A side
972 	 * effect of this check is the mapping of the ESB page of the
973 	 * interrupt in the Linux address space. This prevents page
974 	 * fault issues in the crash handler which masks all
975 	 * interrupts.
976 	 */
977 	xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
978 
979 	return 0;
980 }
981 
982 static void xive_irq_free_data(unsigned int virq)
983 {
984 	struct xive_irq_data *xd = irq_get_handler_data(virq);
985 
986 	if (!xd)
987 		return;
988 	irq_set_handler_data(virq, NULL);
989 	xive_cleanup_irq_data(xd);
990 	kfree(xd);
991 }
992 
993 #ifdef CONFIG_SMP
994 
995 static void xive_cause_ipi(int cpu)
996 {
997 	struct xive_cpu *xc;
998 	struct xive_irq_data *xd;
999 
1000 	xc = per_cpu(xive_cpu, cpu);
1001 
1002 	DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n",
1003 		    smp_processor_id(), cpu, xc->hw_ipi);
1004 
1005 	xd = &xc->ipi_data;
1006 	if (WARN_ON(!xd->trig_mmio))
1007 		return;
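	/* Any store to the trigger page causes the interrupt to fire */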
1008 	out_be64(xd->trig_mmio, 0);
1009 }
1010 
1011 static irqreturn_t xive_muxed_ipi_action(int irq, void *dev_id)
1012 {
1013 	return smp_ipi_demux();
1014 }
1015 
1016 static void xive_ipi_eoi(struct irq_data *d)
1017 {
1018 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1019 
1020 	/* Handle possible race with unplug and drop stale IPIs */
1021 	if (!xc)
1022 		return;
1023 
1024 	DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n",
1025 		    d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio);
1026 
1027 	xive_do_source_eoi(xc->hw_ipi, &xc->ipi_data);
1028 	xive_do_queue_eoi(xc);
1029 }
1030 
1031 static void xive_ipi_do_nothing(struct irq_data *d)
1032 {
1033 	/*
1034 	 * Nothing to do, we never mask/unmask IPIs, but the callback
1035 	 * has to exist for the struct irq_chip.
1036 	 */
1037 }
1038 
1039 static struct irq_chip xive_ipi_chip = {
1040 	.name = "XIVE-IPI",
1041 	.irq_eoi = xive_ipi_eoi,
1042 	.irq_mask = xive_ipi_do_nothing,
1043 	.irq_unmask = xive_ipi_do_nothing,
1044 };
1045 
1046 static void __init xive_request_ipi(void)
1047 {
1048 	unsigned int virq;
1049 
1050 	/*
1051 	 * If initialization failed, move on; we might manage to
1052 	 * reach the point where we display our errors before
1053 	 * the system falls apart.
1054 	 */
1055 	if (!xive_irq_domain)
1056 		return;
1057 
1058 	/* Initialize it */
1059 	virq = irq_create_mapping(xive_irq_domain, 0);
1060 	xive_ipi_irq = virq;
1061 
1062 	WARN_ON(request_irq(virq, xive_muxed_ipi_action,
1063 			    IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL));
1064 }
1065 
1066 static int xive_setup_cpu_ipi(unsigned int cpu)
1067 {
1068 	struct xive_cpu *xc;
1069 	int rc;
1070 
1071 	pr_debug("Setting up IPI for CPU %d\n", cpu);
1072 
1073 	xc = per_cpu(xive_cpu, cpu);
1074 
1075 	/* Check if we are already setup */
1076 	if (xc->hw_ipi != 0)
1077 		return 0;
1078 
1079 	/* Grab an IPI from the backend, this will populate xc->hw_ipi */
1080 	if (xive_ops->get_ipi(cpu, xc))
1081 		return -EIO;
1082 
1083 	/*
1084 	 * Populate the IRQ data in the xive_cpu structure and
1085 	 * configure the HW / enable the IPIs.
1086 	 */
1087 	rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data);
1088 	if (rc) {
1089 		pr_err("Failed to populate IPI data on CPU %d\n", cpu);
1090 		return -EIO;
1091 	}
1092 	rc = xive_ops->configure_irq(xc->hw_ipi,
1093 				     get_hard_smp_processor_id(cpu),
1094 				     xive_irq_priority, xive_ipi_irq);
1095 	if (rc) {
1096 		pr_err("Failed to map IPI CPU %d\n", cpu);
1097 		return -EIO;
1098 	}
1099 	pr_devel("CPU %d HW IPI %x, virq %d, trig_mmio=%p\n", cpu,
1100 	    xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio);
1101 
1102 	/* Unmask it */
1103 	xive_do_source_set_mask(&xc->ipi_data, false);
1104 
1105 	return 0;
1106 }
1107 
1108 static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
1109 {
1110 	/* Disable the IPI and free the IRQ data */
1111 
1112 	/* Already cleaned up ? */
1113 	if (xc->hw_ipi == 0)
1114 		return;
1115 
1116 	/* Mask the IPI */
1117 	xive_do_source_set_mask(&xc->ipi_data, true);
1118 
1119 	/*
1120 	 * Note: We don't call xive_cleanup_irq_data() to free
1121 	 * the mappings as this is called from an IPI on kexec
1122 	 * which is not a safe environment to call iounmap()
1123 	 */
1124 
1125 	/* Deconfigure/mask in the backend */
1126 	xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(),
1127 				0xff, xive_ipi_irq);
1128 
1129 	/* Free the IPIs in the backend */
1130 	xive_ops->put_ipi(cpu, xc);
1131 }
1132 
1133 void __init xive_smp_probe(void)
1134 {
1135 	smp_ops->cause_ipi = xive_cause_ipi;
1136 
1137 	/* Register the IPI */
1138 	xive_request_ipi();
1139 
1140 	/* Allocate and setup IPI for the boot CPU */
1141 	xive_setup_cpu_ipi(smp_processor_id());
1142 }
1143 
1144 #endif /* CONFIG_SMP */
1145 
1146 static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq,
1147 			       irq_hw_number_t hw)
1148 {
1149 	int rc;
1150 
1151 	/*
1152 	 * Mark interrupts as edge sensitive by default so that resend
1153 	 * actually works. Will fix that up below if needed.
1154 	 */
1155 	irq_clear_status_flags(virq, IRQ_LEVEL);
1156 
1157 #ifdef CONFIG_SMP
1158 	/* IPIs are special and come up with HW number 0 */
1159 	if (hw == 0) {
1160 		/*
1161 		 * IPIs are marked per-cpu. We use separate HW interrupts under
1162 		 * the hood but associated with the same "linux" interrupt
1163 		 */
1164 		irq_set_chip_and_handler(virq, &xive_ipi_chip,
1165 					 handle_percpu_irq);
1166 		return 0;
1167 	}
1168 #endif
1169 
1170 	rc = xive_irq_alloc_data(virq, hw);
1171 	if (rc)
1172 		return rc;
1173 
1174 	irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq);
1175 
1176 	return 0;
1177 }
1178 
1179 static void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
1180 {
1181 	struct irq_data *data = irq_get_irq_data(virq);
1182 	unsigned int hw_irq;
1183 
1184 	/* XXX Assign BAD number */
1185 	if (!data)
1186 		return;
1187 	hw_irq = (unsigned int)irqd_to_hwirq(data);
1188 	if (hw_irq)
1189 		xive_irq_free_data(virq);
1190 }
1191 
1192 static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct,
1193 				 const u32 *intspec, unsigned int intsize,
1194 				 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
1195 
1196 {
1197 	*out_hwirq = intspec[0];
1198 
1199 	/*
1200 	 * If intsize is at least 2, we look for the type in the second cell;
1201 	 * we assume the LSB indicates a level interrupt.
1202 	 */
1203 	if (intsize > 1) {
1204 		if (intspec[1] & 1)
1205 			*out_flags = IRQ_TYPE_LEVEL_LOW;
1206 		else
1207 			*out_flags = IRQ_TYPE_EDGE_RISING;
1208 	} else
1209 		*out_flags = IRQ_TYPE_LEVEL_LOW;
1210 
1211 	return 0;
1212 }
1213 
1214 static int xive_irq_domain_match(struct irq_domain *h, struct device_node *node,
1215 				 enum irq_domain_bus_token bus_token)
1216 {
1217 	return xive_ops->match(node);
1218 }
1219 
1220 static const struct irq_domain_ops xive_irq_domain_ops = {
1221 	.match = xive_irq_domain_match,
1222 	.map = xive_irq_domain_map,
1223 	.unmap = xive_irq_domain_unmap,
1224 	.xlate = xive_irq_domain_xlate,
1225 };
1226 
1227 static void __init xive_init_host(void)
1228 {
1229 	xive_irq_domain = irq_domain_add_nomap(NULL, XIVE_MAX_IRQ,
1230 					       &xive_irq_domain_ops, NULL);
1231 	if (WARN_ON(xive_irq_domain == NULL))
1232 		return;
1233 	irq_set_default_host(xive_irq_domain);
1234 }
1235 
1236 static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
1237 {
1238 	if (xc->queue[xive_irq_priority].qpage)
1239 		xive_ops->cleanup_queue(cpu, xc, xive_irq_priority);
1240 }
1241 
1242 static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
1243 {
1244 	int rc = 0;
1245 
1246 	/* We set up one queue for now, with a 64k page */
1247 	if (!xc->queue[xive_irq_priority].qpage)
1248 		rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority);
1249 
1250 	return rc;
1251 }
1252 
1253 static int xive_prepare_cpu(unsigned int cpu)
1254 {
1255 	struct xive_cpu *xc;
1256 
1257 	xc = per_cpu(xive_cpu, cpu);
1258 	if (!xc) {
1259 		struct device_node *np;
1260 
1261 		xc = kzalloc_node(sizeof(struct xive_cpu),
1262 				  GFP_KERNEL, cpu_to_node(cpu));
1263 		if (!xc)
1264 			return -ENOMEM;
1265 		np = of_get_cpu_node(cpu, NULL);
1266 		if (np)
1267 			xc->chip_id = of_get_ibm_chip_id(np);
1268 		of_node_put(np);
1269 
1270 		per_cpu(xive_cpu, cpu) = xc;
1271 	}
1272 
1273 	/* Setup EQs if not already */
1274 	return xive_setup_cpu_queues(cpu, xc);
1275 }
1276 
1277 static void xive_setup_cpu(void)
1278 {
1279 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1280 
1281 	/* Debug: Dump the TM state */
1282 	pr_devel("CPU %d [HW 0x%02x] VT=%02x\n",
1283 	    smp_processor_id(), hard_smp_processor_id(),
1284 	    in_8(xive_tima + xive_tima_offset + TM_WORD2));
1285 
1286 	/* The backend might have additional things to do */
1287 	if (xive_ops->setup_cpu)
1288 		xive_ops->setup_cpu(smp_processor_id(), xc);
1289 
1290 	/* Set CPPR to 0xff to enable flow of interrupts */
1291 	xc->cppr = 0xff;
1292 	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
1293 }
1294 
1295 #ifdef CONFIG_SMP
1296 void xive_smp_setup_cpu(void)
1297 {
1298 	pr_devel("SMP setup CPU %d\n", smp_processor_id());
1299 
1300 	/* This will have already been done on the boot CPU */
1301 	if (smp_processor_id() != boot_cpuid)
1302 		xive_setup_cpu();
1303 
1304 }
1305 
1306 int xive_smp_prepare_cpu(unsigned int cpu)
1307 {
1308 	int rc;
1309 
1310 	/* Allocate per-CPU data and queues */
1311 	rc = xive_prepare_cpu(cpu);
1312 	if (rc)
1313 		return rc;
1314 
1315 	/* Allocate and setup IPI for the new CPU */
1316 	return xive_setup_cpu_ipi(cpu);
1317 }
1318 
1319 #ifdef CONFIG_HOTPLUG_CPU
1320 static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
1321 {
1322 	u32 irq;
1323 
1324 	/* We assume local irqs are disabled */
1325 	WARN_ON(!irqs_disabled());
1326 
1327 	/* Check what's already in the CPU queue */
1328 	while ((irq = xive_scan_interrupts(xc, false)) != 0) {
1329 		/*
1330 		 * We need to re-route that interrupt to its new destination.
1331 		 * First get and lock the descriptor
1332 		 */
1333 		struct irq_desc *desc = irq_to_desc(irq);
1334 		struct irq_data *d = irq_desc_get_irq_data(desc);
1335 		struct xive_irq_data *xd;
1336 		unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
1337 
1338 		/*
1339 		 * Ignore anything that isn't a XIVE irq, and ignore
1340 		 * IPIs, which can just be dropped.
1341 		 */
1342 		if (d->domain != xive_irq_domain || hw_irq == 0)
1343 			continue;
1344 
1345 		/*
1346 		 * The IRQ should have already been re-routed, it's just a
1347 		 * stale entry in the old queue, so re-trigger it in order to make
1348 		 * it reach its new destination.
1349 		 */
1350 #ifdef DEBUG_FLUSH
1351 		pr_info("CPU %d: Got irq %d while offline, re-sending...\n",
1352 			cpu, irq);
1353 #endif
1354 		raw_spin_lock(&desc->lock);
1355 		xd = irq_desc_get_handler_data(desc);
1356 
1357 		/*
1358 		 * For LSIs, we EOI, this will cause a resend if it's
1359 		 * still asserted. Otherwise do an MSI retrigger.
1360 		 */
1361 		if (xd->flags & XIVE_IRQ_FLAG_LSI)
1362 			xive_do_source_eoi(irqd_to_hwirq(d), xd);
1363 		else
1364 			xive_irq_retrigger(d);
1365 
1366 		raw_spin_unlock(&desc->lock);
1367 	}
1368 }
1369 
1370 void xive_smp_disable_cpu(void)
1371 {
1372 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1373 	unsigned int cpu = smp_processor_id();
1374 
1375 	/* Migrate interrupts away from the CPU */
1376 	irq_migrate_all_off_this_cpu();
1377 
1378 	/* Set CPPR to 0 to disable flow of interrupts */
1379 	xc->cppr = 0;
1380 	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);
1381 
1382 	/* Flush everything still in the queue */
1383 	xive_flush_cpu_queue(cpu, xc);
1384 
1385 	/* Re-enable CPPR  */
1386 	xc->cppr = 0xff;
1387 	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
1388 }
1389 
1390 void xive_flush_interrupt(void)
1391 {
1392 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1393 	unsigned int cpu = smp_processor_id();
1394 
1395 	/* Called if an interrupt occurs while the CPU is hot unplugged */
1396 	xive_flush_cpu_queue(cpu, xc);
1397 }
1398 
1399 #endif /* CONFIG_HOTPLUG_CPU */
1400 
1401 #endif /* CONFIG_SMP */
1402 
1403 void xive_teardown_cpu(void)
1404 {
1405 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1406 	unsigned int cpu = smp_processor_id();
1407 
1408 	/* Set CPPR to 0 to disable flow of interrupts */
1409 	xc->cppr = 0;
1410 	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);
1411 
1412 	if (xive_ops->teardown_cpu)
1413 		xive_ops->teardown_cpu(cpu, xc);
1414 
1415 #ifdef CONFIG_SMP
1416 	/* Get rid of IPI */
1417 	xive_cleanup_cpu_ipi(cpu, xc);
1418 #endif
1419 
1420 	/* Disable and free the queues */
1421 	xive_cleanup_cpu_queues(cpu, xc);
1422 }
1423 
1424 void xive_kexec_teardown_cpu(int secondary)
1425 {
1426 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1427 	unsigned int cpu = smp_processor_id();
1428 
1429 	/* Set CPPR to 0 to disable flow of interrupts */
1430 	xc->cppr = 0;
1431 	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);
1432 
1433 	/* Backend cleanup if any */
1434 	if (xive_ops->teardown_cpu)
1435 		xive_ops->teardown_cpu(cpu, xc);
1436 
1437 #ifdef CONFIG_SMP
1438 	/* Get rid of IPI */
1439 	xive_cleanup_cpu_ipi(cpu, xc);
1440 #endif
1441 
1442 	/* Disable and free the queues */
1443 	xive_cleanup_cpu_queues(cpu, xc);
1444 }
1445 
1446 void xive_shutdown(void)
1447 {
1448 	xive_ops->shutdown();
1449 }
1450 
1451 bool __init xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
1452 			   u8 max_prio)
1453 {
1454 	xive_tima = area;
1455 	xive_tima_offset = offset;
1456 	xive_ops = ops;
1457 	xive_irq_priority = max_prio;
1458 
1459 	ppc_md.get_irq = xive_get_irq;
1460 	__xive_enabled = true;
1461 
1462 	pr_devel("Initializing host..\n");
1463 	xive_init_host();
1464 
1465 	pr_devel("Initializing boot CPU..\n");
1466 
1467 	/* Allocate per-CPU data and queues */
1468 	xive_prepare_cpu(smp_processor_id());
1469 
1470 	/* Get ready for interrupts */
1471 	xive_setup_cpu();
1472 
1473 	pr_info("Interrupt handling initialized with %s backend\n",
1474 		xive_ops->name);
1475 	pr_info("Using priority %d for all interrupts\n", max_prio);
1476 
1477 	return true;
1478 }
1479 
1480 __be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift)
1481 {
1482 	unsigned int alloc_order;
1483 	struct page *pages;
1484 	__be32 *qpage;
1485 
1486 	alloc_order = xive_alloc_order(queue_shift);
1487 	pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
1488 	if (!pages)
1489 		return ERR_PTR(-ENOMEM);
1490 	qpage = (__be32 *)page_address(pages);
1491 	memset(qpage, 0, 1 << queue_shift);
1492 
1493 	return qpage;
1494 }
1495 
1496 static int __init xive_off(char *arg)
1497 {
1498 	xive_cmdline_disabled = true;
1499 	return 0;
1500 }
1501 __setup("xive=off", xive_off);
1502