/*
 * Copyright 2011 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>

#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xics.h>
#include <asm/io.h>
#include <asm/hvcall.h>

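/* Read the XIRR from the hypervisor via the H_XIRR hcall, passing the
 * caller's CPPR as the hcall argument. Returns XICS_IRQ_SPURIOUS on failure.
 */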
static inline unsigned int icp_hv_get_xirr(unsigned char cppr)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;
	unsigned int ret = XICS_IRQ_SPURIOUS;

	rc = plpar_hcall(H_XIRR, retbuf, cppr);
	if (rc == H_SUCCESS) {
		ret = (unsigned int)retbuf[0];
	} else {
		pr_err("%s: bad return code xirr cppr=0x%x returned %ld\n",
			__func__, cppr, rc);
		WARN_ON_ONCE(1);
	}

	return ret;
}

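/* Set this CPU's current processor priority (CPPR) via the H_CPPR hcall. */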
static inline void icp_hv_set_cppr(u8 value)
{
	long rc = plpar_hcall_norets(H_CPPR, value);
	if (rc != H_SUCCESS) {
		pr_err("%s: bad return code cppr cppr=0x%x returned %ld\n",
			__func__, value, rc);
		WARN_ON_ONCE(1);
	}
}

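/* Signal end-of-interrupt by writing the XIRR back via the H_EOI hcall.
 * If the hcall fails, fall back to restoring the CPPR held in the top byte.
 */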
static inline void icp_hv_set_xirr(unsigned int value)
{
	long rc = plpar_hcall_norets(H_EOI, value);
	if (rc != H_SUCCESS) {
		pr_err("%s: bad return code eoi xirr=0x%x returned %ld\n",
			__func__, value, rc);
		WARN_ON_ONCE(1);
		icp_hv_set_cppr(value >> 24);
	}
}

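/* Write @value to the MFRR of @n_cpu via the H_IPI hcall: IPI_PRIORITY
 * raises an IPI on that CPU, 0xff clears it.
 */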
static inline void icp_hv_set_qirr(int n_cpu, u8 value)
{
	int hw_cpu = get_hard_smp_processor_id(n_cpu);
	long rc;

	/* Make sure all previous accesses are ordered before IPI sending */
	mb();
	rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
	if (rc != H_SUCCESS) {
		pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x "
			"returned %ld\n", __func__, n_cpu, hw_cpu, value, rc);
		WARN_ON_ONCE(1);
	}
}

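/* irq_chip ->eoi handler: restore the CPPR saved when the interrupt was
 * taken and EOI it with a single XIRR write.
 */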
static void icp_hv_eoi(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

	iosync();
	icp_hv_set_xirr((xics_pop_cppr() << 24) | hw_irq);
}

static void icp_hv_teardown_cpu(void)
{
	int cpu = smp_processor_id();

	/* Clear any pending IPI */
	icp_hv_set_qirr(cpu, 0xff);
}

static void icp_hv_flush_ipi(void)
{
	/* We take the IPI irq but never return, so we need to EOI the IPI,
	 * but we want to leave our priority 0.
	 *
	 * Should we check all the other interrupts too?
	 * Should we be flagging the idle loop instead?
	 * Or creating some task to be scheduled?
	 */

	icp_hv_set_xirr((0x00 << 24) | XICS_IPI);
}

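/* ->get_irq hook: fetch the pending vector from the hypervisor and map it
 * to a Linux interrupt number, or return NO_IRQ if nothing is pending.
 */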
static unsigned int icp_hv_get_irq(void)
{
	unsigned int xirr = icp_hv_get_xirr(xics_cppr_top());
	unsigned int vec = xirr & 0x00ffffff;
	unsigned int irq;

	if (vec == XICS_IRQ_SPURIOUS)
		return NO_IRQ;

	irq = irq_radix_revmap_lookup(xics_host, vec);
	if (likely(irq != NO_IRQ)) {
		xics_push_cppr(vec);
		return irq;
	}

	/* We don't have a linux mapping, so have rtas mask it. */
	xics_mask_unknown_vec(vec);

	/* We might learn about it later, so EOI it */
	icp_hv_set_xirr(xirr);

	return NO_IRQ;
}

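/* Update the cached base CPPR and propagate the new priority to the
 * hypervisor.
 */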
static void icp_hv_set_cpu_priority(unsigned char cppr)
{
	xics_set_base_cppr(cppr);
	icp_hv_set_cppr(cppr);
	iosync();
}

#ifdef CONFIG_SMP

static void icp_hv_cause_ipi(int cpu, unsigned long data)
{
	icp_hv_set_qirr(cpu, IPI_PRIORITY);
}

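/* IPI handler: acknowledge the IPI by resetting this CPU's MFRR to 0xff,
 * then demultiplex the pending IPI message(s).
 */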
static irqreturn_t icp_hv_ipi_action(int irq, void *dev_id)
{
	int cpu = smp_processor_id();

	icp_hv_set_qirr(cpu, 0xff);

	return smp_ipi_demux();
}

#endif /* CONFIG_SMP */

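/* hcall-based ICP backend; installed as the global icp_ops by icp_hv_init() */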
static const struct icp_ops icp_hv_ops = {
	.get_irq = icp_hv_get_irq,
	.eoi = icp_hv_eoi,
	.set_priority = icp_hv_set_cpu_priority,
	.teardown_cpu = icp_hv_teardown_cpu,
	.flush_ipi = icp_hv_flush_ipi,
#ifdef CONFIG_SMP
	.ipi_action = icp_hv_ipi_action,
	.cause_ipi = icp_hv_cause_ipi,
#endif
};

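/* Probe the device tree for an interrupt presentation controller and, if
 * one is found, select the hcall-based ICP backend.
 */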
int icp_hv_init(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xicp");
	if (!np)
		np = of_find_node_by_type(NULL,
				"PowerPC-External-Interrupt-Presentation");
	if (!np)
		return -ENODEV;

	icp_ops = &icp_hv_ops;

	return 0;
}