1 /*
2 * Driver for ePAPR Embedded Hypervisor PIC
3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 *
6 * Author: Ashish Kalra <ashish.kalra@freescale.com>
7 *
8 * This file is licensed under the terms of the GNU General Public License
9 * version 2. This program is licensed "as is" without any warranty of any
10 * kind, whether express or implied.
11 */
12
13 #include <linux/types.h>
14 #include <linux/kernel.h>
15 #include <linux/init.h>
16 #include <linux/irq.h>
17 #include <linux/smp.h>
18 #include <linux/interrupt.h>
19 #include <linux/slab.h>
20 #include <linux/spinlock.h>
21 #include <linux/of.h>
22
23 #include <asm/io.h>
24 #include <asm/irq.h>
25 #include <asm/smp.h>
26 #include <asm/machdep.h>
27 #include <asm/ehv_pic.h>
28 #include <asm/fsl_hcalls.h>
29
30 #include "../../../kernel/irq/settings.h"
31
32 static struct ehv_pic *global_ehv_pic;
33 static DEFINE_SPINLOCK(ehv_pic_lock);
34
35 static u32 hwirq_intspec[NR_EHV_PIC_INTS];
36 static u32 __iomem *mpic_percpu_base_vaddr;
37
38 #define IRQ_TYPE_MPIC_DIRECT 4
39 #define MPIC_EOI 0x00B0
40
41 /*
42 * Linux descriptor level callbacks
43 */
44
ehv_pic_unmask_irq(struct irq_data * d)45 void ehv_pic_unmask_irq(struct irq_data *d)
46 {
47 unsigned int src = virq_to_hw(d->irq);
48
49 ev_int_set_mask(src, 0);
50 }
51
ehv_pic_mask_irq(struct irq_data * d)52 void ehv_pic_mask_irq(struct irq_data *d)
53 {
54 unsigned int src = virq_to_hw(d->irq);
55
56 ev_int_set_mask(src, 1);
57 }
58
ehv_pic_end_irq(struct irq_data * d)59 void ehv_pic_end_irq(struct irq_data *d)
60 {
61 unsigned int src = virq_to_hw(d->irq);
62
63 ev_int_eoi(src);
64 }
65
ehv_pic_direct_end_irq(struct irq_data * d)66 void ehv_pic_direct_end_irq(struct irq_data *d)
67 {
68 out_be32(mpic_percpu_base_vaddr + MPIC_EOI / 4, 0);
69 }
70
/*
 * Re-target an interrupt source to a new cpu.  The existing config and
 * priority are read back and written out unchanged; only the destination
 * cpu field is replaced.  Always succeeds.
 */
int ehv_pic_set_affinity(struct irq_data *d, const struct cpumask *dest,
			 bool force)
{
	unsigned int hw = virq_to_hw(d->irq);
	unsigned int cfg, priority, old_dest;
	int target = irq_choose_cpu(dest);
	unsigned long flags;

	spin_lock_irqsave(&ehv_pic_lock, flags);

	ev_int_get_config(hw, &cfg, &priority, &old_dest);
	ev_int_set_config(hw, cfg, priority, target);

	spin_unlock_irqrestore(&ehv_pic_lock, flags);

	return 0;
}
86
ehv_pic_type_to_vecpri(unsigned int type)87 static unsigned int ehv_pic_type_to_vecpri(unsigned int type)
88 {
89 /* Now convert sense value */
90
91 switch (type & IRQ_TYPE_SENSE_MASK) {
92 case IRQ_TYPE_EDGE_RISING:
93 return EHV_PIC_INFO(VECPRI_SENSE_EDGE) |
94 EHV_PIC_INFO(VECPRI_POLARITY_POSITIVE);
95
96 case IRQ_TYPE_EDGE_FALLING:
97 case IRQ_TYPE_EDGE_BOTH:
98 return EHV_PIC_INFO(VECPRI_SENSE_EDGE) |
99 EHV_PIC_INFO(VECPRI_POLARITY_NEGATIVE);
100
101 case IRQ_TYPE_LEVEL_HIGH:
102 return EHV_PIC_INFO(VECPRI_SENSE_LEVEL) |
103 EHV_PIC_INFO(VECPRI_POLARITY_POSITIVE);
104
105 case IRQ_TYPE_LEVEL_LOW:
106 default:
107 return EHV_PIC_INFO(VECPRI_SENSE_LEVEL) |
108 EHV_PIC_INFO(VECPRI_POLARITY_NEGATIVE);
109 }
110 }
111
ehv_pic_set_irq_type(struct irq_data * d,unsigned int flow_type)112 int ehv_pic_set_irq_type(struct irq_data *d, unsigned int flow_type)
113 {
114 unsigned int src = virq_to_hw(d->irq);
115 struct irq_desc *desc = irq_to_desc(d->irq);
116 unsigned int vecpri, vold, vnew, prio, cpu_dest;
117 unsigned long flags;
118
119 if (flow_type == IRQ_TYPE_NONE)
120 flow_type = IRQ_TYPE_LEVEL_LOW;
121
122 irq_settings_clr_level(desc);
123 irq_settings_set_trigger_mask(desc, flow_type);
124 if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
125 irq_settings_set_level(desc);
126
127 vecpri = ehv_pic_type_to_vecpri(flow_type);
128
129 spin_lock_irqsave(&ehv_pic_lock, flags);
130 ev_int_get_config(src, &vold, &prio, &cpu_dest);
131 vnew = vold & ~(EHV_PIC_INFO(VECPRI_POLARITY_MASK) |
132 EHV_PIC_INFO(VECPRI_SENSE_MASK));
133 vnew |= vecpri;
134
135 /*
136 * TODO : Add specific interface call for platform to set
137 * individual interrupt priorities.
138 * platform currently using static/default priority for all ints
139 */
140
141 prio = 8;
142
143 ev_int_set_config(src, vecpri, prio, cpu_dest);
144
145 spin_unlock_irqrestore(&ehv_pic_lock, flags);
146 return 0;
147 }
148
/* Default chip: mask/unmask and EOI all go through hypervisor hcalls. */
static struct irq_chip ehv_pic_irq_chip = {
	.irq_mask = ehv_pic_mask_irq,
	.irq_unmask = ehv_pic_unmask_irq,
	.irq_eoi = ehv_pic_end_irq,
	.irq_set_type = ehv_pic_set_irq_type,
};
155
/*
 * Variant chip for sources flagged IRQ_TYPE_MPIC_DIRECT: identical to the
 * default chip except EOI is written straight to the per-cpu MPIC
 * registers instead of going through the hypervisor.
 */
static struct irq_chip ehv_pic_direct_eoi_irq_chip = {
	.irq_mask = ehv_pic_mask_irq,
	.irq_unmask = ehv_pic_unmask_irq,
	.irq_eoi = ehv_pic_direct_end_irq,
	.irq_set_type = ehv_pic_set_irq_type,
};
162
163 /* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
ehv_pic_get_irq(void)164 unsigned int ehv_pic_get_irq(void)
165 {
166 int irq;
167
168 BUG_ON(global_ehv_pic == NULL);
169
170 if (global_ehv_pic->coreint_flag)
171 irq = mfspr(SPRN_EPR); /* if core int mode */
172 else
173 ev_int_iack(0, &irq); /* legacy mode */
174
175 if (irq == 0xFFFF) /* 0xFFFF --> no irq is pending */
176 return NO_IRQ;
177
178 /*
179 * this will also setup revmap[] in the slow path for the first
180 * time, next calls will always use fast path by indexing revmap
181 */
182 return irq_linear_revmap(global_ehv_pic->irqhost, irq);
183 }
184
ehv_pic_host_match(struct irq_domain * h,struct device_node * node)185 static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node)
186 {
187 /* Exact match, unless ehv_pic node is NULL */
188 return h->of_node == NULL || h->of_node == node;
189 }
190
/*
 * Map a hardware interrupt number into a Linux virq: choose the chip
 * (hypervisor EOI vs direct MPIC EOI), install the fasteoi flow handler,
 * and set the default trigger type.
 */
static int ehv_pic_host_map(struct irq_domain *h, unsigned int virq,
			    irq_hw_number_t hw)
{
	struct ehv_pic *ehv_pic = h->host_data;
	struct irq_chip *chip = &ehv_pic->hc_irq;

	/*
	 * Sources whose device-tree spec asked for direct EOI get the
	 * direct-EOI chip, but only if the per-cpu MPIC registers were
	 * successfully mapped at init time.
	 */
	if (mpic_percpu_base_vaddr &&
	    (hwirq_intspec[hw] & IRQ_TYPE_MPIC_DIRECT))
		chip = &ehv_pic_direct_eoi_irq_chip;

	irq_set_chip_data(virq, chip);

	/*
	 * using handle_fasteoi_irq as our irq handler, this will
	 * only call the eoi callback and suitable for the MPIC
	 * controller which set ISR/IPR automatically and clear the
	 * highest priority active interrupt in ISR/IPR when we do
	 * a specific eoi
	 */
	irq_set_chip_and_handler(virq, chip, handle_fasteoi_irq);

	/* Set default irq type */
	irq_set_irq_type(virq, IRQ_TYPE_NONE);

	return 0;
}
219
/*
 * Translate a device-tree interrupt specifier into a hardware irq number
 * and Linux trigger flags.
 *
 * intspec[0] is the source number; intspec[1] (if present) carries the
 * firmware sense encoding, possibly OR'd with IRQ_TYPE_MPIC_DIRECT, and
 * is remembered in hwirq_intspec[] so ehv_pic_host_map() can pick the
 * direct-EOI chip.
 *
 * Returns 0 on success, -EINVAL for an out-of-range source number.
 */
static int ehv_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
			const u32 *intspec, unsigned int intsize,
			irq_hw_number_t *out_hwirq, unsigned int *out_flags)

{
	/*
	 * interrupt sense values coming from the guest device tree
	 * interrupt specifiers can have four possible sense and
	 * level encoding information and they need to
	 * be translated between firmware type & linux type.
	 */

	static unsigned char map_of_senses_to_linux_irqtype[4] = {
		IRQ_TYPE_EDGE_FALLING,
		IRQ_TYPE_EDGE_RISING,
		IRQ_TYPE_LEVEL_LOW,
		IRQ_TYPE_LEVEL_HIGH,
	};

	/*
	 * intspec[0] comes straight from the device tree and indexes
	 * hwirq_intspec[NR_EHV_PIC_INTS]; reject out-of-range values to
	 * avoid an out-of-bounds write from malformed DT data.
	 */
	if (intspec[0] >= NR_EHV_PIC_INTS)
		return -EINVAL;

	*out_hwirq = intspec[0];
	if (intsize > 1) {
		/* remember the raw spec for ehv_pic_host_map() */
		hwirq_intspec[intspec[0]] = intspec[1];
		*out_flags = map_of_senses_to_linux_irqtype[intspec[1] &
							~IRQ_TYPE_MPIC_DIRECT];
	} else {
		*out_flags = IRQ_TYPE_NONE;
	}

	return 0;
}
250
/* irq_domain callbacks wiring the ehv_pic into the generic irq layer */
static const struct irq_domain_ops ehv_pic_host_ops = {
	.match = ehv_pic_host_match,
	.map = ehv_pic_host_map,
	.xlate = ehv_pic_host_xlate,
};
256
/*
 * Probe and initialize the ePAPR hypervisor PIC.
 *
 * Finds the "epapr,hv-pic" node, allocates the ehv_pic state, creates a
 * linear irq domain over NR_EHV_PIC_INTS sources, optionally maps the
 * per-cpu MPIC registers for direct EOI, and installs the domain as the
 * default irq host.  On any failure it logs, cleans up, and returns
 * (the function has no return value to report errors through).
 */
void __init ehv_pic_init(void)
{
	struct device_node *np, *np2;
	struct ehv_pic *ehv_pic;
	int coreint_flag = 1;

	np = of_find_compatible_node(NULL, NULL, "epapr,hv-pic");
	if (!np) {
		pr_err("ehv_pic_init: could not find epapr,hv-pic node\n");
		return;
	}

	/* external proxy (core int) delivery only if the DT says so */
	if (!of_find_property(np, "has-external-proxy", NULL))
		coreint_flag = 0;

	ehv_pic = kzalloc(sizeof(struct ehv_pic), GFP_KERNEL);
	if (!ehv_pic) {
		of_node_put(np);
		return;
	}

	/* the domain keeps a reference to np, so no of_node_put on success */
	ehv_pic->irqhost = irq_domain_add_linear(np, NR_EHV_PIC_INTS,
						 &ehv_pic_host_ops, ehv_pic);
	if (!ehv_pic->irqhost) {
		of_node_put(np);
		kfree(ehv_pic);
		return;
	}

	/* optional: map per-cpu MPIC registers for direct-EOI sources */
	np2 = of_find_compatible_node(NULL, NULL, "fsl,hv-mpic-per-cpu");
	if (np2) {
		mpic_percpu_base_vaddr = of_iomap(np2, 0);
		if (!mpic_percpu_base_vaddr)
			/* non-fatal: direct-EOI chips just won't be used */
			pr_err("ehv_pic_init: of_iomap failed\n");

		of_node_put(np2);
	}

	/* copy the template chip, then hook up affinity on our instance */
	ehv_pic->hc_irq = ehv_pic_irq_chip;
	ehv_pic->hc_irq.irq_set_affinity = ehv_pic_set_affinity;
	ehv_pic->coreint_flag = coreint_flag;

	global_ehv_pic = ehv_pic;
	irq_set_default_host(global_ehv_pic->irqhost);
}
302