/*
 *  Support for C64x+ Megamodule Interrupt Controller
 *
 *  Copyright (C) 2010, 2011 Texas Instruments Incorporated
 *  Contributed by: Mark Salter <msalter@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <asm/soc.h>
#include <asm/megamod-pic.h>

#define NR_COMBINERS	4
#define NR_MUX_OUTPUTS  12

#define IRQ_UNMAPPED 0xffff

/*
 * Megamodule Interrupt Controller register layout
 */
struct megamod_regs {
	u32	evtflag[8];
	u32	evtset[8];
	u32	evtclr[8];
	u32	reserved0[8];
	u32	evtmask[8];
	u32	mevtflag[8];
	u32	expmask[8];
	u32	mexpflag[8];
	u32	intmux_unused;
	u32	intmux[7];
	u32	reserved1[8];
	u32	aegmux[2];
	u32	reserved2[14];
	u32	intxstat;
	u32	intxclr;
	u32	intdmask;
	u32	reserved3[13];
	u32	evtasrt;
};

struct megamod_pic {
	struct irq_domain *irqhost;
	struct megamod_regs __iomem *regs;
	raw_spinlock_t lock;

	/* hw mux mapping */
	unsigned int output_to_irq[NR_MUX_OUTPUTS];
};

static struct megamod_pic *mm_pic;

struct megamod_cascade_data {
	struct megamod_pic *pic;
	int index;
};

static struct megamod_cascade_data cascade_data[NR_COMBINERS];

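/*
 * Mask/unmask a megamodule event by setting/clearing its bit in the
 * EVTMASK register covering that event (32 events per register).
 */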
static void mask_megamod(struct irq_data *data)
{
	struct megamod_pic *pic = irq_data_get_irq_chip_data(data);
	irq_hw_number_t src = irqd_to_hwirq(data);
	u32 __iomem *evtmask = &pic->regs->evtmask[src / 32];

	raw_spin_lock(&pic->lock);
	soc_writel(soc_readl(evtmask) | (1 << (src & 31)), evtmask);
	raw_spin_unlock(&pic->lock);
}

static void unmask_megamod(struct irq_data *data)
{
	struct megamod_pic *pic = irq_data_get_irq_chip_data(data);
	irq_hw_number_t src = irqd_to_hwirq(data);
	u32 __iomem *evtmask = &pic->regs->evtmask[src / 32];

	raw_spin_lock(&pic->lock);
	soc_writel(soc_readl(evtmask) & ~(1 << (src & 31)), evtmask);
	raw_spin_unlock(&pic->lock);
}

static struct irq_chip megamod_chip = {
	.name		= "megamod",
	.irq_mask	= mask_megamod,
	.irq_unmask	= unmask_megamod,
};

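/*
 * Chained handler for a cascaded combined interrupt. Drain the combiner's
 * masked event flag register (MEVTFLAG), clearing each pending event and
 * dispatching it to its mapped virq until no events remain.
 */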
static void megamod_irq_cascade(struct irq_desc *desc)
{
	struct megamod_cascade_data *cascade;
	struct megamod_pic *pic;
	unsigned int irq;
	u32 events;
	int n, idx;

	cascade = irq_desc_get_handler_data(desc);

	pic = cascade->pic;
	idx = cascade->index;

	while ((events = soc_readl(&pic->regs->mevtflag[idx])) != 0) {
		n = __ffs(events);

		irq = irq_linear_revmap(pic->irqhost, idx * 32 + n);

		soc_writel(1 << n, &pic->regs->evtclr[idx]);

		generic_handle_irq(irq);
	}
}

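/*
 * irq_domain .map callback: wire a newly mapped hwirq to the megamodule
 * irq_chip with a level handler. Events already muxed directly to the
 * core controller are rejected here.
 */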
static int megamod_map(struct irq_domain *h, unsigned int virq,
		       irq_hw_number_t hw)
{
	struct megamod_pic *pic = h->host_data;
	int i;

	/* We shouldn't see a hwirq which is muxed to core controller */
	for (i = 0; i < NR_MUX_OUTPUTS; i++)
		if (pic->output_to_irq[i] == hw)
			return -1;

	irq_set_chip_data(virq, pic);
	irq_set_chip_and_handler(virq, &megamod_chip, handle_level_irq);

	/* Set default irq type */
	irq_set_irq_type(virq, IRQ_TYPE_NONE);

	return 0;
}

static const struct irq_domain_ops megamod_domain_ops = {
	.map	= megamod_map,
	.xlate	= irq_domain_xlate_onecell,
};

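/*
 * Route megamodule event "src" to core priority interrupt "output" by
 * programming the INTMUX registers. Each INTMUX register holds four 8-bit
 * source fields; e.g. output 9 lives in intmux[2], bits 15:8. An
 * out-of-range source marks the output as unmapped.
 */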
static void __init set_megamod_mux(struct megamod_pic *pic, int src, int output)
{
	int index, offset;
	u32 val;

	if (src < 0 || src >= (NR_COMBINERS * 32)) {
		pic->output_to_irq[output] = IRQ_UNMAPPED;
		return;
	}

	/* four mappings per mux register */
	index = output / 4;
	offset = (output & 3) * 8;

	val = soc_readl(&pic->regs->intmux[index]);
	val &= ~(0xff << offset);
	val |= src << offset;
	soc_writel(val, &pic->regs->intmux[index]);
}

/*
 * Parse the MUX mapping, if one exists.
 *
 * The MUX map is an array of up to 12 cells; one for each usable core priority
 * interrupt. The value of a given cell is the megamodule interrupt source
 * which is to be MUXed to the output corresponding to the cell's position
 * within the array. The first cell in the array corresponds to priority
 * 4 and the last (12th) cell corresponds to priority 15. The allowed
 * values are 4 - ((NR_COMBINERS * 32) - 1). Note that the combined interrupt
 * sources (0 - 3) are not allowed to be mapped through this property. They
 * are handled through the "interrupts" property. This allows us to use a
 * value of zero as a "do not map" placeholder.
 */
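/*
 * A hypothetical node using this property might contain, for example:
 *
 *	ti,c64x+megamod-pic-mux = < 0  0  0  0
 *				   32  0  0  0
 *				    0  0  0  0 >;
 *
 * which routes megamodule event 32 to core priority interrupt 8 (the
 * fifth cell, i.e. priority 4 + 4) and leaves all other outputs unmapped.
 */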
static void __init parse_priority_map(struct megamod_pic *pic,
				      int *mapping, int size)
{
	struct device_node *np = irq_domain_get_of_node(pic->irqhost);
	const __be32 *map;
	int i, maplen;
	u32 val;

	map = of_get_property(np, "ti,c64x+megamod-pic-mux", &maplen);
	if (map) {
		maplen /= 4;
		if (maplen > size)
			maplen = size;

		for (i = 0; i < maplen; i++) {
			val = be32_to_cpup(map);
			if (val && val >= 4)
				mapping[i] = val;
			++map;
		}
	}
}

static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
{
	struct megamod_pic *pic;
	int i, irq;
	int mapping[NR_MUX_OUTPUTS];

	pr_info("Initializing C64x+ Megamodule PIC\n");

	pic = kzalloc(sizeof(struct megamod_pic), GFP_KERNEL);
	if (!pic) {
		pr_err("%s: Could not alloc PIC structure.\n", np->full_name);
		return NULL;
	}

	pic->irqhost = irq_domain_add_linear(np, NR_COMBINERS * 32,
					     &megamod_domain_ops, pic);
	if (!pic->irqhost) {
		pr_err("%s: Could not alloc host.\n", np->full_name);
		goto error_free;
	}

	pic->irqhost->host_data = pic;

	raw_spin_lock_init(&pic->lock);

	pic->regs = of_iomap(np, 0);
	if (!pic->regs) {
		pr_err("%s: Could not map registers.\n", np->full_name);
		goto error_free;
	}

	/* Initialize MUX map */
	for (i = 0; i < ARRAY_SIZE(mapping); i++)
		mapping[i] = IRQ_UNMAPPED;

	parse_priority_map(pic, mapping, ARRAY_SIZE(mapping));

	/*
	 * We can have up to 12 interrupts cascading to the core controller.
	 * These cascades can be from the combined interrupt sources or for
	 * individual interrupt sources. The "interrupts" property only
	 * deals with the cascaded combined interrupts. The individual
	 * interrupts muxed to the core controller use the core controller
	 * as their interrupt parent.
	 */
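	/*
	 * As a hypothetical example, a PIC node whose interrupt parent is the
	 * core controller might specify:
	 *
	 *	interrupts = < 12 13 14 15 >;
	 *
	 * cascading combined events 0-3 to core priorities 12-15.
	 */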
	for (i = 0; i < NR_COMBINERS; i++) {
		struct irq_data *irq_data;
		irq_hw_number_t hwirq;

		irq = irq_of_parse_and_map(np, i);
		if (irq == NO_IRQ)
			continue;

		irq_data = irq_get_irq_data(irq);
		if (!irq_data) {
			pr_err("%s: combiner-%d no irq_data for virq %d!\n",
			       np->full_name, i, irq);
			continue;
		}

		hwirq = irq_data->hwirq;

		/*
		 * Check that device tree provided something in the range
		 * of the core priority interrupts (4 - 15).
		 */
		if (hwirq < 4 || hwirq >= NR_PRIORITY_IRQS) {
			pr_err("%s: combiner-%d core irq %ld out of range!\n",
			       np->full_name, i, hwirq);
			continue;
		}

		/* record the mapping */
		mapping[hwirq - 4] = i;

		pr_debug("%s: combiner-%d cascading to hwirq %ld\n",
			 np->full_name, i, hwirq);

		cascade_data[i].pic = pic;
		cascade_data[i].index = i;

		/* mask and clear all events in combiner */
		soc_writel(~0, &pic->regs->evtmask[i]);
		soc_writel(~0, &pic->regs->evtclr[i]);

		irq_set_chained_handler_and_data(irq, megamod_irq_cascade,
						 &cascade_data[i]);
	}

	/* Finally, set up the MUX registers */
	for (i = 0; i < NR_MUX_OUTPUTS; i++) {
		if (mapping[i] != IRQ_UNMAPPED) {
			pr_debug("%s: setting mux %d to priority %d\n",
				 np->full_name, mapping[i], i + 4);
			set_megamod_mux(pic, mapping[i], i);
		}
	}

	return pic;

error_free:
	kfree(pic);

	return NULL;
}

/*
 * Return next active event after ACK'ing it.
 * Return -1 if no events active.
 */
static int get_exception(void)
{
	int i, bit;
	u32 mask;

	for (i = 0; i < NR_COMBINERS; i++) {
		mask = soc_readl(&mm_pic->regs->mexpflag[i]);
		if (mask) {
			bit = __ffs(mask);
			soc_writel(1 << bit, &mm_pic->regs->evtclr[i]);
			return (i * 32) + bit;
		}
	}
	return -1;
}

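/* Manually assert a megamodule event by writing its number to EVTASRT. */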
static void assert_event(unsigned int val)
{
	soc_writel(val, &mm_pic->regs->evtasrt);
}

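/*
 * Find the megamodule PIC node in the device tree, initialize the PIC,
 * and install the soc_ops exception helpers.
 */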
void __init megamod_pic_init(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "ti,c64x+megamod-pic");
	if (!np)
		return;

	mm_pic = init_megamod_pic(np);
	of_node_put(np);

	soc_ops.get_exception = get_exception;
	soc_ops.assert_event = assert_event;

	return;
}