#undef DEBUG

#include <linux/bitmap.h>
#include <linux/init.h>

#include <asm/io.h>
#include <asm/gic.h>
#include <asm/gcmpregs.h>
#include <asm/mips-boards/maltaint.h>
#include <asm/irq.h>
#include <linux/hardirq.h>
#include <asm-generic/bitops/find.h>

static unsigned long _gic_base;
static unsigned int _irqbase, _mapsize, numvpes, numintrs;
static struct gic_intr_map *_intrmap;

static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static struct gic_pending_regs pending_regs[NR_CPUS];
static struct gic_intrmask_regs intrmask_regs[NR_CPUS];

/* Can GIC handle b2b writes to wedge register? */
#define gic_wedgeb2bok 0
#if gic_wedgeb2bok == 0
static DEFINE_SPINLOCK(gic_wedgeb2b_lock);
#endif

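/*
 * Send an inter-processor interrupt by writing the interrupt number,
 * with bit 31 set (the "set" operation), to the shared WEDGE register.
 * When the GIC cannot take back-to-back wedge writes (gic_wedgeb2bok
 * is 0), the write is serialised with a spinlock and followed by a
 * dummy read of GIC_SH_CONFIG to ensure it has completed.
 */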
void gic_send_ipi(unsigned int intr)
{
#if gic_wedgeb2bok == 0
	unsigned long flags;
#endif
	pr_debug("CPU%d: %s status %08x\n", smp_processor_id(), __func__,
		 read_c0_status());
	if (!gic_wedgeb2bok)
		spin_lock_irqsave(&gic_wedgeb2b_lock, flags);
	GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), 0x80000000 | intr);
	if (!gic_wedgeb2bok) {
		(void) GIC_REG(SHARED, GIC_SH_CONFIG);
		spin_unlock_irqrestore(&gic_wedgeb2b_lock, flags);
	}
}

/* This is Malta specific and needs to be exported */
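/*
 * Route each VPE's local timer and performance counter interrupts to a
 * fixed local pin. One VPE is programmed at a time through the "other
 * VPE" addressing window: write the VPE number to GIC_VPE_OTHER_ADDR,
 * then program that VPE's local map registers, but only if GIC_VPE_CTL
 * reports the interrupt as locally routable.
 */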
static void vpe_local_setup(unsigned int numvpes)
{
	int i;
	unsigned long timer_interrupt = 5, perf_interrupt = 5;
	unsigned int vpe_ctl;

	/*
	 * Set up the default timer and performance counter interrupts
	 * for all VPEs
	 */
	for (i = 0; i < numvpes; i++) {
		GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);

		/* Are interrupts locally routable? */
		GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_CTL), vpe_ctl);
		if (vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK)
			GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
				 GIC_MAP_TO_PIN_MSK | timer_interrupt);

		if (vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK)
			GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
				 GIC_MAP_TO_PIN_MSK | perf_interrupt);
	}
}

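/*
 * Work out which shared interrupt should be serviced next: copy the
 * shared pending and mask registers into this CPU's bitmaps, AND them
 * with the per-CPU software mask, and return the first bit that is
 * still set (GIC_NUM_INTRS if nothing is pending).
 */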
unsigned int gic_get_int(void)
{
	unsigned int i;
	unsigned long *pending, *intrmask, *pcpu_mask;
	unsigned long *pending_abs, *intrmask_abs;

	/* Get per-cpu bitmaps */
	pending = pending_regs[smp_processor_id()].pending;
	intrmask = intrmask_regs[smp_processor_id()].intrmask;
	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

	pending_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
							 GIC_SH_PEND_31_0_OFS);
	intrmask_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
							  GIC_SH_MASK_31_0_OFS);

	for (i = 0; i < BITS_TO_LONGS(GIC_NUM_INTRS); i++) {
		GICREAD(*pending_abs, pending[i]);
		GICREAD(*intrmask_abs, intrmask[i]);
		pending_abs++;
		intrmask_abs++;
	}

	bitmap_and(pending, pending, intrmask, GIC_NUM_INTRS);
	bitmap_and(pending, pending, pcpu_mask, GIC_NUM_INTRS);

	i = find_first_bit(pending, GIC_NUM_INTRS);

	pr_debug("CPU%d: %s pend=%d\n", smp_processor_id(), __func__, i);

	return i;
}

static unsigned int gic_irq_startup(unsigned int irq)
{
	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
	irq -= _irqbase;
	/* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
	GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_SMASK_31_0_OFS + (irq / 32))),
		 1 << (irq % 32));
	return 0;
}

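/*
 * Acknowledge an interrupt: mask it via the reset-mask register and,
 * for edge-triggered sources, also clear the latched edge by writing
 * the interrupt number (with bit 31 clear) to the WEDGE register,
 * using the same back-to-back write workaround as gic_send_ipi().
 */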
static void gic_irq_ack(unsigned int irq)
{
#if gic_wedgeb2bok == 0
	unsigned long flags;
#endif
	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
	irq -= _irqbase;
	GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_RMASK_31_0_OFS + (irq / 32))),
		 1 << (irq % 32));

	if (_intrmap[irq].trigtype == GIC_TRIG_EDGE) {
		if (!gic_wedgeb2bok)
			spin_lock_irqsave(&gic_wedgeb2b_lock, flags);
		GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq);
		if (!gic_wedgeb2bok) {
			(void) GIC_REG(SHARED, GIC_SH_CONFIG);
			spin_unlock_irqrestore(&gic_wedgeb2b_lock, flags);
		}
	}
}

static void gic_mask_irq(unsigned int irq)
{
	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
	irq -= _irqbase;
	/* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
	GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_RMASK_31_0_OFS + (irq / 32))),
		 1 << (irq % 32));
}

static void gic_unmask_irq(unsigned int irq)
{
	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
	irq -= _irqbase;
	/* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
	GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_SMASK_31_0_OFS + (irq / 32))),
		 1 << (irq % 32));
}

#ifdef CONFIG_SMP

static DEFINE_SPINLOCK(gic_lock);

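/*
 * Re-target an interrupt at run time: route it to the first online CPU
 * in the requested mask, record the new owner in _intrmap, and update
 * the per-CPU software masks consulted by gic_get_int().
 */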
static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	cpumask_t	tmp = CPU_MASK_NONE;
	unsigned long	flags;
	int		i;

	pr_debug("%s called\n", __func__);
	irq -= _irqbase;

	cpumask_and(&tmp, cpumask, cpu_online_mask);
	if (cpus_empty(tmp))
		return;

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));

	/*
	 * FIXME: assumption that _intrmap is ordered and has no holes
	 */

	/* Update the intr_map */
	_intrmap[irq].cpunum = first_cpu(tmp);

	/* Update the pcpu_masks */
	for (i = 0; i < NR_CPUS; i++)
		clear_bit(irq, pcpu_masks[i].pcpu_mask);
	set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);

	irq_desc[irq].affinity = *cpumask;
	spin_unlock_irqrestore(&gic_lock, flags);
}
#endif

static struct irq_chip gic_irq_controller = {
	.name		=	"MIPS GIC",
	.startup	=	gic_irq_startup,
	.ack		=	gic_irq_ack,
	.mask		=	gic_mask_irq,
	.mask_ack	=	gic_mask_irq,
	.unmask		=	gic_unmask_irq,
	.eoi		=	gic_unmask_irq,
#ifdef CONFIG_SMP
	.set_affinity	=	gic_set_affinity,
#endif
};

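/*
 * Program a single shared interrupt source: map it to an output pin
 * (NMIs are routed to every CPU), bind it to its target VPE, set its
 * polarity and trigger type, and initialise its mask.
 */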
static void __init setup_intr(unsigned int intr, unsigned int cpu,
	unsigned int pin, unsigned int polarity, unsigned int trigtype)
{
	/* Setup Intr to Pin mapping */
	if (pin & GIC_MAP_TO_NMI_MSK) {
		GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin);
		/* FIXME: hack to route NMI to all cpu's */
		for (cpu = 0; cpu < NR_CPUS; cpu += 32) {
			GICWRITE(GIC_REG_ADDR(SHARED,
					  GIC_SH_MAP_TO_VPE_REG_OFF(intr, cpu)),
				 0xffffffff);
		}
	} else {
		GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)),
			 GIC_MAP_TO_PIN_MSK | pin);
		/* Setup Intr to CPU mapping */
		GIC_SH_MAP_TO_VPE_SMASK(intr, cpu);
	}

	/* Setup Intr Polarity */
	GIC_SET_POLARITY(intr, polarity);

	/* Setup Intr Trigger Type */
	GIC_SET_TRIGGER(intr, trigtype);

	/* Init Intr Masks */
	GIC_SET_INTR_MASK(intr, 0);
}

static void __init gic_basic_init(void)
{
	unsigned int i, cpu;

	/* Setup defaults */
	for (i = 0; i < GIC_NUM_INTRS; i++) {
		GIC_SET_POLARITY(i, GIC_POL_POS);
		GIC_SET_TRIGGER(i, GIC_TRIG_LEVEL);
		GIC_SET_INTR_MASK(i, 0);
	}

	/* Setup specifics */
	for (i = 0; i < _mapsize; i++) {
		cpu = _intrmap[i].cpunum;
		if (cpu == X)
			continue;

		setup_intr(_intrmap[i].intrnum,
				_intrmap[i].cpunum,
				_intrmap[i].pin,
				_intrmap[i].polarity,
				_intrmap[i].trigtype);
		/* Initialise per-cpu Interrupt software masks */
		if (_intrmap[i].ipiflag)
			set_bit(_intrmap[i].intrnum, pcpu_masks[cpu].pcpu_mask);
	}

	vpe_local_setup(numvpes);

	for (i = _irqbase; i < (_irqbase + numintrs); i++)
		set_irq_chip(i, &gic_irq_controller);
}

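/*
 * Map the GIC register space and record the interrupt map supplied by
 * the platform, then read GIC_SH_CONFIG to discover how many shared
 * interrupts the GIC provides (the NUMINTRS field encodes the count in
 * units of 8, minus one) and how many VPEs it serves, before applying
 * the static configuration in gic_basic_init().
 */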
void __init gic_init(unsigned long gic_base_addr,
		     unsigned long gic_addrspace_size,
		     struct gic_intr_map *intr_map, unsigned int intr_map_size,
		     unsigned int irqbase)
{
	unsigned int gicconfig;

	_gic_base = (unsigned long) ioremap_nocache(gic_base_addr,
						    gic_addrspace_size);
	_irqbase = irqbase;
	_intrmap = intr_map;
	_mapsize = intr_map_size;

	GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
	numintrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
		   GIC_SH_CONFIG_NUMINTRS_SHF;
	numintrs = ((numintrs + 1) * 8);

	numvpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
		  GIC_SH_CONFIG_NUMVPES_SHF;

	pr_debug("%s called\n", __func__);

	gic_basic_init();
}