/* sun4v_ivec.S: Sun4v interrupt vector handling.
 *
 * Copyright (C) 2006 <davem@davemloft.net>
 */

#include <asm/cpudata.h>
#include <asm/intr_queue.h>
#include <asm/pil.h>
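/* Overview: sun4v exposes four per-cpu interrupt queues (CPU mondo,
 * device mondo, resumable error and non-resumable error).  For each
 * queue the head and tail byte offsets are read and written through
 * the INTRQ_* registers using ASI_QUEUE, while the queue contents are
 * accessed by physical address.  Each handler below consumes one
 * 64-byte entry and advances the head pointer before dispatching or
 * returning from the trap.
 */
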
	.text
	.align	32

sun4v_cpu_mondo:
	/* Head offset in %g2, tail offset in %g4.
	 * If they are the same, no work.
	 */
	mov	INTRQ_CPU_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_CPU_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_cpu_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g4.  */
	ldxa	[%g0] ASI_SCRATCHPAD, %g4
	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4

	/* Get smp_processor_id() into %g3 */
	sethi	%hi(trap_block), %g5
	or	%g5, %lo(trap_block), %g5
	sub	%g4, %g5, %g3
	srlx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
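	/* The scratchpad register points at this cpu's fault-info area
	 * inside trap_block[] (hence the TRAP_PER_CPU_FAULT_INFO
	 * subtraction above), so the cpu number falls out of pointer
	 * arithmetic.  Roughly, in C:
	 *
	 *	cpu = ((unsigned long)tb - (unsigned long)&trap_block[0])
	 *			>> TRAP_BLOCK_SZ_SHIFT;
	 */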

	/* Increment cpu_mondo_counter[smp_processor_id()] */
	sethi	%hi(cpu_mondo_counter), %g5
	or	%g5, %lo(cpu_mondo_counter), %g5
	sllx	%g3, 3, %g3
	add	%g5, %g3, %g5
	ldx	[%g5], %g3
	add	%g3, 1, %g3
	stx	%g3, [%g5]
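	/* cpu_mondo_counter[] entries are 64-bit, hence the index is
	 * scaled by 8 (the sllx by 3 above).  Only this cpu touches its
	 * own slot, so a plain load/add/store is sufficient.
	 */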

	/* Get CPU mondo queue base phys address into %g7.  */
	ldx	[%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7

	/* Now get the cross-call arguments and handler PC, same
	 * layout as sun4u:
	 *
	 * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it
	 *                  high half is context arg to MMU flushes, into %g5
	 * 2nd 64-bit word: 64-bit arg, load into %g1
	 * 3rd 64-bit word: 64-bit arg, load into %g7
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g3
	add	%g2, 0x8, %g2
	srlx	%g3, 32, %g5
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	add	%g2, 0x8, %g2
	srl	%g3, 0, %g3
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g7
	add	%g2, 0x40 - 0x8 - 0x8, %g2
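	/* Queue entries are 64 bytes.  The first two loads each advance
	 * %g2 by 8, the third load reads at offset + 0x10, and the final
	 * add above skips the remainder of the entry, leaving %g2 at the
	 * next entry's offset (a net advance of 0x40).
	 */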

	/* Update queue head pointer.  */
	lduw	[%g4 + TRAP_PER_CPU_CPU_MONDO_QMASK], %g4
	and	%g2, %g4, %g2
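	/* The queue size is a power of two, so ANDing with the queue
	 * mask wraps the new head offset: head = (head + 0x40) & qmask.
	 */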

	mov	INTRQ_CPU_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	jmpl	%g3, %g0
	 nop

sun4v_cpu_mondo_queue_empty:
	retry

sun4v_dev_mondo:
	/* Head offset in %g2, tail offset in %g4.  */
	mov	INTRQ_DEVICE_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_DEVICE_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_dev_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g4.  */
	ldxa	[%g0] ASI_SCRATCHPAD, %g4
	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4

	/* Get DEV mondo queue base phys address into %g5.  */
	ldx	[%g4 + TRAP_PER_CPU_DEV_MONDO_PA], %g5

	/* Load IVEC into %g3.  */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	add	%g2, 0x40, %g2
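	/* Only the first 8-byte word of the 64-byte device mondo entry
	 * is consumed; the head offset is advanced past the whole entry
	 * above.
	 */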

	/* XXX There can be a full 64-byte block of data here.
	 * XXX This is how we can get at MSI vector data.
	 * XXX Currently we do not capture this, but when we do we'll
	 * XXX need to add a 64-byte storage area in the struct ino_bucket
	 * XXX or the struct irq_desc.
	 */

	/* Update queue head pointer, this frees up some registers.  */
	lduw	[%g4 + TRAP_PER_CPU_DEV_MONDO_QMASK], %g4
	and	%g2, %g4, %g2

	mov	INTRQ_DEVICE_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	TRAP_LOAD_IRQ_WORK_PA(%g1, %g4)

	/* For VIRQs, cookie is encoded as ~bucket_phys_addr  */
	brlz,pt %g3, 1f
	 xnor	%g3, %g0, %g4
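	/* A negative value is a VIRQ cookie: xnor with %g0 is a bitwise
	 * NOT, recovering the bucket physical address directly, and the
	 * branch skips the ivector_table lookup below.
	 */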

	/* Get __pa(&ivector_table[IVEC]) into %g4.  */
	sethi	%hi(ivector_table_pa), %g4
	ldx	[%g4 + %lo(ivector_table_pa)], %g4
	sllx	%g3, 4, %g3
	add	%g4, %g3, %g4
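	/* ivector_table[] entries are 16 bytes each, hence the << 4. */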

1:	ldx	[%g1], %g2
	stxa	%g2, [%g4] ASI_PHYS_USE_EC
	stx	%g4, [%g1]
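	/* Chain this bucket onto the per-cpu irq work list: %g1 points
	 * at the per-cpu list head (which holds a physical address) and
	 * %g4 is __pa(bucket).  The old head is written into the
	 * bucket's first word by physical address, then the head is
	 * updated to point at this bucket.  Roughly (names illustrative):
	 *
	 *	bucket->chain_pa = irq_work_head_pa;
	 *	irq_work_head_pa = __pa(bucket);
	 */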

	/* Signal the interrupt by setting (1 << pil) in %softint.  */
	wr	%g0, 1 << PIL_DEVICE_IRQ, %set_softint

sun4v_dev_mondo_queue_empty:
	retry

sun4v_res_mondo:
	/* Head offset in %g2, tail offset in %g4.  */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_RESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_res_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3.  */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get RES mondo queue base phys address into %g5.  */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5

	/* Get RES kernel buffer base phys address into %g7.  */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7

	/* If the first word is non-zero, queue is full.  */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_res_mondo_queue_full
	 nop
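	/* The first word of each kernel-buffer slot acts as a busy flag:
	 * the C error handler is expected to zero it once it has copied
	 * the entry out, so a non-zero value here means the previous
	 * entry has not been consumed yet and we take the overflow path.
	 */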

	lduw	[%g3 + TRAP_PER_CPU_RESUM_QMASK], %g4

	/* Remember this entry's offset in %g1.  */
	mov	%g2, %g1

	/* Copy 64-byte queue entry into kernel buffer.  */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2

	/* Update queue head pointer.  */
	and	%g2, %g4, %g2

	mov	INTRQ_RESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Disable interrupts and save register state so we can call
	 * C code.  The etrap handling will leave %g4 in %l4 for us
	 * when it's done.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	mov	%g1, %g4
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call		trace_hardirqs_off
	 nop
#endif
	/* Log the event.  */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_resum_error
	 mov	%l4, %o1

	/* Return from trap.  */
	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_res_mondo_queue_empty:
	retry

sun4v_res_mondo_queue_full:
	/* The queue is full, consolidate our damage by setting
	 * the head equal to the tail.  We'll just trap again otherwise.
	 * Call C code to log the event.
	 */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE
	membar	#Sync

	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call		trace_hardirqs_off
	 nop
#endif
	call	sun4v_resum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_nonres_mondo:
	/* Head offset in %g2, tail offset in %g4.  */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_NONRESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_nonres_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3.  */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get NONRES mondo queue base phys address into %g5.  */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5

	/* Get NONRES kernel buffer base phys address into %g7.  */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7

	/* If the first word is non-zero, queue is full.  */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_nonres_mondo_queue_full
	 nop

	lduw	[%g3 + TRAP_PER_CPU_NONRESUM_QMASK], %g4

	/* Remember this entry's offset in %g1.  */
	mov	%g2, %g1

	/* Copy 64-byte queue entry into kernel buffer.  */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2

	/* Update queue head pointer.  */
	and	%g2, %g4, %g2

	mov	INTRQ_NONRESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Disable interrupts and save register state so we can call
	 * C code.  The etrap handling will leave %g4 in %l4 for us
	 * when it's done.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	mov	%g1, %g4
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call		trace_hardirqs_off
	 nop
#endif
	/* Log the event.  */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_nonresum_error
	 mov	%l4, %o1

	/* Return from trap.  */
	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_nonres_mondo_queue_empty:
	retry

sun4v_nonres_mondo_queue_full:
	/* The queue is full, consolidate our damage by setting
	 * the head equal to the tail.  We'll just trap again otherwise.
	 * Call C code to log the event.
	 */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE
	membar	#Sync

	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call		trace_hardirqs_off
	 nop
#endif
	call	sun4v_nonresum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop