/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Main entry point for the guest, exception handling.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/asm-offsets.h>

#define _C_LABEL(x)     x
#define MIPSX(name)     mips32_ ## name
#define CALLFRAME_SIZ   32

/*
 * VECTOR
 *  exception vector entrypoint
 */
#define VECTOR(x, regmask)      \
    .ent    _C_LABEL(x),0;      \
    EXPORT(x);

#define VECTOR_END(x)      \
    EXPORT(x);

/*
 * Overload, Danger Will Robinson!!
 * The BVADDR and EPC slots of the host pt_regs frame are reused to stash
 * the host ASID and the host DDATA_LO (userlocal) values across guest entry.
 */
#define PT_HOST_ASID        PT_BVADDR
#define PT_HOST_USERLOCAL   PT_EPC

/* CP0 register selects not covered by mipsregs.h, as "$reg,sel" pairs */
#define CP0_DDATA_LO        $28,3
#define CP0_EBASE           $15,1

#define CP0_INTCTL          $12,1
#define CP0_SRSCTL          $12,2
#define CP0_SRSMAP          $12,3
#define CP0_HWRENA          $7,0

/* Resume Flags */
#define RESUME_FLAG_HOST        (1<<1)  /* Resume host? */

#define RESUME_GUEST            0
#define RESUME_HOST             RESUME_FLAG_HOST

/*
 * __kvm_mips_vcpu_run: entry point to the guest
 * a0: run
 * a1: vcpu
 */
	.set	noreorder
	.set	noat
/*
 * Guest entry: save the host register context into a pt_regs frame carved
 * below the current sp, switch Status/EBASE/ASID over to the guest, restore
 * the guest register file from the VCPU and eret into the guest.
 * k0/k1 are free here: the host kernel never relies on them across an ioctl.
 */
FEXPORT(__kvm_mips_vcpu_run)
	/* k0/k1 not being used in host kernel context */
	INT_ADDIU k1, sp, -PT_SIZE
	LONG_S	$0, PT_R0(k1)
	LONG_S	$1, PT_R1(k1)
	LONG_S	$2, PT_R2(k1)
	LONG_S	$3, PT_R3(k1)

	LONG_S	$4, PT_R4(k1)
	LONG_S	$5, PT_R5(k1)
	LONG_S	$6, PT_R6(k1)
	LONG_S	$7, PT_R7(k1)

	LONG_S	$8,  PT_R8(k1)
	LONG_S	$9,  PT_R9(k1)
	LONG_S	$10, PT_R10(k1)
	LONG_S	$11, PT_R11(k1)
	LONG_S	$12, PT_R12(k1)
	LONG_S	$13, PT_R13(k1)
	LONG_S	$14, PT_R14(k1)
	LONG_S	$15, PT_R15(k1)
	LONG_S	$16, PT_R16(k1)
	LONG_S	$17, PT_R17(k1)

	LONG_S	$18, PT_R18(k1)
	LONG_S	$19, PT_R19(k1)
	LONG_S	$20, PT_R20(k1)
	LONG_S	$21, PT_R21(k1)
	LONG_S	$22, PT_R22(k1)
	LONG_S	$23, PT_R23(k1)
	LONG_S	$24, PT_R24(k1)
	LONG_S	$25, PT_R25(k1)

	/*
	 * XXXKYMA k0/k1 not saved, not being used if we got here through
	 * an ioctl()
	 */

	LONG_S	$28, PT_R28(k1)
	LONG_S	$29, PT_R29(k1)
	LONG_S	$30, PT_R30(k1)
	LONG_S	$31, PT_R31(k1)

	/* Save hi/lo */
	mflo	v0
	LONG_S	v0, PT_LO(k1)
	mfhi	v1
	LONG_S	v1, PT_HI(k1)

	/* Save host status */
	mfc0	v0, CP0_STATUS
	LONG_S	v0, PT_STATUS(k1)

	/* Save host ASID, shove it into the BVADDR location */
	mfc0	v1, CP0_ENTRYHI
	andi	v1, 0xff
	LONG_S	v1, PT_HOST_ASID(k1)

	/* Save DDATA_LO, will be used to store pointer to vcpu */
	mfc0	v1, CP0_DDATA_LO
	LONG_S	v1, PT_HOST_USERLOCAL(k1)

	/* DDATA_LO has pointer to vcpu */
	mtc0	a1, CP0_DDATA_LO

	/* Offset into vcpu->arch */
	INT_ADDIU k1, a1, VCPU_HOST_ARCH

	/*
	 * Save the host stack to VCPU, used for exception processing
	 * when we exit from the Guest
	 */
	LONG_S	sp, VCPU_HOST_STACK(k1)

	/* Save the kernel gp as well */
	LONG_S	gp, VCPU_HOST_GP(k1)

	/*
	 * Setup status register for running the guest in UM, interrupts
	 * are disabled
	 */
	li	k0, (ST0_EXL | KSU_USER | ST0_BEV)
	mtc0	k0, CP0_STATUS
	ehb

	/* load up the new EBASE */
	LONG_L	k0, VCPU_GUEST_EBASE(k1)
	mtc0	k0, CP0_EBASE

	/*
	 * Now that the new EBASE has been loaded, unset BEV, set
	 * interrupt mask as it was but make sure that timer interrupts
	 * are enabled
	 */
	li	k0, (ST0_EXL | KSU_USER | ST0_IE)
	andi	v0, v0, ST0_IM
	or	k0, k0, v0
	mtc0	k0, CP0_STATUS
	ehb

	/* Set Guest EPC */
	LONG_L	t0, VCPU_PC(k1)
	mtc0	t0, CP0_EPC

FEXPORT(__kvm_mips_load_asid)
	/* Set the ASID for the Guest Kernel */
	INT_SLL	t0, t0, 1	/* with kseg0 @ 0x40000000, kernel */
			        /* addresses shift to 0x80000000 */
	bltz	t0, 1f		/* If kernel */
	 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
	INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID    /* else user */
1:
	/* t1: contains the base of the ASID array, need to get the cpu id */
	LONG_L	t2, TI_CPU($28)             /* smp_processor_id */
	INT_SLL	t2, t2, 2                   /* x4 */
	REG_ADDU t3, t1, t2
	LONG_L	k0, (t3)
	andi	k0, k0, 0xff
	mtc0	k0, CP0_ENTRYHI
	ehb

	/* Disable RDHWR access */
	mtc0	zero, CP0_HWRENA

	/* Now load up the Guest Context from VCPU */
	LONG_L	$1, VCPU_R1(k1)
	LONG_L	$2, VCPU_R2(k1)
	LONG_L	$3, VCPU_R3(k1)

	LONG_L	$4, VCPU_R4(k1)
	LONG_L	$5, VCPU_R5(k1)
	LONG_L	$6, VCPU_R6(k1)
	LONG_L	$7, VCPU_R7(k1)

	LONG_L	$8, VCPU_R8(k1)
	LONG_L	$9, VCPU_R9(k1)
	LONG_L	$10, VCPU_R10(k1)
	LONG_L	$11, VCPU_R11(k1)
	LONG_L	$12, VCPU_R12(k1)
	LONG_L	$13, VCPU_R13(k1)
	LONG_L	$14, VCPU_R14(k1)
	LONG_L	$15, VCPU_R15(k1)
	LONG_L	$16, VCPU_R16(k1)
	LONG_L	$17, VCPU_R17(k1)
	LONG_L	$18, VCPU_R18(k1)
	LONG_L	$19, VCPU_R19(k1)
	LONG_L	$20, VCPU_R20(k1)
	LONG_L	$21, VCPU_R21(k1)
	LONG_L	$22, VCPU_R22(k1)
	LONG_L	$23, VCPU_R23(k1)
	LONG_L	$24, VCPU_R24(k1)
	LONG_L	$25, VCPU_R25(k1)

	/* k0/k1 loaded up later */

	LONG_L	$28, VCPU_R28(k1)
	LONG_L	$29, VCPU_R29(k1)
	LONG_L	$30, VCPU_R30(k1)
	LONG_L	$31, VCPU_R31(k1)

	/* Restore hi/lo */
	LONG_L	k0, VCPU_LO(k1)
	mtlo	k0

	LONG_L	k0, VCPU_HI(k1)
	mthi	k0

FEXPORT(__kvm_mips_load_k0k1)
	/* Restore the guest's k0/k1 registers */
	LONG_L	k0, VCPU_R26(k1)
	LONG_L	k1, VCPU_R27(k1)

	/* Jump to guest */
	eret
EXPORT(__kvm_mips_vcpu_run_end)
235
VECTOR(MIPSX(exception), unknown)
/* Find out what mode we came from and jump to the proper handler. */
	mtc0	k0, CP0_ERROREPC	#01: Save guest k0
	ehb				#02:

	mfc0	k0, CP0_EBASE		#02: Get EBASE
	INT_SRL	k0, k0, 10		#03: Get rid of CPUNum
	INT_SLL	k0, k0, 10		#04
	LONG_S	k1, 0x3000(k0)		#05: Save k1 @ offset 0x3000
	INT_ADDIU k0, k0, 0x2000	#06: Exception handler is
					#    installed @ offset 0x2000
	j	k0			#07: jump to the function
	 nop				#08: branch delay slot
VECTOR_END(MIPSX(exceptionEnd))
.end MIPSX(exception)
251
252/*
253 * Generic Guest exception handler. We end up here when the guest
254 * does something that causes a trap to kernel mode.
255 */
/*
 * Generic Guest exception handler. We end up here when the guest
 * does something that causes a trap to kernel mode.
 *
 * On entry k0 is the guest k0 (stashed in ErrorEPC by the vector above)
 * and the guest k1 is saved at EBASE+0x3000.  The VCPU pointer is
 * recovered from CP0 DDATA_LO; s0/s1 (callee-saved across the C call)
 * carry the run and vcpu pointers respectively.
 */
NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
	/* Get the VCPU pointer from DDATA_LO */
	mfc0	k1, CP0_DDATA_LO
	INT_ADDIU k1, k1, VCPU_HOST_ARCH

	/* Start saving Guest context to VCPU */
	LONG_S	$0, VCPU_R0(k1)
	LONG_S	$1, VCPU_R1(k1)
	LONG_S	$2, VCPU_R2(k1)
	LONG_S	$3, VCPU_R3(k1)
	LONG_S	$4, VCPU_R4(k1)
	LONG_S	$5, VCPU_R5(k1)
	LONG_S	$6, VCPU_R6(k1)
	LONG_S	$7, VCPU_R7(k1)
	LONG_S	$8, VCPU_R8(k1)
	LONG_S	$9, VCPU_R9(k1)
	LONG_S	$10, VCPU_R10(k1)
	LONG_S	$11, VCPU_R11(k1)
	LONG_S	$12, VCPU_R12(k1)
	LONG_S	$13, VCPU_R13(k1)
	LONG_S	$14, VCPU_R14(k1)
	LONG_S	$15, VCPU_R15(k1)
	LONG_S	$16, VCPU_R16(k1)
	LONG_S	$17, VCPU_R17(k1)
	LONG_S	$18, VCPU_R18(k1)
	LONG_S	$19, VCPU_R19(k1)
	LONG_S	$20, VCPU_R20(k1)
	LONG_S	$21, VCPU_R21(k1)
	LONG_S	$22, VCPU_R22(k1)
	LONG_S	$23, VCPU_R23(k1)
	LONG_S	$24, VCPU_R24(k1)
	LONG_S	$25, VCPU_R25(k1)

	/* Guest k0/k1 saved later */

	LONG_S	$28, VCPU_R28(k1)
	LONG_S	$29, VCPU_R29(k1)
	LONG_S	$30, VCPU_R30(k1)
	LONG_S	$31, VCPU_R31(k1)

	/* We need to save hi/lo and restore them on the way out */
	mfhi	t0
	LONG_S	t0, VCPU_HI(k1)

	mflo	t0
	LONG_S	t0, VCPU_LO(k1)

	/* Finally save guest k0/k1 to VCPU */
	mfc0	t0, CP0_ERROREPC
	LONG_S	t0, VCPU_R26(k1)

	/* Get GUEST k1 and save it in VCPU */
	PTR_LI	t1, ~0x2ff
	mfc0	t0, CP0_EBASE
	and	t0, t0, t1
	LONG_L	t0, 0x3000(t0)
	LONG_S	t0, VCPU_R27(k1)

	/* Now that context has been saved, we can use other registers */

	/* Restore vcpu */
	mfc0	a1, CP0_DDATA_LO
	move	s1, a1

	/* Restore run (vcpu->run) */
	LONG_L	a0, VCPU_RUN(a1)
	/* Save pointer to run in s0, will be saved by the compiler */
	move	s0, a0

	/*
	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to
	 * process the exception
	 */
	mfc0	k0, CP0_EPC
	LONG_S	k0, VCPU_PC(k1)

	mfc0	k0, CP0_BADVADDR
	LONG_S	k0, VCPU_HOST_CP0_BADVADDR(k1)

	mfc0	k0, CP0_CAUSE
	LONG_S	k0, VCPU_HOST_CP0_CAUSE(k1)

	mfc0	k0, CP0_ENTRYHI
	LONG_S	k0, VCPU_HOST_ENTRYHI(k1)

	/* Now restore the host state just enough to run the handlers */

	/* Switch EBASE to the one used by Linux */
	/* load up the host EBASE */
	mfc0	v0, CP0_STATUS

	.set	at
	or	k0, v0, ST0_BEV
	.set	noat

	mtc0	k0, CP0_STATUS
	ehb

	LONG_L	k0, VCPU_HOST_EBASE(k1)
	mtc0	k0, CP0_EBASE

	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
	.set	at
	and	v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
	or	v0, v0, ST0_CU0
	.set	noat
	mtc0	v0, CP0_STATUS
	ehb

	/* Load up host GP */
	LONG_L	gp, VCPU_HOST_GP(k1)

	/* Need a stack before we can jump to "C" */
	LONG_L	sp, VCPU_HOST_STACK(k1)

	/* Saved host state */
	INT_ADDIU sp, sp, -PT_SIZE

	/*
	 * XXXKYMA do we need to load the host ASID, maybe not because the
	 * kernel entries are marked GLOBAL, need to verify
	 */

	/* Restore host DDATA_LO */
	LONG_L	k0, PT_HOST_USERLOCAL(sp)
	mtc0	k0, CP0_DDATA_LO

	/* Restore RDHWR access */
	PTR_LI	k0, 0x2000000F
	mtc0	k0, CP0_HWRENA

	/* Jump to handler */
FEXPORT(__kvm_mips_jump_to_handler)
	/*
	 * XXXKYMA: not sure if this is safe, how large is the stack??
	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
	 * with this in the kernel
	 */
	PTR_LA	t9, kvm_mips_handle_exit
	jalr.hb	t9
	 INT_ADDIU sp, sp, -CALLFRAME_SIZ           /* BD Slot */

	/* Return from handler Make sure interrupts are disabled */
	di
	ehb

	/*
	 * XXXKYMA: k0/k1 could have been blown away if we processed
	 * an exception while we were handling the exception from the
	 * guest, reload k1
	 */

	move	k1, s1
	INT_ADDIU k1, k1, VCPU_HOST_ARCH

	/*
	 * Check return value, should tell us if we are returning to the
	 * host (handle I/O etc.) or resuming the guest
	 */
	andi	t0, v0, RESUME_HOST
	bnez	t0, __kvm_mips_return_to_host
	 nop

__kvm_mips_return_to_guest:
	/* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
	mtc0	s1, CP0_DDATA_LO

	/* Load up the Guest EBASE to minimize the window where BEV is set */
	LONG_L	t0, VCPU_GUEST_EBASE(k1)

	/* Switch EBASE back to the one used by KVM */
	mfc0	v1, CP0_STATUS
	.set	at
	or	k0, v1, ST0_BEV
	.set	noat
	mtc0	k0, CP0_STATUS
	ehb
	mtc0	t0, CP0_EBASE

	/* Setup status register for running guest in UM */
	.set	at
	or	v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
	and	v1, v1, ~(ST0_CU0 | ST0_MX)
	.set	noat
	mtc0	v1, CP0_STATUS
	ehb

	/* Set Guest EPC */
	LONG_L	t0, VCPU_PC(k1)
	mtc0	t0, CP0_EPC

	/* Set the ASID for the Guest Kernel */
	INT_SLL	t0, t0, 1	/* with kseg0 @ 0x40000000, kernel */
				/* addresses shift to 0x80000000 */
	bltz	t0, 1f		/* If kernel */
	 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
	INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID    /* else user */
1:
	/* t1: contains the base of the ASID array, need to get the cpu id  */
	LONG_L	t2, TI_CPU($28)		/* smp_processor_id */
	INT_SLL	t2, t2, 2		/* x4 */
	REG_ADDU t3, t1, t2
	LONG_L	k0, (t3)
	andi	k0, k0, 0xff
	mtc0	k0, CP0_ENTRYHI
	ehb

	/* Disable RDHWR access */
	mtc0	zero, CP0_HWRENA

	/* load the guest context from VCPU and return */
	LONG_L	$0, VCPU_R0(k1)
	LONG_L	$1, VCPU_R1(k1)
	LONG_L	$2, VCPU_R2(k1)
	LONG_L	$3, VCPU_R3(k1)
	LONG_L	$4, VCPU_R4(k1)
	LONG_L	$5, VCPU_R5(k1)
	LONG_L	$6, VCPU_R6(k1)
	LONG_L	$7, VCPU_R7(k1)
	LONG_L	$8, VCPU_R8(k1)
	LONG_L	$9, VCPU_R9(k1)
	LONG_L	$10, VCPU_R10(k1)
	LONG_L	$11, VCPU_R11(k1)
	LONG_L	$12, VCPU_R12(k1)
	LONG_L	$13, VCPU_R13(k1)
	LONG_L	$14, VCPU_R14(k1)
	LONG_L	$15, VCPU_R15(k1)
	LONG_L	$16, VCPU_R16(k1)
	LONG_L	$17, VCPU_R17(k1)
	LONG_L	$18, VCPU_R18(k1)
	LONG_L	$19, VCPU_R19(k1)
	LONG_L	$20, VCPU_R20(k1)
	LONG_L	$21, VCPU_R21(k1)
	LONG_L	$22, VCPU_R22(k1)
	LONG_L	$23, VCPU_R23(k1)
	LONG_L	$24, VCPU_R24(k1)
	LONG_L	$25, VCPU_R25(k1)

	/* k0/k1 loaded later */
	LONG_L	$28, VCPU_R28(k1)
	LONG_L	$29, VCPU_R29(k1)
	LONG_L	$30, VCPU_R30(k1)
	LONG_L	$31, VCPU_R31(k1)

FEXPORT(__kvm_mips_skip_guest_restore)
	LONG_L	k0, VCPU_HI(k1)
	mthi	k0

	LONG_L	k0, VCPU_LO(k1)
	mtlo	k0

	LONG_L	k0, VCPU_R26(k1)
	LONG_L	k1, VCPU_R27(k1)

	eret

__kvm_mips_return_to_host:
	/* EBASE is already pointing to Linux */
	LONG_L	k1, VCPU_HOST_STACK(k1)
	INT_ADDIU k1, k1, -PT_SIZE

	/* Restore host DDATA_LO */
	LONG_L	k0, PT_HOST_USERLOCAL(k1)
	mtc0	k0, CP0_DDATA_LO

	/* Restore host ASID (sp == host pt_regs frame here) */
	LONG_L	k0, PT_HOST_ASID(sp)
	andi	k0, 0xff
	mtc0	k0, CP0_ENTRYHI
	ehb

	/* Load context saved on the host stack */
	LONG_L	$0, PT_R0(k1)
	LONG_L	$1, PT_R1(k1)

	/*
	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
	 * to recover the err code
	 */
	INT_SRA	k0, v0, 2
	move	$2, k0

	LONG_L	$3, PT_R3(k1)
	LONG_L	$4, PT_R4(k1)
	LONG_L	$5, PT_R5(k1)
	LONG_L	$6, PT_R6(k1)
	LONG_L	$7, PT_R7(k1)
	LONG_L	$8, PT_R8(k1)
	LONG_L	$9, PT_R9(k1)
	LONG_L	$10, PT_R10(k1)
	LONG_L	$11, PT_R11(k1)
	LONG_L	$12, PT_R12(k1)
	LONG_L	$13, PT_R13(k1)
	LONG_L	$14, PT_R14(k1)
	LONG_L	$15, PT_R15(k1)
	LONG_L	$16, PT_R16(k1)
	LONG_L	$17, PT_R17(k1)
	LONG_L	$18, PT_R18(k1)
	LONG_L	$19, PT_R19(k1)
	LONG_L	$20, PT_R20(k1)
	LONG_L	$21, PT_R21(k1)
	LONG_L	$22, PT_R22(k1)
	LONG_L	$23, PT_R23(k1)
	LONG_L	$24, PT_R24(k1)
	LONG_L	$25, PT_R25(k1)

	/* Host k0/k1 were not saved */

	LONG_L	$28, PT_R28(k1)
	LONG_L	$29, PT_R29(k1)
	LONG_L	$30, PT_R30(k1)

	LONG_L	k0, PT_HI(k1)
	mthi	k0

	LONG_L	k0, PT_LO(k1)
	mtlo	k0

	/* Restore RDHWR access */
	PTR_LI	k0, 0x2000000F
	mtc0	k0, CP0_HWRENA

	/* Restore RA, which is the address we will return to */
	LONG_L	ra, PT_R31(k1)
	j	ra
	 nop

VECTOR_END(MIPSX(GuestExceptionEnd))
.end MIPSX(GuestException)
585
MIPSX(exceptions):
	####
	##### The exception handlers.
	#####
	.word _C_LABEL(MIPSX(GuestException))	#  0
	.word _C_LABEL(MIPSX(GuestException))	#  1
	.word _C_LABEL(MIPSX(GuestException))	#  2
	.word _C_LABEL(MIPSX(GuestException))	#  3
	.word _C_LABEL(MIPSX(GuestException))	#  4
	.word _C_LABEL(MIPSX(GuestException))	#  5
	.word _C_LABEL(MIPSX(GuestException))	#  6
	.word _C_LABEL(MIPSX(GuestException))	#  7
	.word _C_LABEL(MIPSX(GuestException))	#  8
	.word _C_LABEL(MIPSX(GuestException))	#  9
	.word _C_LABEL(MIPSX(GuestException))	# 10
	.word _C_LABEL(MIPSX(GuestException))	# 11
	.word _C_LABEL(MIPSX(GuestException))	# 12
	.word _C_LABEL(MIPSX(GuestException))	# 13
	.word _C_LABEL(MIPSX(GuestException))	# 14
	.word _C_LABEL(MIPSX(GuestException))	# 15
	.word _C_LABEL(MIPSX(GuestException))	# 16
	.word _C_LABEL(MIPSX(GuestException))	# 17
	.word _C_LABEL(MIPSX(GuestException))	# 18
	.word _C_LABEL(MIPSX(GuestException))	# 19
	.word _C_LABEL(MIPSX(GuestException))	# 20
	.word _C_LABEL(MIPSX(GuestException))	# 21
	.word _C_LABEL(MIPSX(GuestException))	# 22
	.word _C_LABEL(MIPSX(GuestException))	# 23
	.word _C_LABEL(MIPSX(GuestException))	# 24
	.word _C_LABEL(MIPSX(GuestException))	# 25
	.word _C_LABEL(MIPSX(GuestException))	# 26
	.word _C_LABEL(MIPSX(GuestException))	# 27
	.word _C_LABEL(MIPSX(GuestException))	# 28
	.word _C_LABEL(MIPSX(GuestException))	# 29
	.word _C_LABEL(MIPSX(GuestException))	# 30
	.word _C_LABEL(MIPSX(GuestException))	# 31
622