1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License.  See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Main entry point for the guest, exception handling.
7 *
8 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
10 */
11
12#include <asm/asm.h>
13#include <asm/asmmacro.h>
14#include <asm/regdef.h>
15#include <asm/mipsregs.h>
16#include <asm/stackframe.h>
17#include <asm/asm-offsets.h>
18
19#define _C_LABEL(x)     x
20#define MIPSX(name)     mips32_ ## name
21#define CALLFRAME_SIZ   32
22
23/*
24 * VECTOR
25 *  exception vector entrypoint
26 */
27#define VECTOR(x, regmask)      \
28    .ent    _C_LABEL(x),0;      \
29    EXPORT(x);
30
31#define VECTOR_END(x)      \
32    EXPORT(x);
33
34/* Overload, Danger Will Robinson!! */
35#define PT_HOST_ASID        PT_BVADDR
36#define PT_HOST_USERLOCAL   PT_EPC
37
38#define CP0_DDATA_LO        $28,3
39
40/* Resume Flags */
41#define RESUME_FLAG_HOST        (1<<1)  /* Resume host? */
42
43#define RESUME_GUEST            0
44#define RESUME_HOST             RESUME_FLAG_HOST
45
/*
 * __kvm_mips_vcpu_run: entry point to the guest.
 * a0: run  (pointer to the vcpu's kvm_run structure)
 * a1: vcpu (pointer to the kvm_vcpu structure)
 *
 * Saves the host context (GPRs, hi/lo, Status, ASID, DDATA_LO) into a
 * pt_regs-sized frame carved below the current kernel stack, records
 * the host sp/gp in the vcpu, then switches Status/EBASE/EntryHi for
 * user-mode guest execution and enters the guest with eret.
 */
	.set	noreorder
	.set	noat

FEXPORT(__kvm_mips_vcpu_run)
	/*
	 * k0/k1 not being used in host kernel context, so they are free
	 * scratch here.  k1 = base of a pt_regs-sized host save frame
	 * just below the current stack pointer.
	 */
	INT_ADDIU k1, sp, -PT_SIZE
	LONG_S	$0, PT_R0(k1)
	LONG_S	$1, PT_R1(k1)
	LONG_S	$2, PT_R2(k1)
	LONG_S	$3, PT_R3(k1)

	LONG_S	$4, PT_R4(k1)
	LONG_S	$5, PT_R5(k1)
	LONG_S	$6, PT_R6(k1)
	LONG_S	$7, PT_R7(k1)

	LONG_S	$8,  PT_R8(k1)
	LONG_S	$9,  PT_R9(k1)
	LONG_S	$10, PT_R10(k1)
	LONG_S	$11, PT_R11(k1)
	LONG_S	$12, PT_R12(k1)
	LONG_S	$13, PT_R13(k1)
	LONG_S	$14, PT_R14(k1)
	LONG_S	$15, PT_R15(k1)
	LONG_S	$16, PT_R16(k1)
	LONG_S	$17, PT_R17(k1)

	LONG_S	$18, PT_R18(k1)
	LONG_S	$19, PT_R19(k1)
	LONG_S	$20, PT_R20(k1)
	LONG_S	$21, PT_R21(k1)
	LONG_S	$22, PT_R22(k1)
	LONG_S	$23, PT_R23(k1)
	LONG_S	$24, PT_R24(k1)
	LONG_S	$25, PT_R25(k1)

	/*
	 * XXXKYMA k0/k1 ($26/$27) not saved, not being used if we got
	 * here through an ioctl()
	 */

	LONG_S	$28, PT_R28(k1)
	LONG_S	$29, PT_R29(k1)
	LONG_S	$30, PT_R30(k1)
	LONG_S	$31, PT_R31(k1)

	/* Save hi/lo */
	mflo	v0
	LONG_S	v0, PT_LO(k1)
	mfhi	v1
	LONG_S	v1, PT_HI(k1)

	/* Save host status (v0 is reused below to rebuild the IM bits) */
	mfc0	v0, CP0_STATUS
	LONG_S	v0, PT_STATUS(k1)

	/*
	 * Save host ASID, shove it into the BVADDR location (BVADDR is
	 * unused on this path -- see the PT_HOST_ASID overload above)
	 */
	mfc0	v1, CP0_ENTRYHI
	andi	v1, 0xff
	LONG_S	v1, PT_HOST_ASID(k1)

	/* Save DDATA_LO, will be used to store pointer to vcpu */
	mfc0	v1, CP0_DDATA_LO
	LONG_S	v1, PT_HOST_USERLOCAL(k1)

	/* DDATA_LO has pointer to vcpu, so the exit handler can find it */
	mtc0	a1, CP0_DDATA_LO

	/* Offset into vcpu->arch; k1 = &vcpu->arch from here on */
	INT_ADDIU k1, a1, VCPU_HOST_ARCH

	/*
	 * Save the host stack to VCPU, used for exception processing
	 * when we exit from the Guest
	 */
	LONG_S	sp, VCPU_HOST_STACK(k1)

	/* Save the kernel gp as well */
	LONG_S	gp, VCPU_HOST_GP(k1)

	/*
	 * Setup status register for running the guest in UM, interrupts
	 * are disabled.  BEV is set so exceptions use the boot vectors
	 * while EBASE is being switched over below.
	 */
	li	k0, (ST0_EXL | KSU_USER | ST0_BEV)
	mtc0	k0, CP0_STATUS
	ehb				/* clear the CP0 write hazard */

	/* load up the new EBASE (guest exception vector base) */
	LONG_L	k0, VCPU_GUEST_EBASE(k1)
	mtc0	k0, CP0_EBASE

	/*
	 * Now that the new EBASE has been loaded, unset BEV, set
	 * interrupt mask as it was (saved host IM bits in v0) but make
	 * sure that timer interrupts are enabled
	 */
	li	k0, (ST0_EXL | KSU_USER | ST0_IE)
	andi	v0, v0, ST0_IM
	or	k0, k0, v0
	mtc0	k0, CP0_STATUS
	ehb

	/* Set Guest EPC: the eret below will resume at the saved guest PC */
	LONG_L	t0, VCPU_PC(k1)
	mtc0	t0, CP0_EPC

FEXPORT(__kvm_mips_load_asid)
	/*
	 * Set the ASID for the Guest: inspect the guest CP0 Status
	 * (KSU/ERL/EXL) to decide whether the guest is in kernel or user
	 * mode, and select the matching per-cpu ASID array.
	 */
	PTR_L	t0, VCPU_COP0(k1)
	LONG_L	t0, COP0_STATUS(t0)
	andi	t0, KSU_USER | ST0_ERL | ST0_EXL
	xori	t0, KSU_USER
	bnez	t0, 1f		/* If kernel */
	 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
	INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID    /* else user */
1:
	/* t1: contains the base of the ASID array, need to get the cpu id */
	LONG_L	t2, TI_CPU($28)             /* smp_processor_id */
	INT_SLL	t2, t2, 2                   /* x4 */
	REG_ADDU t3, t1, t2
	LONG_L	k0, (t3)
	andi	k0, k0, 0xff
	mtc0	k0, CP0_ENTRYHI
	ehb

	/* Disable RDHWR access while the guest runs */
	mtc0	zero, CP0_HWRENA

	/* Now load up the Guest Context from VCPU */
	LONG_L	$1, VCPU_R1(k1)
	LONG_L	$2, VCPU_R2(k1)
	LONG_L	$3, VCPU_R3(k1)

	LONG_L	$4, VCPU_R4(k1)
	LONG_L	$5, VCPU_R5(k1)
	LONG_L	$6, VCPU_R6(k1)
	LONG_L	$7, VCPU_R7(k1)

	LONG_L	$8, VCPU_R8(k1)
	LONG_L	$9, VCPU_R9(k1)
	LONG_L	$10, VCPU_R10(k1)
	LONG_L	$11, VCPU_R11(k1)
	LONG_L	$12, VCPU_R12(k1)
	LONG_L	$13, VCPU_R13(k1)
	LONG_L	$14, VCPU_R14(k1)
	LONG_L	$15, VCPU_R15(k1)
	LONG_L	$16, VCPU_R16(k1)
	LONG_L	$17, VCPU_R17(k1)
	LONG_L	$18, VCPU_R18(k1)
	LONG_L	$19, VCPU_R19(k1)
	LONG_L	$20, VCPU_R20(k1)
	LONG_L	$21, VCPU_R21(k1)
	LONG_L	$22, VCPU_R22(k1)
	LONG_L	$23, VCPU_R23(k1)
	LONG_L	$24, VCPU_R24(k1)
	LONG_L	$25, VCPU_R25(k1)

	/* k0/k1 loaded up later */

	LONG_L	$28, VCPU_R28(k1)
	LONG_L	$29, VCPU_R29(k1)
	LONG_L	$30, VCPU_R30(k1)
	LONG_L	$31, VCPU_R31(k1)

	/* Restore hi/lo */
	LONG_L	k0, VCPU_LO(k1)
	mtlo	k0

	LONG_L	k0, VCPU_HI(k1)
	mthi	k0

FEXPORT(__kvm_mips_load_k0k1)
	/*
	 * Restore the guest's k0/k1 registers last; no scratch register
	 * remains usable after this point.
	 */
	LONG_L	k0, VCPU_R26(k1)
	LONG_L	k1, VCPU_R27(k1)

	/* Jump to guest */
	eret
230
VECTOR(MIPSX(exception), unknown)
/*
 * Find out what mode we came from and jump to the proper handler.
 *
 * This stub runs at the guest EBASE with only k0/k1 usable: guest k0
 * is parked in ErrorEPC, guest k1 is stored at EBASE + 0x3000, then
 * control transfers to the real handler installed at EBASE + 0x2000.
 */
	mtc0	k0, CP0_ERROREPC	#01: Save guest k0
	ehb				#02: clear the CP0 write hazard

	mfc0	k0, CP0_EBASE		#02: Get EBASE
	INT_SRL	k0, k0, 10		#03: Get rid of CPUNum
	INT_SLL	k0, k0, 10		#04
	LONG_S	k1, 0x3000(k0)		#05: Save k1 @ offset 0x3000
	INT_ADDIU k0, k0, 0x2000	#06: Exception handler is
					#    installed @ offset 0x2000
	j	k0			#07: jump to the function
	 nop				#08: branch delay slot
VECTOR_END(MIPSX(exceptionEnd))
.end MIPSX(exception)
246
/*
 * Generic Guest exception handler.  We end up here when the guest
 * does something that causes a trap to kernel mode.
 *
 * Saves the complete guest context into the VCPU, restores just enough
 * host state (Status, EBASE, gp, stack, DDATA_LO, HWREna) to run C
 * code, then calls kvm_mips_handle_exit(run, vcpu).  The resume flags
 * returned in v0 select between returning to the host and resuming
 * the guest.
 */
NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
	/* Get the VCPU pointer from DDATA_LO; k1 = &vcpu->arch */
	mfc0	k1, CP0_DDATA_LO
	INT_ADDIU k1, k1, VCPU_HOST_ARCH

	/* Start saving Guest context to VCPU */
	LONG_S	$0, VCPU_R0(k1)
	LONG_S	$1, VCPU_R1(k1)
	LONG_S	$2, VCPU_R2(k1)
	LONG_S	$3, VCPU_R3(k1)
	LONG_S	$4, VCPU_R4(k1)
	LONG_S	$5, VCPU_R5(k1)
	LONG_S	$6, VCPU_R6(k1)
	LONG_S	$7, VCPU_R7(k1)
	LONG_S	$8, VCPU_R8(k1)
	LONG_S	$9, VCPU_R9(k1)
	LONG_S	$10, VCPU_R10(k1)
	LONG_S	$11, VCPU_R11(k1)
	LONG_S	$12, VCPU_R12(k1)
	LONG_S	$13, VCPU_R13(k1)
	LONG_S	$14, VCPU_R14(k1)
	LONG_S	$15, VCPU_R15(k1)
	LONG_S	$16, VCPU_R16(k1)
	LONG_S	$17, VCPU_R17(k1)
	LONG_S	$18, VCPU_R18(k1)
	LONG_S	$19, VCPU_R19(k1)
	LONG_S	$20, VCPU_R20(k1)
	LONG_S	$21, VCPU_R21(k1)
	LONG_S	$22, VCPU_R22(k1)
	LONG_S	$23, VCPU_R23(k1)
	LONG_S	$24, VCPU_R24(k1)
	LONG_S	$25, VCPU_R25(k1)

	/* Guest k0/k1 saved later (k0 is in ErrorEPC, k1 at EBASE+0x3000) */

	LONG_S	$28, VCPU_R28(k1)
	LONG_S	$29, VCPU_R29(k1)
	LONG_S	$30, VCPU_R30(k1)
	LONG_S	$31, VCPU_R31(k1)

	/* We need to save hi/lo and restore them on the way out */
	mfhi	t0
	LONG_S	t0, VCPU_HI(k1)

	mflo	t0
	LONG_S	t0, VCPU_LO(k1)

	/* Finally save guest k0 to VCPU (the vector parked it in ErrorEPC) */
	mfc0	t0, CP0_ERROREPC
	LONG_S	t0, VCPU_R26(k1)

	/*
	 * Get GUEST k1 and save it in VCPU; the vector stub stored it at
	 * EBASE + 0x3000.
	 * NOTE(review): the ~0x2ff mask clears EBASE bits 0-7 and 9 but
	 * keeps bit 8, unlike the >>10/<<10 strip used at exception
	 * entry -- confirm this is equivalent for the supported CPUNum
	 * range.
	 */
	PTR_LI	t1, ~0x2ff
	mfc0	t0, CP0_EBASE
	and	t0, t0, t1
	LONG_L	t0, 0x3000(t0)
	LONG_S	t0, VCPU_R27(k1)

	/* Now that context has been saved, we can use other registers */

	/* Restore vcpu; keep a copy in callee-saved s1 across the C call */
	mfc0	a1, CP0_DDATA_LO
	move	s1, a1

	/* Restore run (vcpu->run) */
	LONG_L	a0, VCPU_RUN(a1)
	/* Save pointer to run in s0, will be saved by the compiler */
	move	s0, a0

	/*
	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to
	 * process the exception
	 */
	mfc0	k0,CP0_EPC
	LONG_S	k0, VCPU_PC(k1)

	mfc0	k0, CP0_BADVADDR
	LONG_S	k0, VCPU_HOST_CP0_BADVADDR(k1)

	mfc0	k0, CP0_CAUSE
	LONG_S	k0, VCPU_HOST_CP0_CAUSE(k1)

	mfc0	k0, CP0_ENTRYHI
	LONG_S	k0, VCPU_HOST_ENTRYHI(k1)

	/* Now restore the host state just enough to run the handlers */

	/* Switch EBASE to the one used by Linux */
	/* Set BEV first so exceptions use the boot vectors during the switch */
	mfc0	v0, CP0_STATUS

	.set	at			/* ST0_BEV immediate needs $at */
	or	k0, v0, ST0_BEV
	.set	noat

	mtc0	k0, CP0_STATUS
	ehb

	LONG_L	k0, VCPU_HOST_EBASE(k1)
	mtc0	k0,CP0_EBASE

	/*
	 * If FPU is enabled, save FCR31 and clear it so that later ctc1's don't
	 * trigger FPE for pending exceptions.
	 */
	.set	at
	and	v1, v0, ST0_CU1
	beqz	v1, 1f
	 nop
	.set	push
	SET_HARDFLOAT
	cfc1	t0, fcr31
	sw	t0, VCPU_FCR31(k1)
	ctc1	zero,fcr31
	.set	pop
	.set	noat
1:

#ifdef CONFIG_CPU_HAS_MSA
	/*
	 * If MSA is enabled, save MSACSR and clear it so that later
	 * instructions don't trigger MSAFPE for pending exceptions.
	 */
	mfc0	t0, CP0_CONFIG3
	ext	t0, t0, 28, 1 /* MIPS_CONF3_MSAP */
	beqz	t0, 1f
	 nop
	mfc0	t0, CP0_CONFIG5
	ext	t0, t0, 27, 1 /* MIPS_CONF5_MSAEN */
	beqz	t0, 1f
	 nop
	_cfcmsa	t0, MSA_CSR
	sw	t0, VCPU_MSA_CSR(k1)
	_ctcmsa	MSA_CSR, zero
1:
#endif

	/*
	 * Now that the new EBASE has been loaded, drop EXL/KSU_USER/IE
	 * (back to kernel mode, interrupts off) and grant CP0 access
	 */
	.set	at
	and	v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
	or	v0, v0, ST0_CU0
	.set	noat
	mtc0	v0, CP0_STATUS
	ehb

	/* Load up host GP */
	LONG_L	gp, VCPU_HOST_GP(k1)

	/* Need a stack before we can jump to "C" */
	LONG_L	sp, VCPU_HOST_STACK(k1)

	/* Saved host state: point sp at the pt_regs frame built at entry */
	INT_ADDIU sp, sp, -PT_SIZE

	/*
	 * XXXKYMA do we need to load the host ASID, maybe not because the
	 * kernel entries are marked GLOBAL, need to verify
	 */

	/* Restore host DDATA_LO */
	LONG_L	k0, PT_HOST_USERLOCAL(sp)
	mtc0	k0, CP0_DDATA_LO

	/* Restore RDHWR access (hw regs 0-3 plus bit 29, UserLocal) */
	PTR_LI	k0, 0x2000000F
	mtc0	k0, CP0_HWRENA

	/* Jump to handler */
FEXPORT(__kvm_mips_jump_to_handler)
	/*
	 * XXXKYMA: not sure if this is safe, how large is the stack??
	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
	 * with this in the kernel
	 */
	PTR_LA	t9, kvm_mips_handle_exit
	jalr.hb	t9
	 INT_ADDIU sp, sp, -CALLFRAME_SIZ           /* BD Slot */

	/* Return from handler Make sure interrupts are disabled */
	di
	ehb

	/*
	 * XXXKYMA: k0/k1 could have been blown away if we processed
	 * an exception while we were handling the exception from the
	 * guest, reload k1
	 */

	move	k1, s1
	INT_ADDIU k1, k1, VCPU_HOST_ARCH

	/*
	 * Check return value, should tell us if we are returning to the
	 * host (handle I/O etc)or resuming the guest
	 */
	andi	t0, v0, RESUME_HOST
	bnez	t0, __kvm_mips_return_to_host
	 nop
449
__kvm_mips_return_to_guest:
	/* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
	mtc0	s1, CP0_DDATA_LO

	/* Load up the Guest EBASE to minimize the window where BEV is set */
	LONG_L	t0, VCPU_GUEST_EBASE(k1)

	/* Switch EBASE back to the one used by KVM */
	mfc0	v1, CP0_STATUS
	.set	at			/* ST0_BEV immediate needs $at */
	or	k0, v1, ST0_BEV
	.set	noat
	mtc0	k0, CP0_STATUS
	ehb
	mtc0	t0, CP0_EBASE

	/* Setup status register for running guest in UM */
	.set	at
	or	v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
	and	v1, v1, ~(ST0_CU0 | ST0_MX)
	.set	noat
	mtc0	v1, CP0_STATUS
	ehb

	/* Set Guest EPC so eret resumes at the (possibly updated) guest PC */
	LONG_L	t0, VCPU_PC(k1)
	mtc0	t0, CP0_EPC

	/* Set the ASID for the Guest: kernel vs user from guest CP0 Status */
	PTR_L	t0, VCPU_COP0(k1)
	LONG_L	t0, COP0_STATUS(t0)
	andi	t0, KSU_USER | ST0_ERL | ST0_EXL
	xori	t0, KSU_USER
	bnez	t0, 1f		/* If kernel */
	 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
	INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID    /* else user */
1:
	/* t1: contains the base of the ASID array, need to get the cpu id  */
	LONG_L	t2, TI_CPU($28)		/* smp_processor_id */
	INT_SLL	t2, t2, 2		/* x4 */
	REG_ADDU t3, t1, t2
	LONG_L	k0, (t3)
	andi	k0, k0, 0xff
	mtc0	k0,CP0_ENTRYHI
	ehb

	/* Disable RDHWR access while the guest runs */
	mtc0    zero,  CP0_HWRENA

	/* load the guest context from VCPU and return */
	LONG_L	$0, VCPU_R0(k1)
	LONG_L	$1, VCPU_R1(k1)
	LONG_L	$2, VCPU_R2(k1)
	LONG_L	$3, VCPU_R3(k1)
	LONG_L	$4, VCPU_R4(k1)
	LONG_L	$5, VCPU_R5(k1)
	LONG_L	$6, VCPU_R6(k1)
	LONG_L	$7, VCPU_R7(k1)
	LONG_L	$8, VCPU_R8(k1)
	LONG_L	$9, VCPU_R9(k1)
	LONG_L	$10, VCPU_R10(k1)
	LONG_L	$11, VCPU_R11(k1)
	LONG_L	$12, VCPU_R12(k1)
	LONG_L	$13, VCPU_R13(k1)
	LONG_L	$14, VCPU_R14(k1)
	LONG_L	$15, VCPU_R15(k1)
	LONG_L	$16, VCPU_R16(k1)
	LONG_L	$17, VCPU_R17(k1)
	LONG_L	$18, VCPU_R18(k1)
	LONG_L	$19, VCPU_R19(k1)
	LONG_L	$20, VCPU_R20(k1)
	LONG_L	$21, VCPU_R21(k1)
	LONG_L	$22, VCPU_R22(k1)
	LONG_L	$23, VCPU_R23(k1)
	LONG_L	$24, VCPU_R24(k1)
	LONG_L	$25, VCPU_R25(k1)

	/* k0/k1 loaded later */
	LONG_L	$28, VCPU_R28(k1)
	LONG_L	$29, VCPU_R29(k1)
	LONG_L	$30, VCPU_R30(k1)
	LONG_L	$31, VCPU_R31(k1)

FEXPORT(__kvm_mips_skip_guest_restore)
	/* Restore hi/lo, then guest k0/k1 (no scratch usable after that) */
	LONG_L	k0, VCPU_HI(k1)
	mthi	k0

	LONG_L	k0, VCPU_LO(k1)
	mtlo	k0

	LONG_L	k0, VCPU_R26(k1)
	LONG_L	k1, VCPU_R27(k1)

	/* Back to the guest */
	eret
544
__kvm_mips_return_to_host:
	/* EBASE is already pointing to Linux */
	LONG_L	k1, VCPU_HOST_STACK(k1)
	INT_ADDIU k1,k1, -PT_SIZE

	/* Restore host DDATA_LO */
	LONG_L	k0, PT_HOST_USERLOCAL(k1)
	mtc0	k0, CP0_DDATA_LO

	/*
	 * Restore host ASID.
	 * NOTE(review): this load goes through sp, which at this point is
	 * host_stack - PT_SIZE - CALLFRAME_SIZ (sp was lowered in the BD
	 * slot of the jalr above), while the surrounding restores use
	 * k1 = host_stack - PT_SIZE.  The two bases differ by
	 * CALLFRAME_SIZ, so this may read the wrong slot; verify.
	 */
	LONG_L	k0, PT_HOST_ASID(sp)
	andi	k0, 0xff
	mtc0	k0,CP0_ENTRYHI
	ehb

	/* Load context saved on the host stack */
	LONG_L	$0, PT_R0(k1)
	LONG_L	$1, PT_R1(k1)

	/*
	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
	 * to recover the err code
	 */
	INT_SRA	k0, v0, 2
	move	$2, k0

	LONG_L	$3, PT_R3(k1)
	LONG_L	$4, PT_R4(k1)
	LONG_L	$5, PT_R5(k1)
	LONG_L	$6, PT_R6(k1)
	LONG_L	$7, PT_R7(k1)
	LONG_L	$8, PT_R8(k1)
	LONG_L	$9, PT_R9(k1)
	LONG_L	$10, PT_R10(k1)
	LONG_L	$11, PT_R11(k1)
	LONG_L	$12, PT_R12(k1)
	LONG_L	$13, PT_R13(k1)
	LONG_L	$14, PT_R14(k1)
	LONG_L	$15, PT_R15(k1)
	LONG_L	$16, PT_R16(k1)
	LONG_L	$17, PT_R17(k1)
	LONG_L	$18, PT_R18(k1)
	LONG_L	$19, PT_R19(k1)
	LONG_L	$20, PT_R20(k1)
	LONG_L	$21, PT_R21(k1)
	LONG_L	$22, PT_R22(k1)
	LONG_L	$23, PT_R23(k1)
	LONG_L	$24, PT_R24(k1)
	LONG_L	$25, PT_R25(k1)

	/* Host k0/k1 were not saved (see __kvm_mips_vcpu_run) */

	LONG_L	$28, PT_R28(k1)
	LONG_L	$29, PT_R29(k1)
	LONG_L	$30, PT_R30(k1)

	LONG_L	k0, PT_HI(k1)
	mthi	k0

	LONG_L	k0, PT_LO(k1)
	mtlo	k0

	/* Restore RDHWR access (hw regs 0-3 plus bit 29, UserLocal) */
	PTR_LI	k0, 0x2000000F
	mtc0	k0,  CP0_HWRENA

	/* Restore RA, which is the address we will return to */
	LONG_L  ra, PT_R31(k1)
	j       ra
	 nop

VECTOR_END(MIPSX(GuestExceptionEnd))
.end MIPSX(GuestException)
618
/*
 * MIPSX(exceptions): handler dispatch table with one entry per CP0
 * Cause.ExcCode value (0-31).  Every exception code is currently
 * routed to the generic guest exception handler; the .rept emits 32
 * identical .word entries, byte-identical to listing them out.
 */
MIPSX(exceptions):
	.rept	32
	.word	_C_LABEL(MIPSX(GuestException))
	.endr
655