/*-
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI: locore.s,v 1.36.2.15 1999/08/23 22:34:41 cp Exp
 */
/*-
 * Copyright (c) 2001 Jake Burkholder.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/sparc64/sparc64/exception.S 96207 2002-05-08 04:14:16Z jake $
 */

#include "opt_ddb.h"

#include <machine/asi.h>
#include <machine/asmacros.h>
#include <machine/ktr.h>
#include <machine/pstate.h>
#include <machine/trap.h>
#include <machine/tstate.h>
#include <machine/wstate.h>

#include "assym.s"

	.register %g2,#ignore
	.register %g3,#ignore
	.register %g6,#ignore
	.register %g7,#ignore

/*
 * Atomically set the reference bit in a tte.
 */
#define	TTE_SET_BIT(r1, r2, r3, bit) \
	add	r1, TTE_DATA, r1 ; \
	ldx	[r1], r2 ; \
9:	or	r2, bit, r3 ; \
	casxa	[r1] ASI_N, r2, r3 ; \
	cmp	r2, r3 ; \
	bne,pn	%xcc, 9b ; \
	 mov	r3, r2

#define	TTE_SET_REF(r1, r2, r3)		TTE_SET_BIT(r1, r2, r3, TD_REF)
#define	TTE_SET_W(r1, r2, r3)		TTE_SET_BIT(r1, r2, r3, TD_W)
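
/*
 * For illustration, TTE_SET_REF(%g1, %g2, %g3) expands to roughly:
 *
 *	add	%g1, TTE_DATA, %g1	! point %g1 at the tte data field
 *	ldx	[%g1], %g2		! load the current data
 * 9:	or	%g2, TD_REF, %g3	! set the bit in a copy
 *	casxa	[%g1] ASI_N, %g2, %g3	! store iff memory still holds %g2
 *	cmp	%g2, %g3		! did the cas succeed?
 *	bne,pn	%xcc, 9b		! no, retry with the value that
 *	 mov	%g3, %g2		! casxa returned
 */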

/*
 * Macros for spilling and filling live windows.
 *
 * NOTE: These macros use exactly 16 instructions, and it is assumed that the
 * handler will not use more than 24 instructions total, to leave room for
 * resume vectors which occupy the last 8 instructions.
 */

#define	SPILL(storer, base, size, asi) \
	storer	%l0, [base + (0 * size)] asi ; \
	storer	%l1, [base + (1 * size)] asi ; \
	storer	%l2, [base + (2 * size)] asi ; \
	storer	%l3, [base + (3 * size)] asi ; \
	storer	%l4, [base + (4 * size)] asi ; \
	storer	%l5, [base + (5 * size)] asi ; \
	storer	%l6, [base + (6 * size)] asi ; \
	storer	%l7, [base + (7 * size)] asi ; \
	storer	%i0, [base + (8 * size)] asi ; \
	storer	%i1, [base + (9 * size)] asi ; \
	storer	%i2, [base + (10 * size)] asi ; \
	storer	%i3, [base + (11 * size)] asi ; \
	storer	%i4, [base + (12 * size)] asi ; \
	storer	%i5, [base + (13 * size)] asi ; \
	storer	%i6, [base + (14 * size)] asi ; \
	storer	%i7, [base + (15 * size)] asi

#define	FILL(loader, base, size, asi) \
	loader	[base + (0 * size)] asi, %l0 ; \
	loader	[base + (1 * size)] asi, %l1 ; \
	loader	[base + (2 * size)] asi, %l2 ; \
	loader	[base + (3 * size)] asi, %l3 ; \
	loader	[base + (4 * size)] asi, %l4 ; \
	loader	[base + (5 * size)] asi, %l5 ; \
	loader	[base + (6 * size)] asi, %l6 ; \
	loader	[base + (7 * size)] asi, %l7 ; \
	loader	[base + (8 * size)] asi, %i0 ; \
	loader	[base + (9 * size)] asi, %i1 ; \
	loader	[base + (10 * size)] asi, %i2 ; \
	loader	[base + (11 * size)] asi, %i3 ; \
	loader	[base + (12 * size)] asi, %i4 ; \
	loader	[base + (13 * size)] asi, %i5 ; \
	loader	[base + (14 * size)] asi, %i6 ; \
	loader	[base + (15 * size)] asi, %i7
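
/*
 * For example, SPILL(stxa, %sp + SPOFF, 8, %asi) stores the window's eight
 * locals and eight ins as 64-bit words to the V9 stack (biased by SPOFF),
 * while SPILL(stwa, %sp, 4, %asi) stores them as 32-bit words to an unbiased
 * stack for 32-bit processes.
 */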

#define	ERRATUM50(reg)	mov reg, reg

#define	KSTACK_SLOP	1024

/*
 * Sanity check the kernel stack and bail out if it's wrong: the stack pointer
 * must be properly aligned and must lie within the current thread's kernel
 * stack, above the KSTACK_SLOP red zone.
 * XXX: doesn't handle being on the panic stack.
 */
#define	KSTACK_CHECK \
	dec	16, ASP_REG ; \
	stx	%g1, [ASP_REG + 0] ; \
	stx	%g2, [ASP_REG + 8] ; \
	add	%sp, SPOFF, %g1 ; \
	andcc	%g1, (1 << PTR_SHIFT) - 1, %g0 ; \
	bnz,a	%xcc, tl1_kstack_fault ; \
	 inc	16, ASP_REG ; \
	ldx	[PCPU(CURTHREAD)], %g2 ; \
	ldx	[%g2 + TD_KSTACK], %g2 ; \
	add	%g2, KSTACK_SLOP, %g2 ; \
	subcc	%g1, %g2, %g1 ; \
	ble,a	%xcc, tl1_kstack_fault ; \
	 inc	16, ASP_REG ; \
	set	KSTACK_PAGES * PAGE_SIZE, %g2 ; \
	cmp	%g1, %g2 ; \
	bgt,a	%xcc, tl1_kstack_fault ; \
	 inc	16, ASP_REG ; \
	ldx	[ASP_REG + 8], %g2 ; \
	ldx	[ASP_REG + 0], %g1 ; \
	inc	16, ASP_REG

ENTRY(tl1_kstack_fault)
	rdpr	%tl, %g1
	cmp	%g1, 3
	beq	%xcc, 1f
	 nop
	blt	%xcc, 2f
	 nop
	sir

1:
#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl1_kstack_fault: tl=%#lx tpc=%#lx tnpc=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	rdpr	%tl, %g2
	stx	%g2, [%g1 + KTR_PARM1]
	rdpr	%tpc, %g2
	stx	%g2, [%g1 + KTR_PARM2]
	rdpr	%tnpc, %g2
	stx	%g2, [%g1 + KTR_PARM3]
9:
#endif
	wrpr	%g0, 2, %tl

2:
#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP,
	    "tl1_kstack_fault: sp=%#lx ks=%#lx cr=%#lx cs=%#lx ow=%#lx ws=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	add	%sp, SPOFF, %g2
	stx	%g2, [%g1 + KTR_PARM1]
	ldx	[PCPU(CURTHREAD)], %g2
	ldx	[%g2 + TD_KSTACK], %g2
	stx	%g2, [%g1 + KTR_PARM2]
	rdpr	%canrestore, %g2
	stx	%g2, [%g1 + KTR_PARM3]
	rdpr	%cansave, %g2
	stx	%g2, [%g1 + KTR_PARM4]
	rdpr	%otherwin, %g2
	stx	%g2, [%g1 + KTR_PARM5]
	rdpr	%wstate, %g2
	stx	%g2, [%g1 + KTR_PARM6]
9:
#endif

	wrpr	%g0, 0, %canrestore
	wrpr	%g0, 6, %cansave
	wrpr	%g0, 0, %otherwin
	wrpr	%g0, WSTATE_KERNEL, %wstate

	sub	ASP_REG, SPOFF + CCFSZ, %sp
	clr	%fp

	b	%xcc, tl1_trap
	 mov	T_KSTACK_FAULT | T_KERNEL, %o0
END(tl1_kstack_fault)

/*
 * Magic to resume from a spill or fill trap.  If we get an alignment or an
 * mmu fault during a spill or a fill, this macro will detect the fault and
 * resume at a set instruction offset in the trap handler.
 *
 * To check if the previous trap was a spill/fill we convert the trapped pc
 * to a trap type and verify that it is in the range of spill/fill vectors.
 * The spill/fill vectors are types 0x80-0xff and 0x280-0x2ff; masking off the
 * tl bit allows us to detect both ranges with one test.
 *
 * This is:
 *	0x80 <= (((%tpc - %tba) >> 5) & ~0x200) < 0x100
 *
 * To calculate the new pc we take advantage of the xor feature of wrpr.
 * Forcing all the low bits of the trapped pc on, we can produce any offset
 * into the spill/fill vector.  The size of a spill/fill trap vector is 0x80.
 *
 *	0x7f ^ 0x1f == 0x60
 *	0x1f == (0x80 - 0x60) - 1
 *
 * Which are the offset and xor value used to resume from alignment faults.
 */
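
/*
 * Concretely: for a fault taken anywhere in a spill vector that starts at,
 * say, %tba + 0x1000, forcing the low bits on gives %tba + 0x107f, and
 * (%tba + 0x107f) ^ RSF_XOR(RSF_OFF_ALIGN) = %tba + 0x1060, i.e. byte offset
 * 0x60 (instruction 24 of 32) in that vector, which is where the first of
 * the two resume vectors is placed after the .align 32 in each handler.
 */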

/*
 * Determine if we have trapped inside of a spill/fill vector, and if so resume
 * at a fixed instruction offset in the trap vector.  Must be called on
 * alternate globals.
 */
#define	RESUME_SPILLFILL_MAGIC(stxa_g0_sfsr, xor) \
	dec	16, ASP_REG ; \
	stx	%g1, [ASP_REG + 0] ; \
	stx	%g2, [ASP_REG + 8] ; \
	rdpr	%tpc, %g1 ; \
	ERRATUM50(%g1) ; \
	rdpr	%tba, %g2 ; \
	sub	%g1, %g2, %g2 ; \
	srlx	%g2, 5, %g2 ; \
	andn	%g2, 0x200, %g2 ; \
	cmp	%g2, 0x80 ; \
	blu,pt	%xcc, 9f ; \
	 cmp	%g2, 0x100 ; \
	bgeu,pt	%xcc, 9f ; \
	 or	%g1, 0x7f, %g1 ; \
	wrpr	%g1, xor, %tnpc ; \
	stxa_g0_sfsr ; \
	ldx	[ASP_REG + 8], %g2 ; \
	ldx	[ASP_REG + 0], %g1 ; \
	inc	16, ASP_REG ; \
	done ; \
9:	ldx	[ASP_REG + 8], %g2 ; \
	ldx	[ASP_REG + 0], %g1 ; \
	inc	16, ASP_REG

/*
 * For certain faults we need to clear the sfsr mmu register before returning.
 */
#define	RSF_CLR_SFSR \
	wr	%g0, ASI_DMMU, %asi ; \
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi

#define	RSF_XOR(off)	((0x80 - off) - 1)

/*
 * Instruction offsets in spill and fill trap handlers for handling certain
 * nested traps, and corresponding xor constants for wrpr.
 */
#define	RSF_OFF_ALIGN	0x60
#define	RSF_OFF_MMU	0x70

#define	RESUME_SPILLFILL_ALIGN \
	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_ALIGN))
#define	RESUME_SPILLFILL_MMU \
	RESUME_SPILLFILL_MAGIC(EMPTY, RSF_XOR(RSF_OFF_MMU))
#define	RESUME_SPILLFILL_MMU_CLR_SFSR \
	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_MMU))

/*
 * Constant to add to %tnpc when taking a fill trap just before returning to
 * user mode.
 */
#define	RSF_FILL_INC	tl0_ret_fill_end - tl0_ret_fill
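
/*
 * RSF_FILL_MAGIC (below) adds this to the trapped %tnpc, so that instead of
 * re-executing the restore that faulted in tl0_ret, control resumes at the
 * fixup code just past the corresponding tl0_ret_fill label, which is
 * defined later in this file.
 */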

/*
 * Retry a spill or fill with a different wstate due to an alignment fault.
 * We may just be using the wrong stack offset.
 */
#define	RSF_ALIGN_RETRY(ws) \
	wrpr	%g0, (ws), %wstate ; \
	retry ; \
	.align	16

/*
 * Generate a T_SPILL or T_FILL trap if the window operation fails.
 */
#define	RSF_TRAP(type) \
	b	%xcc, tl0_sftrap ; \
	 mov	type, %g2 ; \
	.align	16

/*
 * Game over if the window operation fails.
 */
#define	RSF_FATAL(type) \
	b	%xcc, rsf_fatal ; \
	 mov	type, %g2 ; \
	.align	16

/*
 * Magic to resume from a failed fill a few instructions after the
 * corresponding restore.  This is used on return from the kernel to
 * user mode.
 */
#define	RSF_FILL_MAGIC \
	rdpr	%tnpc, %g1 ; \
	add	%g1, RSF_FILL_INC, %g1 ; \
	wrpr	%g1, 0, %tnpc ; \
	done ; \
	.align	16

/*
 * Spill to the pcb if a spill to the user stack in kernel mode fails.
 */
#define	RSF_SPILL_TOPCB \
	b,a	%xcc, tl1_spill_topcb ; \
	 nop ; \
	.align	16

ENTRY(rsf_fatal)
#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "rsf_fatal: bad window trap tt=%#lx type=%#lx"
	    , %g1, %g3, %g4, 7, 8, 9)
	rdpr	%tt, %g3
	stx	%g3, [%g1 + KTR_PARM1]
	stx	%g2, [%g1 + KTR_PARM2]
9:
#endif

	KSTACK_CHECK

	sir
END(rsf_fatal)

	.comm	intrnames, NIV * 8
	.comm	eintrnames, 0

	.comm	intrcnt, NIV * 8
	.comm	eintrcnt, 0

/*
 * Trap table and associated macros
 *
 * Due to its size a trap table is an inherently hard thing to represent in
 * code in a clean way.  There are approximately 1024 vectors, of 8 or 32
 * instructions each, many of which are identical.  The way that this is
 * laid out is that the instructions (8 or 32) for the actual trap vector
 * appear as an AS macro.  In general this code branches to tl0_trap or
 * tl1_trap, but if not, supporting code can be placed just after the
 * definition of the macro.  The macros are then instantiated in a different
 * section (.trap), which is set up to be placed by the linker at the
 * beginning of .text, and the code around the macros is moved to the end of
 * the trap table.  In this way the code that must be sequential in memory
 * can be split up, and located near its supporting code so that it is easier
 * to follow.
 */
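
/*
 * As a concrete example, each tl0_gen instantiation below assembles to the
 * three instructions of tl0_split, a branch to tl0_trap with the type in the
 * delay slot, and padding to a 32 byte (8 instruction) boundary, so
 * consecutive instantiations in .trap form consecutive trap vectors.
 */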

	/*
	 * Clean window traps occur when %cleanwin is zero to ensure that data
	 * is not leaked between address spaces in registers.
	 */
	.macro	clean_window
	clr	%o0
	clr	%o1
	clr	%o2
	clr	%o3
	clr	%o4
	clr	%o5
	clr	%o6
	clr	%o7
	clr	%l0
	clr	%l1
	clr	%l2
	clr	%l3
	clr	%l4
	clr	%l5
	clr	%l6
	rdpr	%cleanwin, %l7
	inc	%l7
	wrpr	%l7, 0, %cleanwin
	clr	%l7
	retry
	.align	128
	.endm

	/*
	 * Stack fixups for entry from user mode.  We are still running on the
	 * user stack, and with its live registers, so we must save soon.  We
	 * are on alternate globals so we do have some registers.  Set the
	 * transitional window state, and do the save.  If this traps, we
	 * attempt to spill a window to the user stack.  If this fails,
	 * we spill the window to the pcb and continue.  Spilling to the pcb
	 * must not fail.
	 *
	 * NOTE: Must be called with alternate globals and clobbers %g1.
	 */

	.macro	tl0_split
	rdpr	%wstate, %g1
	wrpr	%g1, WSTATE_TRANSITION, %wstate
	save
	.endm

	.macro	tl0_setup	type
	tl0_split
	b	%xcc, tl0_trap
	 mov	\type, %o0
	.endm

	/*
	 * Generic trap type.  Call trap() with the specified type.
	 */
	.macro	tl0_gen		type
	tl0_setup \type
	.align	32
	.endm

	/*
	 * This is used to suck up the massive swaths of reserved trap types.
	 * Generates count "reserved" trap vectors.
	 */
	.macro	tl0_reserved	count
	.rept	\count
	tl0_gen	T_RESERVED
	.endr
	.endm

	.macro	tl0_fp_restore
	wr	%g0, FPRS_FEF, %fprs
	wr	%g0, ASI_BLK_S, %asi
	ldda	[PCB_REG + PCB_FPSTATE + FP_FB0] %asi, %f0
	ldda	[PCB_REG + PCB_FPSTATE + FP_FB1] %asi, %f16
	ldda	[PCB_REG + PCB_FPSTATE + FP_FB2] %asi, %f32
	ldda	[PCB_REG + PCB_FPSTATE + FP_FB3] %asi, %f48
	membar	#Sync
	done
	.align	32
	.endm

	.macro	tl0_insn_excptn
	wr	%g0, ASI_IMMU, %asi
	rdpr	%tpc, %g3
	ldxa	[%g0 + AA_IMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_IMMU_SFSR] %asi
	membar	#Sync
	b	%xcc, tl0_sfsr_trap
	 mov	T_INSTRUCTION_EXCEPTION, %g2
	.align	32
	.endm

	.macro	tl0_data_excptn
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync
	b	%xcc, tl0_sfsr_trap
	 mov	T_DATA_EXCEPTION, %g2
	.align	32
	.endm

	.macro	tl0_align
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync
	b	%xcc, tl0_sfsr_trap
	 mov	T_MEM_ADDRESS_NOT_ALIGNED, %g2
	.align	32
	.endm

ENTRY(tl0_sfsr_trap)
	tl0_split
	mov	%g3, %o4
	mov	%g4, %o5
	b	%xcc, tl0_trap
	 mov	%g2, %o0
END(tl0_sfsr_trap)

	.macro	tl0_intr level, mask
	tl0_split
	set	\mask, %o1
	b	%xcc, tl0_intr
	 mov	\level, %o0
	.align	32
	.endm

#define	INTR(level, traplvl)						\
	tl ## traplvl ## _intr	level, 1 << level

#define	TICK(traplvl) \
	tl ## traplvl ## _intr	PIL_TICK, 1

#define	INTR_LEVEL(tl)							\
	INTR(1, tl) ;							\
	INTR(2, tl) ;							\
	INTR(3, tl) ;							\
	INTR(4, tl) ;							\
	INTR(5, tl) ;							\
	INTR(6, tl) ;							\
	INTR(7, tl) ;							\
	INTR(8, tl) ;							\
	INTR(9, tl) ;							\
	INTR(10, tl) ;							\
	INTR(11, tl) ;							\
	INTR(12, tl) ;							\
	INTR(13, tl) ;							\
	TICK(tl) ;							\
	INTR(15, tl) ;

	.macro	tl0_intr_level
	INTR_LEVEL(0)
	.endm

	.macro	tl0_intr_vector
	b,a	%xcc, intr_enqueue
	.align	32
	.endm

	.macro	immu_miss_user
	/*
	 * Extract the virtual page number from the contents of the tag
	 * access register.
	 */
	srlx	%g2, TAR_VPN_SHIFT, %g3

	/*
	 * Compute the tte bucket address.
	 */
	set	(1 << TSB_BUCKET_ADDRESS_BITS) - 1, %g1
	and	%g1, %g3, %g1
	sllx	%g1, TSB_BUCKET_SHIFT + TTE_SHIFT, %g1
	add	%g1, TSB_REG, %g1

	/*
	 * Loop over the ttes in this bucket
	 */

	/*
	 * Load the tte.  Note that this instruction may fault, clobbering
	 * the contents of the tag access register (at least), and the contents
	 * of %g3, %g4, %g5, and %g6.  Luckily we can recover %g3, and we do
	 * not use %g4 or %g5 until this instruction completes successfully.
	 */
1:	ldda	[%g1] ASI_NUCLEUS_QUAD_LDD, %g4 /*, %g5 */

	/*
	 * Recover the virtual page number, which may have been clobbered.
	 */
	srlx	%g2, TAR_VPN_SHIFT, %g3

	/*
	 * Check that it's valid and executable and that the virtual page
	 * numbers match.
	 */
	brgez,pn %g5, 2f
	 andcc	%g5, TD_EXEC, %g0
	bz,pn	%xcc, 2f
	 cmp	%g3, %g4
	bne,pn %xcc, 2f
	 EMPTY

	/*
	 * We matched a tte, load the tlb.
	 */

	/*
	 * Set the reference bit, if it's currently clear.
	 */
	 andcc	%g5, TD_REF, %g0
	bz,a,pn	%xcc, tl0_immu_miss_set_ref
	 nop

	/*
	 * Load the tte tag and data into the tlb and retry the instruction.
	 */
	stxa	%g2, [%g0 + AA_IMMU_TAR] %asi
	stxa	%g5, [%g0] ASI_ITLB_DATA_IN_REG
	retry

	/*
	 * Check the low bits to see if we've finished the bucket.
	 */
2:	add	%g1, 1 << TTE_SHIFT, %g1
	andcc	%g1, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
	bnz,a,pt %xcc, 1b
	 nop
	.endm

	.macro	tl0_immu_miss
	/*
	 * Force kernel store order.
	 */
	wrpr	%g0, PSTATE_MMU, %pstate

	/*
	 * Load the virtual page number and context from the tag access
	 * register.  We ignore the context.
	 */
	wr	%g0, ASI_IMMU, %asi
	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2

	immu_miss_user

	b,a	%xcc, tl0_immu_miss_trap
	 nop
	.align	128
	.endm

ENTRY(tl0_immu_miss_set_ref)
	/*
	 * Set the reference bit.
	 */
	TTE_SET_REF(%g1, %g4, %g5)

	/*
	 * May have become invalid, in which case start over.
	 */
	brgez,pn %g4, 1f
	 or	%g4, TD_REF, %g4

	/*
	 * Load the tte tag and data into the tlb and retry the instruction.
	 */
	stxa	%g2, [%g0 + AA_IMMU_TAR] %asi
	stxa	%g4, [%g0] ASI_ITLB_DATA_IN_REG
1:	retry
END(tl0_immu_miss_set_ref)

ENTRY(tl0_immu_miss_trap)
	/*
	 * Put back the contents of the tag access register, in case we
	 * faulted.
	 */
	stxa	%g2, [%g0 + AA_IMMU_TAR] %asi
	membar	#Sync

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Reload the tag access register.
	 */
	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2

	/*
	 * Save the tag access register, and call common trap code.
	 */
	tl0_split
	mov	%g2, %o3
	b	%xcc, tl0_trap
	 mov	T_INSTRUCTION_MISS, %o0
END(tl0_immu_miss_trap)

	.macro	dmmu_miss_user
	/*
	 * Extract the virtual page number from the contents of the tag
	 * access register.
	 */
	srlx	%g2, TAR_VPN_SHIFT, %g3

	/*
	 * Compute the tte bucket address.
	 */
	set	(1 << TSB_BUCKET_ADDRESS_BITS) - 1, %g1
	and	%g1, %g3, %g1
	sllx	%g1, TSB_BUCKET_SHIFT + TTE_SHIFT, %g1
	add	%g1, TSB_REG, %g1

	/*
	 * Loop over the ttes in this bucket
	 */

	/*
	 * Load the tte.  Note that this instruction may fault, clobbering
	 * the contents of the tag access register (at least), and the contents
	 * of %g3, %g4, %g5, and %g6.  Luckily we can recover %g3, and we do
	 * not use %g4 or %g5 until this instruction completes successfully.
	 */
1:	ldda	[%g1] ASI_NUCLEUS_QUAD_LDD, %g4 /*, %g5 */

	/*
	 * Recover the virtual page number, which may have been clobbered.
	 */
	srlx	%g2, TAR_VPN_SHIFT, %g3

	/*
	 * Check that it's valid and that the virtual page numbers match.
	 */
	brgez,pn %g5, 2f
	 cmp	%g3, %g4
	bne,pn %xcc, 2f
	 EMPTY

	/*
	 * We matched a tte, load the tlb.
	 */

	/*
	 * Set the reference bit, if it's currently clear.
	 */
	 andcc	%g5, TD_REF, %g0
	bz,a,pn	%xcc, dmmu_miss_user_set_ref
	 nop

	/*
	 * Load the tte tag and data into the tlb and retry the instruction.
	 */
	stxa	%g2, [%g0 + AA_DMMU_TAR] %asi
	stxa	%g5, [%g0] ASI_DTLB_DATA_IN_REG
	retry

	/*
	 * Check the low bits to see if we've finished the bucket.
	 */
2:	add	%g1, 1 << TTE_SHIFT, %g1
	andcc	%g1, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
	bnz,a,pt %xcc, 1b
	 nop
	.endm

ENTRY(dmmu_miss_user_set_ref)
	/*
	 * Set the reference bit.
	 */
	TTE_SET_REF(%g1, %g4, %g5)

	/*
	 * May have become invalid, in which case start over.
	 */
	brgez,pn %g4, 1f
	 or	%g4, TD_REF, %g4

	/*
	 * Load the tte tag and data into the tlb and retry the instruction.
	 */
	stxa	%g2, [%g0 + AA_DMMU_TAR] %asi
	stxa	%g4, [%g0] ASI_DTLB_DATA_IN_REG
1:	retry
END(dmmu_miss_user_set_ref)

	.macro	tl0_dmmu_miss
	/*
	 * Force kernel store order.
	 */
	wrpr	%g0, PSTATE_MMU, %pstate

	/*
	 * Load the virtual page number and context from the tag access
	 * register.  We ignore the context.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2

	/*
	 * Try a fast inline lookup of the primary tsb.
	 */
	dmmu_miss_user

	/*
	 * Not in primary tsb, call c code.  Not much else fits inline.
	 */
	b,a	%xcc, tl0_dmmu_miss_trap
	 nop
	.align	128
	.endm

ENTRY(tl0_dmmu_miss_trap)
	/*
	 * Put back the contents of the tag access register, in case we
	 * faulted.
	 */
	stxa	%g2, [%g0 + AA_DMMU_TAR] %asi
	membar	#Sync

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Reload the tag access register.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2

	/*
	 * Save the tag access register and call common trap code.
	 */
	tl0_split
	mov	%g2, %o3
	b	%xcc, tl0_trap
	 mov	T_DATA_MISS, %o0
END(tl0_dmmu_miss_trap)

	.macro	dmmu_prot_user
	/*
	 * Extract the virtual page number from the contents of the tag
	 * access register.
	 */
	srlx	%g2, TAR_VPN_SHIFT, %g3

	/*
	 * Compute the tte bucket address.
	 */
	set	(1 << TSB_BUCKET_ADDRESS_BITS) - 1, %g1
	and	%g1, %g3, %g1
	sllx	%g1, TSB_BUCKET_SHIFT + TTE_SHIFT, %g1
	add	%g1, TSB_REG, %g1

	/*
	 * Loop over the ttes in this bucket
	 */

	/*
	 * Load the tte.  Note that this instruction may fault, clobbering
	 * the contents of the tag access register (at least), and the contents
	 * of %g3, %g4, %g5, and %g6.  Luckily we can recover %g3, and we do
	 * not use %g4 or %g5 until this instruction completes successfully.
	 */
1:	ldda	[%g1] ASI_NUCLEUS_QUAD_LDD, %g4 /*, %g5 */

	/*
	 * Recover the virtual page number, which may have been clobbered.
	 */
	srlx	%g2, TAR_VPN_SHIFT, %g3

	/*
	 * Check that it's valid and writable and that the virtual page
	 * numbers match.
	 */
	brgez,pn %g5, 2f
	 andcc	%g5, TD_SW, %g0
	bz,pn	%xcc, 2f
	 cmp	%g3, %g4
	bne,pn %xcc, 2f
	 nop

	/*
	 * Set the hardware write bit.
	 */
	b,a	%xcc, dmmu_prot_set_w
	 nop

	/*
	 * Check the low bits to see if we've finished the bucket.
	 */
2:	add	%g1, 1 << TTE_SHIFT, %g1
	andcc	%g1, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
	bnz,a,pt %xcc, 1b
	 nop
	.endm

	.macro	tl0_dmmu_prot
	/*
	 * Force kernel store order.
	 */
	wrpr	%g0, PSTATE_MMU, %pstate

	/*
	 * Load the virtual page number and context from the tag access
	 * register.  We ignore the context.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2

	/*
	 * Try a fast inline lookup of the tsb.
	 */
	dmmu_prot_user

	/*
	 * Not in tsb.  Call c code.
	 */
	b,a	%xcc, tl0_dmmu_prot_trap
	 nop
	.align	128
	.endm

ENTRY(dmmu_prot_set_w)
	/*
	 * Set the hardware write bit in the tte.
	 */
	TTE_SET_W(%g1, %g4, %g5)

	/*
	 * Delete the old TLB entry and clear the sfsr.
	 */
	sllx	%g3, TAR_VPN_SHIFT, %g3
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	/*
	 * May have become invalid, in which case start over.
	 */
	brgez,pn %g4, 1f
	 or	%g4, TD_W, %g4

	/*
	 * Load the tte data into the tlb and retry the instruction.
	 */
	stxa	%g2, [%g0 + AA_DMMU_TAR] %asi
	stxa	%g4, [%g0] ASI_DTLB_DATA_IN_REG
1:	retry
END(dmmu_prot_set_w)

ENTRY(tl0_dmmu_prot_trap)
	/*
	 * Put back the contents of the tag access register, in case we
	 * faulted.
	 */
	stxa	%g2, [%g0 + AA_DMMU_TAR] %asi
	membar	#Sync

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Load the tar, sfar and sfsr.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2

	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	/*
	 * Save the mmu registers and call common trap code.
	 */
	tl0_split
	mov	%g2, %o3
	mov	%g3, %o4
	mov	%g4, %o5
	b	%xcc, tl0_trap
	 mov	T_DATA_PROTECTION, %o0
END(tl0_dmmu_prot_trap)

	.macro	tl0_spill_0_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_TRAP(T_SPILL)
	RSF_TRAP(T_SPILL)
	.endm

	.macro	tl0_spill_1_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_TRAP(T_SPILL)
	RSF_TRAP(T_SPILL)
	.endm

	.macro	tl0_fill_0_n
	wr	%g0, ASI_AIUP, %asi
	FILL(ldxa, %sp + SPOFF, 8, %asi)
	restored
	retry
	.align	32
	RSF_TRAP(T_FILL)
	RSF_TRAP(T_FILL)
	.endm

	.macro	tl0_fill_1_n
	wr	%g0, ASI_AIUP, %asi
	FILL(lduwa, %sp, 4, %asi)
	restored
	retry
	.align	32
	RSF_TRAP(T_FILL)
	RSF_TRAP(T_FILL)
	.endm

ENTRY(tl0_sftrap)
	rdpr	%tstate, %g1
	and	%g1, TSTATE_CWP_MASK, %g1
	wrpr	%g1, 0, %cwp
	tl0_split
	b	%xcc, tl0_trap
	 mov	%g2, %o0
END(tl0_sftrap)

	.macro	tl0_spill_bad	count
	.rept	\count
	sir
	.align	128
	.endr
	.endm

	.macro	tl0_fill_bad	count
	.rept	\count
	sir
	.align	128
	.endr
	.endm

	.macro	tl0_syscall
	tl0_split
	b	%xcc, tl0_syscall
	 mov	T_SYSCALL, %o0
	.align	32
	.endm

	.macro	tl0_soft	count
	.rept	\count
	tl0_gen	T_SOFT
	.endr
	.endm

	.macro	tl1_split
	rdpr	%wstate, %g1
	wrpr	%g1, WSTATE_NESTED, %wstate
	save	%sp, -CCFSZ, %sp
	.endm

	.macro	tl1_setup	type
	tl1_split
	b	%xcc, tl1_trap
	 mov	\type | T_KERNEL, %o0
	.endm

	.macro	tl1_gen		type
	tl1_setup \type
	.align	32
	.endm

	.macro	tl1_reserved	count
	.rept	\count
	tl1_gen	T_RESERVED
	.endr
	.endm

	.macro	tl1_insn_excptn
	wr	%g0, ASI_IMMU, %asi
	rdpr	%tpc, %g3
	ldxa	[%g0 + AA_IMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_IMMU_SFSR] %asi
	membar	#Sync
	b	%xcc, tl1_insn_exceptn_trap
	 mov	T_INSTRUCTION_EXCEPTION | T_KERNEL, %g2
	.align	32
	.endm

ENTRY(tl1_insn_exceptn_trap)
	tl1_split
	mov	%g3, %o4
	mov	%g4, %o5
	b	%xcc, tl1_trap
	 mov	%g2, %o0
END(tl1_insn_exceptn_trap)

	.macro	tl1_data_excptn
	b,a	%xcc, tl1_data_excptn_trap
	 nop
	.align	32
	.endm

ENTRY(tl1_data_excptn_trap)
	wrpr	%g0, PSTATE_ALT, %pstate
	RESUME_SPILLFILL_MMU_CLR_SFSR
	b	%xcc, tl1_sfsr_trap
	 mov	T_DATA_EXCEPTION | T_KERNEL, %g2
END(tl1_data_excptn_trap)

	.macro	tl1_align
	b,a	%xcc, tl1_align_trap
	 nop
	.align	32
	.endm

ENTRY(tl1_align_trap)
	wrpr	%g0, PSTATE_ALT, %pstate
	RESUME_SPILLFILL_ALIGN
	b	%xcc, tl1_sfsr_trap
	 mov	T_MEM_ADDRESS_NOT_ALIGNED | T_KERNEL, %g2
END(tl1_align_trap)

ENTRY(tl1_sfsr_trap)
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	tl1_split
	mov	%g3, %o4
	mov	%g4, %o5
	b	%xcc, tl1_trap
	 mov	%g2, %o0
END(tl1_sfsr_trap)

	.macro	tl1_intr level, mask
	tl1_split
	set	\mask, %o1
	b	%xcc, tl1_intr
	 mov	\level, %o0
	.align	32
	.endm

	.macro	tl1_intr_level
	INTR_LEVEL(1)
	.endm

	.macro	tl1_intr_vector
	b,a	intr_enqueue
	.align	32
	.endm

ENTRY(intr_enqueue)
	/*
	 * Load the interrupt packet from the hardware.
	 */
	wr	%g0, ASI_SDB_INTR_R, %asi
	ldxa	[%g0] ASI_INTR_RECEIVE, %g2
	ldxa	[%g0 + AA_SDB_INTR_D0] %asi, %g3
	ldxa	[%g0 + AA_SDB_INTR_D1] %asi, %g4
	ldxa	[%g0 + AA_SDB_INTR_D2] %asi, %g5
	stxa	%g0, [%g0] ASI_INTR_RECEIVE
	membar	#Sync

	/*
	 * If the second data word is present it points to code to execute
	 * directly.  Jump to it.
	 */
	brz,a,pt %g4, 1f
	 nop
	jmpl	%g4, %g0
	 nop

	/*
	 * Find the head of the queue and advance it.
	 */
1:	ldx	[PCPU(IQ) + IQ_HEAD], %g1
	add	%g1, 1, %g6
	and	%g6, IQ_MASK, %g6
	stx	%g6, [PCPU(IQ) + IQ_HEAD]

	/*
	 * Find the iqe.
	 */
	sllx	%g1, IQE_SHIFT, %g1
	add	%g1, PCPU_REG, %g1
	add	%g1, PC_IQ, %g1

	/*
	 * Store the tag and first data word in the iqe.  These are always
	 * valid.
	 */
	stw	%g2, [%g1 + IQE_TAG]
	stx	%g3, [%g1 + IQE_VEC]

#ifdef INVARIANTS
	/*
	 * If the new head is the same as the tail, the next interrupt will
	 * overwrite unserviced packets.  This is bad.
	 */
	ldx	[PCPU(IQ) + IQ_TAIL], %g2
	cmp	%g2, %g6
	be	%xcc, 2f
	 nop
#endif

	/*
	 * Load the function, argument and priority and store them in the iqe.
	 */
	sllx	%g3, IV_SHIFT, %g3
	SET(intr_vectors, %g6, %g2)
	add	%g2, %g3, %g2
	ldx	[%g2 + IV_FUNC], %g4
	ldx	[%g2 + IV_ARG], %g5
	lduw	[%g2 + IV_PRI], %g6
	stx	%g4, [%g1 + IQE_FUNC]
	stx	%g5, [%g1 + IQE_ARG]
	stw	%g6, [%g1 + IQE_PRI]

#if KTR_COMPILE & KTR_INTR
	CATR(KTR_INTR, "intr_enqueue: head=%d tail=%d pri=%p tag=%#x vec=%#x"
	    , %g2, %g3, %g4, 7, 8, 9)
	ldx	[PCPU(IQ) + IQ_HEAD], %g3
	stx	%g3, [%g2 + KTR_PARM1]
	ldx	[PCPU(IQ) + IQ_TAIL], %g3
	stx	%g3, [%g2 + KTR_PARM2]
	lduw	[%g1 + IQE_PRI], %g3
	stx	%g3, [%g2 + KTR_PARM3]
	lduw	[%g1 + IQE_TAG], %g3
	stx	%g3, [%g2 + KTR_PARM4]
	ldx	[%g1 + IQE_VEC], %g3
	stx	%g3, [%g2 + KTR_PARM5]
9:
#endif

	/*
	 * Trigger a softint at the level indicated by the priority.
	 */
	mov	1, %g1
	sllx	%g1, %g6, %g1
	wr	%g1, 0, %asr20

	retry

#ifdef INVARIANTS
	/*
	 * The interrupt queue is about to overflow.  We are in big trouble.
	 */
2:	sir
#endif
END(intr_enqueue)

	.macro	tl1_immu_miss
	/*
	 * Load the context and the virtual page number from the tag access
	 * register.  We ignore the context.
	 */
	wr	%g0, ASI_IMMU, %asi
	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g6

	/*
	 * Extract the virtual page number from the contents of the tag access
	 * register.
	 */
	srlx	%g6, TAR_VPN_SHIFT, %g6

	/*
	 * Find the index into the kernel tsb.
	 */
	set	TSB_KERNEL_MASK, %g4
	and	%g6, %g4, %g3

	/*
	 * Compute the tte address.
	 */
	ldxa	[%g0 + AA_IMMU_TSB] %asi, %g4
	sllx	%g3, TTE_SHIFT, %g3
	add	%g3, %g4, %g3

	/*
	 * Load the tte.
	 */
	ldda	[%g3] ASI_NUCLEUS_QUAD_LDD, %g4 /*, %g5 */

	/*
	 * Check that it's valid and executable and that the virtual page
	 * numbers match.
	 */
	brgez,pn %g5, tl1_immu_miss_trap
	 andcc	%g5, TD_EXEC, %g0
	bz,pn	%xcc, tl1_immu_miss_trap
	 cmp	%g4, %g6
	bne,pn	%xcc, tl1_immu_miss_trap
	 EMPTY

	/*
	 * Set the reference bit if it's currently clear.
	 */
	 andcc	%g5, TD_REF, %g0
	bnz,a,pt %xcc, 1f
	 nop
	TTE_SET_REF(%g3, %g5, %g4)

	/*
	 * May have become invalid, in which case start over.
	 */
	brgez,pn %g5, 2f
	 or	%g5, TD_REF, %g5

	/*
	 * Load the tte data into the TLB and retry the instruction.
	 */
1:	stxa	%g5, [%g0] ASI_ITLB_DATA_IN_REG
2:	retry
	.align	128
	.endm

ENTRY(tl1_immu_miss_trap)
	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2

	tl1_split
	mov	%g2, %o3
	b	%xcc, tl1_trap
	 mov	T_INSTRUCTION_MISS | T_KERNEL, %o0
END(tl1_immu_miss_trap)

	.macro	tl1_dmmu_miss
	/*
	 * Load the context and the virtual page number from the tag access
	 * register.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g6

	/*
	 * Extract the context from the contents of the tag access register.
	 * If it's non-zero, this is a fault on a user address; otherwise get
	 * the virtual page number.
	 */
	sllx	%g6, 64 - TAR_VPN_SHIFT, %g5
	brnz,a,pn %g5, tl1_dmmu_miss_user
	 mov	%g6, %g2

	/*
	 * Find the index into the kernel tsb.
	 */
	set	TSB_KERNEL_MASK, %g4
	srlx	%g6, TAR_VPN_SHIFT, %g6
	and	%g6, %g4, %g3

	/*
	 * Compute the tte address.
	 */
	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g4
	sllx	%g3, TTE_SHIFT, %g3
	add	%g3, %g4, %g3

	/*
	 * Load the tte.
	 */
	ldda	[%g3] ASI_NUCLEUS_QUAD_LDD, %g4 /*, %g5 */

	/*
	 * Check that it's valid and that the virtual page numbers match.
	 */
	brgez,pn %g5, tl1_dmmu_miss_trap
	 cmp	%g4, %g6
	bne,pn %xcc, tl1_dmmu_miss_trap
	 EMPTY

	/*
	 * Set the reference bit if it's currently clear.
	 */
	 andcc	%g5, TD_REF, %g0
	bnz,a,pt %xcc, 1f
	 nop
	TTE_SET_REF(%g3, %g5, %g4)

	/*
	 * May have become invalid, in which case start over.
	 */
	brgez,pn %g5, 2f
	 or	%g5, TD_REF, %g5

	/*
	 * Load the tte data into the TLB and retry the instruction.
	 */
1:	stxa	%g5, [%g0] ASI_DTLB_DATA_IN_REG
2:	retry
	.align	128
	.endm

ENTRY(tl1_dmmu_miss_trap)
	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	KSTACK_CHECK

	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2

	tl1_split
	mov	%g2, %o3
	b	%xcc, tl1_trap
	 mov	T_DATA_MISS | T_KERNEL, %o0
END(tl1_dmmu_miss_trap)

ENTRY(tl1_dmmu_miss_user)
	/*
	 * Try a fast inline lookup of the user tsb.
	 */
	dmmu_miss_user

	/*
	 * Put back the contents of the tag access register, in case we
	 * faulted.
	 */
	stxa	%g2, [%g0 + AA_DMMU_TAR] %asi
	membar	#Sync

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Handle faults during window spill/fill.
	 */
	RESUME_SPILLFILL_MMU

	/*
	 * Reload the tag access register.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2

	tl1_split
	mov	%g2, %o3
	b	%xcc, tl1_trap
	 mov	T_DATA_MISS | T_KERNEL, %o0
END(tl1_dmmu_miss_user)

	.macro	tl1_dmmu_prot
	/*
	 * Load the context and the virtual page number from the tag access
	 * register.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g6

	/*
	 * Extract the context from the contents of the tag access register.
	 * If it's non-zero, this is a fault on a user address; otherwise get
	 * the virtual page number.
	 */
	sllx	%g6, 64 - TAR_VPN_SHIFT, %g5
	brnz,a,pn %g5, tl1_dmmu_prot_user
	 mov	%g6, %g2

	/*
	 * Find the index into the kernel tsb.
	 */
	set	TSB_KERNEL_MASK, %g4
	srlx	%g6, TAR_VPN_SHIFT, %g6
	and	%g6, %g4, %g5

	/*
	 * Compute the tte address.
	 */
	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g4
	sllx	%g5, TTE_SHIFT, %g5
	add	%g4, %g5, %g3

	/*
	 * Load the tte.
	 */
	ldda	[%g3] ASI_NUCLEUS_QUAD_LDD, %g4 /*, %g5 */

	/*
	 * Check that it's valid and writable and that the virtual page
	 * numbers match.
	 */
	brgez,pn %g5, tl1_dmmu_prot_trap
	 andcc	%g5, TD_SW, %g0
	bz,pn	%xcc, tl1_dmmu_prot_trap
	 cmp	%g4, %g6
	bne,pn	%xcc, tl1_dmmu_prot_trap
	 EMPTY

	/*
	 * Delete the old TLB entry and clear the sfsr.
	 */
	 sllx	%g6, TAR_VPN_SHIFT, %g6
	or	%g6, TLB_DEMAP_NUCLEUS, %g6
	stxa	%g0, [%g6] ASI_DMMU_DEMAP
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	ba,a	%xcc, tl1_dmmu_prot_cont
	 nop
	.align	128
	.endm

ENTRY(tl1_dmmu_prot_cont)
	/*
	 * Set the hardware write bit.
	 */
	TTE_SET_W(%g3, %g5, %g6)

	/*
	 * Load the tte data into the TLB and retry the instruction.
	 */
	or	%g5, TD_W, %g5
	stxa	%g5, [%g0] ASI_DTLB_DATA_IN_REG
	retry
END(tl1_dmmu_prot_cont)

ENTRY(tl1_dmmu_prot_user)
	/*
	 * Try a fast inline lookup of the user tsb.
	 */
	dmmu_prot_user

	/*
	 * Put back the contents of the tag access register, in case we
	 * faulted.
	 */
	stxa	%g2, [%g0 + AA_DMMU_TAR] %asi
	membar	#Sync

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/* Handle faults during window spill/fill. */
	RESUME_SPILLFILL_MMU_CLR_SFSR

	b,a	%xcc, tl1_dmmu_prot_trap
	 nop
END(tl1_dmmu_prot_user)

ENTRY(tl1_dmmu_prot_trap)
	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Load the sfar, sfsr and tar.  Clear the sfsr.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	tl1_split
	mov	%g2, %o3
	mov	%g3, %o4
	mov	%g4, %o5
	b	%xcc, tl1_trap
	 mov	T_DATA_PROTECTION | T_KERNEL, %o0
END(tl1_dmmu_prot_trap)

	.macro	tl1_spill_0_n
	SPILL(stx, %sp + SPOFF, 8, EMPTY)
	saved
	retry
	.align	32
	RSF_FATAL(T_SPILL)
	RSF_FATAL(T_SPILL)
	.endm

	.macro	tl1_spill_2_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_3_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_0_o
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_1_o
	wr	%g0, ASI_AIUP, %asi
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_2_o
	RSF_SPILL_TOPCB
	.align	128
	.endm

	.macro	tl1_fill_0_n
	FILL(ldx, %sp + SPOFF, 8, EMPTY)
	restored
	retry
	.align	32
	RSF_FATAL(T_FILL)
	RSF_FATAL(T_FILL)
	.endm

	.macro	tl1_fill_2_n
	wr	%g0, ASI_AIUP, %asi
	FILL(ldxa, %sp + SPOFF, 8, %asi)
	restored
	retry
	.align	32
	RSF_FILL_MAGIC
	RSF_FILL_MAGIC
	.endm

	.macro	tl1_fill_3_n
	wr	%g0, ASI_AIUP, %asi
	FILL(lduwa, %sp, 4, %asi)
	restored
	retry
	.align	32
	RSF_FILL_MAGIC
	RSF_FILL_MAGIC
	.endm

/*
 * This is used on kernel entry to spill windows that still hold user data to
 * the pcb.
 */
ENTRY(tl1_spill_topcb)
	wrpr	%g0, PSTATE_ALT, %pstate

	/* Free some globals for our use. */
	dec	24, ASP_REG
	stx	%g1, [ASP_REG + 0]
	stx	%g2, [ASP_REG + 8]
	stx	%g3, [ASP_REG + 16]

	ldx	[PCB_REG + PCB_NSAVED], %g1

	sllx	%g1, PTR_SHIFT, %g2
	add	%g2, PCB_REG, %g2
	stx	%sp, [%g2 + PCB_RWSP]

	sllx	%g1, RW_SHIFT, %g2
	add	%g2, PCB_REG, %g2
	SPILL(stx, %g2 + PCB_RW, 8, EMPTY)

	inc	%g1
	stx	%g1, [PCB_REG + PCB_NSAVED]

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl1_spill_topcb: pc=%#lx npc=%#lx sp=%#lx nsaved=%d"
	   , %g1, %g2, %g3, 7, 8, 9)
	rdpr	%tpc, %g2
	stx	%g2, [%g1 + KTR_PARM1]
	rdpr	%tnpc, %g2
	stx	%g2, [%g1 + KTR_PARM2]
	stx	%sp, [%g1 + KTR_PARM3]
	ldx	[PCB_REG + PCB_NSAVED], %g2
	stx	%g2, [%g1 + KTR_PARM4]
9:
#endif

	saved

	ldx	[ASP_REG + 16], %g3
	ldx	[ASP_REG + 8], %g2
	ldx	[ASP_REG + 0], %g1
	inc	24, ASP_REG
	retry
END(tl1_spill_topcb)

	.macro	tl1_spill_bad	count
	.rept	\count
	sir
	.align	128
	.endr
	.endm

	.macro	tl1_fill_bad	count
	.rept	\count
	sir
	.align	128
	.endr
	.endm

	.macro	tl1_soft	count
	.rept	\count
	tl1_gen	T_SOFT | T_KERNEL
	.endr
	.endm
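
/*
 * The trap table itself.  It must be aligned on a 32KB boundary, since %tba
 * supplies only the upper bits of the vector address; each trap type gets an
 * 8-instruction (32 byte) slot, spill/fill types occupy 4 consecutive slots
 * (32 instructions), and the tl>0 vectors start at offset 0x4000.
 */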

	.sect	.trap
	.align	0x8000
	.globl	tl0_base

tl0_base:
	tl0_reserved	8				! 0x0-0x7
tl0_insn_excptn:
	tl0_insn_excptn					! 0x8
	tl0_reserved	1				! 0x9
tl0_insn_error:
	tl0_gen		T_INSTRUCTION_ERROR		! 0xa
	tl0_reserved	5				! 0xb-0xf
tl0_insn_illegal:
	tl0_gen		T_ILLEGAL_INSTRUCTION		! 0x10
tl0_priv_opcode:
	tl0_gen		T_PRIVILEGED_OPCODE		! 0x11
	tl0_reserved	14				! 0x12-0x1f
tl0_fp_disabled:
	tl0_gen		T_FP_DISABLED			! 0x20
tl0_fp_ieee:
	tl0_gen		T_FP_EXCEPTION_IEEE_754		! 0x21
tl0_fp_other:
	tl0_gen		T_FP_EXCEPTION_OTHER		! 0x22
tl0_tag_ovflw:
	tl0_gen		T_TAG_OFERFLOW			! 0x23
tl0_clean_window:
	clean_window					! 0x24
tl0_divide:
	tl0_gen		T_DIVISION_BY_ZERO		! 0x28
	tl0_reserved	7				! 0x29-0x2f
tl0_data_excptn:
	tl0_data_excptn					! 0x30
	tl0_reserved	1				! 0x31
tl0_data_error:
	tl0_gen		T_DATA_ERROR			! 0x32
	tl0_reserved	1				! 0x33
tl0_align:
	tl0_align					! 0x34
tl0_align_lddf:
	tl0_gen		T_RESERVED			! 0x35
tl0_align_stdf:
	tl0_gen		T_RESERVED			! 0x36
tl0_priv_action:
	tl0_gen		T_PRIVILEGED_ACTION		! 0x37
	tl0_reserved	9				! 0x38-0x40
tl0_intr_level:
	tl0_intr_level					! 0x41-0x4f
	tl0_reserved	16				! 0x50-0x5f
tl0_intr_vector:
	tl0_intr_vector					! 0x60
tl0_watch_phys:
	tl0_gen		T_PA_WATCHPOINT			! 0x61
tl0_watch_virt:
	tl0_gen		T_VA_WATCHPOINT			! 0x62
tl0_ecc:
	tl0_gen		T_CORRECTED_ECC_ERROR		! 0x63
tl0_immu_miss:
	tl0_immu_miss					! 0x64
tl0_dmmu_miss:
	tl0_dmmu_miss					! 0x68
tl0_dmmu_prot:
	tl0_dmmu_prot					! 0x6c
	tl0_reserved	16				! 0x70-0x7f
tl0_spill_0_n:
	tl0_spill_0_n					! 0x80
tl0_spill_1_n:
	tl0_spill_1_n					! 0x84
	tl0_spill_bad	14				! 0x88-0xbf
tl0_fill_0_n:
	tl0_fill_0_n					! 0xc0
tl0_fill_1_n:
	tl0_fill_1_n					! 0xc4
	tl0_fill_bad	14				! 0xc8-0xff
tl0_soft:
	tl0_reserved	1				! 0x100
	tl0_gen		T_BREAKPOINT			! 0x101
	tl0_gen		T_DIVISION_BY_ZERO		! 0x102
	tl0_reserved	1				! 0x103
	tl0_gen		T_CLEAN_WINDOW			! 0x104
	tl0_gen		T_RANGE_CHECK			! 0x105
	tl0_gen		T_FIX_ALIGNMENT			! 0x106
	tl0_gen		T_INTEGER_OVERFLOW		! 0x107
	tl0_reserved	1				! 0x108
	tl0_syscall					! 0x109
	tl0_fp_restore					! 0x10a
	tl0_reserved	5				! 0x10b-0x10f
	tl0_gen		T_TRAP_INSTRUCTION_16		! 0x110
	tl0_gen		T_TRAP_INSTRUCTION_17		! 0x111
	tl0_gen		T_TRAP_INSTRUCTION_18		! 0x112
	tl0_gen		T_TRAP_INSTRUCTION_19		! 0x113
	tl0_gen		T_TRAP_INSTRUCTION_20		! 0x114
	tl0_gen		T_TRAP_INSTRUCTION_21		! 0x115
	tl0_gen		T_TRAP_INSTRUCTION_22		! 0x116
	tl0_gen		T_TRAP_INSTRUCTION_23		! 0x117
	tl0_gen		T_TRAP_INSTRUCTION_24		! 0x118
	tl0_gen		T_TRAP_INSTRUCTION_25		! 0x119
	tl0_gen		T_TRAP_INSTRUCTION_26		! 0x11a
	tl0_gen		T_TRAP_INSTRUCTION_27		! 0x11b
	tl0_gen		T_TRAP_INSTRUCTION_28		! 0x11c
	tl0_gen		T_TRAP_INSTRUCTION_29		! 0x11d
	tl0_gen		T_TRAP_INSTRUCTION_30		! 0x11e
	tl0_gen		T_TRAP_INSTRUCTION_31		! 0x11f
	tl0_reserved	224				! 0x120-0x1ff

tl1_base:
	tl1_reserved	8				! 0x200-0x207
tl1_insn_excptn:
	tl1_insn_excptn					! 0x208
	tl1_reserved	1				! 0x209
tl1_insn_error:
	tl1_gen		T_INSTRUCTION_ERROR		! 0x20a
	tl1_reserved	5				! 0x20b-0x20f
tl1_insn_illegal:
	tl1_gen		T_ILLEGAL_INSTRUCTION		! 0x210
tl1_priv_opcode:
	tl1_gen		T_PRIVILEGED_OPCODE		! 0x211
	tl1_reserved	14				! 0x212-0x21f
tl1_fp_disabled:
	tl1_gen		T_FP_DISABLED			! 0x220
tl1_fp_ieee:
	tl1_gen		T_FP_EXCEPTION_IEEE_754		! 0x221
tl1_fp_other:
	tl1_gen		T_FP_EXCEPTION_OTHER		! 0x222
tl1_tag_ovflw:
	tl1_gen		T_TAG_OFERFLOW			! 0x223
tl1_clean_window:
	clean_window					! 0x224
tl1_divide:
	tl1_gen		T_DIVISION_BY_ZERO		! 0x228
	tl1_reserved	7				! 0x229-0x22f
tl1_data_excptn:
	tl1_data_excptn					! 0x230
	tl1_reserved	1				! 0x231
tl1_data_error:
	tl1_gen		T_DATA_ERROR			! 0x232
	tl1_reserved	1				! 0x233
tl1_align:
	tl1_align					! 0x234
tl1_align_lddf:
	tl1_gen		T_RESERVED			! 0x235
tl1_align_stdf:
	tl1_gen		T_RESERVED			! 0x236
tl1_priv_action:
	tl1_gen		T_PRIVILEGED_ACTION		! 0x237
	tl1_reserved	9				! 0x238-0x240
tl1_intr_level:
	tl1_intr_level					! 0x241-0x24f
	tl1_reserved	16				! 0x250-0x25f
tl1_intr_vector:
	tl1_intr_vector					! 0x260
tl1_watch_phys:
	tl1_gen		T_PA_WATCHPOINT			! 0x261
tl1_watch_virt:
	tl1_gen		T_VA_WATCHPOINT			! 0x262
tl1_ecc:
	tl1_gen		T_CORRECTED_ECC_ERROR		! 0x263
tl1_immu_miss:
	tl1_immu_miss					! 0x264
tl1_dmmu_miss:
	tl1_dmmu_miss					! 0x268
tl1_dmmu_prot:
	tl1_dmmu_prot					! 0x26c
	tl1_reserved	16				! 0x270-0x27f
tl1_spill_0_n:
	tl1_spill_0_n					! 0x280
	tl1_spill_bad	1				! 0x284
tl1_spill_2_n:
	tl1_spill_2_n					! 0x288
tl1_spill_3_n:
	tl1_spill_3_n					! 0x28c
	tl1_spill_bad	4				! 0x290-0x29f
tl1_spill_0_o:
	tl1_spill_0_o					! 0x2a0
tl1_spill_1_o:
	tl1_spill_1_o					! 0x2a4
tl1_spill_2_o:
	tl1_spill_2_o					! 0x2a8
	tl1_spill_bad	5				! 0x2ac-0x2bf
tl1_fill_0_n:
	tl1_fill_0_n					! 0x2c0
	tl1_fill_bad	1				! 0x2c4
tl1_fill_2_n:
	tl1_fill_2_n					! 0x2c8
tl1_fill_3_n:
	tl1_fill_3_n					! 0x2cc
	tl1_fill_bad	12				! 0x2d0-0x2ff
	tl1_reserved	1				! 0x300
tl1_breakpoint:
	tl1_gen		T_BREAKPOINT			! 0x301
	tl1_gen		T_RSTRWP_PHYS			! 0x302
	tl1_gen		T_RSTRWP_VIRT			! 0x303
	tl1_reserved	252				! 0x304-0x3ff

/*
 * User trap entry point.
 *
 * void tl0_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
 *		 u_int sfsr)
 *
 * The following setup has been performed:
 *	- the windows have been split and the active user window has been saved
 *	  (maybe just to the pcb)
 *	- we are on alternate globals and interrupts are disabled
 *
 * We switch to the kernel stack, build a trapframe, switch to normal
 * globals, enable interrupts and call trap.
 *
 * NOTE: We must be very careful setting up the per-cpu pointer.  We know that
 * it has been pre-set in alternate globals, so we read it from there and set
 * up the normal %g7 *before* enabling interrupts.  This avoids any
 * possibility of cpu migration and using the wrong pcpup.
 */
ENTRY(tl0_trap)
	/*
	 * Force kernel store order.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	rdpr	%tstate, %l0
	rdpr	%tpc, %l1
	rdpr	%tnpc, %l2
	rd	%y, %l3
	rd	%fprs, %l4
	rdpr	%wstate, %l5

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP,
	    "tl0_trap: td=%p type=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	stx	%o0, [%g1 + KTR_PARM2]
	rdpr	%pil, %g2
	stx	%g2, [%g1 + KTR_PARM3]
	stx	%l1, [%g1 + KTR_PARM4]
	stx	%l2, [%g1 + KTR_PARM5]
	stx	%i6, [%g1 + KTR_PARM6]
9:
#endif

	and	%l5, WSTATE_NORMAL_MASK, %l5

	cmp	%o0, UT_MAX
	bge,a,pt %xcc, 2f
	 nop

	ldx	[PCPU(CURTHREAD)], %l6
	ldx	[%l6 + TD_PROC], %l6
	ldx	[%l6 + P_MD + MD_UTRAP], %l6
	brz,pt	%l6, 2f
	 sllx	%o0, PTR_SHIFT, %l7
	ldx	[%l6 + %l7], %l6
	brz,pt	%l6, 2f
	 andn	%l0, TSTATE_CWP_MASK, %l7

	ldx	[PCB_REG + PCB_NSAVED], %g1
	brnz,a,pn %g1, 1f
	 mov	T_SPILL, %o0

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl0_trap: user trap npc=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	stx	%l6, [%g1 + KTR_PARM1]
9:
#endif

	wrpr	%l5, %wstate
	wrpr	%l6, %tnpc
	rdpr	%cwp, %l6
	wrpr	%l6, %l7, %tstate

	sub	%fp, CCFSZ, %sp

	/*
	 * Need to store %fsr to pass it to the user trap handler.  Otherwise,
	 * the ftt field might be zeroed out by a store in another trap or
	 * interrupt.  Use the temporary stack for that.
	 */
	rd	%fprs, %l3
	or	%l3, FPRS_FEF, %l4
	wr	%l4, 0, %fprs
	dec	8, ASP_REG
	stx	%fsr, [ASP_REG]
	ldx	[ASP_REG], %l4
	inc	8, ASP_REG
	wr	%l3, 0, %fprs

	mov	%l0, %l5
	mov	%l1, %l6
	mov	%l2, %l7

	done

1:
#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl0_trap: defer user trap npc=%#lx nsaved=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	stx	%l6, [%g1 + KTR_PARM1]
	ldx	[PCB_REG + PCB_NSAVED], %g2
	stx	%g2, [%g1 + KTR_PARM2]
9:
#endif

2:	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
	wrpr	%l5, WSTATE_KERNEL, %wstate
	rdpr	%canrestore, %l6
	wrpr	%l6, 0, %otherwin
	wrpr	%g0, 0, %canrestore

	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp

	stx	%o3, [%sp + SPOFF + CCFSZ + TF_TAR]
	stx	%o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
	stw	%o5, [%sp + SPOFF + CCFSZ + TF_SFSR]

	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
	stw	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
	stb	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
	stb	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]

	wr	%g0, FPRS_FEF, %fprs
	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
	wr	%g0, 0, %fprs

	mov	PCB_REG, %l0
	mov	PCPU_REG, %l1
	wrpr	%g0, PSTATE_NORMAL, %pstate

	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]

	mov	%l0, PCB_REG
	mov	%l1, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate

	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]

.Ltl0_trap_reenter:
	stw	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
	call	trap
	 add	%sp, CCFSZ + SPOFF, %o0
	b,a	%xcc, tl0_ret
	 nop
END(tl0_trap)

/*
 * void tl0_syscall(u_int type)
 */
ENTRY(tl0_syscall)
	/*
	 * Force kernel store order.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	rdpr	%tstate, %l0
	rdpr	%tpc, %l1
	rdpr	%tnpc, %l2
	rd	%y, %l3
	rd	%fprs, %l4
	rdpr	%wstate, %l5

#if KTR_COMPILE & KTR_SYSC
	CATR(KTR_SYSC,
	    "tl0_syscall: td=%p type=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	stx	%o0, [%g1 + KTR_PARM2]
	rdpr	%pil, %g2
	stx	%g2, [%g1 + KTR_PARM3]
	stx	%l1, [%g1 + KTR_PARM4]
	stx	%l2, [%g1 + KTR_PARM5]
	stx	%i6, [%g1 + KTR_PARM6]
9:
#endif

	and	%l5, WSTATE_NORMAL_MASK, %l5
	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
	wrpr	%l5, WSTATE_KERNEL, %wstate
	rdpr	%canrestore, %l6
	wrpr	%l6, 0, %otherwin
	wrpr	%g0, 0, %canrestore

	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp

	stw	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]

	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
	stw	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
	stb	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
	stb	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]

	wr	%g0, FPRS_FEF, %fprs
	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
	wr	%g0, 0, %fprs

	mov	PCB_REG, %l0
	mov	PCPU_REG, %l1
	wrpr	%g0, PSTATE_NORMAL, %pstate

	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]

	mov	%l0, PCB_REG
	mov	%l1, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate

	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]

	call	syscall
	 add	%sp, CCFSZ + SPOFF, %o0
	b,a	%xcc, tl0_ret
	 nop
END(tl0_syscall)
2155
2156/*
2157 * void tl0_intr(u_int level, u_int mask)
2158 */
2159ENTRY(tl0_intr)
2160	/*
2161	 * Force kernel store order.
2162	 */
2163	wrpr	%g0, PSTATE_ALT, %pstate
2164
2165	rdpr	%tstate, %l0
2166	rdpr	%tpc, %l1
2167	rdpr	%tnpc, %l2
2168	rd	%y, %l3
2169	rd	%fprs, %l4
2170	rdpr	%wstate, %l5
2171
2172#if KTR_COMPILE & KTR_INTR
2173	CATR(KTR_INTR,
2174	    "tl0_intr: td=%p level=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
2175	    , %g1, %g2, %g3, 7, 8, 9)
2176	ldx	[PCPU(CURTHREAD)], %g2
2177	stx	%g2, [%g1 + KTR_PARM1]
2178	stx	%o0, [%g1 + KTR_PARM2]
2179	rdpr	%pil, %g2
2180	stx	%g2, [%g1 + KTR_PARM3]
2181	stx	%l1, [%g1 + KTR_PARM4]
2182	stx	%l2, [%g1 + KTR_PARM5]
2183	stx	%i6, [%g1 + KTR_PARM6]
21849:
2185#endif
2186
2187	wrpr	%o0, 0, %pil
2188	wr	%o1, 0, %asr21
2189
2190	and	%l5, WSTATE_NORMAL_MASK, %l5
2191	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
2192	wrpr	%l5, WSTATE_KERNEL, %wstate
2193	rdpr	%canrestore, %l6
2194	wrpr	%l6, 0, %otherwin
2195	wrpr	%g0, 0, %canrestore
2196
2197	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp
2198
2199	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
2200	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
2201	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2202	stw	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
2203	stb	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
2204	stb	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]
2205
2206	wr	%g0, FPRS_FEF, %fprs
2207	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
2208	wr	%g0, 0, %fprs
2209
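	/*
	 * Stash the level where it will survive the calls below, and record
	 * the level and the trap type in the trapframe.
	 */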
2210	mov	%o0, %l3
2211	mov	T_INTERRUPT, %o1
2212
2213	stw	%o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
2214	stw	%o1, [%sp + SPOFF + CCFSZ + TF_TYPE]
2215
2216	mov	PCB_REG, %l0
2217	mov	PCPU_REG, %l1
2218	wrpr	%g0, PSTATE_NORMAL, %pstate
2219
2220	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
2221	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
2222	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
2223	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
2224	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
2225	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
2226	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]
2227
2228	mov	%l0, PCB_REG
2229	mov	%l1, PCPU_REG
2230	wrpr	%g0, PSTATE_KERNEL, %pstate
2231
2232	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
2233	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
2234	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
2235	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
2236	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
2237	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
2238	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
2239	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
2240
2241	call	critical_enter
2242	 nop
2243
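	/*
	 * Count the interrupt and dispatch to the handler registered for
	 * this level in intr_handlers[], all within a critical section.
	 */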
2244	SET(cnt+V_INTR, %l1, %l0)
2245	ATOMIC_INC_INT(%l0, %l1, %l2)
2246
2247	SET(intr_handlers, %l1, %l0)
2248	sllx	%l3, IH_SHIFT, %l1
2249	ldx	[%l0 + %l1], %l1
2250	KASSERT(%l1, "tl0_intr: ih null")
2251	call	%l1
2252	 add	%sp, CCFSZ + SPOFF, %o0
2253
2254	call	critical_exit
2255	 nop
2256
2257	b,a	%xcc, tl0_ret
2258	 nop
2259END(tl0_intr)
2260
2261ENTRY(tl0_ret)
2262#if KTR_COMPILE & KTR_TRAP
2263	CATR(KTR_TRAP, "tl0_ret: check ast td=%p (%s) pil=%#lx sflag=%#x"
2264	    , %g1, %g2, %g3, 7, 8, 9)
2265	ldx	[PCPU(CURTHREAD)], %g2
2266	stx	%g2, [%g1 + KTR_PARM1]
2267	ldx	[%g2 + TD_PROC], %g2
2268	add	%g2, P_COMM, %g3
2269	stx	%g3, [%g1 + KTR_PARM2]
2270	rdpr	%pil, %g3
2271	stx	%g3, [%g1 + KTR_PARM3]
2272	lduw	[%g2 + P_SFLAG], %g3
2273	stx	%g3, [%g1 + KTR_PARM4]
22749:
2275#endif
2276
2277	/*
2278	 * Check for pending asts atomically with returning.  We must raise
2279	 * the pil before checking, and if no asts are found the pil must
2280	 * remain raised until the retry is executed, or we risk missing asts
2281	 * caused by interrupts occuring after the test.  If the pil is lowered,
2282	 * caused by interrupts occurring after the test.  If the pil is lowered,
2283	 */
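	/*
	 * Illustrative C sketch of the loop below (not kernel source;
	 * set_pil() stands in for the wrpr to %pil):
	 *
	 *	for (;;) {
	 *		set_pil(PIL_TICK);
	 *		if ((curthread->td_kse->ke_flags &
	 *		    (KEF_ASTPENDING | KEF_NEEDRESCHED)) == 0)
	 *			break;
	 *		set_pil(0);
	 *		ast(tf);
	 *	}
	 */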
22841:	wrpr	%g0, PIL_TICK, %pil
2285	ldx	[PCPU(CURTHREAD)], %l0
2286	ldx	[%l0 + TD_KSE], %l1
2287	lduw	[%l1 + KE_FLAGS], %l2
2288	and	%l2, KEF_ASTPENDING | KEF_NEEDRESCHED, %l2
2289	brz,a,pt %l2, 2f
2290	 nop
2291	wrpr	%g0, 0, %pil
2292	call	ast
2293	 add	%sp, CCFSZ + SPOFF, %o0
2294	ba,a	%xcc, 1b
2295	 nop
2296
2297	/*
2298	 * Check for windows that were spilled to the pcb and need to be
2299	 * copied out.  This must be the last thing that is done before the
2300	 * return to usermode.  If there are still user windows in the cpu
2301	 * and we call a nested function after this, which causes them to be
2302	 * spilled to the pcb, they will not be copied out and the stack will
2303	 * be inconsistent.
2304	 */
23052:	ldx	[PCB_REG + PCB_NSAVED], %l1
2306	mov	T_SPILL, %o0
2307	brnz,a,pn %l1, .Ltl0_trap_reenter
2308	 wrpr	%g0, 0, %pil
2309
2310	ldx	[%sp + SPOFF + CCFSZ + TF_O0], %i0
2311	ldx	[%sp + SPOFF + CCFSZ + TF_O1], %i1
2312	ldx	[%sp + SPOFF + CCFSZ + TF_O2], %i2
2313	ldx	[%sp + SPOFF + CCFSZ + TF_O3], %i3
2314	ldx	[%sp + SPOFF + CCFSZ + TF_O4], %i4
2315	ldx	[%sp + SPOFF + CCFSZ + TF_O5], %i5
2316	ldx	[%sp + SPOFF + CCFSZ + TF_O6], %i6
2317	ldx	[%sp + SPOFF + CCFSZ + TF_O7], %i7
2318
2319	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l0
2320	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l1
2321	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
2322	lduw	[%sp + SPOFF + CCFSZ + TF_Y], %l3
2323	ldub	[%sp + SPOFF + CCFSZ + TF_FPRS], %l4
2324	ldub	[%sp + SPOFF + CCFSZ + TF_WSTATE], %l5
2325
2326	wrpr	%g0, PSTATE_NORMAL, %pstate
2327
2328	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
2329	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
2330	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
2331	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
2332	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
2333	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6
2334	ldx	[%sp + SPOFF + CCFSZ + TF_G7], %g7
2335
2336	wrpr	%g0, PSTATE_ALT, %pstate
2337
2338	wrpr	%g0, 0, %pil
2339	wrpr	%l1, 0, %tpc
2340	wrpr	%l2, 0, %tnpc
2341	wr	%l3, 0, %y
2342
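	/*
	 * State that must survive the restore is moved to globals: %tstate
	 * less the CWP field, the saved %fprs and the user wstate.  Set the
	 * transition wstate so that a fill trap taken by the restore below
	 * is recognized as part of the return to usermode, and hand the
	 * user's windows back by converting %otherwin into %canrestore and
	 * %cleanwin.
	 */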
2343	andn	%l0, TSTATE_CWP_MASK, %g1
2344	mov	%l4, %g2
2345
2346	srlx	%l5, WSTATE_OTHER_SHIFT, %g3
2347	wrpr	%g3, WSTATE_TRANSITION, %wstate
2348	rdpr	%otherwin, %o0
2349	wrpr	%o0, 0, %canrestore
2350	wrpr	%g0, 0, %otherwin
2351	wrpr	%o0, 0, %cleanwin
2352
2353	/*
2354	 * If this instruction causes a fill trap which fails to fill a window
2355	 * from the user stack, we will resume at tl0_ret_fill_end and call
2356	 * back into the kernel.
2357	 */
2358	restore
2359tl0_ret_fill:
2360
2361	rdpr	%cwp, %g4
2362	wrpr	%g1, %g4, %tstate
2363	wr	%g2, 0, %fprs
2364	wrpr	%g3, 0, %wstate
2365
2366#if KTR_COMPILE & KTR_TRAP
2367	CATR(KTR_TRAP, "tl0_ret: td=%#lx pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
2368	    , %g2, %g3, %g4, 7, 8, 9)
2369	ldx	[PCPU(CURTHREAD)], %g3
2370	stx	%g3, [%g2 + KTR_PARM1]
2371	rdpr	%pil, %g3
2372	stx	%g3, [%g2 + KTR_PARM2]
2373	rdpr	%tpc, %g3
2374	stx	%g3, [%g2 + KTR_PARM3]
2375	rdpr	%tnpc, %g3
2376	stx	%g3, [%g2 + KTR_PARM4]
2377	stx	%sp, [%g2 + KTR_PARM5]
23789:
2379#endif
2380
2381	retry
2382tl0_ret_fill_end:
2383
2384#if KTR_COMPILE & KTR_TRAP
2385	CATR(KTR_TRAP, "tl0_ret: fill magic ps=%#lx ws=%#lx sp=%#lx"
2386	    , %l0, %l1, %l2, 7, 8, 9)
2387	rdpr	%pstate, %l1
2388	stx	%l1, [%l0 + KTR_PARM1]
2389	stx	%l5, [%l0 + KTR_PARM2]
2390	stx	%sp, [%l0 + KTR_PARM3]
23919:
2392#endif
2393
2394	/*
2395	 * The fill failed and magic has been performed.  Call trap again,
2396	 * which will copyin the window on the user's behalf.
2397	 */
2398	wrpr	%l5, 0, %wstate
2399	wrpr	%g0, PSTATE_ALT, %pstate
2400	mov	PCB_REG, %o0
2401	mov	PCPU_REG, %o1
2402	wrpr	%g0, PSTATE_NORMAL, %pstate
2403	mov	%o0, PCB_REG
2404	mov	%o1, PCPU_REG
2405	wrpr	%g0, PSTATE_KERNEL, %pstate
2406	b	%xcc, .Ltl0_trap_reenter
2407	 mov	T_FILL_RET, %o0
2408END(tl0_ret)
2409
2410/*
2411 * Kernel trap entry point
2412 *
2413 * void tl1_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
2414 *		 u_int sfsr)
2415 *
2416 * This is easy because the stack is already set up and the windows don't need
2417 * to be split.  We build a trapframe and call trap(), the same as above, but
2418 * the outs don't need to be saved.
2419 */
2420ENTRY(tl1_trap)
2421	sub	%sp, TF_SIZEOF, %sp
2422
2423	rdpr	%tstate, %l0
2424	rdpr	%tpc, %l1
2425	rdpr	%tnpc, %l2
2426	rdpr	%pil, %l3
2427	rd	%y, %l4
2428	rdpr	%wstate, %l5
2429
2430#if KTR_COMPILE & KTR_TRAP
2431	CATR(KTR_TRAP, "tl1_trap: td=%p type=%#lx pil=%#lx pc=%#lx sp=%#lx"
2432	    , %g1, %g2, %g3, 7, 8, 9)
2433	ldx	[PCPU(CURTHREAD)], %g2
2434	stx	%g2, [%g1 + KTR_PARM1]
2435	andn	%o0, T_KERNEL, %g2
2436	stx	%g2, [%g1 + KTR_PARM2]
2437	stx	%l3, [%g1 + KTR_PARM3]
2438	stx	%l1, [%g1 + KTR_PARM4]
2439	stx	%i6, [%g1 + KTR_PARM5]
24409:
2441#endif
2442
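	/*
	 * The trap bumped %tl; now that the trap state has been read out,
	 * drop back down a level so that further traps taken while handling
	 * this one get their own set of trap registers.
	 */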
2443	wrpr	%g0, 1, %tl
2444
2445	and	%l5, WSTATE_OTHER_MASK, %l5
2446	wrpr	%l5, WSTATE_KERNEL, %wstate
2447
2448	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
2449	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
2450	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2451	stb	%l3, [%sp + SPOFF + CCFSZ + TF_PIL]
2452	stw	%l4, [%sp + SPOFF + CCFSZ + TF_Y]
2453
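	/*
	 * Record the trap type and the fault address/status arguments
	 * (tar, sfar, sfsr) handed in by the low-level trap vector.
	 */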
2454	stw	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2455	stx	%o3, [%sp + SPOFF + CCFSZ + TF_TAR]
2456	stx	%o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
2457	stw	%o5, [%sp + SPOFF + CCFSZ + TF_SFSR]
2458
2459	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
2460	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
2461
2462	mov	PCB_REG, %l4
2463	mov	PCPU_REG, %l5
2464	wrpr	%g0, PSTATE_NORMAL, %pstate
2465
2466	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
2467	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
2468	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
2469	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
2470	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
2471
2472	mov	%l4, PCB_REG
2473	mov	%l5, PCPU_REG
2474	wrpr	%g0, PSTATE_KERNEL, %pstate
2475
2476	call	trap
2477	 add	%sp, CCFSZ + SPOFF, %o0
2478
2479	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l0
2480	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l1
2481	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
2482	ldub	[%sp + SPOFF + CCFSZ + TF_PIL], %l3
2483	lduw	[%sp + SPOFF + CCFSZ + TF_Y], %l4
2484
2485	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
2486	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
2487	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
2488	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
2489	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
2490
2491	wrpr	%g0, PSTATE_ALT, %pstate
2492
2493	andn	%l0, TSTATE_CWP_MASK, %g1
2494	mov	%l1, %g2
2495	mov	%l2, %g3
2496
2497	wrpr	%l3, 0, %pil
2498	wr	%l4, 0, %y
2499
2500	restore
2501
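	/*
	 * Go back up a trap level and rebuild the trap state from the
	 * (possibly modified) trapframe; retry then resumes the interrupted
	 * kernel code.
	 */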
2502	wrpr	%g0, 2, %tl
2503
2504	rdpr	%cwp, %g4
2505	wrpr	%g1, %g4, %tstate
2506	wrpr	%g2, 0, %tpc
2507	wrpr	%g3, 0, %tnpc
2508
2509#if KTR_COMPILE & KTR_TRAP
2510	CATR(KTR_TRAP, "tl1_trap: td=%#lx pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
2511	    , %g2, %g3, %g4, 7, 8, 9)
2512	ldx	[PCPU(CURTHREAD)], %g3
2513	stx	%g3, [%g2 + KTR_PARM1]
2514	rdpr	%pil, %g3
2515	stx	%g3, [%g2 + KTR_PARM2]
2516	rdpr	%tstate, %g3
2517	stx	%g3, [%g2 + KTR_PARM3]
2518	rdpr	%tpc, %g3
2519	stx	%g3, [%g2 + KTR_PARM4]
2520	stx	%sp, [%g2 + KTR_PARM5]
25219:
2522#endif
2523
2524	retry
2525END(tl1_trap)
2526
2527/*
2528 * void tl1_intr(u_int level, u_int mask)
2529 */
2530ENTRY(tl1_intr)
2531	sub	%sp, TF_SIZEOF, %sp
2532
2533	rdpr	%tstate, %l0
2534	rdpr	%tpc, %l1
2535	rdpr	%tnpc, %l2
2536	rdpr	%pil, %l3
2537	rd	%y, %l4
2538	rdpr	%wstate, %l5
2539
2540#if KTR_COMPILE & KTR_INTR
2541	CATR(KTR_INTR,
2542	    "tl1_intr: td=%p level=%#lx pil=%#lx pc=%#lx sp=%#lx"
2543	    , %g1, %g2, %g3, 7, 8, 9)
2544	ldx	[PCPU(CURTHREAD)], %g2
2545	stx	%g2, [%g1 + KTR_PARM1]
2546	stx	%o0, [%g1 + KTR_PARM2]
2547	stx	%l3, [%g1 + KTR_PARM3]
2548	stx	%l1, [%g1 + KTR_PARM4]
2549	stx	%i6, [%g1 + KTR_PARM5]
25509:
2551#endif
2552
2553	wrpr	%o0, 0, %pil
2554	wr	%o1, 0, %asr21
2555
2556	wrpr	%g0, 1, %tl
2557
2558	and	%l5, WSTATE_OTHER_MASK, %l5
2559	wrpr	%l5, WSTATE_KERNEL, %wstate
2560
2561	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
2562	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
2563	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2564	stb	%l3, [%sp + SPOFF + CCFSZ + TF_PIL]
2565	stw	%l4, [%sp + SPOFF + CCFSZ + TF_Y]
2566
2567	mov	%o0, %l7
2568	mov	T_INTERRUPT | T_KERNEL, %o1
2569
2570	stw	%o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
2571	stw	%o1, [%sp + SPOFF + CCFSZ + TF_TYPE]
2572
2573	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
2574	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
2575
2576	mov	PCB_REG, %l4
2577	mov	PCPU_REG, %l5
2578	wrpr	%g0, PSTATE_NORMAL, %pstate
2579
2580	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
2581	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
2582	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
2583	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
2584	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
2585
2586	mov	%l4, PCB_REG
2587	mov	%l5, PCPU_REG
2588	wrpr	%g0, PSTATE_KERNEL, %pstate
2589
2590	call	critical_enter
2591	 nop
2592
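	/*
	 * As in tl0_intr: count the interrupt and dispatch to the
	 * registered handler within a critical section.
	 */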
2593	SET(cnt+V_INTR, %l5, %l4)
2594	ATOMIC_INC_INT(%l4, %l5, %l6)
2595
2596	SET(intr_handlers, %l5, %l4)
2597	sllx	%l7, IH_SHIFT, %l5
2598	ldx	[%l4 + %l5], %l5
2599	KASSERT(%l5, "tl1_intr: ih null")
2600	call	%l5
2601	 add	%sp, CCFSZ + SPOFF, %o0
2602
2603	call	critical_exit
2604	 nop
2605
2606	lduw	[%sp + SPOFF + CCFSZ + TF_Y], %l4
2607
2608	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
2609	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
2610	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
2611	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
2612	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
2613
2614	wrpr	%g0, PSTATE_ALT, %pstate
2615
2616	andn	%l0, TSTATE_CWP_MASK, %g1
2617	mov	%l1, %g2
2618	mov	%l2, %g3
2619	wrpr	%l3, 0, %pil
2620	wr	%l4, 0, %y
2621
2622	restore
2623
2624	wrpr	%g0, 2, %tl
2625
2626	rdpr	%cwp, %g4
2627	wrpr	%g1, %g4, %tstate
2628	wrpr	%g2, 0, %tpc
2629	wrpr	%g3, 0, %tnpc
2630
2631#if KTR_COMPILE & KTR_INTR
2632	CATR(KTR_INTR, "tl1_intr: td=%#lx pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
2633	    , %g2, %g3, %g4, 7, 8, 9)
2634	ldx	[PCPU(CURTHREAD)], %g3
2635	stx	%g3, [%g2 + KTR_PARM1]
2636	rdpr	%pil, %g3
2637	stx	%g3, [%g2 + KTR_PARM2]
2638	rdpr	%tstate, %g3
2639	stx	%g3, [%g2 + KTR_PARM3]
2640	rdpr	%tpc, %g3
2641	stx	%g3, [%g2 + KTR_PARM4]
2642	stx	%sp, [%g2 + KTR_PARM5]
26439:
2644#endif
2645
2646	retry
2647END(tl1_intr)
2648
2649/*
2650 * Freshly forked processes come here when switched to for the first time.
2651 * The arguments to fork_exit() have been set up in the locals; we must
2652 * move them to the outs.
2653 */
2654ENTRY(fork_trampoline)
2655#if KTR_COMPILE & KTR_PROC
2656	CATR(KTR_PROC, "fork_trampoline: td=%p (%s) cwp=%#lx"
2657	    , %g1, %g2, %g3, 7, 8, 9)
2658	ldx	[PCPU(CURTHREAD)], %g2
2659	stx	%g2, [%g1 + KTR_PARM1]
2660	ldx	[%g2 + TD_PROC], %g2
2661	add	%g2, P_COMM, %g2
2662	stx	%g2, [%g1 + KTR_PARM2]
2663	rdpr	%cwp, %g2
2664	stx	%g2, [%g1 + KTR_PARM3]
26659:
2666#endif
2667	mov	%l0, %o0
2668	mov	%l1, %o1
2669	call	fork_exit
2670	 mov	%l2, %o2
2671	b,a	%xcc, tl0_ret
2672	 nop
2673END(fork_trampoline)
2674