exception.S revision 103897
180709Sjake/*-
281180Sjake * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
381180Sjake *
481180Sjake * Redistribution and use in source and binary forms, with or without
581180Sjake * modification, are permitted provided that the following conditions
681180Sjake * are met:
781180Sjake * 1. Redistributions of source code must retain the above copyright
881180Sjake *    notice, this list of conditions and the following disclaimer.
981180Sjake * 2. Redistributions in binary form must reproduce the above copyright
1081180Sjake *    notice, this list of conditions and the following disclaimer in the
1181180Sjake *    documentation and/or other materials provided with the distribution.
1281180Sjake * 3. Berkeley Software Design Inc's name may not be used to endorse or
1381180Sjake *    promote products derived from this software without specific prior
1481180Sjake *    written permission.
1581180Sjake *
1681180Sjake * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
1781180Sjake * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1881180Sjake * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1981180Sjake * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
2081180Sjake * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
2181180Sjake * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2281180Sjake * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2381180Sjake * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2481180Sjake * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2581180Sjake * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2681180Sjake * SUCH DAMAGE.
2781180Sjake *
2881180Sjake *	from BSDI: locore.s,v 1.36.2.15 1999/08/23 22:34:41 cp Exp
2981180Sjake */
3081180Sjake/*-
3180709Sjake * Copyright (c) 2001 Jake Burkholder.
3280709Sjake * All rights reserved.
3380709Sjake *
3480709Sjake * Redistribution and use in source and binary forms, with or without
3580709Sjake * modification, are permitted provided that the following conditions
3680709Sjake * are met:
3780709Sjake * 1. Redistributions of source code must retain the above copyright
3880709Sjake *    notice, this list of conditions and the following disclaimer.
3980709Sjake * 2. Redistributions in binary form must reproduce the above copyright
4080709Sjake *    notice, this list of conditions and the following disclaimer in the
4180709Sjake *    documentation and/or other materials provided with the distribution.
4280709Sjake *
4381337Sobrien * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
4480709Sjake * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
4580709Sjake * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
4681337Sobrien * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
4780709Sjake * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
4880709Sjake * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
4980709Sjake * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
5080709Sjake * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
5180709Sjake * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
5280709Sjake * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
5380709Sjake * SUCH DAMAGE.
5480709Sjake *
5580709Sjake * $FreeBSD: head/sys/sparc64/sparc64/exception.S 103897 2002-09-24 16:42:18Z jake $
5680709Sjake */
5780709Sjake
5880709Sjake#include "opt_ddb.h"
5980709Sjake
6080709Sjake#include <machine/asi.h>
6180709Sjake#include <machine/asmacros.h>
6282906Sjake#include <machine/ktr.h>
6382906Sjake#include <machine/pstate.h>
6480709Sjake#include <machine/trap.h>
6582906Sjake#include <machine/tstate.h>
6682906Sjake#include <machine/wstate.h>
6780709Sjake
6880709Sjake#include "assym.s"
6980709Sjake
70101653Sjake#define	TSB_KERNEL_MASK	0x0
71101653Sjake#define	TSB_KERNEL	0x0
72101653Sjake
7388644Sjake	.register %g2,#ignore
7488644Sjake	.register %g3,#ignore
7588644Sjake	.register %g6,#ignore
7688644Sjake	.register %g7,#ignore
7788644Sjake
7882005Sjake/*
7988644Sjake * Atomically set the reference bit in a tte.
8088644Sjake */
/*
 * TTE_SET_BIT(r1, r2, r3, bit): atomically OR 'bit' into a tte's data word.
 * On entry r1 points at the tte; on exit r1 points at the tte data word,
 * r2 holds the data value observed by the successful casxa, and r3 is
 * scratch.  The compare-and-swap loop retries until it wins, so concurrent
 * updates of the same tte are not lost.
 */
8188644Sjake#define	TTE_SET_BIT(r1, r2, r3, bit) \
8288644Sjake	add	r1, TTE_DATA, r1 ; \
8388644Sjake	ldx	[r1], r2 ; \
8488644Sjake9:	or	r2, bit, r3 ; \
8588644Sjake	casxa	[r1] ASI_N, r2, r3 ; \
8688644Sjake	cmp	r2, r3 ; \
8788644Sjake	bne,pn	%xcc, 9b ; \
8888644Sjake	 mov	r3, r2
8988644Sjake
/* Convenience wrappers: atomically set the referenced and writable bits. */
9088644Sjake#define	TTE_SET_REF(r1, r2, r3)		TTE_SET_BIT(r1, r2, r3, TD_REF)
9188644Sjake#define	TTE_SET_W(r1, r2, r3)		TTE_SET_BIT(r1, r2, r3, TD_W)
9288644Sjake
9388644Sjake/*
9482906Sjake * Macros for spilling and filling live windows.
9582906Sjake *
9682906Sjake * NOTE: These macros use exactly 16 instructions, and it is assumed that the
9782906Sjake * handler will not use more than 24 instructions total, to leave room for
9882906Sjake * resume vectors which occupy the last 8 instructions.
9982005Sjake */
10080709Sjake
/*
 * Store the 16 window registers (%l0-%l7, %i0-%i7) at consecutive
 * 'size'-byte slots starting at 'base', using the given store
 * instruction and address space qualifier.
 */
10182906Sjake#define	SPILL(storer, base, size, asi) \
10282906Sjake	storer	%l0, [base + (0 * size)] asi ; \
10382906Sjake	storer	%l1, [base + (1 * size)] asi ; \
10482906Sjake	storer	%l2, [base + (2 * size)] asi ; \
10582906Sjake	storer	%l3, [base + (3 * size)] asi ; \
10682906Sjake	storer	%l4, [base + (4 * size)] asi ; \
10782906Sjake	storer	%l5, [base + (5 * size)] asi ; \
10882906Sjake	storer	%l6, [base + (6 * size)] asi ; \
10982906Sjake	storer	%l7, [base + (7 * size)] asi ; \
11082906Sjake	storer	%i0, [base + (8 * size)] asi ; \
11182906Sjake	storer	%i1, [base + (9 * size)] asi ; \
11282906Sjake	storer	%i2, [base + (10 * size)] asi ; \
11382906Sjake	storer	%i3, [base + (11 * size)] asi ; \
11482906Sjake	storer	%i4, [base + (12 * size)] asi ; \
11582906Sjake	storer	%i5, [base + (13 * size)] asi ; \
11682906Sjake	storer	%i6, [base + (14 * size)] asi ; \
11782906Sjake	storer	%i7, [base + (15 * size)] asi
11880709Sjake
/*
 * Load the 16 window registers (%l0-%l7, %i0-%i7) from consecutive
 * 'size'-byte slots starting at 'base'; the inverse of SPILL().
 */
11982906Sjake#define	FILL(loader, base, size, asi) \
12082906Sjake	loader	[base + (0 * size)] asi, %l0 ; \
12182906Sjake	loader	[base + (1 * size)] asi, %l1 ; \
12282906Sjake	loader	[base + (2 * size)] asi, %l2 ; \
12382906Sjake	loader	[base + (3 * size)] asi, %l3 ; \
12482906Sjake	loader	[base + (4 * size)] asi, %l4 ; \
12582906Sjake	loader	[base + (5 * size)] asi, %l5 ; \
12682906Sjake	loader	[base + (6 * size)] asi, %l6 ; \
12782906Sjake	loader	[base + (7 * size)] asi, %l7 ; \
12882906Sjake	loader	[base + (8 * size)] asi, %i0 ; \
12982906Sjake	loader	[base + (9 * size)] asi, %i1 ; \
13082906Sjake	loader	[base + (10 * size)] asi, %i2 ; \
13182906Sjake	loader	[base + (11 * size)] asi, %i3 ; \
13282906Sjake	loader	[base + (12 * size)] asi, %i4 ; \
13382906Sjake	loader	[base + (13 * size)] asi, %i5 ; \
13482906Sjake	loader	[base + (14 * size)] asi, %i6 ; \
13582906Sjake	loader	[base + (15 * size)] asi, %i7
13682005Sjake
/*
 * No-op register move; presumably a workaround for UltraSPARC erratum 50
 * (the name suggests so) — applied after rdpr %tpc below.  TODO(review):
 * confirm against the CPU errata document.
 */
13782906Sjake#define	ERRATUM50(reg)	mov reg, reg
13882906Sjake
/* Slack below the kernel stack base that KSTACK_CHECK treats as invalid. */
13988781Sjake#define	KSTACK_SLOP	1024
14088781Sjake
14189048Sjake/*
14289048Sjake * Sanity check the kernel stack and bail out if its wrong.
14389048Sjake * XXX: doesn't handle being on the panic stack.
14489048Sjake */
/*
 * Three checks, in order, each branching to tl1_kstack_fault on failure
 * (restoring ASP_REG in the annulled delay slot): %sp + SPOFF must be
 * aligned to 1 << PTR_SHIFT bytes; it must lie above the current thread's
 * kstack base plus KSTACK_SLOP; and its offset from that point must not
 * exceed KSTACK_PAGES * PAGE_SIZE.  %g1 and %g2 are saved on and restored
 * from the alternate stack (ASP_REG) around the check.
 */
14588781Sjake#define	KSTACK_CHECK \
14688781Sjake	dec	16, ASP_REG ; \
14788781Sjake	stx	%g1, [ASP_REG + 0] ; \
14888781Sjake	stx	%g2, [ASP_REG + 8] ; \
14988781Sjake	add	%sp, SPOFF, %g1 ; \
15088781Sjake	andcc	%g1, (1 << PTR_SHIFT) - 1, %g0 ; \
15188781Sjake	bnz,a	%xcc, tl1_kstack_fault ; \
15288781Sjake	 inc	16, ASP_REG ; \
15388781Sjake	ldx	[PCPU(CURTHREAD)], %g2 ; \
15488781Sjake	ldx	[%g2 + TD_KSTACK], %g2 ; \
15588781Sjake	add	%g2, KSTACK_SLOP, %g2 ; \
15688781Sjake	subcc	%g1, %g2, %g1 ; \
15788781Sjake	ble,a	%xcc, tl1_kstack_fault ; \
15888781Sjake	 inc	16, ASP_REG ; \
15988781Sjake	set	KSTACK_PAGES * PAGE_SIZE, %g2 ; \
16088781Sjake	cmp	%g1, %g2 ; \
16188781Sjake	bgt,a	%xcc, tl1_kstack_fault ; \
16288781Sjake	 inc	16, ASP_REG ; \
16388781Sjake	ldx	[ASP_REG + 8], %g2 ; \
16488781Sjake	ldx	[ASP_REG + 0], %g1 ; \
16588781Sjake	inc	16, ASP_REG
16688781Sjake
/*
 * Handle a bogus kernel stack detected by KSTACK_CHECK: pop trap levels
 * down to 2 (tracing each level if KTR_TRAP is compiled in), reset the
 * window state, switch to the alternate stack and call tl1_trap with
 * T_KSTACK_FAULT.  Does not return.
 */
16788781SjakeENTRY(tl1_kstack_fault)
16888781Sjake	rdpr	%tl, %g1
16997263Sjake1:	cmp	%g1, 2
17097263Sjake	be,a	2f
17188781Sjake	 nop
17288781Sjake
17388781Sjake#if KTR_COMPILE & KTR_TRAP
17488781Sjake	CATR(KTR_TRAP, "tl1_kstack_fault: tl=%#lx tpc=%#lx tnpc=%#lx"
17597263Sjake	    , %g2, %g3, %g4, 7, 8, 9)
17697263Sjake	rdpr	%tl, %g3
17797263Sjake	stx	%g3, [%g2 + KTR_PARM1]
17897263Sjake	rdpr	%tpc, %g3
	/* Was KTR_PARM1: tpc/tnpc overwrote the tl slot and PARM2/3 were lost. */
17997263Sjake	stx	%g3, [%g2 + KTR_PARM2]
18097263Sjake	rdpr	%tnpc, %g3
18197263Sjake	stx	%g3, [%g2 + KTR_PARM3]
18288781Sjake9:
18388781Sjake#endif
18488781Sjake
	/* Drop one trap level and re-test against tl 2. */
18597263Sjake	sub	%g1, 1, %g1
18697263Sjake	wrpr	%g1, 0, %tl
18797263Sjake	ba,a	%xcc, 1b
18897263Sjake	 nop
18997263Sjake
19088781Sjake2:
19188781Sjake#if KTR_COMPILE & KTR_TRAP
19288781Sjake	CATR(KTR_TRAP,
19388781Sjake	    "tl1_kstack_fault: sp=%#lx ks=%#lx cr=%#lx cs=%#lx ow=%#lx ws=%#lx"
19488781Sjake	    , %g1, %g2, %g3, 7, 8, 9)
19588781Sjake	add	%sp, SPOFF, %g2
19688781Sjake	stx	%g2, [%g1 + KTR_PARM1]
19788781Sjake	ldx	[PCPU(CURTHREAD)], %g2
19888781Sjake	ldx	[%g2 + TD_KSTACK], %g2
19988781Sjake	stx	%g2, [%g1 + KTR_PARM2]
20088781Sjake	rdpr	%canrestore, %g2
20188781Sjake	stx	%g2, [%g1 + KTR_PARM3]
20288781Sjake	rdpr	%cansave, %g2
20388781Sjake	stx	%g2, [%g1 + KTR_PARM4]
20488781Sjake	rdpr	%otherwin, %g2
20588781Sjake	stx	%g2, [%g1 + KTR_PARM5]
20688781Sjake	rdpr	%wstate, %g2
20788781Sjake	stx	%g2, [%g1 + KTR_PARM6]
20888781Sjake9:
20988781Sjake#endif
21088781Sjake
	/* Reset the register window state to a known-good kernel state. */
21188781Sjake	wrpr	%g0, 0, %canrestore
21288781Sjake	wrpr	%g0, 6, %cansave
21388781Sjake	wrpr	%g0, 0, %otherwin
21488781Sjake	wrpr	%g0, WSTATE_KERNEL, %wstate
21588781Sjake
	/* Run on the alternate stack; the old %sp is not trustworthy. */
21689048Sjake	sub	ASP_REG, SPOFF + CCFSZ, %sp
21788781Sjake	clr	%fp
21888781Sjake
21988781Sjake	b	%xcc, tl1_trap
22088781Sjake	 mov	T_KSTACK_FAULT | T_KERNEL, %o0
22188781SjakeEND(tl1_kstack_fault)
22288781Sjake
22382906Sjake/*
22482906Sjake * Magic to resume from a spill or fill trap.  If we get an alignment or an
22582906Sjake * mmu fault during a spill or a fill, this macro will detect the fault and
22688644Sjake * resume at a set instruction offset in the trap handler.
22782906Sjake *
22888644Sjake * To check if the previous trap was a spill/fill we convert the trapped pc
22988644Sjake * to a trap type and verify that it is in the range of spill/fill vectors.
23082906Sjake * The spill/fill vectors are types 0x80-0xff and 0x280-0x2ff, masking off the
23182906Sjake * tl bit allows us to detect both ranges with one test.
23282906Sjake *
23382906Sjake * This is:
23488644Sjake *	0x80 <= (((%tpc - %tba) >> 5) & ~0x200) < 0x100
23582906Sjake *
23682906Sjake * To calculate the new pc we take advantage of the xor feature of wrpr.
23782906Sjake * Forcing all the low bits of the trapped pc on we can produce any offset
23882906Sjake * into the spill/fill vector.  The size of a spill/fill trap vector is 0x80.
23982906Sjake *
24082906Sjake *	0x7f ^ 0x1f == 0x60
24182906Sjake *	0x1f == (0x80 - 0x60) - 1
24282906Sjake *
24386519Sjake * Which are the offset and xor value used to resume from alignment faults.
24482906Sjake */
24582906Sjake
24682906Sjake/*
24788644Sjake * Determine if we have trapped inside of a spill/fill vector, and if so resume
24888644Sjake * at a fixed instruction offset in the trap vector.  Must be called on
24988644Sjake * alternate globals.
25082906Sjake */
/*
 * Saves %g1/%g2 on the alternate stack, converts %tpc to a trap-vector
 * index, and if it falls in the spill/fill range [0x80, 0x100) (tl bit
 * masked off) rewrites %tnpc to the fixed resume offset via the wrpr xor
 * trick, optionally clears the sfsr, and returns from the trap with done.
 * Otherwise it restores %g1/%g2 and falls through to the caller's code.
 */
25188644Sjake#define	RESUME_SPILLFILL_MAGIC(stxa_g0_sfsr, xor) \
25288644Sjake	dec	16, ASP_REG ; \
25388644Sjake	stx	%g1, [ASP_REG + 0] ; \
25488644Sjake	stx	%g2, [ASP_REG + 8] ; \
25588644Sjake	rdpr	%tpc, %g1 ; \
25688644Sjake	ERRATUM50(%g1) ; \
25788644Sjake	rdpr	%tba, %g2 ; \
25888644Sjake	sub	%g1, %g2, %g2 ; \
25988644Sjake	srlx	%g2, 5, %g2 ; \
26088644Sjake	andn	%g2, 0x200, %g2 ; \
26188644Sjake	cmp	%g2, 0x80 ; \
26288644Sjake	blu,pt	%xcc, 9f ; \
26388644Sjake	 cmp	%g2, 0x100 ; \
26488644Sjake	bgeu,pt	%xcc, 9f ; \
26588644Sjake	 or	%g1, 0x7f, %g1 ; \
26688644Sjake	wrpr	%g1, xor, %tnpc ; \
26788644Sjake	stxa_g0_sfsr ; \
26888644Sjake	ldx	[ASP_REG + 8], %g2 ; \
26988644Sjake	ldx	[ASP_REG + 0], %g1 ; \
27088644Sjake	inc	16, ASP_REG ; \
27188644Sjake	done ; \
27288644Sjake9:	ldx	[ASP_REG + 8], %g2 ; \
27388644Sjake	ldx	[ASP_REG + 0], %g1 ; \
27488644Sjake	inc	16, ASP_REG
27582906Sjake
27688644Sjake/*
27788644Sjake * For certain faults we need to clear the sfsr mmu register before returning.
27888644Sjake */
27988644Sjake#define	RSF_CLR_SFSR \
28088644Sjake	wr	%g0, ASI_DMMU, %asi ; \
28188644Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
28288644Sjake
/*
 * Xor constant that converts a pc with its low 7 bits forced on into the
 * pc of instruction offset 'off' within the same 0x80-byte trap vector.
 */
28382906Sjake#define	RSF_XOR(off)	((0x80 - off) - 1)
28482906Sjake
28582906Sjake/*
28682906Sjake * Instruction offsets in spill and fill trap handlers for handling certain
28782906Sjake * nested traps, and corresponding xor constants for wrpr.
28882906Sjake */
28986519Sjake#define	RSF_OFF_ALIGN	0x60
29086519Sjake#define	RSF_OFF_MMU	0x70
29182906Sjake
29288644Sjake#define	RESUME_SPILLFILL_ALIGN \
29388644Sjake	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_ALIGN))
29488644Sjake#define	RESUME_SPILLFILL_MMU \
29588644Sjake	RESUME_SPILLFILL_MAGIC(EMPTY, RSF_XOR(RSF_OFF_MMU))
29688644Sjake#define	RESUME_SPILLFILL_MMU_CLR_SFSR \
29788644Sjake	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_MMU))
29882906Sjake
29982906Sjake/*
30082906Sjake * Constant to add to %tnpc when taking a fill trap just before returning to
30188644Sjake * user mode.
30282906Sjake */
30382906Sjake#define	RSF_FILL_INC	tl0_ret_fill_end - tl0_ret_fill
30482906Sjake
30582906Sjake/*
30682906Sjake * Retry a spill or fill with a different wstate due to an alignment fault.
30782906Sjake * We may just be using the wrong stack offset.
30882906Sjake */
30982906Sjake#define	RSF_ALIGN_RETRY(ws) \
31082906Sjake	wrpr	%g0, (ws), %wstate ; \
31182906Sjake	retry ; \
31282906Sjake	.align	16
31382906Sjake
31482906Sjake/*
31582906Sjake * Generate a T_SPILL or T_FILL trap if the window operation fails.
31682906Sjake */
31782906Sjake#define	RSF_TRAP(type) \
31882906Sjake	b	%xcc, tl0_sftrap ; \
31982906Sjake	 mov	type, %g2 ; \
32082906Sjake	.align	16
32182906Sjake
32282906Sjake/*
32382906Sjake * Game over if the window operation fails.
32482906Sjake */
32582906Sjake#define	RSF_FATAL(type) \
32688781Sjake	b	%xcc, rsf_fatal ; \
32788781Sjake	 mov	type, %g2 ; \
32882906Sjake	.align	16
32982906Sjake
33082906Sjake/*
33182906Sjake * Magic to resume from a failed fill a few instructions after the corresponding
33282906Sjake * restore.  This is used on return from the kernel to usermode.
33382906Sjake */
33482906Sjake#define	RSF_FILL_MAGIC \
33582906Sjake	rdpr	%tnpc, %g1 ; \
33682906Sjake	add	%g1, RSF_FILL_INC, %g1 ; \
33782906Sjake	wrpr	%g1, 0, %tnpc ; \
33882906Sjake	done ; \
33982906Sjake	.align	16
34082906Sjake
34182906Sjake/*
34282906Sjake * Spill to the pcb if a spill to the user stack in kernel mode fails.
34382906Sjake */
34482906Sjake#define	RSF_SPILL_TOPCB \
34582906Sjake	b,a	%xcc, tl1_spill_topcb ; \
34682906Sjake	 nop ; \
34782906Sjake	.align	16
34882906Sjake
/*
 * Terminal handler for unrecoverable window spill/fill failures: trace the
 * trap type register and the window trap type (%g2 from RSF_FATAL), sanity
 * check the kernel stack, then reset via sir.  Does not return.
 */
34988781SjakeENTRY(rsf_fatal)
35088781Sjake#if KTR_COMPILE & KTR_TRAP
35188781Sjake	CATR(KTR_TRAP, "rsf_fatal: bad window trap tt=%#lx type=%#lx"
35288781Sjake	    , %g1, %g3, %g4, 7, 8, 9)
35388781Sjake	rdpr	%tt, %g3
35488781Sjake	stx	%g3, [%g1 + KTR_PARM1]
35588781Sjake	stx	%g2, [%g1 + KTR_PARM2]
35688781Sjake9:
35788781Sjake#endif
35888781Sjake
35988781Sjake	KSTACK_CHECK
36088781Sjake
36188781Sjake	sir
36288781SjakeEND(rsf_fatal)
36388781Sjake
/*
 * Interrupt name and counter tables: IV_MAX 8-byte entries each, with
 * zero-sized end markers following them.
 */
36497265Sjake	.comm	intrnames, IV_MAX * 8
36585243Sjake	.comm	eintrnames, 0
36680709Sjake
36797265Sjake	.comm	intrcnt, IV_MAX * 8
36885243Sjake	.comm	eintrcnt, 0
36980709Sjake
37082906Sjake/*
37182906Sjake * Trap table and associated macros
37282906Sjake *
37382906Sjake * Due to its size a trap table is an inherently hard thing to represent in
37482906Sjake * code in a clean way.  There are approximately 1024 vectors, of 8 or 32
37582906Sjake * instructions each, many of which are identical.  The way that this is
37682906Sjake * laid out is the instructions (8 or 32) for the actual trap vector appear
37782906Sjake * as an AS macro.  In general this code branches to tl0_trap or tl1_trap,
37882906Sjake * but if not supporting code can be placed just after the definition of the
37982906Sjake * macro.  The macros are then instantiated in a different section (.trap),
38082906Sjake * which is setup to be placed by the linker at the beginning of .text, and the
38182906Sjake * code around the macros is moved to the end of trap table.  In this way the
38282906Sjake * code that must be sequential in memory can be split up, and located near
38382906Sjake * its supporting code so that it is easier to follow.
38482906Sjake */
38582906Sjake
38682906Sjake	/*
38782906Sjake	 * Clean window traps occur when %cleanwin is zero to ensure that data
38882906Sjake	 * is not leaked between address spaces in registers.
38982906Sjake	 */
39080709Sjake	.macro	clean_window
39180709Sjake	clr	%o0
39280709Sjake	clr	%o1
39380709Sjake	clr	%o2
39480709Sjake	clr	%o3
39580709Sjake	clr	%o4
39680709Sjake	clr	%o5
39780709Sjake	clr	%o6
39880709Sjake	clr	%o7
39980709Sjake	clr	%l0
40080709Sjake	clr	%l1
40180709Sjake	clr	%l2
40280709Sjake	clr	%l3
40380709Sjake	clr	%l4
40480709Sjake	clr	%l5
40580709Sjake	clr	%l6
	/*
	 * Mark one more window as clean and retry the trapped instruction.
	 * %l7 is cleared last since it is used for the %cleanwin update.
	 */
40680709Sjake	rdpr	%cleanwin, %l7
40780709Sjake	inc	%l7
40880709Sjake	wrpr	%l7, 0, %cleanwin
40980709Sjake	clr	%l7
41080709Sjake	retry
41180709Sjake	.align	128
41280709Sjake	.endm
41380709Sjake
41481380Sjake	/*
41582906Sjake	 * Stack fixups for entry from user mode.  We are still running on the
41682906Sjake	 * user stack, and with its live registers, so we must save soon.  We
41782906Sjake	 * are on alternate globals so we do have some registers.  Set the
41788644Sjake	 * transitional window state, and do the save.  If this traps
41988644Sjake	 * we attempt to spill a window to the user stack.  If this fails,
42088644Sjake	 * we spill the window to the pcb and continue.  Spilling to the pcb
42188644Sjake	 * must not fail.
42282906Sjake	 *
42382906Sjake	 * NOTE: Must be called with alternate globals and clobbers %g1.
42481380Sjake	 */
42582906Sjake
	/*
	 * Enter the transitional window state and save into a new window.
	 * Clobbers %g1 (see the comment above).
	 */
42688644Sjake	.macro	tl0_split
42782906Sjake	rdpr	%wstate, %g1
42882906Sjake	wrpr	%g1, WSTATE_TRANSITION, %wstate
42981380Sjake	save
43081380Sjake	.endm

	/*
	 * Split off the user's windows and enter the common user-trap code
	 * with the trap type in %o0.
	 */
43282906Sjake	.macro	tl0_setup	type
43388644Sjake	tl0_split
434103897Sjake	ba	%xcc, tl0_utrap
43582906Sjake	 mov	\type, %o0
43681380Sjake	.endm

43881380Sjake	/*
43982906Sjake	 * Generic trap type.  Call trap() with the specified type.
44081380Sjake	 */
44180709Sjake	.macro	tl0_gen		type
44282906Sjake	tl0_setup \type
44380709Sjake	.align	32
44480709Sjake	.endm
44580709Sjake
44682906Sjake	/*
44782906Sjake	 * This is used to suck up the massive swaths of reserved trap types.
44882906Sjake	 * Generates count "reserved" trap vectors.
44982906Sjake	 */
45080709Sjake	.macro	tl0_reserved	count
45180709Sjake	.rept	\count
45280709Sjake	tl0_gen	T_RESERVED
45380709Sjake	.endr
45480709Sjake	.endm

	/*
	 * Enable the FPU and reload the four 16-register blocks of saved
	 * floating point state from the pcb, then return from the trap.
	 */
45688780Sjake	.macro	tl0_fp_restore
45788780Sjake	wr	%g0, FPRS_FEF, %fprs
45888780Sjake	wr	%g0, ASI_BLK_S, %asi
45988780Sjake	ldda	[PCB_REG + PCB_FPSTATE + FP_FB0] %asi, %f0
46088780Sjake	ldda	[PCB_REG + PCB_FPSTATE + FP_FB1] %asi, %f16
46188780Sjake	ldda	[PCB_REG + PCB_FPSTATE + FP_FB2] %asi, %f32
46288780Sjake	ldda	[PCB_REG + PCB_FPSTATE + FP_FB3] %asi, %f48
46388780Sjake	membar	#Sync
46488780Sjake	done
46588780Sjake	.align	32
46688780Sjake	.endm
46788780Sjake
	/*
	 * Instruction access exception: collect the faulting pc and sfsr,
	 * clear the sfsr, and enter tl0_sfsr_trap with the type in %g2
	 * (%g3 = fault address, %g4 = sfsr).
	 */
46888644Sjake	.macro	tl0_insn_excptn
469101899Sjake	wrpr	%g0, PSTATE_ALT, %pstate
47088644Sjake	wr	%g0, ASI_IMMU, %asi
47188644Sjake	rdpr	%tpc, %g3
47288644Sjake	ldxa	[%g0 + AA_IMMU_SFSR] %asi, %g4
47388644Sjake	stxa	%g0, [%g0 + AA_IMMU_SFSR] %asi
47488644Sjake	membar	#Sync
47588644Sjake	b	%xcc, tl0_sfsr_trap
47688644Sjake	 mov	T_INSTRUCTION_EXCEPTION, %g2
47788644Sjake	.align	32
47888644Sjake	.endm

	/*
	 * Data access exception: same protocol as above, but the fault
	 * address comes from the D-MMU sfar.
	 */
48082906Sjake	.macro	tl0_data_excptn
481101899Sjake	wrpr	%g0, PSTATE_ALT, %pstate
48282906Sjake	wr	%g0, ASI_DMMU, %asi
48382906Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
48482906Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
48588644Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
48688644Sjake	membar	#Sync
48782906Sjake	b	%xcc, tl0_sfsr_trap
48888644Sjake	 mov	T_DATA_EXCEPTION, %g2
48982906Sjake	.align	32
49082906Sjake	.endm

	/*
	 * Memory address not aligned: same protocol as tl0_data_excptn.
	 */
49282005Sjake	.macro	tl0_align
49382906Sjake	wr	%g0, ASI_DMMU, %asi
49482906Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
49582906Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
49688644Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
49788644Sjake	membar	#Sync
49882005Sjake	b	%xcc, tl0_sfsr_trap
49988644Sjake	 mov	T_MEM_ADDRESS_NOT_ALIGNED, %g2
50082005Sjake	.align	32
50182005Sjake	.endm
50282005Sjake
/*
 * Common tail for the sfsr-style fault macros above.  Entered with
 * %g2 = trap type, %g3 = fault address, %g4 = sfsr; hands them to
 * tl0_utrap as %o0/%o4/%o5 after splitting off the user windows.
 */
50382005SjakeENTRY(tl0_sfsr_trap)
50488644Sjake	tl0_split
50588644Sjake	mov	%g3, %o4
50688644Sjake	mov	%g4, %o5
507103897Sjake	ba	%xcc, tl0_utrap
50882906Sjake	 mov	%g2, %o0
50982005SjakeEND(tl0_sfsr_trap)
51082005Sjake
	/*
	 * Interrupt-level trap vector: enter the common interrupt code with
	 * the level in %o0 and the corresponding softint mask in %o1.
	 */
51182906Sjake	.macro	tl0_intr level, mask
51288644Sjake	tl0_split
51391246Sjake	set	\mask, %o1
51484186Sjake	b	%xcc, tl0_intr
51591246Sjake	 mov	\level, %o0
51681380Sjake	.align	32
51781380Sjake	.endm

/* Instantiate a tl0 or tl1 interrupt vector for the given PIL level. */
51981380Sjake#define	INTR(level, traplvl)						\
52082906Sjake	tl ## traplvl ## _intr	level, 1 << level

/* PIL_TICK uses softint mask 1 rather than 1 << PIL_TICK. */
52281380Sjake#define	TICK(traplvl) \
52382906Sjake	tl ## traplvl ## _intr	PIL_TICK, 1

/* All 15 interrupt-level vectors; level 14 is the tick interrupt. */
52581380Sjake#define	INTR_LEVEL(tl)							\
52681380Sjake	INTR(1, tl) ;							\
52781380Sjake	INTR(2, tl) ;							\
52881380Sjake	INTR(3, tl) ;							\
52981380Sjake	INTR(4, tl) ;							\
53081380Sjake	INTR(5, tl) ;							\
53181380Sjake	INTR(6, tl) ;							\
53281380Sjake	INTR(7, tl) ;							\
53381380Sjake	INTR(8, tl) ;							\
53481380Sjake	INTR(9, tl) ;							\
53581380Sjake	INTR(10, tl) ;							\
53681380Sjake	INTR(11, tl) ;							\
53781380Sjake	INTR(12, tl) ;							\
53881380Sjake	INTR(13, tl) ;							\
53981380Sjake	TICK(tl) ;							\
54081380Sjake	INTR(15, tl) ;

54280709Sjake	.macro	tl0_intr_level
54381380Sjake	INTR_LEVEL(0)
54480709Sjake	.endm

	/*
	 * Interrupt vector trap: if the receive status register shows a
	 * pending vector, go enqueue it; otherwise reset (sir) — a vector
	 * trap with no busy bit set should not happen.
	 */
54697265Sjake	.macro	intr_vector
54797265Sjake	ldxa	[%g0] ASI_INTR_RECEIVE, %g1
54897265Sjake	andcc	%g1, IRSR_BUSY, %g0
54997265Sjake	bnz,a,pt %xcc, intr_enqueue
55097265Sjake	 nop
55197265Sjake	sir
55281380Sjake	.align	32
55380709Sjake	.endm
55480709Sjake
	/*
	 * Inline user ITSB lookup.  Entered with %g1 = tag access register
	 * contents and %asi = ASI_IMMU (set up by the caller); clobbers
	 * %g2-%g7.  On a hit the tlb is loaded and the instruction retried;
	 * on a miss this falls through to the caller's code.
	 */
55596207Sjake	.macro	immu_miss_user
55681380Sjake	/*
557102040Sjake	 * Initialize the page size walker.
558102040Sjake	 */
559102040Sjake	mov	TS_MIN, %g2
560102040Sjake
561102040Sjake	/*
562102040Sjake	 * Loop over all supported page sizes.
563102040Sjake	 */
564102040Sjake
565102040Sjake	/*
566102040Sjake	 * Compute the page shift for the page size we are currently looking
567102040Sjake	 * for.
568102040Sjake	 */
569102040Sjake1:	add	%g2, %g2, %g3
570102040Sjake	add	%g3, %g2, %g3
571102040Sjake	add	%g3, PAGE_SHIFT, %g3
572102040Sjake
573102040Sjake	/*
57491224Sjake	 * Extract the virtual page number from the contents of the tag
57591224Sjake	 * access register.
57681380Sjake	 */
577102040Sjake	srlx	%g1, %g3, %g3
57881380Sjake
57981380Sjake	/*
58091224Sjake	 * Compute the tte bucket address.
58181380Sjake	 */
582102040Sjake	ldxa	[%g0 + AA_IMMU_TSB] %asi, %g5
583102040Sjake	and	%g3, TSB_BUCKET_MASK, %g4
584102040Sjake	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
585102040Sjake	add	%g4, %g5, %g4
58681380Sjake
58781380Sjake	/*
588102040Sjake	 * Compute the tte tag target.
58981380Sjake	 */
590102040Sjake	sllx	%g3, TV_SIZE_BITS, %g3
591102040Sjake	or	%g3, %g2, %g3
59281380Sjake
59381380Sjake	/*
594102040Sjake	 * Loop over the ttes in this bucket
59581380Sjake	 */
59681380Sjake
59781380Sjake	/*
598102040Sjake	 * Load the tte.  Note that this instruction may fault, clobbering
599102040Sjake	 * the contents of the tag access register, %g5, %g6, and %g7.  We
600102040Sjake	 * do not use %g5, and %g6 and %g7 are not used until this instruction
601102040Sjake	 * completes successfully.
60281380Sjake	 */
603102040Sjake2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
60481380Sjake
60581380Sjake	/*
606102040Sjake	 * Check that it's valid and executable and that the tte tags match.
60781380Sjake	 */
608102040Sjake	brgez,pn %g7, 3f
609102040Sjake	 andcc	%g7, TD_EXEC, %g0
610102040Sjake	bz,pn	%xcc, 3f
611102040Sjake	 cmp	%g3, %g6
612102040Sjake	bne,pn	%xcc, 3f
61388644Sjake	 EMPTY
61481380Sjake
61581380Sjake	/*
61681380Sjake	 * We matched a tte, load the tlb.
61781380Sjake	 */
61881380Sjake
61981380Sjake	/*
62081380Sjake	 * Set the reference bit, if it's currently clear.
62181380Sjake	 */
622102040Sjake	 andcc	%g7, TD_REF, %g0
62382906Sjake	bz,a,pn	%xcc, tl0_immu_miss_set_ref
62481380Sjake	 nop
62581380Sjake
62681380Sjake	/*
62791224Sjake	 * Load the tte tag and data into the tlb and retry the instruction.
62881380Sjake	 */
629102040Sjake	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
630102040Sjake	stxa	%g7, [%g0] ASI_ITLB_DATA_IN_REG
63181380Sjake	retry
63281380Sjake
63381380Sjake	/*
634102040Sjake	 * Advance to the next tte in this bucket, and check the low bits
635102040Sjake	 * of the bucket pointer to see if we've finished the bucket.
63681380Sjake	 */
637102040Sjake3:	add	%g4, 1 << TTE_SHIFT, %g4
638102040Sjake	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
639102040Sjake	bnz,pt	%xcc, 2b
640102040Sjake	 EMPTY
64191224Sjake
64291224Sjake	/*
643102040Sjake	 * See if we just checked the largest page size, and advance to the
644102040Sjake	 * next one if not.
64591224Sjake	 */
646102040Sjake	 cmp	%g2, TS_MAX
647102040Sjake	bne,pt	%xcc, 1b
648102040Sjake	 add	%g2, 1, %g2
649102040Sjake	.endm

651102040Sjake	.macro	tl0_immu_miss
65296207Sjake	/*
65396207Sjake	 * Load the virtual page number and context from the tag access
65496207Sjake	 * register.  We ignore the context.
65596207Sjake	 */
65696207Sjake	wr	%g0, ASI_IMMU, %asi
657102040Sjake	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g1
65896207Sjake
659102040Sjake	/*
660102040Sjake	 * Try a fast inline lookup of the user tsb.
661102040Sjake	 */
66296207Sjake	immu_miss_user
66396207Sjake
664102040Sjake	/*
665102040Sjake	 * Not in user tsb, call c code.
666102040Sjake	 */
667102040Sjake	ba,a	%xcc, tl0_immu_miss_trap
66881380Sjake	.align	128
66980709Sjake	.endm
67080709Sjake
/*
 * Slow path of the ITSB hit: atomically set the referenced bit in the tte
 * (%g4 = tte address, from immu_miss_user), then load the tlb and retry —
 * unless the tte went invalid under us, in which case just retry and take
 * the miss again.
 */
67182906SjakeENTRY(tl0_immu_miss_set_ref)
67281380Sjake	/*
67381380Sjake	 * Set the reference bit.
67481380Sjake	 */
675102040Sjake	TTE_SET_REF(%g4, %g2, %g3)
67681380Sjake
67781380Sjake	/*
678102040Sjake	 * May have become invalid during casxa, in which case start over.
67981380Sjake	 */
680102040Sjake	brgez,pn %g2, 1f
681102040Sjake	 nop
68281380Sjake
68381380Sjake	/*
68491224Sjake	 * Load the tte tag and data into the tlb and retry the instruction.
68581380Sjake	 */
686102040Sjake	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
687102040Sjake	stxa	%g2, [%g0] ASI_ITLB_DATA_IN_REG
68891224Sjake1:	retry
68982906SjakeEND(tl0_immu_miss_set_ref)
69081380Sjake
/*
 * ITSB lookup failed: restore the tag access register (a tte load in
 * immu_miss_user may have faulted and clobbered it), switch to alternate
 * globals, and enter tl0_trap with T_INSTRUCTION_MISS and the tag access
 * value in %o3.
 */
69182906SjakeENTRY(tl0_immu_miss_trap)
69281380Sjake	/*
69396207Sjake	 * Put back the contents of the tag access register, in case we
69496207Sjake	 * faulted.
69596207Sjake	 */
696102040Sjake	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
69796207Sjake	membar	#Sync
69896207Sjake
69996207Sjake	/*
70082906Sjake	 * Switch to alternate globals.
70182906Sjake	 */
70282906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
70382906Sjake
70482906Sjake	/*
70591224Sjake	 * Reload the tag access register.
70681380Sjake	 */
70791224Sjake	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2
70881380Sjake
70981380Sjake	/*
71091224Sjake	 * Save the tag access register, and call common trap code.
71181380Sjake	 */
71288644Sjake	tl0_split
71391224Sjake	mov	%g2, %o3
71482906Sjake	b	%xcc, tl0_trap
71588644Sjake	 mov	T_INSTRUCTION_MISS, %o0
71682906SjakeEND(tl0_immu_miss_trap)
71781380Sjake
	/*
	 * Inline user DTSB lookup; the data-side twin of immu_miss_user.
	 * Entered with %g1 = tag access register contents and
	 * %asi = ASI_DMMU; clobbers %g2-%g7.  On a hit the tlb is loaded
	 * and the instruction retried; on a miss this falls through.
	 */
71881180Sjake	.macro	dmmu_miss_user
71981180Sjake	/*
720102040Sjake	 * Initialize the page size walker.
721102040Sjake	 */
722102040Sjake	mov	TS_MIN, %g2
723102040Sjake
724102040Sjake	/*
725102040Sjake	 * Loop over all supported page sizes.
726102040Sjake	 */
727102040Sjake
728102040Sjake	/*
729102040Sjake	 * Compute the page shift for the page size we are currently looking
730102040Sjake	 * for.
731102040Sjake	 */
732102040Sjake1:	add	%g2, %g2, %g3
733102040Sjake	add	%g3, %g2, %g3
734102040Sjake	add	%g3, PAGE_SHIFT, %g3
735102040Sjake
736102040Sjake	/*
73791224Sjake	 * Extract the virtual page number from the contents of the tag
73891224Sjake	 * access register.
73991224Sjake	 */
740102040Sjake	srlx	%g1, %g3, %g3
74191224Sjake
74291224Sjake	/*
74388644Sjake	 * Compute the tte bucket address.
74481180Sjake	 */
745102040Sjake	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g5
746102040Sjake	and	%g3, TSB_BUCKET_MASK, %g4
747102040Sjake	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
748102040Sjake	add	%g4, %g5, %g4
74981180Sjake
75081180Sjake	/*
751102040Sjake	 * Compute the tte tag target.
75281180Sjake	 */
753102040Sjake	sllx	%g3, TV_SIZE_BITS, %g3
754102040Sjake	or	%g3, %g2, %g3
75581180Sjake
75681180Sjake	/*
757102040Sjake	 * Loop over the ttes in this bucket
75881180Sjake	 */
75981180Sjake
76081180Sjake	/*
761102040Sjake	 * Load the tte.  Note that this instruction may fault, clobbering
762102040Sjake	 * the contents of the tag access register, %g5, %g6, and %g7.  We
763102040Sjake	 * do not use %g5, and %g6 and %g7 are not used until this instruction
764102040Sjake	 * completes successfully.
76581180Sjake	 */
766102040Sjake2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
76781180Sjake
76881180Sjake	/*
76991224Sjake	 * Check that it's valid and that the virtual page numbers match.
77081180Sjake	 */
771102040Sjake	brgez,pn %g7, 3f
772102040Sjake	 cmp	%g3, %g6
773102040Sjake	bne,pn	%xcc, 3f
77488644Sjake	 EMPTY
77581180Sjake
77681180Sjake	/*
77781180Sjake	 * We matched a tte, load the tlb.
77881180Sjake	 */
77981180Sjake
78081180Sjake	/*
78181180Sjake	 * Set the reference bit, if it's currently clear.
78281180Sjake	 */
783102040Sjake	 andcc	%g7, TD_REF, %g0
78481180Sjake	bz,a,pn	%xcc, dmmu_miss_user_set_ref
78581180Sjake	 nop
78681180Sjake
78781180Sjake	/*
78891224Sjake	 * Load the tte tag and data into the tlb and retry the instruction.
78981180Sjake	 */
790102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
791102040Sjake	stxa	%g7, [%g0] ASI_DTLB_DATA_IN_REG
79281180Sjake	retry
79381180Sjake
79481180Sjake	/*
795102040Sjake	 * Advance to the next tte in this bucket, and check the low bits
796102040Sjake	 * of the bucket pointer to see if we've finished the bucket.
79781180Sjake	 */
798102040Sjake3:	add	%g4, 1 << TTE_SHIFT, %g4
799102040Sjake	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
800102040Sjake	bnz,pt	%xcc, 2b
801102040Sjake	 EMPTY
802102040Sjake
803102040Sjake	/*
804102040Sjake	 * See if we just checked the largest page size, and advance to the
805102040Sjake	 * next one if not.
806102040Sjake	 */
807102040Sjake	 cmp	%g2, TS_MAX
808102040Sjake	bne,pt	%xcc, 1b
809102040Sjake	 add	%g2, 1, %g2
81081180Sjake	.endm
81181180Sjake
/*
 * Slow path of the DTSB hit: atomically set the referenced bit in the tte
 * (%g4 = tte address, from dmmu_miss_user), then load the tlb and retry —
 * unless the tte went invalid under us, in which case just retry and take
 * the miss again.
 */
81281180SjakeENTRY(dmmu_miss_user_set_ref)
81381180Sjake	/*
81481180Sjake	 * Set the reference bit.
81581180Sjake	 */
816102040Sjake	TTE_SET_REF(%g4, %g2, %g3)
81781180Sjake
81881180Sjake	/*
819102040Sjake	 * May have become invalid during casxa, in which case start over.
82081180Sjake	 */
821102040Sjake	brgez,pn %g2, 1f
822102040Sjake	 nop
82381180Sjake
82481180Sjake	/*
82591224Sjake	 * Load the tte tag and data into the tlb and retry the instruction.
82681180Sjake	 */
827102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
828102040Sjake	stxa	%g2, [%g0] ASI_DTLB_DATA_IN_REG
82991224Sjake1:	retry
83081180SjakeEND(dmmu_miss_user_set_ref)
83181180Sjake
	/*
	 * Data MMU miss trap vector for user mode: set up %asi/%g1 for the
	 * inline DTSB lookup and fall back to C code on a miss.
	 */
83280709Sjake	.macro	tl0_dmmu_miss
83381180Sjake	/*
83496207Sjake	 * Load the virtual page number and context from the tag access
83596207Sjake	 * register.  We ignore the context.
83696207Sjake	 */
83796207Sjake	wr	%g0, ASI_DMMU, %asi
838102040Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1
83996207Sjake
84096207Sjake	/*
84181180Sjake	 * Try a fast inline lookup of the primary tsb.
84281180Sjake	 */
84381180Sjake	dmmu_miss_user
84481180Sjake
84581180Sjake	/*
846102040Sjake	 * Not in user tsb, call c code.
84781180Sjake	 */
848102040Sjake	ba,a	%xcc, tl0_dmmu_miss_trap
84981180Sjake	.align	128
85080709Sjake	.endm
85180709Sjake
85281180SjakeENTRY(tl0_dmmu_miss_trap)
	/*
	 * Slow path for a TL0 data MMU miss: the inline tsb lookup failed,
	 * so hand off to the common C trap code with the faulting tag
	 * access value as an argument.  In: %g1 = saved tag access
	 * contents, %asi = ASI_DMMU (set by tl0_dmmu_miss).
	 */
85382005Sjake	/*
85496207Sjake	 * Put back the contents of the tag access register, in case we
85596207Sjake	 * faulted.
85696207Sjake	 */
857102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
85896207Sjake	membar	#Sync
85996207Sjake
86096207Sjake	/*
86182906Sjake	 * Switch to alternate globals.
86282005Sjake	 */
86382906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
86482005Sjake
86582005Sjake	/*
86691224Sjake	 * Reload the tag access register.
86782005Sjake	 */
86891224Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
86981180Sjake
87081180Sjake	/*
87191224Sjake	 * Save the tag access register and call common trap code.
87281180Sjake	 */
87388644Sjake	tl0_split
87491224Sjake	mov	%g2, %o3
87582906Sjake	b	%xcc, tl0_trap
87688644Sjake	 mov	T_DATA_MISS, %o0
87782906SjakeEND(tl0_dmmu_miss_trap)
87881180Sjake
87988644Sjake	.macro	dmmu_prot_user
	/*
	 * Inline fast path for a user data protection fault.  For each
	 * supported page size (TS_MIN..TS_MAX) walk the tte bucket in the
	 * user tsb looking for a valid, software-writable (TD_SW) tte whose
	 * tag matches the faulting address.  On a hit: set the hardware
	 * write bit, demap the stale TLB entry, clear the sfsr and load the
	 * updated tte.  Falls through when no match is found.
	 * In: %g1 = tag access register contents, %asi = ASI_DMMU.
	 */
88088644Sjake	/*
881102040Sjake	 * Initialize the page size walker.
882102040Sjake	 */
883102040Sjake	mov	TS_MIN, %g2
884102040Sjake
885102040Sjake	/*
886102040Sjake	 * Loop over all supported page sizes.
887102040Sjake	 */
888102040Sjake
889102040Sjake	/*
890102040Sjake	 * Compute the page shift for the page size we are currently looking
	 * for.  The shift is 3 * %g2 + PAGE_SHIFT, i.e. page sizes grow by
	 * a factor of 8 per step.
892102040Sjake	 */
893102040Sjake1:	add	%g2, %g2, %g3
894102040Sjake	add	%g3, %g2, %g3
895102040Sjake	add	%g3, PAGE_SHIFT, %g3
896102040Sjake
897102040Sjake	/*
89891224Sjake	 * Extract the virtual page number from the contents of the tag
89991224Sjake	 * access register.
90091224Sjake	 */
901102040Sjake	srlx	%g1, %g3, %g3
90291224Sjake
90391224Sjake	/*
90488644Sjake	 * Compute the tte bucket address.
90588644Sjake	 */
906102040Sjake	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g5
907102040Sjake	and	%g3, TSB_BUCKET_MASK, %g4
908102040Sjake	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
909102040Sjake	add	%g4, %g5, %g4
91088644Sjake
91188644Sjake	/*
912102040Sjake	 * Compute the tte tag target.
91388644Sjake	 */
914102040Sjake	sllx	%g3, TV_SIZE_BITS, %g3
915102040Sjake	or	%g3, %g2, %g3
91688644Sjake
91788644Sjake	/*
918102040Sjake	 * Loop over the ttes in this bucket
91988644Sjake	 */
92088644Sjake
92188644Sjake	/*
922102040Sjake	 * Load the tte.  Note that this instruction may fault, clobbering
923102040Sjake	 * the contents of the tag access register, %g5, %g6, and %g7.  We
924102040Sjake	 * do not use %g5, and %g6 and %g7 are not used until this instruction
	 * completes successfully.  The quad load fills %g6 (tag) and %g7
	 * (data).
92688644Sjake	 */
927102040Sjake2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
92888644Sjake
92988644Sjake	/*
93091224Sjake	 * Check that its valid and writable and that the virtual page
93191224Sjake	 * numbers match.
93288644Sjake	 */
933102040Sjake	brgez,pn %g7, 4f
934102040Sjake	 andcc	%g7, TD_SW, %g0
935102040Sjake	bz,pn	%xcc, 4f
936102040Sjake	 cmp	%g3, %g6
937102040Sjake	bne,pn	%xcc, 4f
93888644Sjake	 nop
93988644Sjake
94091224Sjake	/*
	 * Set the hardware write bit.  TTE_SET_W appears to leave the
	 * updated tte data in %g2 and clobber %g3 — confirm against its
	 * definition.
94291224Sjake	 */
943102040Sjake	TTE_SET_W(%g4, %g2, %g3)
94488644Sjake
94588644Sjake	/*
946102040Sjake	 * Delete the old TLB entry and clear the sfsr.
94788644Sjake	 */
948102040Sjake	srlx	%g1, PAGE_SHIFT, %g3
949102040Sjake	sllx	%g3, PAGE_SHIFT, %g3
950102040Sjake	stxa	%g0, [%g3] ASI_DMMU_DEMAP
951102040Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
952102040Sjake	membar	#Sync
95388644Sjake
95481180Sjake	/*
955102040Sjake	 * May have become invalid during casxa, in which case start over.
95688644Sjake	 */
957102040Sjake	brgez,pn %g2, 3f
958102040Sjake	 or	%g2, TD_W, %g2
95988644Sjake
96088644Sjake	/*
961102040Sjake	 * Load the tte data into the tlb and retry the instruction.
96296207Sjake	 */
963102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
964102040Sjake	stxa	%g2, [%g0] ASI_DTLB_DATA_IN_REG
965102040Sjake3:	retry
96696207Sjake
96796207Sjake	/*
968102040Sjake	 * Check the low bits to see if we've finished the bucket.
96988644Sjake	 */
970102040Sjake4:	add	%g4, 1 << TTE_SHIFT, %g4
971102040Sjake	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
972102040Sjake	bnz,pt	%xcc, 2b
973102040Sjake	 EMPTY
97488644Sjake
97588644Sjake	/*
976102040Sjake	 * See if we just checked the largest page size, and advance to the
977102040Sjake	 * next one if not.
97888644Sjake	 */
979102040Sjake	 cmp	%g2, TS_MAX
980102040Sjake	bne,pt	%xcc, 1b
981102040Sjake	 add	%g2, 1, %g2
982102040Sjake	.endm
983102040Sjake
984102040Sjake	.macro	tl0_dmmu_prot
	/*
	 * Trap-table entry for a TL0 data protection fault: just branch to
	 * the out-of-line handler.  The nop sits in the delay slot but is
	 * annulled by ba,a; it only pads the 128-byte trap slot.
	 */
985102040Sjake	ba,a	%xcc, tl0_dmmu_prot_1
986102040Sjake	 nop
987102040Sjake	.align	128
988102040Sjake	.endm
98988644Sjake
990102040SjakeENTRY(tl0_dmmu_prot_1)
	/*
	 * Out-of-line body of the TL0 data protection handler: save the
	 * tag access contents in %g1, try the inline tsb fast path, and
	 * fall back to C code on a miss.
	 */
99188644Sjake	/*
992102040Sjake	 * Load the virtual page number and context from the tag access
993102040Sjake	 * register.  We ignore the context.
99488644Sjake	 */
995102040Sjake	wr	%g0, ASI_DMMU, %asi
996102040Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1
99788644Sjake
99888644Sjake	/*
999102040Sjake	 * Try a fast inline lookup of the tsb.
100088644Sjake	 */
1001102040Sjake	dmmu_prot_user
100288644Sjake
100388644Sjake	/*
1004102040Sjake	 * Not in user tsb, call c code.
100591224Sjake	 */
1006102040Sjake	b,a	%xcc, tl0_dmmu_prot_trap
1007102040Sjake	 nop
1008102040SjakeEND(tl0_dmmu_prot_1)
100991224Sjake
101088644SjakeENTRY(tl0_dmmu_prot_trap)
	/*
	 * Slow path for a TL0 data protection fault: collect the mmu
	 * registers (tar, sfar, sfsr), clear the sfsr and enter the common
	 * user trap code.  In: %g1 = saved tag access contents,
	 * %asi = ASI_DMMU.
	 */
101188644Sjake	/*
101296207Sjake	 * Put back the contents of the tag access register, in case we
101396207Sjake	 * faulted.
101496207Sjake	 */
1015102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
101696207Sjake	membar	#Sync
101796207Sjake
101896207Sjake	/*
101982906Sjake	 * Switch to alternate globals.
102081180Sjake	 */
102182906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
102281180Sjake
102381180Sjake	/*
102482005Sjake	 * Load the tar, sfar and sfsr.
102582005Sjake	 */
102688644Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
102788644Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
102888644Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
102985243Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
103082005Sjake	membar	#Sync
103182005Sjake
103282005Sjake	/*
103391224Sjake	 * Save the mmu registers and call common trap code.
103482005Sjake	 */
103588644Sjake	tl0_split
103688644Sjake	mov	%g2, %o3
103788644Sjake	mov	%g3, %o4
103888644Sjake	mov	%g4, %o5
1039103897Sjake	ba	%xcc, tl0_utrap
104088644Sjake	 mov	T_DATA_PROTECTION, %o0
104188644SjakeEND(tl0_dmmu_prot_trap)
104281180Sjake
104380709Sjake	.macro	tl0_spill_0_n
	/*
	 * Spill a 64-bit register window to the user stack via the
	 * user-primary ASI.  The RSF_TRAP(T_SPILL) entries appear to be
	 * the fault-recovery slots used by the SPILL macro (defined
	 * elsewhere) — confirm against its definition.
	 */
104491246Sjake	wr	%g0, ASI_AIUP, %asi
104591246Sjake	SPILL(stxa, %sp + SPOFF, 8, %asi)
104680709Sjake	saved
104780709Sjake	retry
104882906Sjake	.align	32
104982906Sjake	RSF_TRAP(T_SPILL)
105082906Sjake	RSF_TRAP(T_SPILL)
105180709Sjake	.endm
105280709Sjake
105382906Sjake	.macro	tl0_spill_1_n
	/*
	 * Spill a 32-bit register window (stwa, 4-byte slots, no stack
	 * bias) to the user stack via the user-primary ASI — presumably
	 * the 32-bit-process variant of tl0_spill_0_n.
	 */
105491246Sjake	wr	%g0, ASI_AIUP, %asi
105582906Sjake	SPILL(stwa, %sp, 4, %asi)
105682906Sjake	saved
105782906Sjake	retry
105882906Sjake	.align	32
105982906Sjake	RSF_TRAP(T_SPILL)
106082906Sjake	RSF_TRAP(T_SPILL)
106182906Sjake	.endm
106282005Sjake
106391246Sjake	.macro	tl0_fill_0_n
	/*
	 * Fill a 64-bit register window from the user stack via the
	 * user-primary ASI; the RSF_TRAP(T_FILL) entries are the FILL
	 * macro's fault-recovery slots.
	 */
106482906Sjake	wr	%g0, ASI_AIUP, %asi
106591246Sjake	FILL(ldxa, %sp + SPOFF, 8, %asi)
106682906Sjake	restored
106782906Sjake	retry
106882906Sjake	.align	32
106982906Sjake	RSF_TRAP(T_FILL)
107082906Sjake	RSF_TRAP(T_FILL)
107180709Sjake	.endm
107280709Sjake
107382906Sjake	.macro	tl0_fill_1_n
	/*
	 * Fill a 32-bit register window (lduwa, 4-byte slots, no stack
	 * bias) from the user stack — the 32-bit counterpart of
	 * tl0_fill_0_n.
	 */
107491246Sjake	wr	%g0, ASI_AIUP, %asi
107582906Sjake	FILL(lduwa, %sp, 4, %asi)
107682906Sjake	restored
107782906Sjake	retry
107882906Sjake	.align	32
107982906Sjake	RSF_TRAP(T_FILL)
108082906Sjake	RSF_TRAP(T_FILL)
108182906Sjake	.endm
108282906Sjake
108382906SjakeENTRY(tl0_sftrap)
	/*
	 * Spill/fill trap helper: restore the cwp from the saved %tstate
	 * so we are back in the window that trapped, then enter common
	 * trap code.  Expects the trap type in %g2.
	 */
108482906Sjake	rdpr	%tstate, %g1
108582906Sjake	and	%g1, TSTATE_CWP_MASK, %g1
108682906Sjake	wrpr	%g1, 0, %cwp
108788644Sjake	tl0_split
108882906Sjake	b	%xcc, tl0_trap
108982906Sjake	 mov	%g2, %o0
109082906SjakeEND(tl0_sftrap)
109182906Sjake
109282906Sjake	.macro	tl0_spill_bad	count
	/*
	 * Fill \count unexpected spill trap slots with sir (reset) —
	 * hitting one of these indicates a kernel bug.
	 */
109382906Sjake	.rept	\count
109488644Sjake	sir
109588644Sjake	.align	128
109682906Sjake	.endr
109782906Sjake	.endm
109882906Sjake
109980709Sjake	.macro	tl0_fill_bad	count
	/*
	 * Fill \count unexpected fill trap slots with sir (reset) —
	 * hitting one of these indicates a kernel bug.
	 */
110080709Sjake	.rept	\count
110188644Sjake	sir
110288644Sjake	.align	128
110380709Sjake	.endr
110480709Sjake	.endm
110580709Sjake
110684186Sjake	.macro	tl0_syscall
	/*
	 * System call trap slot: split to the kernel frame and branch to
	 * the common syscall entry (the tl0_syscall label defined
	 * elsewhere, distinct from this macro).
	 */
110788644Sjake	tl0_split
110884186Sjake	b	%xcc, tl0_syscall
110984186Sjake	 mov	T_SYSCALL, %o0
111088784Sjake	.align	32
111184186Sjake	.endm
111284186Sjake
111380709Sjake	.macro	tl0_soft	count
	/*
	 * Emit \count generic software trap slots, each delivering T_SOFT.
	 */
111482906Sjake	.rept	\count
111582906Sjake	tl0_gen	T_SOFT
111682906Sjake	.endr
111780709Sjake	.endm
111880709Sjake
111991246Sjake	.macro	tl1_split
	/*
	 * Set up a kernel trap frame at TL1: mark the window state as
	 * nested and open a new register window on the kernel stack.
	 */
112091246Sjake	rdpr	%wstate, %g1
112191246Sjake	wrpr	%g1, WSTATE_NESTED, %wstate
112280709Sjake	save	%sp, -CCFSZ, %sp
112382906Sjake	.endm
112482906Sjake
112582906Sjake	.macro	tl1_setup	type
	/*
	 * Common TL1 trap prologue: split to a nested frame and enter the
	 * common TL1 trap code with \type (marked T_KERNEL).
	 */
112691246Sjake	tl1_split
112780709Sjake	b	%xcc, tl1_trap
112888644Sjake	 mov	\type | T_KERNEL, %o0
112982906Sjake	.endm
113082906Sjake
113182906Sjake	.macro	tl1_gen		type
	/*
	 * Generic 32-byte TL1 trap table slot delivering \type.
	 */
113282906Sjake	tl1_setup \type
113380709Sjake	.align	32
113480709Sjake	.endm
113580709Sjake
113680709Sjake	.macro	tl1_reserved	count
	/*
	 * Emit \count reserved TL1 trap slots, each delivering T_RESERVED.
	 */
113780709Sjake	.rept	\count
113880709Sjake	tl1_gen	T_RESERVED
113980709Sjake	.endr
114080709Sjake	.endm
114180709Sjake
114280709Sjake	.macro	tl1_insn_excptn
	/*
	 * Kernel instruction access exception slot: capture %tpc and the
	 * immu sfsr (then clear it), and branch out of line with the trap
	 * type in %g2.
	 */
1143101899Sjake	wrpr	%g0, PSTATE_ALT, %pstate
114488644Sjake	wr	%g0, ASI_IMMU, %asi
114588644Sjake	rdpr	%tpc, %g3
114688644Sjake	ldxa	[%g0 + AA_IMMU_SFSR] %asi, %g4
114788644Sjake	stxa	%g0, [%g0 + AA_IMMU_SFSR] %asi
114888644Sjake	membar	#Sync
114988644Sjake	b	%xcc, tl1_insn_exceptn_trap
115088644Sjake	 mov	T_INSTRUCTION_EXCEPTION | T_KERNEL, %g2
115180709Sjake	.align	32
115280709Sjake	.endm
115380709Sjake
115488644SjakeENTRY(tl1_insn_exceptn_trap)
	/*
	 * Out-of-line tail of tl1_insn_excptn.
	 * In: %g2 = trap type, %g3 = %tpc at the fault, %g4 = immu sfsr.
	 */
115591246Sjake	tl1_split
115688644Sjake	mov	%g3, %o4
115788644Sjake	mov	%g4, %o5
115888644Sjake	b	%xcc, tl1_trap
115988644Sjake	 mov	%g2, %o0
116088644SjakeEND(tl1_insn_exceptn_trap)
116188644Sjake
116282005Sjake	.macro	tl1_data_excptn
	/*
	 * Kernel data access exception slot: switch to alternate globals
	 * and branch to the out-of-line handler.
	 */
1163101899Sjake	wrpr	%g0, PSTATE_ALT, %pstate
116488644Sjake	b,a	%xcc, tl1_data_excptn_trap
116582906Sjake	 nop
116682005Sjake	.align	32
116782005Sjake	.endm
116882005Sjake
116988644SjakeENTRY(tl1_data_excptn_trap)
	/*
	 * Kernel data access exception: resume directly if the fault was
	 * in a spill/fill (RESUME_SPILLFILL_MMU_CLR_SFSR, defined
	 * elsewhere), otherwise fall into tl1_sfsr_trap with the trap type
	 * in %g2.
	 */
117088644Sjake	RESUME_SPILLFILL_MMU_CLR_SFSR
117182906Sjake	b	%xcc, tl1_sfsr_trap
117288644Sjake	 mov	T_DATA_EXCEPTION | T_KERNEL, %g2
117388644SjakeEND(tl1_data_excptn_trap)
117482906Sjake
117580709Sjake	.macro	tl1_align
	/*
	 * Kernel memory-address-not-aligned slot: branch to the
	 * out-of-line handler.
	 */
117688644Sjake	b,a	%xcc, tl1_align_trap
117788644Sjake	 nop
117880709Sjake	.align	32
117980709Sjake	.endm
118080709Sjake
118182906SjakeENTRY(tl1_align_trap)
	/*
	 * Kernel (TL1) memory-address-not-aligned trap: resume directly if
	 * the fault happened during a window spill/fill
	 * (RESUME_SPILLFILL_ALIGN, defined elsewhere), otherwise fall into
	 * tl1_sfsr_trap with the trap type in %g2.
	 */
118288644Sjake	RESUME_SPILLFILL_ALIGN
118382906Sjake	b	%xcc, tl1_sfsr_trap
118488644Sjake	 mov	T_MEM_ADDRESS_NOT_ALIGNED | T_KERNEL, %g2
	/*
	 * Fix: END() previously named tl1_data_excptn_trap here (a
	 * copy-paste slip from the block above), mismatching the ENTRY
	 * label and mis-attributing the symbol bookkeeping END() performs.
	 */
118588644SjakeEND(tl1_align_trap)
118682906Sjake
118780709SjakeENTRY(tl1_sfsr_trap)
	/*
	 * Common tail for kernel data faults: read the dmmu sfar and sfsr,
	 * clear the sfsr, and enter the common TL1 trap code.
	 * In: %g2 = trap type.
	 */
118888644Sjake	wr	%g0, ASI_DMMU, %asi
118988644Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
119088644Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
119180709Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
119280709Sjake	membar	#Sync
119382005Sjake
119491246Sjake	tl1_split
119588644Sjake	mov	%g3, %o4
119688644Sjake	mov	%g4, %o5
119780709Sjake	b	%xcc, tl1_trap
119888644Sjake	 mov	%g2, %o0
119988644SjakeEND(tl1_sfsr_trap)
120080709Sjake
120084186Sjake	.macro	tl1_intr level, mask
	/*
	 * Kernel interrupt-level trap slot: enter the common TL1 interrupt
	 * code with the level in %o0 and its mask in %o1.
	 */
120191246Sjake	tl1_split
120291246Sjake	set	\mask, %o1
120384186Sjake	b	%xcc, tl1_intr
120491246Sjake	 mov	\level, %o0
120581380Sjake	.align	32
120681380Sjake	.endm
120881380Sjake
120980709Sjake	.macro	tl1_intr_level
	/*
	 * Expand the interrupt-level trap entries for TL1 (see INTR_LEVEL,
	 * defined elsewhere).
	 */
121081380Sjake	INTR_LEVEL(1)
121180709Sjake	.endm
121280709Sjake
121397265SjakeENTRY(intr_dequeue)
	/*
	 * Drain the per-cpu interrupt request list: unlink each queued
	 * intr_request with normal globals selected (PSTATE_NORMAL —
	 * presumably to exclude the vectored-interrupt path; confirm
	 * against the PSTATE definitions), move it to the free list, then
	 * call its handler with its argument under PSTATE_KERNEL.
	 * Returns when the list is empty.
	 */
121497265Sjake	save	%sp, -CCFSZ, %sp
121580709Sjake
121697265Sjake1:	ldx	[PCPU(IRHEAD)], %l0
121797265Sjake	brnz,a,pt %l0, 2f
121897265Sjake	 nop
121997265Sjake
122097265Sjake	ret
122197265Sjake	 restore
122297265Sjake
122397265Sjake2:	wrpr	%g0, PSTATE_NORMAL, %pstate
122497265Sjake
	/* Unlink the head; if the list became empty, reset the tail. */
122597265Sjake	ldx	[%l0 + IR_NEXT], %l1
122697265Sjake	brnz,pt	%l1, 3f
122797265Sjake	 stx	%l1, [PCPU(IRHEAD)]
122897265Sjake	PCPU_ADDR(IRHEAD, %l1)
122997265Sjake	stx	%l1, [PCPU(IRTAIL)]
123097265Sjake
123197265Sjake3:	ldx	[%l0 + IR_FUNC], %o0
123297265Sjake	ldx	[%l0 + IR_ARG], %o1
123397265Sjake	ldx	[%l0 + IR_VEC], %o2
123497265Sjake
	/* Return the request to the free list before calling out. */
123597265Sjake	ldx	[PCPU(IRFREE)], %l1
123697265Sjake	stx	%l1, [%l0 + IR_NEXT]
123797265Sjake	stx	%l0, [PCPU(IRFREE)]
123897265Sjake
123997265Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
124097265Sjake
124197265Sjake	call	%o0
124297265Sjake	 mov	%o1, %o0
124397265Sjake	ba,a	%xcc, 1b
124497265Sjake	 nop
124597265SjakeEND(intr_dequeue)
124697265Sjake
124797265Sjake/*
124897265Sjake * Handle a vectored interrupt.
124997265Sjake *
125097265Sjake * This is either a data bearing mondo vector interrupt, or a cross trap
125197265Sjake * request from another cpu.  In either case the hardware supplies an
125297265Sjake * interrupt packet, in the form of 3 data words which are read from internal
125397265Sjake * registers.  A data bearing mondo vector packet consists of an interrupt
125497265Sjake * number in the first data word, and zero in 2nd and 3rd.  We use the
125597265Sjake * interrupt number to find the function, argument and priority from the
125697265Sjake * intr_vector table, allocate and fill in an intr_request from the per-cpu
125797265Sjake * free list, link it onto the per-cpu active list and finally post a softint
125897265Sjake * at the desired priority.  Cross trap requests come in 2 forms, direct
125997265Sjake * and queued.  Direct requests are distinguished by the first data word
126097265Sjake * being zero.  The 2nd data word carries a function to call and the 3rd
126197265Sjake * an argument to pass.  The function is jumped to directly.  It executes
126297265Sjake * in nucleus context on interrupt globals and with all interrupts disabled,
126397265Sjake * therefore it must be fast, and the things that it can do are limited.
126497265Sjake * Queued cross trap requests are handled much like mondo vectors, except
126597265Sjake * that the function, argument and priority are contained in the interrupt
126697265Sjake * packet itself.  They are distinguished by the upper 4 bits of the data
126797265Sjake * word being non-zero, which specifies the priority of the softint to
126897265Sjake * deliver.
126997265Sjake *
127097265Sjake * Register usage:
127197265Sjake *	%g1 - pointer to intr_request
127297265Sjake *	%g2 - pointer to intr_vector, temp once required data is loaded
127397265Sjake *	%g3 - interrupt number for mondo vectors, unused otherwise
127497265Sjake *	%g4 - function, from the interrupt packet for cross traps, or
127597265Sjake *	      loaded from the interrupt registers for mondo vectors
127697265Sjake *	%g5 - argument, as above for %g4
127797265Sjake *	%g6 - softint priority
127897265Sjake */
127981380SjakeENTRY(intr_enqueue)
128081380Sjake	/*
128189049Sjake	 * Load the interrupt packet from the hardware.
128281380Sjake	 */
128389049Sjake	wr	%g0, ASI_SDB_INTR_R, %asi
128489049Sjake	ldxa	[%g0 + AA_SDB_INTR_D0] %asi, %g3
128589049Sjake	ldxa	[%g0 + AA_SDB_INTR_D1] %asi, %g4
128689049Sjake	ldxa	[%g0 + AA_SDB_INTR_D2] %asi, %g5
128789049Sjake	stxa	%g0, [%g0] ASI_INTR_RECEIVE
128889049Sjake	membar	#Sync
128981380Sjake
129097265Sjake#if KTR_COMPILE & KTR_INTR
129197265Sjake	CATR(KTR_INTR, "intr_enqueue: data=%#lx %#lx %#lx"
129297265Sjake	    , %g1, %g2, %g6, 7, 8, 9)
129397265Sjake	stx	%g3, [%g1 + KTR_PARM1]
129497265Sjake	stx	%g4, [%g1 + KTR_PARM2]
129597265Sjake	stx	%g5, [%g1 + KTR_PARM3]
129697265Sjake9:
129797265Sjake#endif
129897265Sjake
129981380Sjake	/*
130097265Sjake	 * If the first data word is zero this is a direct cross trap request.
130197265Sjake	 * The 2nd word points to code to execute and the 3rd is an argument
130297265Sjake	 * to pass.  Jump to it.
130381380Sjake	 */
130497265Sjake	brnz,a,pt %g3, 1f
130581380Sjake	 nop
130697265Sjake
130797265Sjake#if KTR_COMPILE & KTR_INTR
130897265Sjake	CATR(KTR_INTR, "intr_enqueue: direct ipi func=%#lx arg=%#lx"
130997265Sjake	    , %g1, %g2, %g6, 7, 8, 9)
131097265Sjake	stx	%g4, [%g1 + KTR_PARM1]
131197265Sjake	stx	%g5, [%g1 + KTR_PARM2]
131297265Sjake9:
131397265Sjake#endif
131497265Sjake
131589049Sjake	jmpl	%g4, %g0
131689049Sjake	 nop
131797265Sjake	/* NOTREACHED */
131881380Sjake
131981380Sjake	/*
132097265Sjake	 * If the high 4 bits of the 1st data word are non-zero, this is a
132197265Sjake	 * queued cross trap request to be delivered as a softint.  The high
132297265Sjake	 * 4 bits of the 1st data word specify a priority, and the 2nd and
132397265Sjake	 * 3rd a function and argument.
132485243Sjake	 */
132597265Sjake1:	srlx	%g3, 60, %g6
132697265Sjake	brnz,a,pn %g6, 2f
132797265Sjake	 clr	%g3
132885243Sjake
132985243Sjake	/*
133097265Sjake	 * Find the function, argument and desired priority from the
133197265Sjake	 * intr_vector table.
133281380Sjake	 */
133397265Sjake	SET(intr_vectors, %g4, %g2)
133497265Sjake	sllx	%g3, IV_SHIFT, %g4
133597265Sjake	add	%g2, %g4, %g2
133681380Sjake
133797265Sjake#if KTR_COMPILE & KTR_INTR
133897265Sjake	CATR(KTR_INTR, "intr_enqueue: mondo vector func=%#lx arg=%#lx pri=%#lx"
133997265Sjake	    , %g4, %g5, %g6, 7, 8, 9)
134097265Sjake	ldx	[%g2 + IV_FUNC], %g5
134197265Sjake	stx	%g5, [%g4 + KTR_PARM1]
134297265Sjake	ldx	[%g2 + IV_ARG], %g5
134397265Sjake	stx	%g5, [%g4 + KTR_PARM2]
134497265Sjake	ldx	[%g2 + IV_PRI], %g5
134597265Sjake	stx	%g5, [%g4 + KTR_PARM3]
134697265Sjake9:
134797265Sjake#endif
134897265Sjake
134997265Sjake	ldx	[%g2 + IV_FUNC], %g4
135097265Sjake	ldx	[%g2 + IV_ARG], %g5
135197265Sjake	lduw	[%g2 + IV_PRI], %g6
135297265Sjake
135397265Sjake	ba,a	%xcc, 3f
135497265Sjake	 nop
135597265Sjake
135681380Sjake	/*
	 * Get an intr_request from the free list.  There should always be
	 * one unless we are getting an interrupt storm from stray
	 * interrupts, in which case we will dereference a NULL pointer and
	 * panic.
136097265Sjake	 */
136197265Sjake2:
136297265Sjake#if KTR_COMPILE & KTR_INTR
136397265Sjake	CATR(KTR_INTR, "intr_enqueue: queued ipi func=%#lx arg=%#lx pri=%#lx"
136497265Sjake	    , %g1, %g2, %g3, 7, 8, 9)
136597265Sjake	stx	%g4, [%g1 + KTR_PARM1]
136697265Sjake	stx	%g5, [%g1 + KTR_PARM2]
136797265Sjake	stx	%g6, [%g1 + KTR_PARM3]
136897265Sjake9:
136997265Sjake	clr	%g3
137097265Sjake#endif
137181380Sjake
137297265Sjake3:
137397265Sjake	ldx	[PCPU(IRFREE)], %g1
137497265Sjake	ldx	[%g1 + IR_NEXT], %g2
137597265Sjake	stx	%g2, [PCPU(IRFREE)]
137697265Sjake
137781380Sjake	/*
137897265Sjake	 * Store the vector number, function, argument and priority.
137981380Sjake	 */
138097265Sjake	stw	%g3, [%g1 + IR_VEC]
138197265Sjake	stx	%g4, [%g1 + IR_FUNC]
138297265Sjake	stx	%g5, [%g1 + IR_ARG]
138397265Sjake	stw	%g6, [%g1 + IR_PRI]
138481380Sjake
138581380Sjake	/*
138697265Sjake	 * Link it onto the end of the active list.
138781380Sjake	 */
138897265Sjake	stx	%g0, [%g1 + IR_NEXT]
138997265Sjake	ldx	[PCPU(IRTAIL)], %g4
139097265Sjake	stx	%g1, [%g4]
139197265Sjake	add	%g1, IR_NEXT, %g1
139297265Sjake	stx	%g1, [PCPU(IRTAIL)]
139381380Sjake
139497265Sjake	/*
139597265Sjake	 * Trigger a softint at the level indicated by the priority.
139697265Sjake	 */
139797265Sjake	mov	1, %g1
139897265Sjake	sllx	%g1, %g6, %g1
139997265Sjake
140088644Sjake#if KTR_COMPILE & KTR_INTR
140197265Sjake	CATR(KTR_INTR, "intr_enqueue: softint pil=%#lx pri=%#lx mask=%#lx"
140289049Sjake	    , %g2, %g3, %g4, 7, 8, 9)
140397265Sjake	rdpr	%pil, %g3
140489049Sjake	stx	%g3, [%g2 + KTR_PARM1]
140597265Sjake	stx	%g6, [%g2 + KTR_PARM2]
140697265Sjake	stx	%g1, [%g2 + KTR_PARM3]
140788644Sjake9:
140888644Sjake#endif
140988644Sjake
	/* %asr20 is presumably the SOFTINT set register — confirm. */
141089049Sjake	wr	%g1, 0, %asr20
141182906Sjake
141281380Sjake	/*
141397265Sjake	 * Done, retry the instruction.
141481380Sjake	 */
141597265Sjake	retry
141681380SjakeEND(intr_enqueue)
141781380Sjake
141880709Sjake	.macro	tl1_immu_miss
	/*
	 * TL1 (kernel) instruction MMU miss: look the faulting address up
	 * in the kernel tsb inline; the tsb mask/base instructions are
	 * runtime-patched (see the *_patch_* labels).  Fits one 128-byte
	 * trap slot.
	 */
141991224Sjake	/*
142091224Sjake	 * Load the context and the virtual page number from the tag access
142191224Sjake	 * register.  We ignore the context.
142291224Sjake	 */
142391224Sjake	wr	%g0, ASI_IMMU, %asi
1424102040Sjake	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g5
142585585Sjake
142691224Sjake	/*
1427102040Sjake	 * Compute the address of the tte.  The tsb mask and address of the
1428102040Sjake	 * tsb are patched at startup.
142991224Sjake	 */
1430102040Sjake	.globl	tl1_immu_miss_patch_1
1431102040Sjaketl1_immu_miss_patch_1:
1432102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1433102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
1434102040Sjake	sethi	%hi(TSB_KERNEL), %g7
143585585Sjake
1436102040Sjake	srlx	%g5, TAR_VPN_SHIFT, %g5
1437102040Sjake	and	%g5, %g6, %g6
1438102040Sjake	sllx	%g6, TTE_SHIFT, %g6
1439102040Sjake	add	%g6, %g7, %g6
144085585Sjake
144185585Sjake	/*
	 * Load the tte (tag into %g6, data into %g7).
144391224Sjake	 */
1444102040Sjake	ldda	[%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
144591224Sjake
144691224Sjake	/*
144791224Sjake	 * Check that its valid and executable and that the virtual page
144891224Sjake	 * numbers match.
144991224Sjake	 */
1450102040Sjake	brgez,pn %g7, tl1_immu_miss_trap
1451102040Sjake	 andcc	%g7, TD_EXEC, %g0
145291224Sjake	bz,pn	%xcc, tl1_immu_miss_trap
1453102040Sjake	 srlx	%g6, TV_SIZE_BITS, %g6
1454102040Sjake	cmp	%g5, %g6
145591224Sjake	bne,pn	%xcc, tl1_immu_miss_trap
145685585Sjake	 EMPTY
145785585Sjake
145885585Sjake	/*
145991224Sjake	 * Set the reference bit if its currently clear.
146085585Sjake	 */
1461102040Sjake	 andcc	%g7, TD_REF, %g0
1462102040Sjake	bz,a,pn	%xcc, tl1_immu_miss_set_ref
146391224Sjake	 nop
146485585Sjake
146591224Sjake	/*
1466102040Sjake	 * Load the tte data into the TLB and retry the instruction.
146791224Sjake	 */
1468102040Sjake	stxa	%g7, [%g0] ASI_ITLB_DATA_IN_REG
1469102040Sjake	retry
1470102040Sjake	.align	128
1471102040Sjake	.endm
147288644Sjake
1473102040SjakeENTRY(tl1_immu_miss_set_ref)
	/*
	 * Slow path of tl1_immu_miss: set the reference bit on the kernel
	 * tte and reload the ITLB.  In: %g5 = virtual page number (tag
	 * access >> TAR_VPN_SHIFT).
	 */
147485585Sjake	/*
1475102040Sjake	 * Recompute the tte address, which we clobbered loading the tte.  The
1476102040Sjake	 * tsb mask and address of the tsb are patched at startup.
1477102040Sjake	 */
1478102040Sjake	.globl	tl1_immu_miss_patch_2
1479102040Sjaketl1_immu_miss_patch_2:
1480102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1481102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
1482102040Sjake	sethi	%hi(TSB_KERNEL), %g7
1483102040Sjake
1484102040Sjake	and	%g5, %g6, %g5
1485102040Sjake	sllx	%g5, TTE_SHIFT, %g5
1486102040Sjake	add	%g5, %g7, %g5
1487102040Sjake
1488102040Sjake	/*
1489102040Sjake	 * Set the reference bit.
1490102040Sjake	 */
1491102040Sjake	TTE_SET_REF(%g5, %g6, %g7)
1492102040Sjake
1493102040Sjake	/*
1494102040Sjake	 * May have become invalid during casxa, in which case start over.
1495102040Sjake	 */
1496102040Sjake	brgez,pn %g6, 1f
1497102040Sjake	 nop
1498102040Sjake
1499102040Sjake	/*
150085585Sjake	 * Load the tte data into the TLB and retry the instruction.
150185585Sjake	 */
1502102040Sjake	stxa	%g6, [%g0] ASI_ITLB_DATA_IN_REG
1503102040Sjake1:	retry
1504102040SjakeEND(tl1_immu_miss_set_ref)
150585585Sjake
150691224SjakeENTRY(tl1_immu_miss_trap)
	/*
	 * The kernel tsb lookup failed; pass the tag access contents to
	 * the common TL1 trap code.
	 */
150785585Sjake	/*
150885585Sjake	 * Switch to alternate globals.
150985585Sjake	 */
151091224Sjake	wrpr	%g0, PSTATE_ALT, %pstate
151185585Sjake
151291224Sjake	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2
151385585Sjake
151491246Sjake	tl1_split
151591224Sjake	mov	%g2, %o3
151680709Sjake	b	%xcc, tl1_trap
151788644Sjake	 mov	T_INSTRUCTION_MISS | T_KERNEL, %o0
151891224SjakeEND(tl1_immu_miss_trap)
151991224Sjake
152091224Sjake	.macro	tl1_dmmu_miss
	/*
	 * TL1 (kernel) data MMU miss: dispatch user-context faults and
	 * direct-mapped physical addresses out of line, otherwise look the
	 * address up in the kernel tsb inline (tsb mask/base instructions
	 * are runtime-patched).  Fits one 128-byte trap slot.
	 */
152191224Sjake	/*
152291224Sjake	 * Load the context and the virtual page number from the tag access
152391224Sjake	 * register.
152491224Sjake	 */
152591224Sjake	wr	%g0, ASI_DMMU, %asi
1526102040Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g5
152780709Sjake
152891224Sjake	/*
152991224Sjake	 * Extract the context from the contents of the tag access register.
	 * If its non-zero this is a fault on a user address; the tag access
	 * contents are passed in %g1.
153291224Sjake	 */
1533102040Sjake	sllx	%g5, 64 - TAR_VPN_SHIFT, %g6
1534102040Sjake	brnz,a,pn %g6, tl1_dmmu_miss_user
1535102040Sjake	 mov	%g5, %g1
153680709Sjake
153791224Sjake	/*
1538100771Sjake	 * Check for the direct mapped physical region.  These addresses have
1539100771Sjake	 * the high bit set so they are negative.
1540100771Sjake	 */
1541102040Sjake	brlz,pn %g5, tl1_dmmu_miss_direct
1542100771Sjake	 EMPTY
1543100771Sjake
1544100771Sjake	/*
1545102040Sjake	 * Compute the address of the tte.  The tsb mask and address of the
1546102040Sjake	 * tsb are patched at startup.
154791224Sjake	 */
1548102040Sjake	.globl	tl1_dmmu_miss_patch_1
1549102040Sjaketl1_dmmu_miss_patch_1:
1550102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1551102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
1552102040Sjake	sethi	%hi(TSB_KERNEL), %g7
155384186Sjake
1554102040Sjake	srlx	%g5, TAR_VPN_SHIFT, %g5
1555102040Sjake	and	%g5, %g6, %g6
1556102040Sjake	sllx	%g6, TTE_SHIFT, %g6
1557102040Sjake	add	%g6, %g7, %g6
155891224Sjake
155991224Sjake	/*
	 * Load the tte (tag into %g6, data into %g7).
156191224Sjake	 */
1562102040Sjake	ldda	[%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
156391224Sjake
156491224Sjake	/*
156591224Sjake	 * Check that its valid and that the virtual page numbers match.
156691224Sjake	 */
1567102040Sjake	brgez,pn %g7, tl1_dmmu_miss_trap
1568102040Sjake	 srlx	%g6, TV_SIZE_BITS, %g6
1569102040Sjake	cmp	%g5, %g6
157091224Sjake	bne,pn %xcc, tl1_dmmu_miss_trap
157180709Sjake	 EMPTY
157280709Sjake
157380709Sjake	/*
157491224Sjake	 * Set the reference bit if its currently clear.
157580709Sjake	 */
1576102040Sjake	 andcc	%g7, TD_REF, %g0
1577102040Sjake	bz,a,pt	%xcc, tl1_dmmu_miss_set_ref
157891224Sjake	 nop
157980709Sjake
158091224Sjake	/*
1581102040Sjake	 * Load the tte data into the TLB and retry the instruction.
158291224Sjake	 */
1583102040Sjake	stxa	%g7, [%g0] ASI_DTLB_DATA_IN_REG
1584102040Sjake	retry
1585102040Sjake	.align	128
1586102040Sjake	.endm
158788644Sjake
1588102040SjakeENTRY(tl1_dmmu_miss_set_ref)
	/*
	 * Slow path of tl1_dmmu_miss: set the reference bit on the kernel
	 * tte and reload the DTLB.  In: %g5 = virtual page number.
	 */
158980709Sjake	/*
1590102040Sjake	 * Recompute the tte address, which we clobbered loading the tte.  The
1591102040Sjake	 * tsb mask and address of the tsb are patched at startup.
1592102040Sjake	 */
1593102040Sjake	.globl	tl1_dmmu_miss_patch_2
1594102040Sjaketl1_dmmu_miss_patch_2:
1595102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1596102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
1597102040Sjake	sethi	%hi(TSB_KERNEL), %g7
1598102040Sjake
1599102040Sjake	and	%g5, %g6, %g5
1600102040Sjake	sllx	%g5, TTE_SHIFT, %g5
1601102040Sjake	add	%g5, %g7, %g5
1602102040Sjake
1603102040Sjake	/*
1604102040Sjake	 * Set the reference bit.
1605102040Sjake	 */
1606102040Sjake	TTE_SET_REF(%g5, %g6, %g7)
1607102040Sjake
1608102040Sjake	/*
1609102040Sjake	 * May have become invalid during casxa, in which case start over.
1610102040Sjake	 */
1611102040Sjake	brgez,pn %g6, 1f
1612102040Sjake	 nop
1613102040Sjake
1614102040Sjake	/*
161582906Sjake	 * Load the tte data into the TLB and retry the instruction.
161680709Sjake	 */
1617102040Sjake	stxa	%g6, [%g0] ASI_DTLB_DATA_IN_REG
1618102040Sjake1:	retry
1619102040SjakeEND(tl1_dmmu_miss_set_ref)
162080709Sjake
162191224SjakeENTRY(tl1_dmmu_miss_trap)
	/*
	 * The kernel tsb lookup failed; check for kernel stack overflow
	 * (KSTACK_CHECK, defined elsewhere) and enter the common TL1 trap
	 * code with the tag access contents.
	 */
162280709Sjake	/*
162382906Sjake	 * Switch to alternate globals.
162480709Sjake	 */
162591224Sjake	wrpr	%g0, PSTATE_ALT, %pstate
162680709Sjake
162788781Sjake	KSTACK_CHECK
162888781Sjake
162991224Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
163082906Sjake
163191246Sjake	tl1_split
163291224Sjake	mov	%g2, %o3
163382906Sjake	b	%xcc, tl1_trap
163488644Sjake	 mov	T_DATA_MISS | T_KERNEL, %o0
163588781SjakeEND(tl1_dmmu_miss_trap)
163680709Sjake
1637100771SjakeENTRY(tl1_dmmu_miss_direct)
	/*
	 * Fabricate a tte for an address in the direct-mapped physical
	 * region and load it straight into the DTLB — no tsb lookup
	 * needed.  In: %g5 = faulting virtual address (negative, high bit
	 * set).
	 */
1638100771Sjake	/*
1639100771Sjake	 * Check the cache bits in the virtual address to see if this mapping
1640100771Sjake	 * is virtually cacheable.  We set this up so that the masks fit in
1641100771Sjake	 * immediates...  Note that the arithmetic shift sign extends, keeping
1642100771Sjake	 * all the top bits set.
1643100771Sjake	 */
1644102040Sjake	srax	%g5, TLB_DIRECT_SHIFT, %g5
1645102040Sjake	andcc	%g5, TLB_DIRECT_UNCACHEABLE, %g0
1646102040Sjake	mov	TD_CP | TD_CV | TD_W, %g6
1647102040Sjake	movnz	%xcc, TD_CP | TD_W, %g6
1648102040Sjake	or	%g5, %g6, %g5
1649100771Sjake
1650100771Sjake	/*
1651100771Sjake	 * Mask off the high bits of the virtual address to get the physical
1652100771Sjake	 * address, and or in the tte bits.  The high bit is left set in the
1653100771Sjake	 * physical address, which corresponds to the tte valid bit, so that
1654100771Sjake	 * we don't have to include it in the tte bits.  We ignore the cache
1655100771Sjake	 * bits, since they get shifted into the soft tte bits anyway.
1656100771Sjake	 */
1657102040Sjake	setx	TLB_DIRECT_MASK & ~TD_V, %g7, %g6
1658102040Sjake	andn	%g5, %g6, %g5
1659100771Sjake
1660100771Sjake	/*
1661100771Sjake	 * Load the tte data into the TLB and retry the instruction.
1662100771Sjake	 */
1663102040Sjake	stxa	%g5, [%g0] ASI_DTLB_DATA_IN_REG
1664100771Sjake	retry
1665100771SjakeEND(tl1_dmmu_miss_direct)
1666100771Sjake
166781180SjakeENTRY(tl1_dmmu_miss_user)
	/*
	 * Kernel-mode data miss on a user address (e.g. during copyin/
	 * copyout): try the user tsb fast path, otherwise resume a faulted
	 * spill/fill or enter the common TL1 trap code.
	 * In: %g1 = tag access register contents, %asi = ASI_DMMU.
	 */
166881180Sjake	/*
166988644Sjake	 * Try a fast inline lookup of the user tsb.
167081180Sjake	 */
167181180Sjake	dmmu_miss_user
167281180Sjake
167381180Sjake	/*
167496207Sjake	 * Put back the contents of the tag access register, in case we
167596207Sjake	 * faulted.
167696207Sjake	 */
1677102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
167896207Sjake	membar	#Sync
167996207Sjake
168096207Sjake	/*
168182906Sjake	 * Switch to alternate globals.
168281180Sjake	 */
168382906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
168481180Sjake
168591224Sjake	/*
168691224Sjake	 * Handle faults during window spill/fill.
168791224Sjake	 */
168888644Sjake	RESUME_SPILLFILL_MMU
168988644Sjake
169091224Sjake	/*
169191224Sjake	 * Reload the tag access register.
169291224Sjake	 */
169391224Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
169491224Sjake
169591246Sjake	tl1_split
169691224Sjake	mov	%g2, %o3
169782906Sjake	b	%xcc, tl1_trap
169888644Sjake	 mov	T_DATA_MISS | T_KERNEL, %o0
169982906SjakeEND(tl1_dmmu_miss_user)
170081180Sjake
170182906Sjake	.macro	tl1_dmmu_prot
	/*
	 * TL1 data protection trap slot: branch to the out-of-line
	 * handler.  The nop is annulled by ba,a; it only pads the
	 * 128-byte trap slot.
	 */
1702102040Sjake	ba,a	%xcc, tl1_dmmu_prot_1
1703102040Sjake	 nop
1704102040Sjake	.align	128
1705102040Sjake	.endm
1706102040Sjake
1707102040SjakeENTRY(tl1_dmmu_prot_1)
	/*
	 * TL1 data protection fault: dispatch user-context faults out of
	 * line, otherwise find the writable kernel tte, set its hardware
	 * write bit, demap the stale TLB entry and reload.  The tsb
	 * mask/base instructions are runtime-patched.
	 */
170891224Sjake	/*
170991224Sjake	 * Load the context and the virtual page number from the tag access
171091224Sjake	 * register.
171191224Sjake	 */
171291224Sjake	wr	%g0, ASI_DMMU, %asi
1713102040Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g5
171488644Sjake
171591224Sjake	/*
171691224Sjake	 * Extract the context from the contents of the tag access register.
171791224Sjake	 * If its non-zero this is a fault on a user address, otherwise get
171891224Sjake	 * the virtual page number.
171991224Sjake	 */
1720102040Sjake	sllx	%g5, 64 - TAR_VPN_SHIFT, %g6
1721102040Sjake	brnz,a,pn %g6, tl1_dmmu_prot_user
1722102040Sjake	 mov	%g5, %g1
172388644Sjake
172491224Sjake	/*
1725102040Sjake	 * Compute the address of the tte.  The tsb mask and address of the
1726102040Sjake	 * tsb are patched at startup.
172791224Sjake	 */
1728102040Sjake	.globl	tl1_dmmu_prot_patch_1
1729102040Sjaketl1_dmmu_prot_patch_1:
1730102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1731102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
1732102040Sjake	sethi	%hi(TSB_KERNEL), %g7
173388644Sjake
1734102040Sjake	srlx	%g5, TAR_VPN_SHIFT, %g5
1735102040Sjake	and	%g5, %g6, %g6
1736102040Sjake	sllx	%g6, TTE_SHIFT, %g6
1737102040Sjake	add	%g6, %g7, %g6
173891224Sjake
173991224Sjake	/*
	 * Load the tte (tag into %g6, data into %g7).
174191224Sjake	 */
1742102040Sjake	ldda	[%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
174391224Sjake
174491224Sjake	/*
174591224Sjake	 * Check that its valid and writeable and that the virtual page
174691224Sjake	 * numbers match.
174791224Sjake	 */
1748102040Sjake	brgez,pn %g7, tl1_dmmu_prot_trap
1749102040Sjake	 andcc	%g7, TD_SW, %g0
175091224Sjake	bz,pn	%xcc, tl1_dmmu_prot_trap
1751102040Sjake	 srlx	%g6, TV_SIZE_BITS, %g6
1752102040Sjake	cmp	%g5, %g6
175391224Sjake	bne,pn	%xcc, tl1_dmmu_prot_trap
175488644Sjake	 EMPTY
175588644Sjake
175688644Sjake	/*
175791224Sjake	 * Delete the old TLB entry and clear the sfsr.
175888644Sjake	 */
1759102040Sjake	 sllx	%g5, TAR_VPN_SHIFT, %g6
176091224Sjake	or	%g6, TLB_DEMAP_NUCLEUS, %g6
176191224Sjake	stxa	%g0, [%g6] ASI_DMMU_DEMAP
176281180Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
176391224Sjake	membar	#Sync
176481180Sjake
1765102040Sjake	/*
1766102040Sjake	 * Recompute the tte address, which we clobbered loading the tte.  The
1767102040Sjake	 * tsb mask and address of the tsb are patched at startup.
1768102040Sjake	 */
1769102040Sjake	.globl	tl1_dmmu_prot_patch_2
1770102040Sjaketl1_dmmu_prot_patch_2:
1771102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1772102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
1773102040Sjake	sethi	%hi(TSB_KERNEL), %g7
177496207Sjake
1775102040Sjake	and	%g5, %g6, %g5
1776102040Sjake	sllx	%g5, TTE_SHIFT, %g5
1777102040Sjake	add	%g5, %g7, %g5
1778102040Sjake
177981180Sjake	/*
178091224Sjake	 * Set the hardware write bit.
178191224Sjake	 */
1782102040Sjake	TTE_SET_W(%g5, %g6, %g7)
178391224Sjake
178491224Sjake	/*
1785102040Sjake	 * May have become invalid during casxa, in which case start over.
1786102040Sjake	 */
1787102040Sjake	brgez,pn %g6, 1f
1788102040Sjake	 or	%g6, TD_W, %g6
1789102040Sjake
1790102040Sjake	/*
179188644Sjake	 * Load the tte data into the TLB and retry the instruction.
179288644Sjake	 */
1793102040Sjake	stxa	%g6, [%g0] ASI_DTLB_DATA_IN_REG
1794102040Sjake1:	retry
1795102040SjakeEND(tl1_dmmu_prot_1)
179688644Sjake
ENTRY(tl1_dmmu_prot_user)
	/*
	 * Protection fault on a user address taken at tl > 0.  If the fast
	 * inline tsb lookup below does not resume the trapped instruction,
	 * restore the tag access register, handle faults taken during window
	 * spill/fill, and otherwise fall back to tl1_dmmu_prot_trap.
	 */

	/*
	 * Try a fast inline lookup of the user tsb.
	 */
	dmmu_prot_user

	/*
	 * Put back the contents of the tag access register, in case we
	 * faulted.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	membar	#Sync

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/* Handle faults during window spill/fill. */
	RESUME_SPILLFILL_MMU_CLR_SFSR

	b,a	%xcc, tl1_dmmu_prot_trap
	 nop
END(tl1_dmmu_prot_user)
182188644Sjake
ENTRY(tl1_dmmu_prot_trap)
	/*
	 * Slow path for kernel protection faults: capture the mmu fault
	 * state and call into C via tl1_trap with type
	 * T_DATA_PROTECTION | T_KERNEL.
	 */

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Load the sfar, sfsr and tar.  Clear the sfsr.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	tl1_split
	mov	%g2, %o3
	mov	%g3, %o4
	mov	%g4, %o5
	b	%xcc, tl1_trap
	 mov	T_DATA_PROTECTION | T_KERNEL, %o0
END(tl1_dmmu_prot_trap)
184481180Sjake
	.macro	tl1_spill_0_n
	/*
	 * Spill a 64 bit kernel frame to the kernel stack.  The RSF_FATAL
	 * entries after the 32 byte alignment are entered if the spill
	 * itself faults, which for a kernel stack is unrecoverable.
	 */
	SPILL(stx, %sp + SPOFF, 8, EMPTY)
	saved
	retry
	.align	32
	RSF_FATAL(T_SPILL)
	RSF_FATAL(T_SPILL)
	.endm
185380709Sjake
	.macro	tl1_spill_2_n
	/*
	 * Spill a 64 bit user frame from tl > 0, storing through ASI_AIUP
	 * ("as if user primary") so the stores go to the user's address
	 * space.  If the spill faults, the window is saved to the pcb
	 * instead (RSF_SPILL_TOPCB).
	 */
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm
186381380Sjake
	.macro	tl1_spill_3_n
	/*
	 * Spill a 32 bit user frame from tl > 0 via ASI_AIUP (4 byte
	 * stores, no stack bias).  Faults divert to the pcb spill code.
	 */
	wr	%g0, ASI_AIUP, %asi
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm
187382906Sjake
	.macro	tl1_spill_0_o
	/*
	 * Spill a 64 bit user frame from the "other" window state via
	 * ASI_AIUP.  Faults divert to the pcb spill code.
	 */
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm
188382906Sjake
	.macro	tl1_spill_1_o
	/*
	 * Spill a 32 bit user frame from the "other" window state via
	 * ASI_AIUP.  Faults divert to the pcb spill code.
	 */
	wr	%g0, ASI_AIUP, %asi
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm
189382005Sjake
	.macro	tl1_spill_2_o
	/*
	 * Spill the window directly to the pcb, without trying the user
	 * stack at all.
	 */
	RSF_SPILL_TOPCB
	.align	128
	.endm
189880709Sjake
	.macro	tl1_fill_0_n
	/*
	 * Fill a 64 bit kernel frame from the kernel stack.  A fault here
	 * is fatal.
	 */
	FILL(ldx, %sp + SPOFF, 8, EMPTY)
	restored
	retry
	.align	32
	RSF_FATAL(T_FILL)
	RSF_FATAL(T_FILL)
	.endm
190780709Sjake
	.macro	tl1_fill_2_n
	/*
	 * Fill a 64 bit user frame from the user stack at tl > 0, loading
	 * through ASI_AIUP.  Faults enter the RSF_FILL_MAGIC recovery code.
	 */
	wr	%g0, ASI_AIUP, %asi
	FILL(ldxa, %sp + SPOFF, 8, %asi)
	restored
	retry
	.align 32
	RSF_FILL_MAGIC
	RSF_FILL_MAGIC
	.endm
191782906Sjake
	.macro	tl1_fill_3_n
	/*
	 * Fill a 32 bit user frame from the user stack at tl > 0, loading
	 * through ASI_AIUP (4 byte zero-extending loads, no stack bias).
	 * Faults enter the RSF_FILL_MAGIC recovery code.
	 */
	wr	%g0, ASI_AIUP, %asi
	FILL(lduwa, %sp, 4, %asi)
	restored
	retry
	.align 32
	RSF_FILL_MAGIC
	RSF_FILL_MAGIC
	.endm
192782906Sjake
/*
 * This is used to spill windows that are still occupied with user
 * data on kernel entry to the pcb.  Each saved window's stack pointer
 * and registers are appended to the pcb save areas and PCB_NSAVED is
 * incremented; the windows are copied out to the user stack later,
 * before returning to usermode.
 */
ENTRY(tl1_spill_topcb)
	wrpr	%g0, PSTATE_ALT, %pstate

	/* Free some globals for our use. */
	dec	24, ASP_REG
	stx	%g1, [ASP_REG + 0]
	stx	%g2, [ASP_REG + 8]
	stx	%g3, [ASP_REG + 16]

	/* %g1 = number of windows already saved to the pcb. */
	ldx	[PCB_REG + PCB_NSAVED], %g1

	/* Record the user stack pointer for this window. */
	sllx	%g1, PTR_SHIFT, %g2
	add	%g2, PCB_REG, %g2
	stx	%sp, [%g2 + PCB_RWSP]

	/* Store the window's registers into the pcb save area. */
	sllx	%g1, RW_SHIFT, %g2
	add	%g2, PCB_REG, %g2
	SPILL(stx, %g2 + PCB_RW, 8, EMPTY)

	/* Account for the newly saved window. */
	inc	%g1
	stx	%g1, [PCB_REG + PCB_NSAVED]

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl1_spill_topcb: pc=%#lx npc=%#lx sp=%#lx nsaved=%d"
	   , %g1, %g2, %g3, 7, 8, 9)
	rdpr	%tpc, %g2
	stx	%g2, [%g1 + KTR_PARM1]
	rdpr	%tnpc, %g2
	stx	%g2, [%g1 + KTR_PARM2]
	stx	%sp, [%g1 + KTR_PARM3]
	ldx	[PCB_REG + PCB_NSAVED], %g2
	stx	%g2, [%g1 + KTR_PARM4]
9:
#endif

	saved

	/* Restore the borrowed globals and retry. */
	ldx	[ASP_REG + 16], %g3
	ldx	[ASP_REG + 8], %g2
	ldx	[ASP_REG + 0], %g1
	inc	24, ASP_REG
	retry
END(tl1_spill_topcb)
197582005Sjake
	.macro	tl1_spill_bad	count
	/*
	 * Generate "count" unused 128 byte spill vectors; reaching one
	 * resets the machine (sir = software initiated reset).
	 */
	.rept	\count
	sir
	.align	128
	.endr
	.endm
198282906Sjake
	.macro	tl1_fill_bad	count
	/*
	 * Generate "count" unused 128 byte fill vectors; reaching one
	 * resets the machine (sir = software initiated reset).
	 */
	.rept	\count
	sir
	.align	128
	.endr
	.endm
198980709Sjake
	.macro	tl1_soft	count
	/*
	 * Generate "count" generic kernel software trap vectors, each
	 * dispatching with type T_SOFT | T_KERNEL.
	 */
	.rept	\count
	tl1_gen	T_SOFT | T_KERNEL
	.endr
	.endm
199580709Sjake
	.sect	.trap
	.align	0x8000
	.globl	tl0_base

/*
 * Trap table for traps taken at tl = 0 (from usermode).  Normal entries
 * are 32 bytes; spill/fill entries occupy 4 trap table slots (128 bytes).
 * The numbers in the comments are the hardware trap type codes.
 */
tl0_base:
	tl0_reserved	8				! 0x0-0x7
tl0_insn_excptn:
	tl0_insn_excptn					! 0x8
	tl0_reserved	1				! 0x9
tl0_insn_error:
	tl0_gen		T_INSTRUCTION_ERROR		! 0xa
	tl0_reserved	5				! 0xb-0xf
tl0_insn_illegal:
	tl0_gen		T_ILLEGAL_INSTRUCTION		! 0x10
tl0_priv_opcode:
	tl0_gen		T_PRIVILEGED_OPCODE		! 0x11
	tl0_reserved	14				! 0x12-0x1f
tl0_fp_disabled:
	tl0_gen		T_FP_DISABLED			! 0x20
tl0_fp_ieee:
	tl0_gen		T_FP_EXCEPTION_IEEE_754		! 0x21
tl0_fp_other:
	tl0_gen		T_FP_EXCEPTION_OTHER		! 0x22
tl0_tag_ovflw:
	tl0_gen		T_TAG_OFERFLOW			! 0x23
tl0_clean_window:
	clean_window					! 0x24
tl0_divide:
	tl0_gen		T_DIVISION_BY_ZERO		! 0x28
	tl0_reserved	7				! 0x29-0x2f
tl0_data_excptn:
	tl0_data_excptn					! 0x30
	tl0_reserved	1				! 0x31
tl0_data_error:
	tl0_gen		T_DATA_ERROR			! 0x32
	tl0_reserved	1				! 0x33
tl0_align:
	tl0_align					! 0x34
tl0_align_lddf:
	tl0_gen		T_RESERVED			! 0x35
tl0_align_stdf:
	tl0_gen		T_RESERVED			! 0x36
tl0_priv_action:
	tl0_gen		T_PRIVILEGED_ACTION		! 0x37
	tl0_reserved	9				! 0x38-0x40
tl0_intr_level:
	tl0_intr_level					! 0x41-0x4f
	tl0_reserved	16				! 0x50-0x5f
tl0_intr_vector:
	intr_vector					! 0x60
tl0_watch_phys:
	tl0_gen		T_PA_WATCHPOINT			! 0x61
tl0_watch_virt:
	tl0_gen		T_VA_WATCHPOINT			! 0x62
tl0_ecc:
	tl0_gen		T_CORRECTED_ECC_ERROR		! 0x63
tl0_immu_miss:
	tl0_immu_miss					! 0x64
tl0_dmmu_miss:
	tl0_dmmu_miss					! 0x68
tl0_dmmu_prot:
	tl0_dmmu_prot					! 0x6c
	tl0_reserved	16				! 0x70-0x7f
tl0_spill_0_n:
	tl0_spill_0_n					! 0x80
tl0_spill_1_n:
	tl0_spill_1_n					! 0x84
	tl0_spill_bad	14				! 0x88-0xbf
tl0_fill_0_n:
	tl0_fill_0_n					! 0xc0
tl0_fill_1_n:
	tl0_fill_1_n					! 0xc4
	tl0_fill_bad	14				! 0xc8-0xff
tl0_soft:
	tl0_reserved	1				! 0x100
	tl0_gen		T_BREAKPOINT			! 0x101
	tl0_gen		T_DIVISION_BY_ZERO		! 0x102
	tl0_reserved	1				! 0x103
	tl0_gen		T_CLEAN_WINDOW			! 0x104
	tl0_gen		T_RANGE_CHECK			! 0x105
	tl0_gen		T_FIX_ALIGNMENT			! 0x106
	tl0_gen		T_INTEGER_OVERFLOW		! 0x107
	tl0_reserved	1				! 0x108
	tl0_syscall					! 0x109
	tl0_fp_restore					! 0x10a
	tl0_reserved	5				! 0x10b-0x10f
	tl0_gen		T_TRAP_INSTRUCTION_16		! 0x110
	tl0_gen		T_TRAP_INSTRUCTION_17		! 0x111
	tl0_gen		T_TRAP_INSTRUCTION_18		! 0x112
	tl0_gen		T_TRAP_INSTRUCTION_19		! 0x113
	tl0_gen		T_TRAP_INSTRUCTION_20		! 0x114
	tl0_gen		T_TRAP_INSTRUCTION_21		! 0x115
	tl0_gen		T_TRAP_INSTRUCTION_22		! 0x116
	tl0_gen		T_TRAP_INSTRUCTION_23		! 0x117
	tl0_gen		T_TRAP_INSTRUCTION_24		! 0x118
	tl0_gen		T_TRAP_INSTRUCTION_25		! 0x119
	tl0_gen		T_TRAP_INSTRUCTION_26		! 0x11a
	tl0_gen		T_TRAP_INSTRUCTION_27		! 0x11b
	tl0_gen		T_TRAP_INSTRUCTION_28		! 0x11c
	tl0_gen		T_TRAP_INSTRUCTION_29		! 0x11d
	tl0_gen		T_TRAP_INSTRUCTION_30		! 0x11e
	tl0_gen		T_TRAP_INSTRUCTION_31		! 0x11f
	tl0_reserved	224				! 0x120-0x1ff
209980709Sjake
/*
 * Trap table entries for traps taken at tl > 0 (from kernel mode).
 * Note: the spill/fill offset comments below have been corrected; each
 * spill/fill entry spans 4 trap table slots, so tl1_spill_3_n is at
 * 0x28c, tl1_fill_2_n at 0x2c8 and tl1_fill_3_n at 0x2cc.
 */
tl1_base:
	tl1_reserved	8				! 0x200-0x207
tl1_insn_excptn:
	tl1_insn_excptn					! 0x208
	tl1_reserved	1				! 0x209
tl1_insn_error:
	tl1_gen		T_INSTRUCTION_ERROR		! 0x20a
	tl1_reserved	5				! 0x20b-0x20f
tl1_insn_illegal:
	tl1_gen		T_ILLEGAL_INSTRUCTION		! 0x210
tl1_priv_opcode:
	tl1_gen		T_PRIVILEGED_OPCODE		! 0x211
	tl1_reserved	14				! 0x212-0x21f
tl1_fp_disabled:
	tl1_gen		T_FP_DISABLED			! 0x220
tl1_fp_ieee:
	tl1_gen		T_FP_EXCEPTION_IEEE_754		! 0x221
tl1_fp_other:
	tl1_gen		T_FP_EXCEPTION_OTHER		! 0x222
tl1_tag_ovflw:
	tl1_gen		T_TAG_OFERFLOW			! 0x223
tl1_clean_window:
	clean_window					! 0x224
tl1_divide:
	tl1_gen		T_DIVISION_BY_ZERO		! 0x228
	tl1_reserved	7				! 0x229-0x22f
tl1_data_excptn:
	tl1_data_excptn					! 0x230
	tl1_reserved	1				! 0x231
tl1_data_error:
	tl1_gen		T_DATA_ERROR			! 0x232
	tl1_reserved	1				! 0x233
tl1_align:
	tl1_align					! 0x234
tl1_align_lddf:
	tl1_gen		T_RESERVED			! 0x235
tl1_align_stdf:
	tl1_gen		T_RESERVED			! 0x236
tl1_priv_action:
	tl1_gen		T_PRIVILEGED_ACTION		! 0x237
	tl1_reserved	9				! 0x238-0x240
tl1_intr_level:
	tl1_intr_level					! 0x241-0x24f
	tl1_reserved	16				! 0x250-0x25f
tl1_intr_vector:
	intr_vector					! 0x260
tl1_watch_phys:
	tl1_gen		T_PA_WATCHPOINT			! 0x261
tl1_watch_virt:
	tl1_gen		T_VA_WATCHPOINT			! 0x262
tl1_ecc:
	tl1_gen		T_CORRECTED_ECC_ERROR		! 0x263
tl1_immu_miss:
	tl1_immu_miss					! 0x264
tl1_dmmu_miss:
	tl1_dmmu_miss					! 0x268
tl1_dmmu_prot:
	tl1_dmmu_prot					! 0x26c
	tl1_reserved	16				! 0x270-0x27f
tl1_spill_0_n:
	tl1_spill_0_n					! 0x280
	tl1_spill_bad	1				! 0x284
tl1_spill_2_n:
	tl1_spill_2_n					! 0x288
tl1_spill_3_n:
	tl1_spill_3_n					! 0x28c
	tl1_spill_bad	4				! 0x290-0x29f
tl1_spill_0_o:
	tl1_spill_0_o					! 0x2a0
tl1_spill_1_o:
	tl1_spill_1_o					! 0x2a4
tl1_spill_2_o:
	tl1_spill_2_o					! 0x2a8
	tl1_spill_bad	5				! 0x2ac-0x2bf
tl1_fill_0_n:
	tl1_fill_0_n					! 0x2c0
	tl1_fill_bad	1				! 0x2c4
tl1_fill_2_n:
	tl1_fill_2_n					! 0x2c8
tl1_fill_3_n:
	tl1_fill_3_n					! 0x2cc
	tl1_fill_bad	12				! 0x2d0-0x2ff
	tl1_reserved	1				! 0x300
tl1_breakpoint:
	tl1_gen		T_BREAKPOINT			! 0x301
	tl1_gen		T_RSTRWP_PHYS			! 0x302
	tl1_gen		T_RSTRWP_VIRT			! 0x303
	tl1_reserved	252				! 0x304-0x3ff
218880709Sjake
218981380Sjake/*
219082906Sjake * User trap entry point.
219182906Sjake *
2192103897Sjake * void tl0_utrap(u_long type, u_long o1, u_long o2, u_long tar, u_long sfar,
2193103897Sjake *                u_long sfsr)
2194103897Sjake *
2195103897Sjake * This handles redirecting a trap back to usermode as a user trap.  The user
2196103897Sjake * program must have first registered a trap handler with the kernel using
2197103897Sjake * sysarch(SPARC_UTRAP_INSTALL).  The trap handler is passed enough state
2198103897Sjake * for it to return to the trapping code directly, it will not return through
2199103897Sjake * the kernel.  The trap type is passed in %o0, all out registers must be
2200103897Sjake * passed through to tl0_trap or to usermode untouched.  Note that the
2201103897Sjake * parameters passed in out registers may be used by the user trap handler.
2202103897Sjake * Do not change the registers they are passed in or you will break the ABI.
2203103897Sjake *
2204103897Sjake * If the trap type allows user traps, setup state to execute the user trap
2205103897Sjake * handler and bounce back to usermode, otherwise branch to tl0_trap.
2206103897Sjake */
ENTRY(tl0_utrap)
	/*
	 * Check if the trap type allows user traps.
	 */
	cmp	%o0, UT_MAX
	bge,a,pt %xcc, tl0_trap
	 nop

	/*
	 * Load the user trap handler from the utrap table.
	 */
	ldx	[PCPU(CURTHREAD)], %l0
	ldx	[%l0 + TD_PROC], %l0
	ldx	[%l0 + P_MD + MD_UTRAP], %l0
	brz,pt	%l0, tl0_trap			! no utrap table installed
	 sllx	%o0, PTR_SHIFT, %l1
	ldx	[%l0 + %l1], %l0
	brz,a,pt %l0, tl0_trap			! no handler for this type
	 nop

	/*
	 * If the save we did on entry to the kernel had to spill a window
	 * to the pcb, pretend we took a spill trap instead.  Any windows
	 * that are in the pcb must be copied out or the fill handler will
	 * not be able to find them, since the user trap handler returns
	 * directly to the trapping code.  Note that we only support precise
	 * user traps, which implies that the condition that caused the trap
	 * in the first place is still valid, so it will occur again when we
	 * re-execute the trapping instruction.
	 */
	ldx	[PCB_REG + PCB_NSAVED], %l1
	brnz,a,pn %l1, tl0_trap
	 mov	T_SPILL, %o0

	/*
	 * Pass %fsr in %l4, %tstate in %l5, %tpc in %l6 and %tnpc in %l7.
	 * The ABI specifies only %l6 and %l7, but we need to pass %fsr or
	 * it may be clobbered by an interrupt before the user trap code
	 * can read it, and we must pass %tstate in order to restore %ccr
	 * and %asi.  The %fsr must be stored to memory, so we use the
	 * temporary stack for that.
	 */
	rd	%fprs, %l1
	or	%l1, FPRS_FEF, %l2
	wr	%l2, 0, %fprs			! enable the fpu to read %fsr
	dec	8, ASP_REG
	stx	%fsr, [ASP_REG]
	ldx	[ASP_REG], %l4
	inc	8, ASP_REG
	wr	%l1, 0, %fprs			! restore previous fpu state

	rdpr	%tstate, %l5
	rdpr	%tpc, %l6
	rdpr	%tnpc, %l7

	/*
	 * Setup %tnpc to return to.
	 */
	wrpr	%l0, 0, %tnpc

	/*
	 * Setup %wstate for return, clear WSTATE_TRANSITION.
	 */
	rdpr	%wstate, %l1
	and	%l1, WSTATE_NORMAL_MASK, %l1
	wrpr	%l1, 0, %wstate

	/*
	 * Setup %tstate for return, change the saved cwp to point to the
	 * current window instead of the window at the time of the trap.
	 */
	andn	%l5, TSTATE_CWP_MASK, %l1
	rdpr	%cwp, %l2
	wrpr	%l1, %l2, %tstate

	/*
	 * Setup %sp.  Userland processes will crash if this is not setup.
	 */
	sub	%fp, CCFSZ, %sp

	/*
	 * Execute the user trap handler.
	 */
	done
END(tl0_utrap)
2292103897Sjake
2293103897Sjake/*
2294103897Sjake * (Real) User trap entry point.
2295103897Sjake *
229688644Sjake * void tl0_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
229788644Sjake *		 u_int sfsr)
229882906Sjake *
229982906Sjake * The following setup has been performed:
230082906Sjake *	- the windows have been split and the active user window has been saved
230182906Sjake *	  (maybe just to the pcb)
230282906Sjake *	- we are on alternate globals and interrupts are disabled
230382906Sjake *
230489050Sjake * We switch to the kernel stack, build a trapframe, switch to normal
230588644Sjake * globals, enable interrupts and call trap.
230682906Sjake *
230782906Sjake * NOTE: We must be very careful setting up the per-cpu pointer.  We know that
230882906Sjake * it has been pre-set in alternate globals, so we read it from there and setup
230982906Sjake * the normal %g7 *before* enabling interrupts.  This avoids any possibility
231087702Sjhb * of cpu migration and using the wrong pcpup.
231181380Sjake */
ENTRY(tl0_trap)
	/*
	 * Force kernel store order.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Capture the trap state: %l0 = %tstate, %l1 = %tpc, %l2 = %tnpc,
	 * %l3 = %y, %l4 = %fprs, %l5 = %wstate.
	 */
	rdpr	%tstate, %l0
	rdpr	%tpc, %l1
	rdpr	%tnpc, %l2
	rd	%y, %l3
	rd	%fprs, %l4
	rdpr	%wstate, %l5

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP,
	    "tl0_trap: td=%p type=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	stx	%o0, [%g1 + KTR_PARM2]
	rdpr	%pil, %g2
	stx	%g2, [%g1 + KTR_PARM3]
	stx	%l1, [%g1 + KTR_PARM4]
	stx	%l2, [%g1 + KTR_PARM5]
	stx	%i6, [%g1 + KTR_PARM6]
9:
#endif

	/*
	 * Mark the user windows as other windows and switch to the kernel
	 * window state.
	 */
1:	and	%l5, WSTATE_NORMAL_MASK, %l5
	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
	wrpr	%l5, WSTATE_KERNEL, %wstate
	rdpr	%canrestore, %l6
	wrpr	%l6, 0, %otherwin
	wrpr	%g0, 0, %canrestore

	/*
	 * Carve out a trap frame on the kernel stack (PCB_REG points just
	 * above it).
	 */
	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp

	stw	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
	stx	%o3, [%sp + SPOFF + CCFSZ + TF_TAR]
	stx	%o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
	stw	%o5, [%sp + SPOFF + CCFSZ + TF_SFSR]

	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
	stw	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
	stb	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
	stb	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]

	/* The fpu must be enabled (FPRS_FEF) to store %fsr. */
	wr	%g0, FPRS_FEF, %fprs
	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
	wr	%g0, 0, %fprs

	/*
	 * Carry PCB_REG and PCPU_REG across the switch to normal globals;
	 * see the NOTE in the header comment about cpu migration.
	 */
	mov	PCB_REG, %l0
	mov	PCPU_REG, %l1
	wrpr	%g0, PSTATE_NORMAL, %pstate

	/* Save the normal globals into the trap frame. */
	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]

	mov	%l0, PCB_REG
	mov	%l1, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate

	/* Save the trapping frame's outs (our ins) and call trap(). */
	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]

	call	trap
	 add	%sp, CCFSZ + SPOFF, %o0
	b,a	%xcc, tl0_ret
	 nop
END(tl0_trap)
239584186Sjake
239688644Sjake/*
239791246Sjake * void tl0_syscall(u_int type)
239888644Sjake */
ENTRY(tl0_syscall)
	/*
	 * Force kernel store order.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Capture the trap state: %l0 = %tstate, %l1 = %tpc, %l2 = %tnpc,
	 * %l3 = %y, %l4 = %fprs, %l5 = %wstate.
	 */
	rdpr	%tstate, %l0
	rdpr	%tpc, %l1
	rdpr	%tnpc, %l2
	rd	%y, %l3
	rd	%fprs, %l4
	rdpr	%wstate, %l5

#if KTR_COMPILE & KTR_SYSC
	CATR(KTR_SYSC,
	    "tl0_syscall: td=%p type=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	stx	%o0, [%g1 + KTR_PARM2]
	rdpr	%pil, %g2
	stx	%g2, [%g1 + KTR_PARM3]
	stx	%l1, [%g1 + KTR_PARM4]
	stx	%l2, [%g1 + KTR_PARM5]
	stx	%i6, [%g1 + KTR_PARM6]
9:
#endif

	/*
	 * Mark the user windows as other windows and switch to the kernel
	 * window state.
	 */
	and	%l5, WSTATE_NORMAL_MASK, %l5
	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
	wrpr	%l5, WSTATE_KERNEL, %wstate
	rdpr	%canrestore, %l6
	wrpr	%l6, 0, %otherwin
	wrpr	%g0, 0, %canrestore

	/*
	 * Carve out a trap frame on the kernel stack.
	 */
	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp

	stw	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]

	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
	stw	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
	stb	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
	stb	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]

	/* The fpu must be enabled (FPRS_FEF) to store %fsr. */
	wr	%g0, FPRS_FEF, %fprs
	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
	wr	%g0, 0, %fprs

	/*
	 * Carry PCB_REG and PCPU_REG across the switch to normal globals.
	 */
	mov	PCB_REG, %l0
	mov	PCPU_REG, %l1
	wrpr	%g0, PSTATE_NORMAL, %pstate

	/* Save the normal globals into the trap frame. */
	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]

	mov	%l0, PCB_REG
	mov	%l1, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate

	/* Save the syscall arguments (our ins) and call syscall(). */
	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]

	call	syscall
	 add	%sp, CCFSZ + SPOFF, %o0
	b,a	%xcc, tl0_ret
	 nop
END(tl0_syscall)
247984186Sjake
248091246Sjake/*
248191246Sjake * void tl0_intr(u_int level, u_int mask)
248291246Sjake */
ENTRY(tl0_intr)
	/*
	 * Force kernel store order.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Capture the trap state: %l0 = %tstate, %l1 = %tpc, %l2 = %tnpc,
	 * %l3 = %y, %l4 = %fprs, %l5 = %wstate.
	 */
	rdpr	%tstate, %l0
	rdpr	%tpc, %l1
	rdpr	%tnpc, %l2
	rd	%y, %l3
	rd	%fprs, %l4
	rdpr	%wstate, %l5

#if KTR_COMPILE & KTR_INTR
	CATR(KTR_INTR,
	    "tl0_intr: td=%p level=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	stx	%o0, [%g1 + KTR_PARM2]
	rdpr	%pil, %g2
	stx	%g2, [%g1 + KTR_PARM3]
	stx	%l1, [%g1 + KTR_PARM4]
	stx	%l2, [%g1 + KTR_PARM5]
	stx	%i6, [%g1 + KTR_PARM6]
9:
#endif

	/*
	 * Raise the pil to the interrupt's level and acknowledge it by
	 * writing the mask to the clear_softint register (%asr21).
	 */
	wrpr	%o0, 0, %pil
	wr	%o1, 0, %asr21

	/*
	 * Mark the user windows as other windows and switch to the kernel
	 * window state.
	 */
	and	%l5, WSTATE_NORMAL_MASK, %l5
	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
	wrpr	%l5, WSTATE_KERNEL, %wstate
	rdpr	%canrestore, %l6
	wrpr	%l6, 0, %otherwin
	wrpr	%g0, 0, %canrestore

	/*
	 * Carve out a trap frame on the kernel stack.
	 */
	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp

	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
	stw	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
	stb	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
	stb	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]

	/* The fpu must be enabled (FPRS_FEF) to store %fsr. */
	wr	%g0, FPRS_FEF, %fprs
	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
	wr	%g0, 0, %fprs

	/* %l3 = interrupt level, used below to index intr_handlers. */
	mov	%o0, %l3
	mov	T_INTERRUPT, %o1

	stw	%o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
	stw	%o1, [%sp + SPOFF + CCFSZ + TF_TYPE]

	/*
	 * Carry PCB_REG and PCPU_REG across the switch to normal globals.
	 */
	mov	PCB_REG, %l0
	mov	PCPU_REG, %l1
	wrpr	%g0, PSTATE_NORMAL, %pstate

	/* Save the normal globals into the trap frame. */
	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]

	mov	%l0, PCB_REG
	mov	%l1, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate

	/* Save the trapping frame's outs (our ins). */
	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]

	call	critical_enter
	 nop

	/* Bump the interrupt statistics counter. */
	SET(cnt+V_INTR, %l1, %l0)
	ATOMIC_INC_INT(%l0, %l1, %l2)

	/*
	 * Look up and call the handler for this interrupt level, passing
	 * it the trap frame.
	 */
	SET(intr_handlers, %l1, %l0)
	sllx	%l3, IH_SHIFT, %l1
	ldx	[%l0 + %l1], %l1
	KASSERT(%l1, "tl0_intr: ih null")
	call	%l1
	 add	%sp, CCFSZ + SPOFF, %o0

	call	critical_exit
	 nop

	b,a	%xcc, tl0_ret
	 nop
END(tl0_intr)
258484186Sjake
258582005SjakeENTRY(tl0_ret)
258693389Sjake	/*
258793389Sjake	 * Check for pending asts atomically with returning.  We must raise
258893389Sjake	 * the pil before checking, and if no asts are found the pil must
258993389Sjake	 * remain raised until the retry is executed, or we risk missing asts
259093389Sjake	 * caused by interrupts occurring after the test.  If the pil is lowered,
259193389Sjake	 * as it is when we call ast, the check must be re-executed.
259293389Sjake	 */
2593103784Sjake	wrpr	%g0, PIL_TICK, %pil
259484186Sjake	ldx	[PCPU(CURTHREAD)], %l0
259584186Sjake	ldx	[%l0 + TD_KSE], %l1
259684186Sjake	lduw	[%l1 + KE_FLAGS], %l2
259784186Sjake	and	%l2, KEF_ASTPENDING | KEF_NEEDRESCHED, %l2
2598103784Sjake	brz,a,pt %l2, 1f
259982906Sjake	 nop
	/* An ast is pending: drop the pil, handle it, and start over. */
260093389Sjake	wrpr	%g0, 0, %pil
260182906Sjake	call	ast
260282906Sjake	 add	%sp, CCFSZ + SPOFF, %o0
2603103784Sjake	ba,a	%xcc, tl0_ret
260493389Sjake	 nop
260582906Sjake
260693389Sjake	/*
260793389Sjake	 * Check for windows that were spilled to the pcb and need to be
260893389Sjake	 * copied out.  This must be the last thing that is done before the
260993389Sjake	 * return to usermode.  If there are still user windows in the cpu
261093389Sjake	 * and we call a nested function after this, which causes them to be
261193389Sjake	 * spilled to the pcb, they will not be copied out and the stack will
261293389Sjake	 * be inconsistent.
261393389Sjake	 */
2614103784Sjake1:	ldx	[PCB_REG + PCB_NSAVED], %l1
2615103784Sjake	brz,a,pt %l1, 2f
2616103784Sjake	 nop
	/* Saved windows exist: have trap(T_SPILL) copy them out, then retry. */
2617103784Sjake	wrpr	%g0, 0, %pil
261893389Sjake	mov	T_SPILL, %o0
2619103784Sjake	stw	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2620103784Sjake	call	trap
2621103784Sjake	 add	%sp, SPOFF + CCFSZ, %o0
2622103784Sjake	ba,a	%xcc, tl0_ret
2623103784Sjake	 nop
262482906Sjake
	/*
	 * Reload the user's out registers from the trapframe.  They are
	 * loaded into the ins here because the restore below shifts the
	 * window, making them the user's outs again.
	 */
2625103784Sjake2:	ldx	[%sp + SPOFF + CCFSZ + TF_O0], %i0
262682906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O1], %i1
262782906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O2], %i2
262882906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O3], %i3
262982906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O4], %i4
263082906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O5], %i5
263182906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O6], %i6
263282906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O7], %i7
263381380Sjake
	/* Load the saved trap state from the trapframe. */
263488644Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l0
263585243Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l1
263685243Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
263788644Sjake	lduw	[%sp + SPOFF + CCFSZ + TF_Y], %l3
263888644Sjake	ldub	[%sp + SPOFF + CCFSZ + TF_FPRS], %l4
263988644Sjake	ldub	[%sp + SPOFF + CCFSZ + TF_WSTATE], %l5
264082906Sjake
	/* Switch to the normal globals to reload the user's %g registers. */
264189050Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
264289050Sjake
264389050Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
264489050Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
264589050Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
264689050Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
264789050Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
264889050Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6
264989050Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G7], %g7
265089050Sjake
	/* Back to the alternate globals for the return sequence. */
265182906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
265282906Sjake
265388644Sjake	wrpr	%g0, 0, %pil
265488644Sjake	wrpr	%l1, 0, %tpc
265588644Sjake	wrpr	%l2, 0, %tnpc
265688644Sjake	wr	%l3, 0, %y
265782906Sjake
	/*
	 * Stash the saved %tstate (minus the window pointer, merged back in
	 * after the restore) and %fprs in the alternate globals so they
	 * survive the window shift.
	 */
265888644Sjake	andn	%l0, TSTATE_CWP_MASK, %g1
265988644Sjake	mov	%l4, %g2
266082906Sjake
	/*
	 * Set up the return window state: windows previously counted as
	 * other become restorable and clean.
	 */
266188644Sjake	srlx	%l5, WSTATE_OTHER_SHIFT, %g3
266288644Sjake	wrpr	%g3, WSTATE_TRANSITION, %wstate
266388644Sjake	rdpr	%otherwin, %o0
266488644Sjake	wrpr	%o0, 0, %canrestore
266582906Sjake	wrpr	%g0, 0, %otherwin
266688644Sjake	wrpr	%o0, 0, %cleanwin
266781380Sjake
266882005Sjake	/*
266982906Sjake	 * If this instruction causes a fill trap which fails to fill a window
267082906Sjake	 * from the user stack, we will resume at tl0_ret_fill_end and call
267182906Sjake	 * back into the kernel.
267282005Sjake	 */
267382906Sjake	restore
267482906Sjaketl0_ret_fill:
267581380Sjake
	/*
	 * Merge the current window pointer into the saved %tstate, and
	 * restore the user's %fprs and %wstate.
	 */
267688644Sjake	rdpr	%cwp, %g4
267788644Sjake	wrpr	%g1, %g4, %tstate
267888644Sjake	wr	%g2, 0, %fprs
267988644Sjake	wrpr	%g3, 0, %wstate
268085243Sjake
268184186Sjake#if KTR_COMPILE & KTR_TRAP
268288644Sjake	CATR(KTR_TRAP, "tl0_ret: td=%#lx pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
268382906Sjake	    , %g2, %g3, %g4, 7, 8, 9)
268483366Sjulian	ldx	[PCPU(CURTHREAD)], %g3
268582906Sjake	stx	%g3, [%g2 + KTR_PARM1]
268685243Sjake	rdpr	%pil, %g3
268785243Sjake	stx	%g3, [%g2 + KTR_PARM2]
268888644Sjake	rdpr	%tpc, %g3
268984186Sjake	stx	%g3, [%g2 + KTR_PARM3]
269088644Sjake	rdpr	%tnpc, %g3
269184186Sjake	stx	%g3, [%g2 + KTR_PARM4]
269284186Sjake	stx	%sp, [%g2 + KTR_PARM5]
269382906Sjake9:
269482906Sjake#endif
269581380Sjake
	/* Return to usermode. */
269682906Sjake	retry
269782906Sjaketl0_ret_fill_end:
269882005Sjake
269984186Sjake#if KTR_COMPILE & KTR_TRAP
270088785Sjake	CATR(KTR_TRAP, "tl0_ret: fill magic ps=%#lx ws=%#lx sp=%#lx"
270182906Sjake	    , %l0, %l1, %l2, 7, 8, 9)
270288785Sjake	rdpr	%pstate, %l1
270388785Sjake	stx	%l1, [%l0 + KTR_PARM1]
270488785Sjake	stx	%l5, [%l0 + KTR_PARM2]
270588785Sjake	stx	%sp, [%l0 + KTR_PARM3]
270682906Sjake9:
270782906Sjake#endif
270882906Sjake
270982906Sjake	/*
271084186Sjake	 * The fill failed and magic has been performed.  Call trap again,
271182906Sjake	 * which will copyin the window on the user's behalf.
271282906Sjake	 */
271388644Sjake	wrpr	%l5, 0, %wstate
	/*
	 * Carry PCB_REG and PCPU_REG through %o0/%o1 across the switch to
	 * the normal global set — presumably because they are kept in
	 * global registers and would otherwise be lost (TODO confirm
	 * against the register usage conventions in the cpu headers).
	 */
271488785Sjake	wrpr	%g0, PSTATE_ALT, %pstate
271589050Sjake	mov	PCB_REG, %o0
271689050Sjake	mov	PCPU_REG, %o1
271788785Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
271889050Sjake	mov	%o0, PCB_REG
271989050Sjake	mov	%o1, PCPU_REG
272088644Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
	/* Re-enter trap() with T_FILL_RET and return through tl0_ret again. */
2721103784Sjake	mov	T_FILL_RET, %o0
2722103784Sjake	stw	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2723103784Sjake	call	trap
2724103784Sjake	 add	%sp, SPOFF + CCFSZ, %o0
2725103784Sjake	ba,a	%xcc, tl0_ret
2726103784Sjake	 nop
272782005SjakeEND(tl0_ret)
272881380Sjake
272980709Sjake/*
273082906Sjake * Kernel trap entry point
273182906Sjake *
273291246Sjake * void tl1_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
273388644Sjake *		 u_int sfsr)
273482906Sjake *
273582906Sjake * This is easy because the stack is already setup and the windows don't need
273682906Sjake * to be split.  We build a trapframe and call trap(), the same as above, but
273782906Sjake * the outs don't need to be saved.
273882906Sjake */
273980709SjakeENTRY(tl1_trap)
	/* Allocate a trapframe on the current kernel stack. */
274080709Sjake	sub	%sp, TF_SIZEOF, %sp
274182906Sjake
	/* Capture the trap state into locals before it can be clobbered. */
274280709Sjake	rdpr	%tstate, %l0
274380709Sjake	rdpr	%tpc, %l1
274480709Sjake	rdpr	%tnpc, %l2
274591246Sjake	rdpr	%pil, %l3
274691316Sjake	rd	%y, %l4
274791316Sjake	rdpr	%wstate, %l5
274880709Sjake
274984186Sjake#if KTR_COMPILE & KTR_TRAP
275088644Sjake	CATR(KTR_TRAP, "tl1_trap: td=%p type=%#lx pil=%#lx pc=%#lx sp=%#lx"
275188644Sjake	    , %g1, %g2, %g3, 7, 8, 9)
275288644Sjake	ldx	[PCPU(CURTHREAD)], %g2
275388644Sjake	stx	%g2, [%g1 + KTR_PARM1]
275497265Sjake	stx	%o0, [%g1 + KTR_PARM2]
275591246Sjake	stx	%l3, [%g1 + KTR_PARM3]
275688644Sjake	stx	%l1, [%g1 + KTR_PARM4]
275788644Sjake	stx	%i6, [%g1 + KTR_PARM5]
275882906Sjake9:
275982906Sjake#endif
276082906Sjake
	/* Drop to TL=1 for the handler; %tl is set back to 2 before the retry. */
276180709Sjake	wrpr	%g0, 1, %tl
276288644Sjake
276391316Sjake	and	%l5, WSTATE_OTHER_MASK, %l5
276491316Sjake	wrpr	%l5, WSTATE_KERNEL, %wstate
276591246Sjake
	/* Save the captured trap state into the trapframe. */
276688644Sjake	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
276788644Sjake	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
276888644Sjake	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
276991246Sjake	stb	%l3, [%sp + SPOFF + CCFSZ + TF_PIL]
277091316Sjake	stw	%l4, [%sp + SPOFF + CCFSZ + TF_Y]
277188644Sjake
	/* Save the trap type and fault information passed in by the caller. */
277288644Sjake	stw	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
277388644Sjake	stx	%o3, [%sp + SPOFF + CCFSZ + TF_TAR]
277488644Sjake	stx	%o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
277588644Sjake	stw	%o5, [%sp + SPOFF + CCFSZ + TF_SFSR]
277688644Sjake
277788644Sjake	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
277888644Sjake	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
277988644Sjake
	/*
	 * Carry PCB_REG and PCPU_REG through %l4/%l5 across the switch to
	 * the normal globals, where %g1-%g5 are saved into the trapframe.
	 */
278091158Sjake	mov	PCB_REG, %l4
278191158Sjake	mov	PCPU_REG, %l5
278291158Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
278391158Sjake
278480709Sjake	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
278580709Sjake	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
278680709Sjake	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
278780709Sjake	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
278880709Sjake	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
278980709Sjake
279091158Sjake	mov	%l4, PCB_REG
279191158Sjake	mov	%l5, PCPU_REG
279291158Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
279391158Sjake
279480709Sjake	call	trap
279580709Sjake	 add	%sp, CCFSZ + SPOFF, %o0
279680709Sjake
	/* Reload the trap state, possibly modified by trap(). */
279788644Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l0
279888644Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l1
279988644Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
280088644Sjake	ldub	[%sp + SPOFF + CCFSZ + TF_PIL], %l3
280191316Sjake	lduw	[%sp + SPOFF + CCFSZ + TF_Y], %l4
280288644Sjake
	/* Restore the globals saved above. */
280380709Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
280480709Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
280580709Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
280680709Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
280780709Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
280880709Sjake
280982906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
281080709Sjake
	/*
	 * Keep the saved state in the alternate globals so it survives the
	 * restore back to the previous window.  The window pointer is
	 * merged into %tstate after the restore.
	 */
281188644Sjake	andn	%l0, TSTATE_CWP_MASK, %g1
281286519Sjake	mov	%l1, %g2
281386519Sjake	mov	%l2, %g3
281481380Sjake
281588644Sjake	wrpr	%l3, 0, %pil
281691316Sjake	wr	%l4, 0, %y
281786519Sjake
281886519Sjake	restore
281986519Sjake
	/* Back to TL=2; rewrite the trap registers for the retry. */
282080709Sjake	wrpr	%g0, 2, %tl
282180709Sjake
282288644Sjake	rdpr	%cwp, %g4
282388644Sjake	wrpr	%g1, %g4, %tstate
282486519Sjake	wrpr	%g2, 0, %tpc
282586519Sjake	wrpr	%g3, 0, %tnpc
282686519Sjake
282784186Sjake#if KTR_COMPILE & KTR_TRAP
282886519Sjake	CATR(KTR_TRAP, "tl1_trap: td=%#lx pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
282986519Sjake	    , %g2, %g3, %g4, 7, 8, 9)
283086519Sjake	ldx	[PCPU(CURTHREAD)], %g3
283186519Sjake	stx	%g3, [%g2 + KTR_PARM1]
283286519Sjake	rdpr	%pil, %g3
283386519Sjake	stx	%g3, [%g2 + KTR_PARM2]
283486519Sjake	rdpr	%tstate, %g3
283586519Sjake	stx	%g3, [%g2 + KTR_PARM3]
283686519Sjake	rdpr	%tpc, %g3
283786519Sjake	stx	%g3, [%g2 + KTR_PARM4]
283886519Sjake	stx	%sp, [%g2 + KTR_PARM5]
283982906Sjake9:
284082906Sjake#endif
284182906Sjake
	/* Resume the interrupted kernel code. */
284280709Sjake	retry
284380709SjakeEND(tl1_trap)
284480709Sjake
284591246Sjake/*
284691246Sjake * Kernel interrupt entry point
284791246Sjake *
284891246Sjake * void tl1_intr(u_int level, u_int mask)
284991246Sjake */
284884186SjakeENTRY(tl1_intr)
	/* Allocate a trapframe on the current kernel stack. */
284984186Sjake	sub	%sp, TF_SIZEOF, %sp
285084186Sjake
	/* Capture the trap state into locals before it can be clobbered. */
285184186Sjake	rdpr	%tstate, %l0
285284186Sjake	rdpr	%tpc, %l1
285384186Sjake	rdpr	%tnpc, %l2
285491246Sjake	rdpr	%pil, %l3
285591316Sjake	rd	%y, %l4
285691316Sjake	rdpr	%wstate, %l5
285784186Sjake
285884186Sjake#if KTR_COMPILE & KTR_INTR
285989050Sjake	CATR(KTR_INTR,
286091246Sjake	    "tl1_intr: td=%p level=%#lx pil=%#lx pc=%#lx sp=%#lx"
286188644Sjake	    , %g1, %g2, %g3, 7, 8, 9)
286288644Sjake	ldx	[PCPU(CURTHREAD)], %g2
286388644Sjake	stx	%g2, [%g1 + KTR_PARM1]
286491246Sjake	stx	%o0, [%g1 + KTR_PARM2]
286591246Sjake	stx	%l3, [%g1 + KTR_PARM3]
286691246Sjake	stx	%l1, [%g1 + KTR_PARM4]
286791246Sjake	stx	%i6, [%g1 + KTR_PARM5]
286884186Sjake9:
286984186Sjake#endif
287084186Sjake
	/*
	 * Raise the pil to this interrupt's level and write the mask to
	 * %asr21 — presumably the softint-clear register, acknowledging
	 * the interrupt (NOTE(review): confirm against the cpu headers).
	 */
287191246Sjake	wrpr	%o0, 0, %pil
287291246Sjake	wr	%o1, 0, %asr21
287391246Sjake
	/* Drop to TL=1 for the handler; %tl is set back to 2 before the retry. */
287484186Sjake	wrpr	%g0, 1, %tl
287588644Sjake
287691316Sjake	and	%l5, WSTATE_OTHER_MASK, %l5
287791316Sjake	wrpr	%l5, WSTATE_KERNEL, %wstate
287891246Sjake
	/* Save the captured trap state into the trapframe. */
287988644Sjake	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
288088644Sjake	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
288188644Sjake	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
288291246Sjake	stb	%l3, [%sp + SPOFF + CCFSZ + TF_PIL]
288391316Sjake	stw	%l4, [%sp + SPOFF + CCFSZ + TF_Y]
288488644Sjake
	/* Keep the level in %l7 for indexing intr_handlers below. */
288591246Sjake	mov	%o0, %l7
288691246Sjake	mov	T_INTERRUPT | T_KERNEL, %o1
288789050Sjake
288891246Sjake	stw	%o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
288991246Sjake	stw	%o1, [%sp + SPOFF + CCFSZ + TF_TYPE]
289088644Sjake
289188644Sjake	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
289288644Sjake	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
289388644Sjake
	/*
	 * Carry PCB_REG and PCPU_REG through %l4/%l5 across the switch to
	 * the normal globals, where %g1-%g5 are saved into the trapframe.
	 */
289491158Sjake	mov	PCB_REG, %l4
289591158Sjake	mov	PCPU_REG, %l5
289691158Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
289791158Sjake
289884186Sjake	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
289984186Sjake	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
290084186Sjake	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
290184186Sjake	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
290284186Sjake	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
290384186Sjake
290491158Sjake	mov	%l4, PCB_REG
290591158Sjake	mov	%l5, PCPU_REG
290691158Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
290791158Sjake
290889050Sjake	call	critical_enter
290989050Sjake	 nop
291084186Sjake
	/* Bump the interrupt counter (cnt + V_INTR). */
291188644Sjake	SET(cnt+V_INTR, %l5, %l4)
291288644Sjake	ATOMIC_INC_INT(%l4, %l5, %l6)
291388644Sjake
	/* Look up and call the handler registered for this level. */
291488644Sjake	SET(intr_handlers, %l5, %l4)
291589050Sjake	sllx	%l7, IH_SHIFT, %l5
291688644Sjake	ldx	[%l4 + %l5], %l5
291789050Sjake	KASSERT(%l5, "tl1_intr: ih null")
291888644Sjake	call	%l5
291984186Sjake	 add	%sp, CCFSZ + SPOFF, %o0
292084186Sjake
292189050Sjake	call	critical_exit
292289050Sjake	 nop
292389050Sjake
292491316Sjake	lduw	[%sp + SPOFF + CCFSZ + TF_Y], %l4
292591316Sjake
	/* Restore the globals saved above. */
292684186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
292784186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
292884186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
292984186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
293084186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
293184186Sjake
293284186Sjake	wrpr	%g0, PSTATE_ALT, %pstate
293384186Sjake
	/*
	 * Keep the saved state in the alternate globals so it survives the
	 * restore back to the previous window.  The window pointer is
	 * merged into %tstate after the restore.
	 */
293488644Sjake	andn	%l0, TSTATE_CWP_MASK, %g1
293586519Sjake	mov	%l1, %g2
293686519Sjake	mov	%l2, %g3
293788644Sjake	wrpr	%l3, 0, %pil
293891316Sjake	wr	%l4, 0, %y
293984186Sjake
294086519Sjake	restore
294186519Sjake
	/* Back to TL=2; rewrite the trap registers for the retry. */
294284186Sjake	wrpr	%g0, 2, %tl
294384186Sjake
294488644Sjake	rdpr	%cwp, %g4
294588644Sjake	wrpr	%g1, %g4, %tstate
294686519Sjake	wrpr	%g2, 0, %tpc
294786519Sjake	wrpr	%g3, 0, %tnpc
294886519Sjake
294988644Sjake#if KTR_COMPILE & KTR_INTR
295088644Sjake	CATR(KTR_INTR, "tl1_intr: td=%#lx pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
295186519Sjake	    , %g2, %g3, %g4, 7, 8, 9)
295286519Sjake	ldx	[PCPU(CURTHREAD)], %g3
295386519Sjake	stx	%g3, [%g2 + KTR_PARM1]
295486519Sjake	rdpr	%pil, %g3
295586519Sjake	stx	%g3, [%g2 + KTR_PARM2]
295686519Sjake	rdpr	%tstate, %g3
295786519Sjake	stx	%g3, [%g2 + KTR_PARM3]
295886519Sjake	rdpr	%tpc, %g3
295986519Sjake	stx	%g3, [%g2 + KTR_PARM4]
296086519Sjake	stx	%sp, [%g2 + KTR_PARM5]
296184186Sjake9:
296284186Sjake#endif
296384186Sjake
	/* Resume the interrupted kernel code. */
296484186Sjake	retry
296584186SjakeEND(tl1_intr)
296684186Sjake
296782906Sjake/*
296882906Sjake * Freshly forked processes come here when switched to for the first time.
296982906Sjake * The arguments to fork_exit() have been setup in the locals, we must move
297082906Sjake * them to the outs.
297182906Sjake */
297280709SjakeENTRY(fork_trampoline)
297384186Sjake#if KTR_COMPILE & KTR_PROC
297484186Sjake	CATR(KTR_PROC, "fork_trampoline: td=%p (%s) cwp=%#lx"
297582906Sjake	    , %g1, %g2, %g3, 7, 8, 9)
297683366Sjulian	ldx	[PCPU(CURTHREAD)], %g2
297782906Sjake	stx	%g2, [%g1 + KTR_PARM1]
297884186Sjake	ldx	[%g2 + TD_PROC], %g2
297982906Sjake	add	%g2, P_COMM, %g2
298082906Sjake	stx	%g2, [%g1 + KTR_PARM2]
298182906Sjake	rdpr	%cwp, %g2
298282906Sjake	stx	%g2, [%g1 + KTR_PARM3]
298382906Sjake9:
298482906Sjake#endif
	/* Move the three fork_exit() arguments from the locals to the outs. */
298580709Sjake	mov	%l0, %o0
298680709Sjake	mov	%l1, %o1
298780709Sjake	call	fork_exit
298888644Sjake	 mov	%l2, %o2
	/* Return to usermode through the common trap return path. */
298982005Sjake	b,a	%xcc, tl0_ret
299084186Sjake	 nop
299180709SjakeEND(fork_trampoline)
2992