exception.S revision 112924
180709Sjake/*-
281180Sjake * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
381180Sjake *
481180Sjake * Redistribution and use in source and binary forms, with or without
581180Sjake * modification, are permitted provided that the following conditions
681180Sjake * are met:
781180Sjake * 1. Redistributions of source code must retain the above copyright
881180Sjake *    notice, this list of conditions and the following disclaimer.
981180Sjake * 2. Redistributions in binary form must reproduce the above copyright
1081180Sjake *    notice, this list of conditions and the following disclaimer in the
1181180Sjake *    documentation and/or other materials provided with the distribution.
1281180Sjake * 3. Berkeley Software Design Inc's name may not be used to endorse or
1381180Sjake *    promote products derived from this software without specific prior
1481180Sjake *    written permission.
1581180Sjake *
1681180Sjake * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
1781180Sjake * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1881180Sjake * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1981180Sjake * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
2081180Sjake * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
2181180Sjake * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2281180Sjake * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2381180Sjake * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2481180Sjake * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2581180Sjake * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2681180Sjake * SUCH DAMAGE.
2781180Sjake *
2881180Sjake *	from BSDI: locore.s,v 1.36.2.15 1999/08/23 22:34:41 cp Exp
2981180Sjake */
3081180Sjake/*-
3180709Sjake * Copyright (c) 2001 Jake Burkholder.
3280709Sjake * All rights reserved.
3380709Sjake *
3480709Sjake * Redistribution and use in source and binary forms, with or without
3580709Sjake * modification, are permitted provided that the following conditions
3680709Sjake * are met:
3780709Sjake * 1. Redistributions of source code must retain the above copyright
3880709Sjake *    notice, this list of conditions and the following disclaimer.
3980709Sjake * 2. Redistributions in binary form must reproduce the above copyright
4080709Sjake *    notice, this list of conditions and the following disclaimer in the
4180709Sjake *    documentation and/or other materials provided with the distribution.
4280709Sjake *
4381337Sobrien * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
4480709Sjake * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
4580709Sjake * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
4681337Sobrien * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
4780709Sjake * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
4880709Sjake * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
4980709Sjake * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
5080709Sjake * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
5180709Sjake * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
5280709Sjake * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
5380709Sjake * SUCH DAMAGE.
5480709Sjake *
5580709Sjake * $FreeBSD: head/sys/sparc64/sparc64/exception.S 112924 2003-04-01 04:58:50Z jake $
5680709Sjake */
5780709Sjake
58106050Sjake#include "opt_compat.h"
5980709Sjake#include "opt_ddb.h"
6080709Sjake
6180709Sjake#include <machine/asi.h>
6280709Sjake#include <machine/asmacros.h>
6382906Sjake#include <machine/ktr.h>
6482906Sjake#include <machine/pstate.h>
6580709Sjake#include <machine/trap.h>
6682906Sjake#include <machine/tstate.h>
6782906Sjake#include <machine/wstate.h>
6880709Sjake
6980709Sjake#include "assym.s"
7080709Sjake
/*
 * Kernel tsb parameters.  Zero here; presumably patched at boot time
 * with the real mask/base -- TODO confirm against the rest of the file.
 */
71101653Sjake#define	TSB_KERNEL_MASK	0x0
71101653Sjake#define	TSB_KERNEL	0x0
72101653Sjake
/*
 * Tell the assembler that use of these application registers is
 * intentional, so it does not warn.
 */
7488644Sjake	.register %g2,#ignore
7588644Sjake	.register %g3,#ignore
7688644Sjake	.register %g6,#ignore
7788644Sjake	.register %g7,#ignore
7888644Sjake
7982005Sjake/*
8088644Sjake * Atomically set the reference bit in a tte.
8188644Sjake */
/*
 * r1 is advanced to the tte data field (and clobbered); on return r2
 * holds the updated data word and r3 is scratch.  The casxa loop
 * retries until the compare-and-swap wins, so the bit is set
 * atomically with respect to other updaters of the tte.
 */
8288644Sjake#define	TTE_SET_BIT(r1, r2, r3, bit) \
8388644Sjake	add	r1, TTE_DATA, r1 ; \
8488644Sjake	ldx	[r1], r2 ; \
8588644Sjake9:	or	r2, bit, r3 ; \
8688644Sjake	casxa	[r1] ASI_N, r2, r3 ; \
8788644Sjake	cmp	r2, r3 ; \
8888644Sjake	bne,pn	%xcc, 9b ; \
8988644Sjake	 mov	r3, r2
9088644Sjake
9188644Sjake#define	TTE_SET_REF(r1, r2, r3)		TTE_SET_BIT(r1, r2, r3, TD_REF)
9288644Sjake#define	TTE_SET_W(r1, r2, r3)		TTE_SET_BIT(r1, r2, r3, TD_W)
9388644Sjake
9488644Sjake/*
9582906Sjake * Macros for spilling and filling live windows.
9682906Sjake *
9782906Sjake * NOTE: These macros use exactly 16 instructions, and it is assumed that the
9882906Sjake * handler will not use more than 24 instructions total, to leave room for
9982906Sjake * resume vectors which occupy the last 8 instructions.
10082005Sjake */
10180709Sjake
10282906Sjake#define	SPILL(storer, base, size, asi) \
10382906Sjake	storer	%l0, [base + (0 * size)] asi ; \
10482906Sjake	storer	%l1, [base + (1 * size)] asi ; \
10582906Sjake	storer	%l2, [base + (2 * size)] asi ; \
10682906Sjake	storer	%l3, [base + (3 * size)] asi ; \
10782906Sjake	storer	%l4, [base + (4 * size)] asi ; \
10882906Sjake	storer	%l5, [base + (5 * size)] asi ; \
10982906Sjake	storer	%l6, [base + (6 * size)] asi ; \
11082906Sjake	storer	%l7, [base + (7 * size)] asi ; \
11182906Sjake	storer	%i0, [base + (8 * size)] asi ; \
11282906Sjake	storer	%i1, [base + (9 * size)] asi ; \
11382906Sjake	storer	%i2, [base + (10 * size)] asi ; \
11482906Sjake	storer	%i3, [base + (11 * size)] asi ; \
11582906Sjake	storer	%i4, [base + (12 * size)] asi ; \
11682906Sjake	storer	%i5, [base + (13 * size)] asi ; \
11782906Sjake	storer	%i6, [base + (14 * size)] asi ; \
11882906Sjake	storer	%i7, [base + (15 * size)] asi
11980709Sjake
/*
 * Reload the 8 locals and 8 ins of a window from a save area; the
 * mirror image of SPILL.
 */
12082906Sjake#define	FILL(loader, base, size, asi) \
12182906Sjake	loader	[base + (0 * size)] asi, %l0 ; \
12282906Sjake	loader	[base + (1 * size)] asi, %l1 ; \
12382906Sjake	loader	[base + (2 * size)] asi, %l2 ; \
12482906Sjake	loader	[base + (3 * size)] asi, %l3 ; \
12582906Sjake	loader	[base + (4 * size)] asi, %l4 ; \
12682906Sjake	loader	[base + (5 * size)] asi, %l5 ; \
12782906Sjake	loader	[base + (6 * size)] asi, %l6 ; \
12882906Sjake	loader	[base + (7 * size)] asi, %l7 ; \
12982906Sjake	loader	[base + (8 * size)] asi, %i0 ; \
13082906Sjake	loader	[base + (9 * size)] asi, %i1 ; \
13182906Sjake	loader	[base + (10 * size)] asi, %i2 ; \
13282906Sjake	loader	[base + (11 * size)] asi, %i3 ; \
13382906Sjake	loader	[base + (12 * size)] asi, %i4 ; \
13482906Sjake	loader	[base + (13 * size)] asi, %i5 ; \
13582906Sjake	loader	[base + (14 * size)] asi, %i6 ; \
13682906Sjake	loader	[base + (15 * size)] asi, %i7
13782005Sjake
/*
 * No-op register move; presumably a workaround for a CPU erratum,
 * inserted after reads of privileged registers -- TODO confirm which
 * erratum this refers to.
 */
13882906Sjake#define	ERRATUM50(reg)	mov reg, reg
13982906Sjake
/*
 * Slack allowed at the low end of the kernel stack: KSTACK_CHECK
 * faults if %sp + SPOFF is not at least this far above the stack base.
 */
14088781Sjake#define	KSTACK_SLOP	1024
14188781Sjake
14289048Sjake/*
14289048Sjake * Sanity check the kernel stack and bail out if it's wrong.
14489048Sjake * XXX: doesn't handle being on the panic stack.
14589048Sjake */
/*
 * Saves %g1/%g2 on the ASP_REG scratch area.  Branches to
 * tl1_kstack_fault if %sp + SPOFF is misaligned (low PTR_SHIFT bits
 * set), or lies outside
 * [td_kstack + KSTACK_SLOP, td_kstack + KSTACK_PAGES * PAGE_SIZE].
 * The delay-slot "inc 16, ASP_REG" pops the scratch area on the way
 * out so tl1_kstack_fault sees a balanced ASP_REG.
 */
14688781Sjake#define	KSTACK_CHECK \
14788781Sjake	dec	16, ASP_REG ; \
14888781Sjake	stx	%g1, [ASP_REG + 0] ; \
14988781Sjake	stx	%g2, [ASP_REG + 8] ; \
15088781Sjake	add	%sp, SPOFF, %g1 ; \
15188781Sjake	andcc	%g1, (1 << PTR_SHIFT) - 1, %g0 ; \
15288781Sjake	bnz,a	%xcc, tl1_kstack_fault ; \
15388781Sjake	 inc	16, ASP_REG ; \
15488781Sjake	ldx	[PCPU(CURTHREAD)], %g2 ; \
15588781Sjake	ldx	[%g2 + TD_KSTACK], %g2 ; \
15688781Sjake	add	%g2, KSTACK_SLOP, %g2 ; \
15788781Sjake	subcc	%g1, %g2, %g1 ; \
15888781Sjake	ble,a	%xcc, tl1_kstack_fault ; \
15988781Sjake	 inc	16, ASP_REG ; \
16088781Sjake	set	KSTACK_PAGES * PAGE_SIZE, %g2 ; \
16188781Sjake	cmp	%g1, %g2 ; \
16288781Sjake	bgt,a	%xcc, tl1_kstack_fault ; \
16388781Sjake	 inc	16, ASP_REG ; \
16488781Sjake	ldx	[ASP_REG + 8], %g2 ; \
16588781Sjake	ldx	[ASP_REG + 0], %g1 ; \
16688781Sjake	inc	16, ASP_REG
16788781Sjake
/*
 * Handle a bad kernel stack detected by KSTACK_CHECK (or rsf_fatal).
 * Unwinds nested trap levels down to 2, logging the state at each
 * level, then resets the register window state, switches to the
 * ASP_REG scratch stack and enters tl1_trap with T_KSTACK_FAULT.
 *
 * Bug fix: the first CATR logs three values (tl, tpc, tnpc) but the
 * original stored all three into KTR_PARM1, so tpc and tnpc each
 * overwrote the previous value; they now go to KTR_PARM1/2/3 to match
 * the three %#lx conversions in the format string (and the PARM1-6
 * usage of the second CATR below).
 */
16888781SjakeENTRY(tl1_kstack_fault)
16988781Sjake	rdpr	%tl, %g1
17097263Sjake1:	cmp	%g1, 2
17197263Sjake	be,a	2f
17288781Sjake	 nop
17388781Sjake
17488781Sjake#if KTR_COMPILE & KTR_TRAP
17588781Sjake	CATR(KTR_TRAP, "tl1_kstack_fault: tl=%#lx tpc=%#lx tnpc=%#lx"
17697263Sjake	    , %g2, %g3, %g4, 7, 8, 9)
17797263Sjake	rdpr	%tl, %g3
17897263Sjake	stx	%g3, [%g2 + KTR_PARM1]
17997263Sjake	rdpr	%tpc, %g3
18097263Sjake	stx	%g3, [%g2 + KTR_PARM2]
18197263Sjake	rdpr	%tnpc, %g3
18297263Sjake	stx	%g3, [%g2 + KTR_PARM3]
18388781Sjake9:
18488781Sjake#endif
18588781Sjake
	/* Pop one trap level and re-check until we are back at tl 2. */
18697263Sjake	sub	%g1, 1, %g1
18797263Sjake	wrpr	%g1, 0, %tl
18897263Sjake	ba,a	%xcc, 1b
18997263Sjake	 nop
19097263Sjake
19188781Sjake2:
19288781Sjake#if KTR_COMPILE & KTR_TRAP
19388781Sjake	CATR(KTR_TRAP,
19488781Sjake	    "tl1_kstack_fault: sp=%#lx ks=%#lx cr=%#lx cs=%#lx ow=%#lx ws=%#lx"
19588781Sjake	    , %g1, %g2, %g3, 7, 8, 9)
19688781Sjake	add	%sp, SPOFF, %g2
19788781Sjake	stx	%g2, [%g1 + KTR_PARM1]
19888781Sjake	ldx	[PCPU(CURTHREAD)], %g2
19988781Sjake	ldx	[%g2 + TD_KSTACK], %g2
20088781Sjake	stx	%g2, [%g1 + KTR_PARM2]
20188781Sjake	rdpr	%canrestore, %g2
20288781Sjake	stx	%g2, [%g1 + KTR_PARM3]
20388781Sjake	rdpr	%cansave, %g2
20488781Sjake	stx	%g2, [%g1 + KTR_PARM4]
20588781Sjake	rdpr	%otherwin, %g2
20688781Sjake	stx	%g2, [%g1 + KTR_PARM5]
20788781Sjake	rdpr	%wstate, %g2
20888781Sjake	stx	%g2, [%g1 + KTR_PARM6]
20988781Sjake9:
21088781Sjake#endif
21188781Sjake
	/* Force the window state back to a known-good kernel default. */
21288781Sjake	wrpr	%g0, 0, %canrestore
21388781Sjake	wrpr	%g0, 6, %cansave
21488781Sjake	wrpr	%g0, 0, %otherwin
21588781Sjake	wrpr	%g0, WSTATE_KERNEL, %wstate
21688781Sjake
	/* Run on the ASP_REG scratch stack; the kernel stack is suspect. */
21789048Sjake	sub	ASP_REG, SPOFF + CCFSZ, %sp
21888781Sjake	clr	%fp
21988781Sjake
220103921Sjake	set	trap, %o2
22188781Sjake	b	%xcc, tl1_trap
22288781Sjake	 mov	T_KSTACK_FAULT | T_KERNEL, %o0
22388781SjakeEND(tl1_kstack_fault)
22488781Sjake
22582906Sjake/*
22682906Sjake * Magic to resume from a spill or fill trap.  If we get an alignment or an
22782906Sjake * mmu fault during a spill or a fill, this macro will detect the fault and
22888644Sjake * resume at a set instruction offset in the trap handler.
22982906Sjake *
23088644Sjake * To check if the previous trap was a spill/fill we convert the trapped pc
23188644Sjake * to a trap type and verify that it is in the range of spill/fill vectors.
23282906Sjake * The spill/fill vectors are types 0x80-0xff and 0x280-0x2ff, masking off the
23382906Sjake * tl bit allows us to detect both ranges with one test.
23482906Sjake *
23582906Sjake * This is:
23688644Sjake *	0x80 <= (((%tpc - %tba) >> 5) & ~0x200) < 0x100
23782906Sjake *
23882906Sjake * To calculate the new pc we take advantage of the xor feature of wrpr.
23982906Sjake * Forcing all the low bits of the trapped pc on we can produce any offset
24082906Sjake * into the spill/fill vector.  The size of a spill/fill trap vector is 0x80.
24182906Sjake *
24282906Sjake *	0x7f ^ 0x1f == 0x60
24382906Sjake *	0x1f == (0x80 - 0x60) - 1
24482906Sjake *
24586519Sjake * Which are the offset and xor value used to resume from alignment faults.
24682906Sjake */
24782906Sjake
24882906Sjake/*
24988644Sjake * Determine if we have trapped inside of a spill/fill vector, and if so resume
25088644Sjake * at a fixed instruction offset in the trap vector.  Must be called on
25188644Sjake * alternate globals.
25282906Sjake */
25388644Sjake#define	RESUME_SPILLFILL_MAGIC(stxa_g0_sfsr, xor) \
25488644Sjake	dec	16, ASP_REG ; \
25588644Sjake	stx	%g1, [ASP_REG + 0] ; \
25688644Sjake	stx	%g2, [ASP_REG + 8] ; \
25788644Sjake	rdpr	%tpc, %g1 ; \
25888644Sjake	ERRATUM50(%g1) ; \
25988644Sjake	rdpr	%tba, %g2 ; \
26088644Sjake	sub	%g1, %g2, %g2 ; \
26188644Sjake	srlx	%g2, 5, %g2 ; \
26288644Sjake	andn	%g2, 0x200, %g2 ; \
26388644Sjake	cmp	%g2, 0x80 ; \
26488644Sjake	blu,pt	%xcc, 9f ; \
26588644Sjake	 cmp	%g2, 0x100 ; \
26688644Sjake	bgeu,pt	%xcc, 9f ; \
26788644Sjake	 or	%g1, 0x7f, %g1 ; \
26888644Sjake	wrpr	%g1, xor, %tnpc ; \
26988644Sjake	stxa_g0_sfsr ; \
27088644Sjake	ldx	[ASP_REG + 8], %g2 ; \
27188644Sjake	ldx	[ASP_REG + 0], %g1 ; \
27288644Sjake	inc	16, ASP_REG ; \
27388644Sjake	done ; \
27488644Sjake9:	ldx	[ASP_REG + 8], %g2 ; \
27588644Sjake	ldx	[ASP_REG + 0], %g1 ; \
27688644Sjake	inc	16, ASP_REG
27782906Sjake
27888644Sjake/*
27988644Sjake * For certain faults we need to clear the sfsr mmu register before returning.
28088644Sjake */
28188644Sjake#define	RSF_CLR_SFSR \
28288644Sjake	wr	%g0, ASI_DMMU, %asi ; \
28388644Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
28488644Sjake
28582906Sjake#define	RSF_XOR(off)	((0x80 - off) - 1)
28682906Sjake
28782906Sjake/*
28882906Sjake * Instruction offsets in spill and fill trap handlers for handling certain
28982906Sjake * nested traps, and corresponding xor constants for wrpr.
29082906Sjake */
29186519Sjake#define	RSF_OFF_ALIGN	0x60
29286519Sjake#define	RSF_OFF_MMU	0x70
29382906Sjake
/*
 * Resume in the trapped spill/fill vector at the alignment or mmu
 * fixup offset, optionally clearing the dmmu sfsr on the way out.
 */
29488644Sjake#define	RESUME_SPILLFILL_ALIGN \
29588644Sjake	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_ALIGN))
29688644Sjake#define	RESUME_SPILLFILL_MMU \
29788644Sjake	RESUME_SPILLFILL_MAGIC(EMPTY, RSF_XOR(RSF_OFF_MMU))
29888644Sjake#define	RESUME_SPILLFILL_MMU_CLR_SFSR \
29988644Sjake	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_MMU))
30082906Sjake
30182906Sjake/*
30282906Sjake * Constant to add to %tnpc when taking a fill trap just before returning to
30388644Sjake * user mode.
30482906Sjake */
30582906Sjake#define	RSF_FILL_INC	tl0_ret_fill_end - tl0_ret_fill
30682906Sjake
/*
 * The .align 16 in each macro below keeps every resume vector in its
 * own fixed 16-byte (4 instruction) slot at the end of the handler.
 */
30782906Sjake/*
30882906Sjake * Retry a spill or fill with a different wstate due to an alignment fault.
30982906Sjake * We may just be using the wrong stack offset.
31082906Sjake */
31182906Sjake#define	RSF_ALIGN_RETRY(ws) \
31282906Sjake	wrpr	%g0, (ws), %wstate ; \
31382906Sjake	retry ; \
31482906Sjake	.align	16
31582906Sjake
31682906Sjake/*
31782906Sjake * Generate a T_SPILL or T_FILL trap if the window operation fails.
31882906Sjake */
31982906Sjake#define	RSF_TRAP(type) \
32082906Sjake	b	%xcc, tl0_sftrap ; \
32182906Sjake	 mov	type, %g2 ; \
32282906Sjake	.align	16
32382906Sjake
32482906Sjake/*
32582906Sjake * Game over if the window operation fails.
32682906Sjake */
32782906Sjake#define	RSF_FATAL(type) \
32888781Sjake	b	%xcc, rsf_fatal ; \
32988781Sjake	 mov	type, %g2 ; \
33082906Sjake	.align	16
33182906Sjake
33282906Sjake/*
33282906Sjake * Magic to resume from a failed fill a few instructions after the corresponding
33382906Sjake * restore.  This is used on return from the kernel to user mode.
33582906Sjake */
/*
 * Bump %tnpc past the fill sequence (RSF_FILL_INC bytes) so execution
 * resumes just after the restore that triggered the fill.
 */
33682906Sjake#define	RSF_FILL_MAGIC \
33782906Sjake	rdpr	%tnpc, %g1 ; \
33882906Sjake	add	%g1, RSF_FILL_INC, %g1 ; \
33982906Sjake	wrpr	%g1, 0, %tnpc ; \
34082906Sjake	done ; \
34182906Sjake	.align	16
34282906Sjake
34382906Sjake/*
34482906Sjake * Spill to the pcb if a spill to the user stack in kernel mode fails.
34582906Sjake */
34682906Sjake#define	RSF_SPILL_TOPCB \
34782906Sjake	b,a	%xcc, tl1_spill_topcb ; \
34882906Sjake	 nop ; \
34982906Sjake	.align	16
35082906Sjake
/*
 * Unrecoverable window spill/fill failure.  %g2 holds the type passed
 * by RSF_FATAL.  Log %tt and the type, sanity-check the kernel stack
 * (which diverts to tl1_kstack_fault if bad), then reset via sir.
 */
35188781SjakeENTRY(rsf_fatal)
35288781Sjake#if KTR_COMPILE & KTR_TRAP
35388781Sjake	CATR(KTR_TRAP, "rsf_fatal: bad window trap tt=%#lx type=%#lx"
35488781Sjake	    , %g1, %g3, %g4, 7, 8, 9)
35588781Sjake	rdpr	%tt, %g3
35688781Sjake	stx	%g3, [%g1 + KTR_PARM1]
35788781Sjake	stx	%g2, [%g1 + KTR_PARM2]
35888781Sjake9:
35988781Sjake#endif
36088781Sjake
36188781Sjake	KSTACK_CHECK
36288781Sjake
36388781Sjake	sir
36488781SjakeEND(rsf_fatal)
36588781Sjake
/*
 * Interrupt name and counter tables, one 8-byte slot per interrupt
 * vector; the zero-sized e* symbols mark the table ends.
 */
36697265Sjake	.comm	intrnames, IV_MAX * 8
36785243Sjake	.comm	eintrnames, 0
36880709Sjake
36997265Sjake	.comm	intrcnt, IV_MAX * 8
37085243Sjake	.comm	eintrcnt, 0
37180709Sjake
37282906Sjake/*
37382906Sjake * Trap table and associated macros
37482906Sjake *
37582906Sjake * Due to its size a trap table is an inherently hard thing to represent in
37682906Sjake * code in a clean way.  There are approximately 1024 vectors, of 8 or 32
37782906Sjake * instructions each, many of which are identical.  The way that this is
37882906Sjake * laid out is that the instructions (8 or 32) for the actual trap vector appear
37982906Sjake * as an AS macro.  In general this code branches to tl0_trap or tl1_trap,
38082906Sjake * but if not supporting code can be placed just after the definition of the
38182906Sjake * macro.  The macros are then instantiated in a different section (.trap),
38282906Sjake * which is setup to be placed by the linker at the beginning of .text, and the
38382906Sjake * code around the macros is moved to the end of trap table.  In this way the
38482906Sjake * code that must be sequential in memory can be split up, and located near
38582906Sjake * its supporting code so that it is easier to follow.
38682906Sjake */
38782906Sjake
38882906Sjake	/*
38982906Sjake	 * Clean window traps occur when %cleanwin is zero to ensure that data
39082906Sjake	 * is not leaked between address spaces in registers.
39182906Sjake	 */
39280709Sjake	.macro	clean_window
	/*
	 * Zero the outs and locals; the ins are shared with the adjacent
	 * window, so clearing outs and locals in each window suffices.
	 */
39380709Sjake	clr	%o0
39480709Sjake	clr	%o1
39580709Sjake	clr	%o2
39680709Sjake	clr	%o3
39780709Sjake	clr	%o4
39880709Sjake	clr	%o5
39980709Sjake	clr	%o6
40080709Sjake	clr	%o7
40180709Sjake	clr	%l0
40280709Sjake	clr	%l1
40380709Sjake	clr	%l2
40480709Sjake	clr	%l3
40580709Sjake	clr	%l4
40680709Sjake	clr	%l5
40780709Sjake	clr	%l6
	/* Count one more window as clean before using the last local. */
40880709Sjake	rdpr	%cleanwin, %l7
40980709Sjake	inc	%l7
41080709Sjake	wrpr	%l7, 0, %cleanwin
41180709Sjake	clr	%l7
41280709Sjake	retry
41380709Sjake	.align	128
41480709Sjake	.endm
41580709Sjake
41681380Sjake	/*
41782906Sjake	 * Stack fixups for entry from user mode.  We are still running on the
41882906Sjake	 * user stack, and with its live registers, so we must save soon.  We
41982906Sjake	 * are on alternate globals so we do have some registers.  Set the
42088644Sjake	 * transitional window state, and do the save.  If this traps, we
42188644Sjake	 * attempt to spill a window to the user stack.  If this fails,
42288644Sjake	 * we spill the window to the pcb and continue.  Spilling to the pcb
42388644Sjake	 * must not fail.
42482906Sjake	 *
42582906Sjake	 * NOTE: Must be called with alternate globals and clobbers %g1.
42681380Sjake	 */
42782906Sjake
42888644Sjake	.macro	tl0_split
	/*
	 * Switch to the transitional window state and save away from the
	 * live user window; clobbers %g1 (see the block comment above).
	 */
42982906Sjake	rdpr	%wstate, %g1
43082906Sjake	wrpr	%g1, WSTATE_TRANSITION, %wstate
43181380Sjake	save
43281380Sjake	.endm
43381380Sjake
43482906Sjake	.macro	tl0_setup	type
	/*
	 * Enter the common user-mode trap path (tl0_utrap) with %o0 set
	 * to the trap type and no argument in %o1; %o2 carries trap().
	 */
43588644Sjake	tl0_split
436108374Sjake	clr	%o1
437103921Sjake	set	trap, %o2
438103897Sjake	ba	%xcc, tl0_utrap
43982906Sjake	 mov	\type, %o0
44081380Sjake	.endm
44181380Sjake
44281380Sjake	/*
44382906Sjake	 * Generic trap type.  Call trap() with the specified type.
44481380Sjake	 */
44580709Sjake	.macro	tl0_gen		type
44682906Sjake	tl0_setup \type
44780709Sjake	.align	32
44880709Sjake	.endm
44980709Sjake
45082906Sjake	/*
45182906Sjake	 * This is used to suck up the massive swaths of reserved trap types.
45282906Sjake	 * Generates count "reserved" trap vectors.
45382906Sjake	 */
45480709Sjake	.macro	tl0_reserved	count
45580709Sjake	.rept	\count
45680709Sjake	tl0_gen	T_RESERVED
45780709Sjake	.endr
45880709Sjake	.endm
45980709Sjake
460109810Sjake	.macro	tl1_split
	/*
	 * Nested (tl1) trap entry: switch to the nested window state and
	 * allocate room for a trap frame on the kernel stack.  Clobbers
	 * %g1.
	 */
461109810Sjake	rdpr	%wstate, %g1
462109810Sjake	wrpr	%g1, WSTATE_NESTED, %wstate
463109810Sjake	save	%sp, -(CCFSZ + TF_SIZEOF), %sp
464109810Sjake	.endm
465109810Sjake
	/*
	 * Enter tl1_trap with the given type, marked T_KERNEL.
	 */
466109810Sjake	.macro	tl1_setup	type
467109810Sjake	tl1_split
468109810Sjake	clr	%o1
469109810Sjake	set	trap, %o2
470109810Sjake	b	%xcc, tl1_trap
471109810Sjake	 mov	\type | T_KERNEL, %o0
472109810Sjake	.endm
473109810Sjake
	/*
	 * Generic tl1 trap vector.
	 */
474109810Sjake	.macro	tl1_gen		type
475109810Sjake	tl1_setup \type
476109810Sjake	.align	32
477109810Sjake	.endm
478109810Sjake
	/*
	 * Generate count reserved tl1 trap vectors.
	 */
479109810Sjake	.macro	tl1_reserved	count
480109810Sjake	.rept	\count
481109810Sjake	tl1_gen	T_RESERVED
482109810Sjake	.endr
483109810Sjake	.endm
484109810Sjake
48588644Sjake	.macro	tl0_insn_excptn
	/*
	 * Instruction access exception from user mode: collect %tpc (%g3)
	 * and the immu sfsr (%g4), clear the sfsr, and hand off to
	 * tl0_sfsr_trap with the type in %g2.
	 */
486101899Sjake	wrpr	%g0, PSTATE_ALT, %pstate
48788644Sjake	wr	%g0, ASI_IMMU, %asi
48888644Sjake	rdpr	%tpc, %g3
48988644Sjake	ldxa	[%g0 + AA_IMMU_SFSR] %asi, %g4
49088644Sjake	stxa	%g0, [%g0 + AA_IMMU_SFSR] %asi
49188644Sjake	membar	#Sync
49288644Sjake	b	%xcc, tl0_sfsr_trap
49388644Sjake	 mov	T_INSTRUCTION_EXCEPTION, %g2
49488644Sjake	.align	32
49588644Sjake	.endm
49688644Sjake
49782906Sjake	.macro	tl0_data_excptn
	/*
	 * Data access exception from user mode: collect the dmmu sfar
	 * (%g3) and sfsr (%g4), clear the sfsr, and hand off to
	 * tl0_sfsr_trap.
	 */
498101899Sjake	wrpr	%g0, PSTATE_ALT, %pstate
49982906Sjake	wr	%g0, ASI_DMMU, %asi
50082906Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
50182906Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
50288644Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
50388644Sjake	membar	#Sync
50482906Sjake	b	%xcc, tl0_sfsr_trap
50588644Sjake	 mov	T_DATA_EXCEPTION, %g2
50682906Sjake	.align	32
50782906Sjake	.endm
50882906Sjake
50982005Sjake	.macro	tl0_align
	/*
	 * Alignment fault from user mode: same dmmu sfar/sfsr collection
	 * as tl0_data_excptn, with T_MEM_ADDRESS_NOT_ALIGNED.
	 */
51082906Sjake	wr	%g0, ASI_DMMU, %asi
51182906Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
51282906Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
51388644Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
51488644Sjake	membar	#Sync
51582005Sjake	b	%xcc, tl0_sfsr_trap
51688644Sjake	 mov	T_MEM_ADDRESS_NOT_ALIGNED, %g2
51782005Sjake	.align	32
51882005Sjake	.endm
51982005Sjake
/*
 * Common tail for the sfsr-collecting vectors above.  On entry:
 * %g2 = trap type, %g3 = sfar or tpc, %g4 = sfsr; forwarded to
 * trap() via tl0_utrap as %o0/%o4/%o5.
 */
52082005SjakeENTRY(tl0_sfsr_trap)
52188644Sjake	tl0_split
522108374Sjake	clr	%o1
523103921Sjake	set	trap, %o2
52488644Sjake	mov	%g3, %o4
52588644Sjake	mov	%g4, %o5
526103897Sjake	ba	%xcc, tl0_utrap
52782906Sjake	 mov	%g2, %o0
52882005SjakeEND(tl0_sfsr_trap)
52982005Sjake
53082906Sjake	.macro	tl0_intr level, mask
	/*
	 * Interrupt from user mode: enter the tl0_intr handler with the
	 * level in %o0 and the pil mask in %o1.
	 */
53188644Sjake	tl0_split
53291246Sjake	set	\mask, %o1
53384186Sjake	b	%xcc, tl0_intr
53491246Sjake	 mov	\level, %o0
53581380Sjake	.align	32
53681380Sjake	.endm
53781380Sjake
53881380Sjake#define	INTR(level, traplvl)						\
53982906Sjake	tl ## traplvl ## _intr	level, 1 << level
54081380Sjake
54181380Sjake#define	TICK(traplvl) \
54282906Sjake	tl ## traplvl ## _intr	PIL_TICK, 1
54381380Sjake
/*
 * Interrupt vectors for levels 1-15; level 14 is the tick interrupt.
 */
54481380Sjake#define	INTR_LEVEL(tl)							\
54581380Sjake	INTR(1, tl) ;							\
54681380Sjake	INTR(2, tl) ;							\
54781380Sjake	INTR(3, tl) ;							\
54881380Sjake	INTR(4, tl) ;							\
54981380Sjake	INTR(5, tl) ;							\
55081380Sjake	INTR(6, tl) ;							\
55181380Sjake	INTR(7, tl) ;							\
55281380Sjake	INTR(8, tl) ;							\
55381380Sjake	INTR(9, tl) ;							\
55481380Sjake	INTR(10, tl) ;							\
55581380Sjake	INTR(11, tl) ;							\
55681380Sjake	INTR(12, tl) ;							\
55781380Sjake	INTR(13, tl) ;							\
55881380Sjake	TICK(tl) ;							\
55981380Sjake	INTR(15, tl) ;
56081380Sjake
56180709Sjake	.macro	tl0_intr_level
56281380Sjake	INTR_LEVEL(0)
56380709Sjake	.endm
56480709Sjake
56597265Sjake	.macro	intr_vector
	/*
	 * Interrupt vector trap: if the interrupt receive register shows
	 * a pending vector (IRSR_BUSY), branch to the intr_vector handler
	 * (presumably defined elsewhere in this file -- not visible in
	 * this chunk); a spurious vector falls through to sir.
	 */
56697265Sjake	ldxa	[%g0] ASI_INTR_RECEIVE, %g1
56797265Sjake	andcc	%g1, IRSR_BUSY, %g0
568104075Sjake	bnz,a,pt %xcc, intr_vector
56997265Sjake	 nop
57097265Sjake	sir
57181380Sjake	.align	32
57280709Sjake	.endm
57380709Sjake
574109860Sjake	.macro	tl0_immu_miss
	/*
	 * TL0 instruction mmu miss: probe the user tsb for a matching,
	 * executable tte, trying every supported page size from TS_MIN to
	 * TS_MAX.  On a hit, load the itlb and retry; otherwise fall
	 * through to C code via tl0_immu_miss_trap.  Register usage:
	 *	%g1 - immu tag access register contents
	 *	%g2 - current page size index
	 *	%g3 - page shift, then tte tag target
	 *	%g4 - tsb bucket pointer
	 *	%g6, %g7 - tte tag and data loaded from the tsb
	 */
57581380Sjake	/*
576109860Sjake	 * Load the virtual page number and context from the tag access
577109860Sjake	 * register.  We ignore the context.
578109860Sjake	 */
579109860Sjake	wr	%g0, ASI_IMMU, %asi
580109860Sjake	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g1
581109860Sjake
582109860Sjake	/*
583102040Sjake	 * Initialize the page size walker.
584102040Sjake	 */
585102040Sjake	mov	TS_MIN, %g2
586102040Sjake
587102040Sjake	/*
588102040Sjake	 * Loop over all supported page sizes.
589102040Sjake	 */
590102040Sjake
591102040Sjake	/*
592102040Sjake	 * Compute the page shift for the page size we are currently looking
593102040Sjake	 * for.
594102040Sjake	 */
595102040Sjake1:	add	%g2, %g2, %g3
596102040Sjake	add	%g3, %g2, %g3
597102040Sjake	add	%g3, PAGE_SHIFT, %g3
598102040Sjake
599102040Sjake	/*
60091224Sjake	 * Extract the virtual page number from the contents of the tag
60191224Sjake	 * access register.
60281380Sjake	 */
603102040Sjake	srlx	%g1, %g3, %g3
60481380Sjake
60581380Sjake	/*
60691224Sjake	 * Compute the tte bucket address.
60781380Sjake	 */
608102040Sjake	ldxa	[%g0 + AA_IMMU_TSB] %asi, %g5
609102040Sjake	and	%g3, TSB_BUCKET_MASK, %g4
610102040Sjake	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
611102040Sjake	add	%g4, %g5, %g4
61281380Sjake
61381380Sjake	/*
614102040Sjake	 * Compute the tte tag target.
61581380Sjake	 */
616102040Sjake	sllx	%g3, TV_SIZE_BITS, %g3
617102040Sjake	or	%g3, %g2, %g3
61881380Sjake
61981380Sjake	/*
620102040Sjake	 * Loop over the ttes in this bucket
62181380Sjake	 */
62281380Sjake
62381380Sjake	/*
624102040Sjake	 * Load the tte.  Note that this instruction may fault, clobbering
625102040Sjake	 * the contents of the tag access register, %g5, %g6, and %g7.  We
626102040Sjake	 * do not use %g5, and %g6 and %g7 are not used until this instruction
627102040Sjake	 * completes successfully.
62881380Sjake	 */
629102040Sjake2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
63081380Sjake
63181380Sjake	/*
632102040Sjake	 * Check that it's valid and executable and that the tte tags match.
63381380Sjake	 */
634102040Sjake	brgez,pn %g7, 3f
635102040Sjake	 andcc	%g7, TD_EXEC, %g0
636102040Sjake	bz,pn	%xcc, 3f
637102040Sjake	 cmp	%g3, %g6
638102040Sjake	bne,pn	%xcc, 3f
63988644Sjake	 EMPTY
64081380Sjake
64181380Sjake	/*
64281380Sjake	 * We matched a tte, load the tlb.
64381380Sjake	 */
64481380Sjake
64581380Sjake	/*
64681380Sjake	 * Set the reference bit, if it's currently clear.
64781380Sjake	 */
648102040Sjake	 andcc	%g7, TD_REF, %g0
64982906Sjake	bz,a,pn	%xcc, tl0_immu_miss_set_ref
65081380Sjake	 nop
65181380Sjake
65281380Sjake	/*
65391224Sjake	 * Load the tte tag and data into the tlb and retry the instruction.
65481380Sjake	 */
655102040Sjake	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
656102040Sjake	stxa	%g7, [%g0] ASI_ITLB_DATA_IN_REG
65781380Sjake	retry
65881380Sjake
65981380Sjake	/*
660102040Sjake	 * Advance to the next tte in this bucket, and check the low bits
661102040Sjake	 * of the bucket pointer to see if we've finished the bucket.
66281380Sjake	 */
663102040Sjake3:	add	%g4, 1 << TTE_SHIFT, %g4
664102040Sjake	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
665102040Sjake	bnz,pt	%xcc, 2b
666102040Sjake	 EMPTY
66791224Sjake
66891224Sjake	/*
669102040Sjake	 * See if we just checked the largest page size, and advance to the
670102040Sjake	 * next one if not.
67191224Sjake	 */
672102040Sjake	 cmp	%g2, TS_MAX
673102040Sjake	bne,pt	%xcc, 1b
674102040Sjake	 add	%g2, 1, %g2
67591224Sjake
67696207Sjake	/*
677102040Sjake	 * Not in user tsb, call c code.
678102040Sjake	 */
679102040Sjake	ba,a	%xcc, tl0_immu_miss_trap
68081380Sjake	.align	128
68180709Sjake	.endm
68280709Sjake
/*
 * Slow path from tl0_immu_miss: atomically set the ref bit in the tte
 * (%g4 = tte pointer from the miss handler), then reload the itlb and
 * retry if the tte is still valid.
 */
68382906SjakeENTRY(tl0_immu_miss_set_ref)
68481380Sjake	/*
68581380Sjake	 * Set the reference bit.
68681380Sjake	 */
687102040Sjake	TTE_SET_REF(%g4, %g2, %g3)
68881380Sjake
68981380Sjake	/*
690102040Sjake	 * May have become invalid during casxa, in which case start over.
69181380Sjake	 */
692102040Sjake	brgez,pn %g2, 1f
693102040Sjake	 nop
69481380Sjake
69581380Sjake	/*
69691224Sjake	 * Load the tte tag and data into the tlb and retry the instruction.
69781380Sjake	 */
698102040Sjake	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
699102040Sjake	stxa	%g2, [%g0] ASI_ITLB_DATA_IN_REG
70091224Sjake1:	retry
70182906SjakeEND(tl0_immu_miss_set_ref)
70281380Sjake
/*
 * Miss not satisfied from the tsb: restore the tag access register,
 * switch to alternate globals, and enter trap() with
 * T_INSTRUCTION_MISS and the tag access value in %o3.
 */
70382906SjakeENTRY(tl0_immu_miss_trap)
70481380Sjake	/*
70596207Sjake	 * Put back the contents of the tag access register, in case we
70696207Sjake	 * faulted.
70796207Sjake	 */
708102040Sjake	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
70996207Sjake	membar	#Sync
71096207Sjake
71196207Sjake	/*
71282906Sjake	 * Switch to alternate globals.
71382906Sjake	 */
71482906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
71582906Sjake
71682906Sjake	/*
71791224Sjake	 * Reload the tag access register.
71881380Sjake	 */
71991224Sjake	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2
72081380Sjake
72181380Sjake	/*
72291224Sjake	 * Save the tag access register, and call common trap code.
72381380Sjake	 */
72488644Sjake	tl0_split
725108374Sjake	clr	%o1
726103921Sjake	set	trap, %o2
72791224Sjake	mov	%g2, %o3
72882906Sjake	b	%xcc, tl0_trap
72988644Sjake	 mov	T_INSTRUCTION_MISS, %o0
73082906SjakeEND(tl0_immu_miss_trap)
73181380Sjake
732109860Sjake	.macro	tl0_dmmu_miss
	/*
	 * TL0 data mmu miss: same tsb walk as tl0_immu_miss but against
	 * the dmmu registers and without the executable check.  The
	 * tl1_dmmu_miss_user label below lets the tl1 handler join this
	 * path after loading the tag access register itself.  Register
	 * usage matches tl0_immu_miss (%g1 tag access, %g2 page size,
	 * %g3 tag target, %g4 bucket, %g6/%g7 tte tag/data).
	 */
73381180Sjake	/*
734109860Sjake	 * Load the virtual page number and context from the tag access
735109860Sjake	 * register.  We ignore the context.
736109860Sjake	 */
737109860Sjake	wr	%g0, ASI_DMMU, %asi
738109860Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1
739109860Sjake
740109860Sjake	/*
741102040Sjake	 * Initialize the page size walker.
742102040Sjake	 */
743109860Sjaketl1_dmmu_miss_user:
744102040Sjake	mov	TS_MIN, %g2
745102040Sjake
746102040Sjake	/*
747102040Sjake	 * Loop over all supported page sizes.
748102040Sjake	 */
749102040Sjake
750102040Sjake	/*
751102040Sjake	 * Compute the page shift for the page size we are currently looking
752102040Sjake	 * for.
753102040Sjake	 */
754102040Sjake1:	add	%g2, %g2, %g3
755102040Sjake	add	%g3, %g2, %g3
756102040Sjake	add	%g3, PAGE_SHIFT, %g3
757102040Sjake
758102040Sjake	/*
75991224Sjake	 * Extract the virtual page number from the contents of the tag
76091224Sjake	 * access register.
76191224Sjake	 */
762102040Sjake	srlx	%g1, %g3, %g3
76391224Sjake
76491224Sjake	/*
76588644Sjake	 * Compute the tte bucket address.
76681180Sjake	 */
767102040Sjake	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g5
768102040Sjake	and	%g3, TSB_BUCKET_MASK, %g4
769102040Sjake	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
770102040Sjake	add	%g4, %g5, %g4
77181180Sjake
77281180Sjake	/*
773102040Sjake	 * Compute the tte tag target.
77481180Sjake	 */
775102040Sjake	sllx	%g3, TV_SIZE_BITS, %g3
776102040Sjake	or	%g3, %g2, %g3
77781180Sjake
77881180Sjake	/*
779102040Sjake	 * Loop over the ttes in this bucket
78081180Sjake	 */
78181180Sjake
78281180Sjake	/*
783102040Sjake	 * Load the tte.  Note that this instruction may fault, clobbering
784102040Sjake	 * the contents of the tag access register, %g5, %g6, and %g7.  We
785102040Sjake	 * do not use %g5, and %g6 and %g7 are not used until this instruction
786102040Sjake	 * completes successfully.
78781180Sjake	 */
788102040Sjake2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
78981180Sjake
79081180Sjake	/*
79191224Sjake	 * Check that it's valid and that the virtual page numbers match.
79281180Sjake	 */
793102040Sjake	brgez,pn %g7, 3f
794102040Sjake	 cmp	%g3, %g6
795102040Sjake	bne,pn	%xcc, 3f
79688644Sjake	 EMPTY
79781180Sjake
79881180Sjake	/*
79981180Sjake	 * We matched a tte, load the tlb.
80081180Sjake	 */
80181180Sjake
80281180Sjake	/*
80381180Sjake	 * Set the reference bit, if it's currently clear.
80481180Sjake	 */
805102040Sjake	 andcc	%g7, TD_REF, %g0
806109860Sjake	bz,a,pn	%xcc, tl0_dmmu_miss_set_ref
80781180Sjake	 nop
80881180Sjake
80981180Sjake	/*
81091224Sjake	 * Load the tte tag and data into the tlb and retry the instruction.
81181180Sjake	 */
812102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
813102040Sjake	stxa	%g7, [%g0] ASI_DTLB_DATA_IN_REG
81481180Sjake	retry
81581180Sjake
81681180Sjake	/*
817102040Sjake	 * Advance to the next tte in this bucket, and check the low bits
818102040Sjake	 * of the bucket pointer to see if we've finished the bucket.
81981180Sjake	 */
820102040Sjake3:	add	%g4, 1 << TTE_SHIFT, %g4
821102040Sjake	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
822102040Sjake	bnz,pt	%xcc, 2b
823102040Sjake	 EMPTY
824102040Sjake
825102040Sjake	/*
826102040Sjake	 * See if we just checked the largest page size, and advance to the
827102040Sjake	 * next one if not.
828102040Sjake	 */
829102040Sjake	 cmp	%g2, TS_MAX
830102040Sjake	bne,pt	%xcc, 1b
831102040Sjake	 add	%g2, 1, %g2
832109860Sjake
833109860Sjake	/*
834109860Sjake	 * Not in user tsb, call c code.
835109860Sjake	 */
836109860Sjake	ba,a	%xcc, tl0_dmmu_miss_trap
837109860Sjake	.align	128
83881180Sjake	.endm
83981180Sjake
/*
 * Slow path from tl0_dmmu_miss: atomically set the ref bit in the tte
 * (%g4 = tte pointer from the miss handler), then reload the dtlb and
 * retry if the tte is still valid.
 */
840109860SjakeENTRY(tl0_dmmu_miss_set_ref)
84181180Sjake	/*
84281180Sjake	 * Set the reference bit.
84381180Sjake	 */
844102040Sjake	TTE_SET_REF(%g4, %g2, %g3)
84581180Sjake
84681180Sjake	/*
847102040Sjake	 * May have become invalid during casxa, in which case start over.
84881180Sjake	 */
849102040Sjake	brgez,pn %g2, 1f
850102040Sjake	 nop
85181180Sjake
85281180Sjake	/*
85391224Sjake	 * Load the tte tag and data into the tlb and retry the instruction.
85481180Sjake	 */
855102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
856102040Sjake	stxa	%g2, [%g0] ASI_DTLB_DATA_IN_REG
85791224Sjake1:	retry
858109860SjakeEND(tl0_dmmu_miss_set_ref)
85981180Sjake
/*
 * A tl0 dmmu miss that could not be satisfied from the user tsb ends up
 * here.  If the miss was actually taken at tl > 1 (i.e. from kernel window
 * spill/fill code), it is resumed via RESUME_SPILLFILL_MMU and reported
 * through tl1_trap instead of tl0_trap.  %g1 holds the saved tag access
 * register contents.
 */
ENTRY(tl0_dmmu_miss_trap)
	/*
	 * Put back the contents of the tag access register, in case we
	 * faulted.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	membar	#Sync

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Check if we actually came from the kernel.
	 */
	rdpr	%tl, %g1
	cmp	%g1, 1
	bgt,a,pn %xcc, 1f
	 nop

	/*
	 * Reload the tag access register.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2

	/*
	 * Save the tag access register and call common trap code.
	 */
	tl0_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	b	%xcc, tl0_trap
	 mov	T_DATA_MISS, %o0

	/*
	 * Handle faults during window spill/fill.
	 */
1:	RESUME_SPILLFILL_MMU

	/*
	 * Reload the tag access register.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2

	/* Report the kernel-mode miss through the tl1 trap path. */
	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	b	%xcc, tl1_trap
	 mov	T_DATA_MISS | T_KERNEL, %o0
END(tl0_dmmu_miss_trap)
91381180Sjake
	/*
	 * Trap table stub for tl0 dmmu protection faults.  The real handler
	 * (tl0_dmmu_prot_1) does not fit in a table slot, so just branch to
	 * it; the slot is padded to the full 128 bytes.
	 */
	.macro	tl0_dmmu_prot
	ba,a	%xcc, tl0_dmmu_prot_1
	 nop
	.align	128
	.endm
919109860Sjake
/*
 * Out-of-line tl0 dmmu protection fault handler.  Walks every supported
 * page size (TS_MIN..TS_MAX), probing the user tsb bucket for a valid,
 * software-writable (TD_SW) tte matching the faulting virtual page.  On a
 * hit it sets the hardware write bit, demaps the stale TLB entry and loads
 * the updated tte; otherwise it falls through to C code.  Also entered at
 * the tl1_dmmu_prot_user label from the tl1 handler for faults on user
 * addresses (with the tag access contents already in %g1).
 */
ENTRY(tl0_dmmu_prot_1)
	/*
	 * Load the virtual page number and context from the tag access
	 * register.  We ignore the context.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1

	/*
	 * Initialize the page size walker.
	 */
tl1_dmmu_prot_user:
	mov	TS_MIN, %g2

	/*
	 * Loop over all supported page sizes.
	 */

	/*
	 * Compute the page shift for the page size we are currently looking
	 * for.  %g3 = 3 * %g2 + PAGE_SHIFT.
	 */
1:	add	%g2, %g2, %g3
	add	%g3, %g2, %g3
	add	%g3, PAGE_SHIFT, %g3

	/*
	 * Extract the virtual page number from the contents of the tag
	 * access register.
	 */
	srlx	%g1, %g3, %g3

	/*
	 * Compute the tte bucket address.
	 */
	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g5
	and	%g3, TSB_BUCKET_MASK, %g4
	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
	add	%g4, %g5, %g4

	/*
	 * Compute the tte tag target (virtual page number | page size).
	 */
	sllx	%g3, TV_SIZE_BITS, %g3
	or	%g3, %g2, %g3

	/*
	 * Loop over the ttes in this bucket.
	 */

	/*
	 * Load the tte.  Note that this instruction may fault, clobbering
	 * the contents of the tag access register, %g5, %g6, and %g7.  We
	 * do not use %g5, and %g6 and %g7 are not used until this instruction
	 * completes successfully.
	 */
2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and writable and that the virtual page
	 * numbers match.
	 */
	brgez,pn %g7, 4f
	 andcc	%g7, TD_SW, %g0
	bz,pn	%xcc, 4f
	 cmp	%g3, %g6
	bne,pn	%xcc, 4f
	 nop

	/*
	 * Set the hardware write bit.  TTE_SET_W leaves the updated tte
	 * data in %g2.
	 */
	TTE_SET_W(%g4, %g2, %g3)

	/*
	 * Delete the old TLB entry and clear the sfsr.
	 */
	srlx	%g1, PAGE_SHIFT, %g3
	sllx	%g3, PAGE_SHIFT, %g3
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g2, 3f
	 or	%g2, TD_W, %g2

	/*
	 * Load the tte data into the tlb and retry the instruction.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	stxa	%g2, [%g0] ASI_DTLB_DATA_IN_REG
3:	retry

	/*
	 * Check the low bits to see if we've finished the bucket.
	 */
4:	add	%g4, 1 << TTE_SHIFT, %g4
	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
	bnz,pt	%xcc, 2b
	 EMPTY

	/*
	 * See if we just checked the largest page size, and advance to the
	 * next one if not.
	 */
	 cmp	%g2, TS_MAX
	bne,pt	%xcc, 1b
	 add	%g2, 1, %g2

	/*
	 * Not in user tsb, call c code.
	 */
	b,a	%xcc, tl0_dmmu_prot_trap
	 nop
END(tl0_dmmu_prot_1)
103891224Sjake
/*
 * Common trap path for tl0 dmmu protection faults not satisfied by
 * tl0_dmmu_prot_1.  Reads the mmu tar/sfar/sfsr registers, clears the
 * sfsr and calls C code.  Faults taken at tl > 1 (window spill/fill) are
 * resumed via RESUME_SPILLFILL_MMU_CLR_SFSR and reported through tl1_trap.
 * %g1 holds the saved tag access register contents.
 */
ENTRY(tl0_dmmu_prot_trap)
	/*
	 * Put back the contents of the tag access register, in case we
	 * faulted.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	membar	#Sync

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Check if we actually came from the kernel.
	 */
	rdpr	%tl, %g1
	cmp	%g1, 1
	bgt,a,pn %xcc, 1f
	 nop

	/*
	 * Load the tar, sfar and sfsr.  Clear the sfsr.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	/*
	 * Save the mmu registers and call common trap code.
	 */
	tl0_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl0_utrap
	 mov	T_DATA_PROTECTION, %o0

	/*
	 * Handle faults during window spill/fill.
	 */
1:	RESUME_SPILLFILL_MMU_CLR_SFSR

	/*
	 * Load the sfar, sfsr and tar.  Clear the sfsr.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	/* Report the kernel-mode fault through the tl1 trap path. */
	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	mov	%g3, %o4
	mov	%g4, %o5
	b	%xcc, tl1_trap
	 mov	T_DATA_PROTECTION | T_KERNEL, %o0
END(tl0_dmmu_prot_trap)
110481180Sjake
	/*
	 * Spill a 64-bit register window to the user's stack, accessed via
	 * ASI_AIUP (as-if-user, primary).  MMU faults in the SPILL body
	 * vector to the RSF_TRAP(T_SPILL) continuation code.
	 */
	.macro	tl0_spill_0_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_TRAP(T_SPILL)
	RSF_TRAP(T_SPILL)
	.endm

	/*
	 * Same, for a 32-bit window: 4-byte stores and no stack bias
	 * (%sp used directly, without SPOFF).
	 */
	.macro	tl0_spill_1_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_TRAP(T_SPILL)
	RSF_TRAP(T_SPILL)
	.endm

	/*
	 * Fill a 64-bit register window from the user's stack via ASI_AIUP.
	 */
	.macro	tl0_fill_0_n
	wr	%g0, ASI_AIUP, %asi
	FILL(ldxa, %sp + SPOFF, 8, %asi)
	restored
	retry
	.align	32
	RSF_TRAP(T_FILL)
	RSF_TRAP(T_FILL)
	.endm

	/*
	 * Same, for a 32-bit window.
	 */
	.macro	tl0_fill_1_n
	wr	%g0, ASI_AIUP, %asi
	FILL(lduwa, %sp, 4, %asi)
	restored
	retry
	.align	32
	RSF_TRAP(T_FILL)
	RSF_TRAP(T_FILL)
	.endm
114482906Sjake
/*
 * Deliver a spill/fill trap to C code.  Restores the window pointer from
 * the cwp field saved in %tstate before entering the common trap path.
 * The trap type is passed in %g2.
 */
ENTRY(tl0_sftrap)
	rdpr	%tstate, %g1
	and	%g1, TSTATE_CWP_MASK, %g1
	wrpr	%g1, 0, %cwp
	tl0_split
	clr	%o1
	set	trap, %o2
	b	%xcc, tl0_trap
	 mov	%g2, %o0
END(tl0_sftrap)
115582906Sjake
	/*
	 * Filler for unused tl0 spill-handler table slots: execute a
	 * software-initiated reset (sir) if one is ever reached.  Each
	 * slot is padded to the full 128 bytes.
	 */
	.macro	tl0_spill_bad	count
	.rept	\count
	sir
	.align	128
	.endr
	.endm

	/*
	 * Likewise for unused tl0 fill-handler table slots.
	 */
	.macro	tl0_fill_bad	count
	.rept	\count
	sir
	.align	128
	.endr
	.endm
116980709Sjake
	/*
	 * System call trap table entry: enter the common tl0 trap path with
	 * the C `syscall' function as the handler and T_SYSCALL as the type.
	 */
	.macro	tl0_syscall
	tl0_split
	clr	%o1
	set	syscall, %o2
	ba	%xcc, tl0_trap
	 mov	T_SYSCALL, %o0
	.align	32
	.endm
117884186Sjake
	/*
	 * Trap table stub: branch to the out-of-line tl0_fp_restore handler
	 * below (the code does not fit in a 32-byte table slot).
	 */
	.macro	tl0_fp_restore
	ba,a	%xcc, tl0_fp_restore
	 nop
	.align	32
	.endm
1184112920Sjake
/*
 * Restore the user's floating point state from the pcb.  Clears PCB_FEF in
 * the pcb flags, enables the FPU (FPRS_FEF) and block-loads all four
 * 64-byte groups of saved fp registers (%f0-%f62) from pcb_ufp.
 */
ENTRY(tl0_fp_restore)
	/* Clear the "restore fp on return" flag; we are doing it now. */
	ldx	[PCB_REG + PCB_FLAGS], %g1
	andn	%g1, PCB_FEF, %g1
	stx	%g1, [PCB_REG + PCB_FLAGS]

	wr	%g0, FPRS_FEF, %fprs
	wr	%g0, ASI_BLK_S, %asi
	ldda	[PCB_REG + PCB_UFP + (0 * 64)] %asi, %f0
	ldda	[PCB_REG + PCB_UFP + (1 * 64)] %asi, %f16
	ldda	[PCB_REG + PCB_UFP + (2 * 64)] %asi, %f32
	ldda	[PCB_REG + PCB_UFP + (3 * 64)] %asi, %f48
	membar	#Sync
	done
END(tl0_fp_restore)
1199112920Sjake
	/*
	 * Tl1 instruction exception: switch to alternate globals, capture
	 * %tpc and the immu sfsr (clearing the sfsr), then branch to the
	 * out-of-line trap code with the trap type in %g2.
	 */
	.macro	tl1_insn_excptn
	wrpr	%g0, PSTATE_ALT, %pstate
	wr	%g0, ASI_IMMU, %asi
	rdpr	%tpc, %g3
	ldxa	[%g0 + AA_IMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_IMMU_SFSR] %asi
	membar	#Sync
	b	%xcc, tl1_insn_exceptn_trap
	 mov	T_INSTRUCTION_EXCEPTION | T_KERNEL, %g2
	.align	32
	.endm
121180709Sjake
/*
 * Out-of-line tl1 instruction exception trap code.  On entry %g2 holds the
 * trap type, %g3 the faulting %tpc and %g4 the immu sfsr contents.
 */
ENTRY(tl1_insn_exceptn_trap)
	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g3, %o4
	mov	%g4, %o5
	b	%xcc, tl1_trap
	 mov	%g2, %o0
END(tl1_insn_exceptn_trap)
122188644Sjake
	/*
	 * Tl1 data exception trap table entry: switch to alternate globals
	 * and branch to the out-of-line handler.
	 */
	.macro	tl1_data_excptn
	wrpr	%g0, PSTATE_ALT, %pstate
	b,a	%xcc, tl1_data_excptn_trap
	 nop
	.align	32
	.endm

/*
 * Out-of-line tl1 data exception handler: resume window spill/fill code if
 * the exception occurred there, otherwise report via tl1_sfsr_trap with
 * the trap type in %g2.
 */
ENTRY(tl1_data_excptn_trap)
	RESUME_SPILLFILL_MMU_CLR_SFSR
	b	%xcc, tl1_sfsr_trap
	 mov	T_DATA_EXCEPTION | T_KERNEL, %g2
END(tl1_data_excptn_trap)
123482906Sjake
	/*
	 * Tl1 memory alignment trap table entry: branch to the out-of-line
	 * handler.
	 */
	.macro	tl1_align
	b,a	%xcc, tl1_align_trap
	 nop
	.align	32
	.endm
124080709Sjake
/*
 * Out-of-line tl1 alignment fault handler: resume window spill/fill code
 * if the fault occurred there, otherwise report via tl1_sfsr_trap with the
 * trap type in %g2.
 *
 * Fix: the block was closed with END(tl1_data_excptn_trap) — a copy/paste
 * error that emitted a duplicate .size directive for the wrong symbol and
 * left tl1_align_trap without one.  Close with the matching name.
 */
ENTRY(tl1_align_trap)
	RESUME_SPILLFILL_ALIGN
	b	%xcc, tl1_sfsr_trap
	 mov	T_MEM_ADDRESS_NOT_ALIGNED | T_KERNEL, %g2
END(tl1_align_trap)
124682906Sjake
/*
 * Common tl1 trap code for faults that report through the dmmu sfar/sfsr
 * registers.  Reads and clears the sfsr and calls the common trap path;
 * the trap type is passed in %g2.
 */
ENTRY(tl1_sfsr_trap)
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g3, %o4
	mov	%g4, %o5
	b	%xcc, tl1_trap
	 mov	%g2, %o0
END(tl1_sfsr_trap)
126280709Sjake
	/*
	 * Tl1 interrupt trap table entry for a fixed level: enter the C
	 * interrupt handler with the level in %o0 and its mask in %o1.
	 */
	.macro	tl1_intr level, mask
	tl1_split
	set	\mask, %o1
	b	%xcc, tl1_intr
	 mov	\level, %o0
	.align	32
	.endm

	/*
	 * Tl1 interrupt-level table entries; INTR_LEVEL(1) presumably
	 * expands the shared per-level dispatch code for tl=1 (macro is
	 * defined elsewhere).
	 */
	.macro	tl1_intr_level
	INTR_LEVEL(1)
	.endm
127480709Sjake
	/*
	 * Tl1 immu miss handler: probe the kernel tsb for a valid,
	 * executable tte matching the faulting virtual page and load it
	 * into the itlb, falling back to tl1_immu_miss_trap otherwise.
	 * The tsb mask/address instructions are patched at startup.
	 */
	.macro	tl1_immu_miss
	/*
	 * Load the context and the virtual page number from the tag access
	 * register.  We ignore the context.
	 */
	wr	%g0, ASI_IMMU, %asi
	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g5

	/*
	 * Compute the address of the tte.  The tsb mask and address of the
	 * tsb are patched at startup.
	 */
	.globl	tl1_immu_miss_patch_1
tl1_immu_miss_patch_1:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6
	sethi	%hi(TSB_KERNEL), %g7

	srlx	%g5, TAR_VPN_SHIFT, %g5
	and	%g5, %g6, %g6
	sllx	%g6, TTE_SHIFT, %g6
	add	%g6, %g7, %g6

	/*
	 * Load the tte.
	 */
	ldda	[%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and executable and that the virtual page
	 * numbers match.
	 */
	brgez,pn %g7, tl1_immu_miss_trap
	 andcc	%g7, TD_EXEC, %g0
	bz,pn	%xcc, tl1_immu_miss_trap
	 srlx	%g6, TV_SIZE_BITS, %g6
	cmp	%g5, %g6
	bne,pn	%xcc, tl1_immu_miss_trap
	 EMPTY

	/*
	 * Set the reference bit if it's currently clear.
	 */
	 andcc	%g7, TD_REF, %g0
	bz,a,pn	%xcc, tl1_immu_miss_set_ref
	 nop

	/*
	 * Load the tte data into the TLB and retry the instruction.
	 */
	stxa	%g7, [%g0] ASI_ITLB_DATA_IN_REG
	retry
	.align	128
	.endm
132988644Sjake
/*
 * Slow path of the tl1 immu miss handler: the matching kernel tsb entry
 * had TD_REF clear.  On entry %g5 holds the faulting virtual page number.
 */
ENTRY(tl1_immu_miss_set_ref)
	/*
	 * Recompute the tte address, which we clobbered loading the tte.  The
	 * tsb mask and address of the tsb are patched at startup.
	 */
	.globl	tl1_immu_miss_patch_2
tl1_immu_miss_patch_2:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6
	sethi	%hi(TSB_KERNEL), %g7

	and	%g5, %g6, %g5
	sllx	%g5, TTE_SHIFT, %g5
	add	%g5, %g7, %g5

	/*
	 * Set the reference bit.  TTE_SET_REF leaves the updated tte data
	 * in %g6.
	 */
	TTE_SET_REF(%g5, %g6, %g7)

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g6, 1f
	 nop

	/*
	 * Load the tte data into the TLB and retry the instruction.
	 */
	stxa	%g6, [%g0] ASI_ITLB_DATA_IN_REG
1:	retry
END(tl1_immu_miss_set_ref)
136285585Sjake
/*
 * A tl1 immu miss not satisfied from the kernel tsb: reload the tag access
 * register and call the common tl1 trap code.
 */
ENTRY(tl1_immu_miss_trap)
	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	b	%xcc, tl1_trap
	 mov	T_INSTRUCTION_MISS | T_KERNEL, %o0
END(tl1_immu_miss_trap)
137891224Sjake
	/*
	 * Tl1 dmmu miss handler.  Faults on user addresses are redirected
	 * to tl1_dmmu_miss_user, addresses in the direct-mapped physical
	 * region to tl1_dmmu_miss_direct; otherwise the kernel tsb is
	 * probed and a matching valid tte is loaded into the dtlb.
	 */
	.macro	tl1_dmmu_miss
	/*
	 * Load the context and the virtual page number from the tag access
	 * register.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g5

	/*
	 * Extract the context from the contents of the tag access register.
	 * If it's non-zero this is a fault on a user address.  Note that the
	 * faulting address is passed in %g1.
	 */
	sllx	%g5, 64 - TAR_VPN_SHIFT, %g6
	brnz,a,pn %g6, tl1_dmmu_miss_user
	 mov	%g5, %g1

	/*
	 * Check for the direct mapped physical region.  These addresses have
	 * the high bit set so they are negative.
	 */
	brlz,pn %g5, tl1_dmmu_miss_direct
	 EMPTY

	/*
	 * Compute the address of the tte.  The tsb mask and address of the
	 * tsb are patched at startup.
	 */
	.globl	tl1_dmmu_miss_patch_1
tl1_dmmu_miss_patch_1:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6
	sethi	%hi(TSB_KERNEL), %g7

	srlx	%g5, TAR_VPN_SHIFT, %g5
	and	%g5, %g6, %g6
	sllx	%g6, TTE_SHIFT, %g6
	add	%g6, %g7, %g6

	/*
	 * Load the tte.
	 */
	ldda	[%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and that the virtual page numbers match.
	 */
	brgez,pn %g7, tl1_dmmu_miss_trap
	 srlx	%g6, TV_SIZE_BITS, %g6
	cmp	%g5, %g6
	bne,pn %xcc, tl1_dmmu_miss_trap
	 EMPTY

	/*
	 * Set the reference bit if it's currently clear.
	 */
	 andcc	%g7, TD_REF, %g0
	bz,a,pt	%xcc, tl1_dmmu_miss_set_ref
	 nop

	/*
	 * Load the tte data into the TLB and retry the instruction.
	 */
	stxa	%g7, [%g0] ASI_DTLB_DATA_IN_REG
	retry
	.align	128
	.endm
144688644Sjake
/*
 * Slow path of the tl1 dmmu miss handler: the matching kernel tsb entry
 * had TD_REF clear.  On entry %g5 holds the faulting virtual page number.
 */
ENTRY(tl1_dmmu_miss_set_ref)
	/*
	 * Recompute the tte address, which we clobbered loading the tte.  The
	 * tsb mask and address of the tsb are patched at startup.
	 */
	.globl	tl1_dmmu_miss_patch_2
tl1_dmmu_miss_patch_2:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6
	sethi	%hi(TSB_KERNEL), %g7

	and	%g5, %g6, %g5
	sllx	%g5, TTE_SHIFT, %g5
	add	%g5, %g7, %g5

	/*
	 * Set the reference bit.  TTE_SET_REF leaves the updated tte data
	 * in %g6.
	 */
	TTE_SET_REF(%g5, %g6, %g7)

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g6, 1f
	 nop

	/*
	 * Load the tte data into the TLB and retry the instruction.
	 */
	stxa	%g6, [%g0] ASI_DTLB_DATA_IN_REG
1:	retry
END(tl1_dmmu_miss_set_ref)
147980709Sjake
/*
 * A tl1 dmmu miss not satisfied from the kernel tsb: reload the tag access
 * register and call the common tl1 trap code.
 */
ENTRY(tl1_dmmu_miss_trap)
	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2

	/* Sanity-check the kernel stack (macro defined elsewhere). */
	KSTACK_CHECK

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	b	%xcc, tl1_trap
	 mov	T_DATA_MISS | T_KERNEL, %o0
END(tl1_dmmu_miss_trap)
149780709Sjake
/*
 * Handle a tl1 dmmu miss on the direct-mapped physical region: synthesize
 * a tte from the virtual address itself, no tsb lookup needed.  On entry
 * %g5 holds the faulting virtual address.
 */
ENTRY(tl1_dmmu_miss_direct)
	/*
	 * Mask off the high bits of the virtual address to get the physical
	 * address, and or in the tte bits.  The virtual address bits that
	 * correspond to the tte valid and page size bits are left set, so
	 * they don't have to be included in the tte bits below.  We know they
	 * are set because the virtual address is in the upper va hole.
	 */
	setx	TLB_DIRECT_TO_TTE_MASK, %g7, %g6
	and	%g5, %g6, %g5
	or	%g5, TD_CP | TD_CV | TD_W, %g5

	/*
	 * Load the tte data into the TLB and retry the instruction.
	 */
	stxa	%g5, [%g0] ASI_DTLB_DATA_IN_REG
	retry
END(tl1_dmmu_miss_direct)
1516100771Sjake
	/*
	 * Trap table stub for tl1 dmmu protection faults; the real handler
	 * (tl1_dmmu_prot_1) does not fit in the 128-byte table slot.
	 */
	.macro	tl1_dmmu_prot
	ba,a	%xcc, tl1_dmmu_prot_1
	 nop
	.align	128
	.endm
1522102040Sjake
/*
 * Out-of-line tl1 dmmu protection fault handler.  Faults on user addresses
 * branch to tl1_dmmu_prot_user (with the tag access contents in %g1);
 * kernel addresses are looked up in the kernel tsb and, if the tte is
 * valid and software-writable, the hardware write bit is set, the stale
 * TLB entry demapped and the updated tte loaded.
 */
ENTRY(tl1_dmmu_prot_1)
	/*
	 * Load the context and the virtual page number from the tag access
	 * register.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g5

	/*
	 * Extract the context from the contents of the tag access register.
	 * If it's non-zero this is a fault on a user address.  Note that the
	 * faulting address is passed in %g1.
	 */
	sllx	%g5, 64 - TAR_VPN_SHIFT, %g6
	brnz,a,pn %g6, tl1_dmmu_prot_user
	 mov	%g5, %g1

	/*
	 * Compute the address of the tte.  The tsb mask and address of the
	 * tsb are patched at startup.
	 */
	.globl	tl1_dmmu_prot_patch_1
tl1_dmmu_prot_patch_1:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6
	sethi	%hi(TSB_KERNEL), %g7

	srlx	%g5, TAR_VPN_SHIFT, %g5
	and	%g5, %g6, %g6
	sllx	%g6, TTE_SHIFT, %g6
	add	%g6, %g7, %g6

	/*
	 * Load the tte.
	 */
	ldda	[%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and writeable and that the virtual page
	 * numbers match.
	 */
	brgez,pn %g7, tl1_dmmu_prot_trap
	 andcc	%g7, TD_SW, %g0
	bz,pn	%xcc, tl1_dmmu_prot_trap
	 srlx	%g6, TV_SIZE_BITS, %g6
	cmp	%g5, %g6
	bne,pn	%xcc, tl1_dmmu_prot_trap
	 EMPTY

	/*
	 * Delete the old TLB entry and clear the sfsr.
	 */
	 sllx	%g5, TAR_VPN_SHIFT, %g6
	or	%g6, TLB_DEMAP_NUCLEUS, %g6
	stxa	%g0, [%g6] ASI_DMMU_DEMAP
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	/*
	 * Recompute the tte address, which we clobbered loading the tte.  The
	 * tsb mask and address of the tsb are patched at startup.
	 */
	.globl	tl1_dmmu_prot_patch_2
tl1_dmmu_prot_patch_2:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6
	sethi	%hi(TSB_KERNEL), %g7

	and	%g5, %g6, %g5
	sllx	%g5, TTE_SHIFT, %g5
	add	%g5, %g7, %g5

	/*
	 * Set the hardware write bit.  TTE_SET_W leaves the updated tte
	 * data in %g6.
	 */
	TTE_SET_W(%g5, %g6, %g7)

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g6, 1f
	 or	%g6, TD_W, %g6

	/*
	 * Load the tte data into the TLB and retry the instruction.
	 */
	stxa	%g6, [%g0] ASI_DTLB_DATA_IN_REG
1:	retry
END(tl1_dmmu_prot_1)
161288644Sjake
/*
 * A tl1 dmmu protection fault not satisfied from the kernel tsb: capture
 * the tar/sfar/sfsr registers, clear the sfsr and call common trap code.
 */
ENTRY(tl1_dmmu_prot_trap)
	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Load the sfar, sfsr and tar.  Clear the sfsr.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	mov	%g3, %o4
	mov	%g4, %o5
	b	%xcc, tl1_trap
	 mov	T_DATA_PROTECTION | T_KERNEL, %o0
END(tl1_dmmu_prot_trap)
163781180Sjake
	/*
	 * Spill a 64-bit kernel window to the kernel stack.  A fault here
	 * is fatal (RSF_FATAL).
	 */
	.macro	tl1_spill_0_n
	SPILL(stx, %sp + SPOFF, 8, EMPTY)
	saved
	retry
	.align	32
	RSF_FATAL(T_SPILL)
	RSF_FATAL(T_SPILL)
	.endm

	/*
	 * Spill a 64-bit user window from tl1, to the user stack via
	 * ASI_AIUP; on fault the window is saved to the pcb instead
	 * (RSF_SPILL_TOPCB).
	 */
	.macro	tl1_spill_2_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	/*
	 * Same, for a 32-bit user window (4-byte stores, unbiased %sp).
	 */
	.macro	tl1_spill_3_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	/*
	 * Spill a 64-bit "other" window (user windows left over on kernel
	 * entry) to the user stack via ASI_AIUP.
	 */
	.macro	tl1_spill_0_o
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	/*
	 * Same, for a 32-bit "other" window.
	 */
	.macro	tl1_spill_1_o
	wr	%g0, ASI_AIUP, %asi
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	/*
	 * "Other" window spill that always goes straight to the pcb.
	 */
	.macro	tl1_spill_2_o
	RSF_SPILL_TOPCB
	.align	128
	.endm

	/*
	 * Fill a 64-bit kernel window from the kernel stack.  A fault here
	 * is fatal.
	 */
	.macro	tl1_fill_0_n
	FILL(ldx, %sp + SPOFF, 8, EMPTY)
	restored
	retry
	.align	32
	RSF_FATAL(T_FILL)
	RSF_FATAL(T_FILL)
	.endm

	/*
	 * Fill a 64-bit user window from tl1 via ASI_AIUP; faults are
	 * handled by RSF_FILL_MAGIC.
	 */
	.macro	tl1_fill_2_n
	wr	%g0, ASI_AIUP, %asi
	FILL(ldxa, %sp + SPOFF, 8, %asi)
	restored
	retry
	.align 32
	RSF_FILL_MAGIC
	RSF_FILL_MAGIC
	.endm

	/*
	 * Same, for a 32-bit user window.
	 */
	.macro	tl1_fill_3_n
	wr	%g0, ASI_AIUP, %asi
	FILL(lduwa, %sp, 4, %asi)
	restored
	retry
	.align 32
	RSF_FILL_MAGIC
	RSF_FILL_MAGIC
	.endm
172082906Sjake
/*
 * This is used to spill windows that are still occupied with user
 * data on kernel entry to the pcb.  The window is stored into the
 * pcb_rw[] array at index pcb_nsaved, the corresponding user %sp is
 * recorded in pcb_rwsp[], and pcb_nsaved is incremented.
 */
ENTRY(tl1_spill_topcb)
	wrpr	%g0, PSTATE_ALT, %pstate

	/* Free some globals for our use. */
	dec	24, ASP_REG
	stx	%g1, [ASP_REG + 0]
	stx	%g2, [ASP_REG + 8]
	stx	%g3, [ASP_REG + 16]

	ldx	[PCB_REG + PCB_NSAVED], %g1

	/* Record the user stack pointer for this saved window. */
	sllx	%g1, PTR_SHIFT, %g2
	add	%g2, PCB_REG, %g2
	stx	%sp, [%g2 + PCB_RWSP]

	/* Store the window registers into pcb_rw[nsaved]. */
	sllx	%g1, RW_SHIFT, %g2
	add	%g2, PCB_REG, %g2
	SPILL(stx, %g2 + PCB_RW, 8, EMPTY)

	inc	%g1
	stx	%g1, [PCB_REG + PCB_NSAVED]

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl1_spill_topcb: pc=%#lx npc=%#lx sp=%#lx nsaved=%d"
	   , %g1, %g2, %g3, 7, 8, 9)
	rdpr	%tpc, %g2
	stx	%g2, [%g1 + KTR_PARM1]
	rdpr	%tnpc, %g2
	stx	%g2, [%g1 + KTR_PARM2]
	stx	%sp, [%g1 + KTR_PARM3]
	ldx	[PCB_REG + PCB_NSAVED], %g2
	stx	%g2, [%g1 + KTR_PARM4]
9:
#endif

	saved

	/* Restore the globals we borrowed and retry. */
	ldx	[ASP_REG + 16], %g3
	ldx	[ASP_REG + 8], %g2
	ldx	[ASP_REG + 0], %g1
	inc	24, ASP_REG
	retry
END(tl1_spill_topcb)
176882005Sjake
	/*
	 * Filler for unused tl1 spill-handler table slots: a software-
	 * initiated reset (sir) if one is ever reached.
	 */
	.macro	tl1_spill_bad	count
	.rept	\count
	sir
	.align	128
	.endr
	.endm

	/*
	 * Likewise for unused tl1 fill-handler table slots.
	 */
	.macro	tl1_fill_bad	count
	.rept	\count
	sir
	.align	128
	.endr
	.endm

	/*
	 * Software trap table entries: generic kernel-mode trap delivery.
	 */
	.macro	tl1_soft	count
	.rept	\count
	tl1_gen	T_SOFT | T_KERNEL
	.endr
	.endm
178880709Sjake
/*
 * The trap table itself, placed in its own section and aligned to
 * 0x8000 bytes as the hardware requires for the trap base address.
 * This first half handles traps taken from user mode (TL 0); the
 * "!" comments give the hardware trap vector numbers.  Normal entries
 * are 32 bytes each; spill/fill entries occupy 128 bytes (hence the
 * per-vector counts in the tl0_*_bad/reserved macros below).
 */
178980709Sjake	.sect	.trap
179080709Sjake	.align	0x8000
179180709Sjake	.globl	tl0_base
179280709Sjake
179380709Sjaketl0_base:
179488779Sjake	tl0_reserved	8				! 0x0-0x7
179580709Sjaketl0_insn_excptn:
179688779Sjake	tl0_insn_excptn					! 0x8
179788779Sjake	tl0_reserved	1				! 0x9
179880709Sjaketl0_insn_error:
179988779Sjake	tl0_gen		T_INSTRUCTION_ERROR		! 0xa
180088779Sjake	tl0_reserved	5				! 0xb-0xf
180180709Sjaketl0_insn_illegal:
180288779Sjake	tl0_gen		T_ILLEGAL_INSTRUCTION		! 0x10
180380709Sjaketl0_priv_opcode:
180488779Sjake	tl0_gen		T_PRIVILEGED_OPCODE		! 0x11
180588779Sjake	tl0_reserved	14				! 0x12-0x1f
180680709Sjaketl0_fp_disabled:
180788779Sjake	tl0_gen		T_FP_DISABLED			! 0x20
180880709Sjaketl0_fp_ieee:
180988779Sjake	tl0_gen		T_FP_EXCEPTION_IEEE_754		! 0x21
181080709Sjaketl0_fp_other:
181188779Sjake	tl0_gen		T_FP_EXCEPTION_OTHER		! 0x22
181280709Sjaketl0_tag_ovflw:
181388779Sjake	tl0_gen		T_TAG_OFERFLOW			! 0x23
181480709Sjaketl0_clean_window:
181588779Sjake	clean_window					! 0x24
181680709Sjaketl0_divide:
181788779Sjake	tl0_gen		T_DIVISION_BY_ZERO		! 0x28
181888779Sjake	tl0_reserved	7				! 0x29-0x2f
181980709Sjaketl0_data_excptn:
182088779Sjake	tl0_data_excptn					! 0x30
182188779Sjake	tl0_reserved	1				! 0x31
182280709Sjaketl0_data_error:
182388779Sjake	tl0_gen		T_DATA_ERROR			! 0x32
182488779Sjake	tl0_reserved	1				! 0x33
182580709Sjaketl0_align:
182688779Sjake	tl0_align					! 0x34
182780709Sjaketl0_align_lddf:
182888779Sjake	tl0_gen		T_RESERVED			! 0x35
182980709Sjaketl0_align_stdf:
183088779Sjake	tl0_gen		T_RESERVED			! 0x36
183180709Sjaketl0_priv_action:
183288779Sjake	tl0_gen		T_PRIVILEGED_ACTION		! 0x37
183388779Sjake	tl0_reserved	9				! 0x38-0x40
183480709Sjaketl0_intr_level:
183588779Sjake	tl0_intr_level					! 0x41-0x4f
183688779Sjake	tl0_reserved	16				! 0x50-0x5f
183780709Sjaketl0_intr_vector:
183897265Sjake	intr_vector					! 0x60
183980709Sjaketl0_watch_phys:
184088779Sjake	tl0_gen		T_PA_WATCHPOINT			! 0x61
184180709Sjaketl0_watch_virt:
184288779Sjake	tl0_gen		T_VA_WATCHPOINT			! 0x62
184380709Sjaketl0_ecc:
184488779Sjake	tl0_gen		T_CORRECTED_ECC_ERROR		! 0x63
184580709Sjaketl0_immu_miss:
184688779Sjake	tl0_immu_miss					! 0x64
184780709Sjaketl0_dmmu_miss:
184888779Sjake	tl0_dmmu_miss					! 0x68
184980709Sjaketl0_dmmu_prot:
185088779Sjake	tl0_dmmu_prot					! 0x6c
185188779Sjake	tl0_reserved	16				! 0x70-0x7f
185280709Sjaketl0_spill_0_n:
185388779Sjake	tl0_spill_0_n					! 0x80
185482906Sjaketl0_spill_1_n:
185588779Sjake	tl0_spill_1_n					! 0x84
185691246Sjake	tl0_spill_bad	14				! 0x88-0xbf
185780709Sjaketl0_fill_0_n:
185888779Sjake	tl0_fill_0_n					! 0xc0
185982906Sjaketl0_fill_1_n:
186088779Sjake	tl0_fill_1_n					! 0xc4
186191246Sjake	tl0_fill_bad	14				! 0xc8-0xff
186288644Sjaketl0_soft:
1863106050Sjake	tl0_gen		T_SYSCALL			! 0x100
186488779Sjake	tl0_gen		T_BREAKPOINT			! 0x101
186588779Sjake	tl0_gen		T_DIVISION_BY_ZERO		! 0x102
186688779Sjake	tl0_reserved	1				! 0x103
186788779Sjake	tl0_gen		T_CLEAN_WINDOW			! 0x104
186888779Sjake	tl0_gen		T_RANGE_CHECK			! 0x105
186988779Sjake	tl0_gen		T_FIX_ALIGNMENT			! 0x106
187088779Sjake	tl0_gen		T_INTEGER_OVERFLOW		! 0x107
1871106050Sjake	tl0_gen		T_SYSCALL			! 0x108
1872106050Sjake#ifdef COMPAT_FREEBSD4
187388779Sjake	tl0_syscall					! 0x109
1874106050Sjake#else
1875106050Sjake	tl0_gen		T_SYSCALL			! 0x109
1876106050Sjake#endif
187788779Sjake	tl0_fp_restore					! 0x10a
187888779Sjake	tl0_reserved	5				! 0x10b-0x10f
187988779Sjake	tl0_gen		T_TRAP_INSTRUCTION_16		! 0x110
188088779Sjake	tl0_gen		T_TRAP_INSTRUCTION_17		! 0x111
188188779Sjake	tl0_gen		T_TRAP_INSTRUCTION_18		! 0x112
188288779Sjake	tl0_gen		T_TRAP_INSTRUCTION_19		! 0x113
188388779Sjake	tl0_gen		T_TRAP_INSTRUCTION_20		! 0x114
188488779Sjake	tl0_gen		T_TRAP_INSTRUCTION_21		! 0x115
188588779Sjake	tl0_gen		T_TRAP_INSTRUCTION_22		! 0x116
188688779Sjake	tl0_gen		T_TRAP_INSTRUCTION_23		! 0x117
188788779Sjake	tl0_gen		T_TRAP_INSTRUCTION_24		! 0x118
188888779Sjake	tl0_gen		T_TRAP_INSTRUCTION_25		! 0x119
188988779Sjake	tl0_gen		T_TRAP_INSTRUCTION_26		! 0x11a
189088779Sjake	tl0_gen		T_TRAP_INSTRUCTION_27		! 0x11b
189188779Sjake	tl0_gen		T_TRAP_INSTRUCTION_28		! 0x11c
189288779Sjake	tl0_gen		T_TRAP_INSTRUCTION_29		! 0x11d
189388779Sjake	tl0_gen		T_TRAP_INSTRUCTION_30		! 0x11e
189488779Sjake	tl0_gen		T_TRAP_INSTRUCTION_31		! 0x11f
1895106050Sjake	tl0_reserved	32				! 0x120-0x13f
1896106050Sjake	tl0_gen		T_SYSCALL			! 0x140
1897106050Sjake	tl0_syscall					! 0x141
1898106050Sjake	tl0_gen		T_SYSCALL			! 0x142
1899106050Sjake	tl0_gen		T_SYSCALL			! 0x143
1900106050Sjake	tl0_reserved	188				! 0x144-0x1ff
190180709Sjake
/*
 * Second half of the trap table: traps taken from kernel mode (TL 1),
 * hardware vectors 0x200-0x3ff.  Layout mirrors the TL0 half above.
 */
190280709Sjaketl1_base:
190388779Sjake	tl1_reserved	8				! 0x200-0x207
190480709Sjaketl1_insn_excptn:
190588779Sjake	tl1_insn_excptn					! 0x208
190688779Sjake	tl1_reserved	1				! 0x209
190780709Sjaketl1_insn_error:
190888779Sjake	tl1_gen		T_INSTRUCTION_ERROR		! 0x20a
190988779Sjake	tl1_reserved	5				! 0x20b-0x20f
191080709Sjaketl1_insn_illegal:
191188779Sjake	tl1_gen		T_ILLEGAL_INSTRUCTION		! 0x210
191280709Sjaketl1_priv_opcode:
191388779Sjake	tl1_gen		T_PRIVILEGED_OPCODE		! 0x211
191488779Sjake	tl1_reserved	14				! 0x212-0x21f
191580709Sjaketl1_fp_disabled:
191688779Sjake	tl1_gen		T_FP_DISABLED			! 0x220
191780709Sjaketl1_fp_ieee:
191888779Sjake	tl1_gen		T_FP_EXCEPTION_IEEE_754		! 0x221
191980709Sjaketl1_fp_other:
192088779Sjake	tl1_gen		T_FP_EXCEPTION_OTHER		! 0x222
192180709Sjaketl1_tag_ovflw:
192288779Sjake	tl1_gen		T_TAG_OFERFLOW			! 0x223
192380709Sjaketl1_clean_window:
192488779Sjake	clean_window					! 0x224
192580709Sjaketl1_divide:
192688779Sjake	tl1_gen		T_DIVISION_BY_ZERO		! 0x228
192788779Sjake	tl1_reserved	7				! 0x229-0x22f
192880709Sjaketl1_data_excptn:
192988779Sjake	tl1_data_excptn					! 0x230
193088779Sjake	tl1_reserved	1				! 0x231
193180709Sjaketl1_data_error:
193288779Sjake	tl1_gen		T_DATA_ERROR			! 0x232
193388779Sjake	tl1_reserved	1				! 0x233
193480709Sjaketl1_align:
193588779Sjake	tl1_align					! 0x234
193680709Sjaketl1_align_lddf:
193788779Sjake	tl1_gen		T_RESERVED			! 0x235
193880709Sjaketl1_align_stdf:
193988779Sjake	tl1_gen		T_RESERVED			! 0x236
194080709Sjaketl1_priv_action:
194188779Sjake	tl1_gen		T_PRIVILEGED_ACTION		! 0x237
194288779Sjake	tl1_reserved	9				! 0x238-0x240
194380709Sjaketl1_intr_level:
194488779Sjake	tl1_intr_level					! 0x241-0x24f
194588779Sjake	tl1_reserved	16				! 0x250-0x25f
194680709Sjaketl1_intr_vector:
194797265Sjake	intr_vector					! 0x260
194880709Sjaketl1_watch_phys:
194988779Sjake	tl1_gen		T_PA_WATCHPOINT			! 0x261
195080709Sjaketl1_watch_virt:
195188779Sjake	tl1_gen		T_VA_WATCHPOINT			! 0x262
195280709Sjaketl1_ecc:
195388779Sjake	tl1_gen		T_CORRECTED_ECC_ERROR		! 0x263
195480709Sjaketl1_immu_miss:
195588779Sjake	tl1_immu_miss					! 0x264
195680709Sjaketl1_dmmu_miss:
195788779Sjake	tl1_dmmu_miss					! 0x268
195880709Sjaketl1_dmmu_prot:
195988779Sjake	tl1_dmmu_prot					! 0x26c
196088779Sjake	tl1_reserved	16				! 0x270-0x27f
196180709Sjaketl1_spill_0_n:
196288779Sjake	tl1_spill_0_n					! 0x280
196391246Sjake	tl1_spill_bad	1				! 0x284
196491246Sjaketl1_spill_2_n:
196591246Sjake	tl1_spill_2_n					! 0x288
196691246Sjaketl1_spill_3_n:
196791246Sjake	tl1_spill_3_n					! 0x28c
196891246Sjake	tl1_spill_bad	4				! 0x290-0x29f
196981380Sjaketl1_spill_0_o:
197088779Sjake	tl1_spill_0_o					! 0x2a0
197182906Sjaketl1_spill_1_o:
197288779Sjake	tl1_spill_1_o					! 0x2a4
197382906Sjaketl1_spill_2_o:
197488779Sjake	tl1_spill_2_o					! 0x2a8
197591246Sjake	tl1_spill_bad	5				! 0x2ac-0x2bf
197680709Sjaketl1_fill_0_n:
197788779Sjake	tl1_fill_0_n					! 0x2c0
197891246Sjake	tl1_fill_bad	1				! 0x2c4
197991246Sjaketl1_fill_2_n:
198091246Sjake	tl1_fill_2_n					! 0x2c8
198191246Sjaketl1_fill_3_n:
198291246Sjake	tl1_fill_3_n					! 0x2cc
198391246Sjake	tl1_fill_bad	12				! 0x2d0-0x2ff
198488779Sjake	tl1_reserved	1				! 0x300
198580709Sjaketl1_breakpoint:
198688779Sjake	tl1_gen		T_BREAKPOINT			! 0x301
198788779Sjake	tl1_gen		T_RSTRWP_PHYS			! 0x302
198888779Sjake	tl1_gen		T_RSTRWP_VIRT			! 0x303
198988779Sjake	tl1_reserved	252				! 0x304-0x3ff
199080709Sjake
199181380Sjake/*
199282906Sjake * User trap entry point.
199382906Sjake *
1994103897Sjake * void tl0_utrap(u_long type, u_long o1, u_long o2, u_long tar, u_long sfar,
1995103897Sjake *                u_long sfsr)
1996103897Sjake *
1997103897Sjake * This handles redirecting a trap back to usermode as a user trap.  The user
1998103897Sjake * program must have first registered a trap handler with the kernel using
1999103897Sjake * sysarch(SPARC_UTRAP_INSTALL).  The trap handler is passed enough state
2000103897Sjake * for it to return to the trapping code directly, it will not return through
2001103897Sjake * the kernel.  The trap type is passed in %o0, all out registers must be
2002103897Sjake * passed through to tl0_trap or to usermode untouched.  Note that the
2003103897Sjake * parameters passed in out registers may be used by the user trap handler.
2004103897Sjake * Do not change the registers they are passed in or you will break the ABI.
2005103897Sjake *
2006103897Sjake * If the trap type allows user traps, setup state to execute the user trap
2007103897Sjake * handler and bounce back to usermode, otherwise branch to tl0_trap.
2008103897Sjake */
2009103897SjakeENTRY(tl0_utrap)
2010103897Sjake	/*
2011103897Sjake	 * Check if the trap type allows user traps.
2012103897Sjake	 */
2013103897Sjake	cmp	%o0, UT_MAX
2014103897Sjake	bge,a,pt %xcc, tl0_trap
2015103897Sjake	 nop
2016103897Sjake
2017103897Sjake	/*
2018103897Sjake	 * Load the user trap handler from the utrap table.  The sllx in
	 * the delay slot computes the table offset from the trap type; it
	 * is harmless when the branch to tl0_trap is taken.
2019103897Sjake	 */
2020103897Sjake	ldx	[PCPU(CURTHREAD)], %l0
2021103897Sjake	ldx	[%l0 + TD_PROC], %l0
2022103897Sjake	ldx	[%l0 + P_MD + MD_UTRAP], %l0
2023103897Sjake	brz,pt	%l0, tl0_trap
2024103897Sjake	 sllx	%o0, PTR_SHIFT, %l1
2025103897Sjake	ldx	[%l0 + %l1], %l0
2026103897Sjake	brz,a,pt %l0, tl0_trap
2027103897Sjake	 nop
2028103897Sjake
2029103897Sjake	/*
2030103897Sjake	 * If the save we did on entry to the kernel had to spill a window
2031103897Sjake	 * to the pcb, pretend we took a spill trap instead.  Any windows
2032103897Sjake	 * that are in the pcb must be copied out or the fill handler will
2033103897Sjake	 * not be able to find them, since the user trap handler returns
2034103897Sjake	 * directly to the trapping code.  Note that we only support precise
2035103897Sjake	 * user traps, which implies that the condition that caused the trap
2036103897Sjake	 * in the first place is still valid, so it will occur again when we
2037103897Sjake	 * re-execute the trapping instruction.
2038103897Sjake	 */
2039103897Sjake	ldx	[PCB_REG + PCB_NSAVED], %l1
2040103897Sjake	brnz,a,pn %l1, tl0_trap
2041103897Sjake	 mov	T_SPILL, %o0
2042103897Sjake
2043103897Sjake	/*
2044103897Sjake	 * Pass %fsr in %l4, %tstate in %l5, %tpc in %l6 and %tnpc in %l7.
2045103897Sjake	 * The ABI specifies only %l6 and %l7, but we need to pass %fsr or
2046103897Sjake	 * it may be clobbered by an interrupt before the user trap code
2047103897Sjake	 * can read it, and we must pass %tstate in order to restore %ccr
2048103897Sjake	 * and %asi.  The %fsr must be stored to memory, so we use the
2049103897Sjake	 * temporary stack for that.  The FPU is enabled (FPRS_FEF) only
	 * long enough to store %fsr, then the previous %fprs is restored.
2050103897Sjake	 */
2051103897Sjake	rd	%fprs, %l1
2052103897Sjake	or	%l1, FPRS_FEF, %l2
2053103897Sjake	wr	%l2, 0, %fprs
2054103897Sjake	dec	8, ASP_REG
2055103897Sjake	stx	%fsr, [ASP_REG]
2056103897Sjake	ldx	[ASP_REG], %l4
2057103897Sjake	inc	8, ASP_REG
2058103897Sjake	wr	%l1, 0, %fprs
2059103897Sjake
2060103897Sjake	rdpr	%tstate, %l5
2061103897Sjake	rdpr	%tpc, %l6
2062103897Sjake	rdpr	%tnpc, %l7
2063103897Sjake
2064103897Sjake	/*
2065103897Sjake	 * Setup %tnpc to return to.
2066103897Sjake	 */
2067103897Sjake	wrpr	%l0, 0, %tnpc
2068103897Sjake
2069103897Sjake	/*
2070103897Sjake	 * Setup %wstate for return, clear WSTATE_TRANSITION.
2071103897Sjake	 */
2072103897Sjake	rdpr	%wstate, %l1
2073103897Sjake	and	%l1, WSTATE_NORMAL_MASK, %l1
2074103897Sjake	wrpr	%l1, 0, %wstate
2075103897Sjake
2076103897Sjake	/*
2077103897Sjake	 * Setup %tstate for return, change the saved cwp to point to the
2078103897Sjake	 * current window instead of the window at the time of the trap.
2079103897Sjake	 */
2080103897Sjake	andn	%l5, TSTATE_CWP_MASK, %l1
2081103897Sjake	rdpr	%cwp, %l2
2082103897Sjake	wrpr	%l1, %l2, %tstate
2083103897Sjake
2084103897Sjake	/*
2085103897Sjake	 * Setup %sp.  Userland processes will crash if this is not setup.
2086103897Sjake	 */
2087103897Sjake	sub	%fp, CCFSZ, %sp
2088103897Sjake
2089103897Sjake	/*
2090103897Sjake	 * Execute the user trap handler.
2091103897Sjake	 */
2092103897Sjake	done
2093103897SjakeEND(tl0_utrap)
2094103897Sjake
2095103897Sjake/*
2096103897Sjake * (Real) User trap entry point.
2097103897Sjake *
209888644Sjake * void tl0_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
209988644Sjake *		 u_int sfsr)
210082906Sjake *
210182906Sjake * The following setup has been performed:
210282906Sjake *	- the windows have been split and the active user window has been saved
210382906Sjake *	  (maybe just to the pcb)
210482906Sjake *	- we are on alternate globals and interrupts are disabled
210582906Sjake *
210689050Sjake * We switch to the kernel stack, build a trapframe, switch to normal
210788644Sjake * globals, enable interrupts and call trap.
210882906Sjake *
210982906Sjake * NOTE: We must be very careful setting up the per-cpu pointer.  We know that
211082906Sjake * it has been pre-set in alternate globals, so we read it from there and setup
211182906Sjake * the normal %g7 *before* enabling interrupts.  This avoids any possibility
211287702Sjhb * of cpu migration and using the wrong pcpup.
211381380Sjake */
211482005SjakeENTRY(tl0_trap)
211582906Sjake	/*
211682906Sjake	 * Force kernel store order.
211782906Sjake	 */
211882906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
211980709Sjake
	/*
	 * Snapshot the trap state registers into locals; they must be
	 * read before %tl is implicitly lowered and before any further
	 * traps can clobber them.
	 */
212081380Sjake	rdpr	%tstate, %l0
212188644Sjake	rdpr	%tpc, %l1
212288644Sjake	rdpr	%tnpc, %l2
212388644Sjake	rd	%y, %l3
212488644Sjake	rd	%fprs, %l4
212588644Sjake	rdpr	%wstate, %l5
212688644Sjake
212788644Sjake#if KTR_COMPILE & KTR_TRAP
212888644Sjake	CATR(KTR_TRAP,
212988644Sjake	    "tl0_trap: td=%p type=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
213088644Sjake	    , %g1, %g2, %g3, 7, 8, 9)
213188644Sjake	ldx	[PCPU(CURTHREAD)], %g2
213288644Sjake	stx	%g2, [%g1 + KTR_PARM1]
213388644Sjake	stx	%o0, [%g1 + KTR_PARM2]
213488644Sjake	rdpr	%pil, %g2
213588644Sjake	stx	%g2, [%g1 + KTR_PARM3]
213688644Sjake	stx	%l1, [%g1 + KTR_PARM4]
213788644Sjake	stx	%l2, [%g1 + KTR_PARM5]
213888644Sjake	stx	%i6, [%g1 + KTR_PARM6]
213988644Sjake9:
214088644Sjake#endif
214188644Sjake
	/*
	 * Split the window state: the user's saved normal wstate moves to
	 * the "other" field, the remaining user windows (canrestore) are
	 * moved to otherwin so they spill to the pcb, and the kernel
	 * wstate is installed.
	 * NOTE(review): the "1:" label has no visible reference in this
	 * part of the file -- confirm whether it is still branched to.
	 */
2142103897Sjake1:	and	%l5, WSTATE_NORMAL_MASK, %l5
2143103897Sjake	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
214488644Sjake	wrpr	%l5, WSTATE_KERNEL, %wstate
214588644Sjake	rdpr	%canrestore, %l6
214688644Sjake	wrpr	%l6, 0, %otherwin
214788644Sjake	wrpr	%g0, 0, %canrestore
214888644Sjake
	/*
	 * Carve out a trapframe on the kernel stack, which lives just
	 * below the pcb (PCB_REG).
	 */
215088644Sjake	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp
215088644Sjake
	/*
	 * Save the trap arguments (type, level, tar, sfar, sfsr) and the
	 * trap state snapshot into the frame.
	 */
2151105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2152105733Sjake	stx	%o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
215388644Sjake	stx	%o3, [%sp + SPOFF + CCFSZ + TF_TAR]
215488644Sjake	stx	%o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
2155105733Sjake	stx	%o5, [%sp + SPOFF + CCFSZ + TF_SFSR]
215688644Sjake
215781380Sjake	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
215881380Sjake	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
215981380Sjake	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2160105733Sjake	stx	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
2161105733Sjake	stx	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
2162105733Sjake	stx	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]
216381380Sjake
	/*
	 * Briefly enable the FPU so %fsr and %gsr can be saved, then
	 * disable it again.
	 */
216588644Sjake	wr	%g0, FPRS_FEF, %fprs
216588644Sjake	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
2166108379Sjake	rd	%gsr, %l6
2167105733Sjake	stx	%l6, [%sp + SPOFF + CCFSZ + TF_GSR]
216888644Sjake	wr	%g0, 0, %fprs
216982906Sjake
	/*
	 * Switch to normal globals to save %g6/%g7, carrying the pcb and
	 * pcpu pointers across the global-set switch in locals (see the
	 * NOTE in the function header about %g7/pcpu ordering).
	 */
217089050Sjake	mov	PCB_REG, %l0
217189050Sjake	mov	PCPU_REG, %l1
217282906Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
217382005Sjake
217482005Sjake	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
217582005Sjake	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]
217682005Sjake
217789050Sjake	mov	%l0, PCB_REG
217889050Sjake	mov	%l1, PCPU_REG
217988644Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
218084186Sjake
	/*
	 * The ins are the user's outs at the time of the trap; save them
	 * into the frame's out-register slots.
	 */
218184186Sjake	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
218284186Sjake	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
218384186Sjake	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
218484186Sjake	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
218584186Sjake	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
218684186Sjake	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
218784186Sjake	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
218884186Sjake	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
218984186Sjake
2190108377Sjake	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
2191108377Sjake	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
2192108377Sjake	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
2193108377Sjake	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
2194108377Sjake	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
2195108377Sjake
	/*
	 * Jump to the handler whose address was passed in %o2, with the
	 * trapframe as its argument.  %o7 is preset so the handler's
	 * "ret" returns to tl0_ret (the - 8 accounts for the usual
	 * call/jmpl return offset).
	 */
2196103921Sjake	set	tl0_ret - 8, %o7
2197103921Sjake	jmpl	%o2, %g0
219884186Sjake	 add	%sp, CCFSZ + SPOFF, %o0
219984186SjakeEND(tl0_trap)
220084186Sjake
220188644Sjake/*
220291246Sjake * void tl0_intr(u_int level, u_int mask)
220391246Sjake */
220484186SjakeENTRY(tl0_intr)
220584186Sjake	/*
220684186Sjake	 * Force kernel store order.
220784186Sjake	 */
220884186Sjake	wrpr	%g0, PSTATE_ALT, %pstate
220984186Sjake
	/*
	 * Snapshot the trap state registers into locals before anything
	 * can clobber them.
	 */
221084186Sjake	rdpr	%tstate, %l0
221188644Sjake	rdpr	%tpc, %l1
221288644Sjake	rdpr	%tnpc, %l2
221388644Sjake	rd	%y, %l3
221488644Sjake	rd	%fprs, %l4
221588644Sjake	rdpr	%wstate, %l5
221688644Sjake
221788644Sjake#if KTR_COMPILE & KTR_INTR
221888644Sjake	CATR(KTR_INTR,
221991246Sjake	    "tl0_intr: td=%p level=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
222088644Sjake	    , %g1, %g2, %g3, 7, 8, 9)
222188644Sjake	ldx	[PCPU(CURTHREAD)], %g2
222288644Sjake	stx	%g2, [%g1 + KTR_PARM1]
222388644Sjake	stx	%o0, [%g1 + KTR_PARM2]
222488644Sjake	rdpr	%pil, %g2
222588644Sjake	stx	%g2, [%g1 + KTR_PARM3]
222688644Sjake	stx	%l1, [%g1 + KTR_PARM4]
222788644Sjake	stx	%l2, [%g1 + KTR_PARM5]
222888644Sjake	stx	%i6, [%g1 + KTR_PARM6]
222988644Sjake9:
223088644Sjake#endif
223188644Sjake
	/*
	 * Raise %pil to the interrupt level (%o0) and clear the pending
	 * softint bit(s) in %o1 so this interrupt does not immediately
	 * re-trap.
	 */
223291246Sjake	wrpr	%o0, 0, %pil
2233108379Sjake	wr	%o1, 0, %clear_softint
223491246Sjake
	/*
	 * Split the window state and switch to the kernel stack, the same
	 * as tl0_trap above.
	 */
223588644Sjake	and	%l5, WSTATE_NORMAL_MASK, %l5
223688644Sjake	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
223788644Sjake	wrpr	%l5, WSTATE_KERNEL, %wstate
223888644Sjake	rdpr	%canrestore, %l6
223988644Sjake	wrpr	%l6, 0, %otherwin
224088644Sjake	wrpr	%g0, 0, %canrestore
224188644Sjake
224288644Sjake	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp
224388644Sjake
224484186Sjake	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
224584186Sjake	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
224684186Sjake	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2247105733Sjake	stx	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
2248105733Sjake	stx	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
2249105733Sjake	stx	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]
225081380Sjake
	/*
	 * Briefly enable the FPU so %fsr and %gsr can be saved.
	 */
225188644Sjake	wr	%g0, FPRS_FEF, %fprs
225288644Sjake	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
2253108379Sjake	rd	%gsr, %l6
2254105733Sjake	stx	%l6, [%sp + SPOFF + CCFSZ + TF_GSR]
225588644Sjake	wr	%g0, 0, %fprs
225684186Sjake
	/*
	 * Keep the interrupt level in %l3 for the handler lookup below,
	 * and record level/type in the trapframe.
	 */
225791246Sjake	mov	%o0, %l3
225891246Sjake	mov	T_INTERRUPT, %o1
225989050Sjake
2260105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
2261105733Sjake	stx	%o1, [%sp + SPOFF + CCFSZ + TF_TYPE]
226288644Sjake
	/*
	 * Switch to normal globals to save %g6/%g7, carrying PCB_REG and
	 * PCPU_REG across in locals.
	 */
226389050Sjake	mov	PCB_REG, %l0
226489050Sjake	mov	PCPU_REG, %l1
226584186Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
226684186Sjake
226784186Sjake	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
226884186Sjake	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
226984186Sjake	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
227084186Sjake	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
227184186Sjake	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
227284186Sjake	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
227384186Sjake	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]
227484186Sjake
227589050Sjake	mov	%l0, PCB_REG
227689050Sjake	mov	%l1, PCPU_REG
227788644Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
227884186Sjake
227984186Sjake	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
228084186Sjake	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
228184186Sjake	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
228284186Sjake	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
228384186Sjake	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
228484186Sjake	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
228584186Sjake	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
228684186Sjake	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
228784186Sjake
228889050Sjake	call	critical_enter
228989050Sjake	 nop
229089050Sjake
	/*
	 * Atomically bump the interrupt statistics counter.
	 */
229186519Sjake	SET(cnt+V_INTR, %l1, %l0)
229288644Sjake	ATOMIC_INC_INT(%l0, %l1, %l2)
229384186Sjake
	/*
	 * Look up the handler for this level in intr_handlers and call it
	 * with the trapframe as its argument.
	 */
229486519Sjake	SET(intr_handlers, %l1, %l0)
229589050Sjake	sllx	%l3, IH_SHIFT, %l1
229688644Sjake	ldx	[%l0 + %l1], %l1
229789050Sjake	KASSERT(%l1, "tl0_intr: ih null")
229884186Sjake	call	%l1
229984186Sjake	 add	%sp, CCFSZ + SPOFF, %o0
230089050Sjake
230189050Sjake	call	critical_exit
230289050Sjake	 nop
230389050Sjake
230484186Sjake	b,a	%xcc, tl0_ret
230584186Sjake	 nop
230684186SjakeEND(tl0_intr)
230784186Sjake
2308105733Sjake/*
2309105733Sjake * Initiate return to usermode.
2310105733Sjake *
2311105733Sjake * Called with a trapframe on the stack.  The window that was setup in
2312105733Sjake * tl0_trap may have been used by "fast" trap handlers that pretend to be
2313105733Sjake * leaf functions, so all ins and locals may have been clobbered since
2314105733Sjake * then.
2315105733Sjake *
2316105733Sjake * This code is rather long and complicated.
2317105733Sjake */
231882005SjakeENTRY(tl0_ret)
231993389Sjake	/*
232093389Sjake	 * Check for pending asts atomically with returning.  We must raise
232193389Sjake	 * the pil before checking, and if no asts are found the pil must
232293389Sjake	 * remain raised until the retry is executed, or we risk missing asts
232393389Sjake	 * caused by interrupts occurring after the test.  If the pil is lowered,
232493389Sjake	 * as it is when we call ast, the check must be re-executed.
232593389Sjake	 */
2326103784Sjake	wrpr	%g0, PIL_TICK, %pil
232784186Sjake	ldx	[PCPU(CURTHREAD)], %l0
2328111032Sjulian	lduw	[%l0 + TD_FLAGS], %l1
2329111032Sjulian	set	TDF_ASTPENDING | TDF_NEEDRESCHED, %l2
2330111032Sjulian	and	%l1, %l2, %l1
2331111032Sjulian	brz,a,pt %l1, 1f
233282906Sjake	 nop
2333105733Sjake
2334105733Sjake	/*
2335105733Sjake	 * We have an ast.  Re-enable interrupts and handle it, then restart
2336105733Sjake	 * the return sequence.
2337105733Sjake	 */
233893389Sjake	wrpr	%g0, 0, %pil
233982906Sjake	call	ast
234082906Sjake	 add	%sp, CCFSZ + SPOFF, %o0
2341103784Sjake	ba,a	%xcc, tl0_ret
234293389Sjake	 nop
234382906Sjake
234493389Sjake	/*
234593389Sjake	 * Check for windows that were spilled to the pcb and need to be
234693389Sjake	 * copied out.  This must be the last thing that is done before the
234793389Sjake	 * return to usermode.  If there are still user windows in the cpu
234893389Sjake	 * and we call a nested function after this, which causes them to be
234993389Sjake	 * spilled to the pcb, they will not be copied out and the stack will
235093389Sjake	 * be inconsistent.
235193389Sjake	 */
2352103784Sjake1:	ldx	[PCB_REG + PCB_NSAVED], %l1
2353103784Sjake	brz,a,pt %l1, 2f
2354103784Sjake	 nop
2355103784Sjake	wrpr	%g0, 0, %pil
235693389Sjake	mov	T_SPILL, %o0
2357105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2358103784Sjake	call	trap
2359103784Sjake	 add	%sp, SPOFF + CCFSZ, %o0
2360103784Sjake	ba,a	%xcc, tl0_ret
2361103784Sjake	 nop
236282906Sjake
2363105733Sjake	/*
2364108377Sjake	 * Restore the out and most global registers from the trapframe.
2365108377Sjake	 * The ins will become the outs when we restore below.
2366105733Sjake	 */
2367103784Sjake2:	ldx	[%sp + SPOFF + CCFSZ + TF_O0], %i0
236882906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O1], %i1
236982906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O2], %i2
237082906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O3], %i3
237182906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O4], %i4
237282906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O5], %i5
237382906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O6], %i6
237482906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O7], %i7
237581380Sjake
2376108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
2377108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
2378108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
2379108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
2380108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
2381108377Sjake
2382105733Sjake	/*
2383105733Sjake	 * Load everything we need to restore below before disabling
2384105733Sjake	 * interrupts.
2385105733Sjake	 */
2386105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_FPRS], %l0
2387105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_GSR], %l1
238885243Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
2389105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l3
2390105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l4
2391105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l5
2392105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_WSTATE], %l6
239382906Sjake
2394105733Sjake	/*
2395108377Sjake	 * Disable interrupts to restore the special globals.  They are not
2396108377Sjake	 * saved and restored for all kernel traps, so an interrupt at the
2397108377Sjake	 * wrong time would clobber them.
2398105733Sjake	 */
239989050Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
240089050Sjake
240189050Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6
240289050Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G7], %g7
240389050Sjake
2404105733Sjake	/*
2405105733Sjake	 * Switch to alternate globals.  This frees up some registers we
2406105733Sjake	 * can use after the restore changes our window.
2407105733Sjake	 */
240882906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
240982906Sjake
2410105733Sjake	/*
2411105733Sjake	 * Drop %pil to zero.  It must have been zero at the time of the
2412105733Sjake	 * trap, since we were in usermode, but it was raised above in
2413105733Sjake	 * order to check for asts atomically.  We have interrupts disabled
2414105733Sjake	 * so any interrupts will not be serviced until we complete the
2415105733Sjake	 * return to usermode.
2416105733Sjake	 */
241788644Sjake	wrpr	%g0, 0, %pil
2418105733Sjake
2419105733Sjake	/*
2420105733Sjake	 * Save %fprs in an alternate global so it can be restored after the
2421105733Sjake	 * restore instruction below.  If we restore it before the restore,
2422105733Sjake	 * and the restore traps we may run for a while with floating point
2423105733Sjake	 * enabled in the kernel, which we want to avoid.
2424105733Sjake	 */
2425105733Sjake	mov	%l0, %g1
2426105733Sjake
2427105733Sjake	/*
2428105733Sjake	 * Restore %fsr and %gsr.  These need floating point enabled in %fprs,
2429105733Sjake	 * so we set it temporarily and then clear it.
2430105733Sjake	 */
2431105733Sjake	wr	%g0, FPRS_FEF, %fprs
2432105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_FSR], %fsr
2433108379Sjake	wr	%l1, 0, %gsr
2434105733Sjake	wr	%g0, 0, %fprs
2435105733Sjake
2436105733Sjake	/*
2437105733Sjake	 * Restore program counters.  This could be done after the restore
2438105733Sjake	 * but we're out of alternate globals to store them in...
2439105733Sjake	 */
244088644Sjake	wrpr	%l2, 0, %tnpc
2441105733Sjake	wrpr	%l3, 0, %tpc
244282906Sjake
2443105733Sjake	/*
2444105733Sjake	 * Save %tstate in an alternate global and clear the %cwp field.  %cwp
2445105733Sjake	 * will be affected by the restore below and we need to make sure it
2446105733Sjake	 * points to the current window at that time, not the window that was
2447105733Sjake	 * active at the time of the trap.
2448105733Sjake	 */
2449105733Sjake	andn	%l4, TSTATE_CWP_MASK, %g2
245082906Sjake
2451105733Sjake	/*
2452105733Sjake	 * Restore %y.  Could also be below if we had more alternate globals.
2453105733Sjake	 */
2454105733Sjake	wr	%l5, 0, %y
2455105733Sjake
2456105733Sjake	/*
2457105733Sjake	 * Setup %wstate for return.  We need to restore the user window state
2458105733Sjake	 * which we saved in wstate.other when we trapped.  We also need to
2459105733Sjake	 * set the transition bit so the restore will be handled specially
2460105733Sjake	 * if it traps, use the xor feature of wrpr to do that.
2461105733Sjake	 */
2462105733Sjake	srlx	%l6, WSTATE_OTHER_SHIFT, %g3
246388644Sjake	wrpr	%g3, WSTATE_TRANSITION, %wstate
2464105733Sjake
2465105733Sjake	/*
2466105733Sjake	 * Setup window management registers for return.  If not all user
2467105733Sjake	 * windows were spilled in the kernel %otherwin will be non-zero,
2468105733Sjake	 * so we need to transfer it to %canrestore to correctly restore
2469105733Sjake	 * those windows.  Otherwise everything gets set to zero and the
2470105733Sjake	 * restore below will fill a window directly from the user stack.
2471105733Sjake	 */
247288644Sjake	rdpr	%otherwin, %o0
247388644Sjake	wrpr	%o0, 0, %canrestore
247482906Sjake	wrpr	%g0, 0, %otherwin
247588644Sjake	wrpr	%o0, 0, %cleanwin
247681380Sjake
247782005Sjake	/*
2478105733Sjake	 * Now do the restore.  If this instruction causes a fill trap which
2479105733Sjake	 * fails to fill a window from the user stack, we will resume at
2480105733Sjake	 * tl0_ret_fill_end and call back into the kernel.
2481105733Sjake	 */
248282906Sjake	restore
248382906Sjaketl0_ret_fill:
248481380Sjake
2485105733Sjake	/*
2486105733Sjake	 * We made it.  We're back in the window that was active at the time
2487105733Sjake	 * of the trap, and ready to return to usermode.
2488105733Sjake	 */
2489105733Sjake
2490105733Sjake	/*
2491105733Sjake	 * Restore %fprs.  This was saved in an alternate global above.
2492105733Sjake	 */
2493105733Sjake	wr	%g1, 0, %fprs
2494105733Sjake
2495105733Sjake	/*
2496105733Sjake	 * Fixup %tstate so the saved %cwp points to the current window and
2497105733Sjake	 * restore it.
2498105733Sjake	 */
249988644Sjake	rdpr	%cwp, %g4
2500105733Sjake	wrpr	%g2, %g4, %tstate
2501105733Sjake
2502105733Sjake	/*
2503105733Sjake	 * Restore the user window state.  The transition bit was set above
2504105733Sjake	 * for special handling of the restore, this clears it.
2505105733Sjake	 */
250688644Sjake	wrpr	%g3, 0, %wstate
250785243Sjake
250884186Sjake#if KTR_COMPILE & KTR_TRAP
250988644Sjake	CATR(KTR_TRAP, "tl0_ret: td=%#lx pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
251082906Sjake	    , %g2, %g3, %g4, 7, 8, 9)
251183366Sjulian	ldx	[PCPU(CURTHREAD)], %g3
251282906Sjake	stx	%g3, [%g2 + KTR_PARM1]
251385243Sjake	rdpr	%pil, %g3
251485243Sjake	stx	%g3, [%g2 + KTR_PARM2]
251588644Sjake	rdpr	%tpc, %g3
251684186Sjake	stx	%g3, [%g2 + KTR_PARM3]
251788644Sjake	rdpr	%tnpc, %g3
251884186Sjake	stx	%g3, [%g2 + KTR_PARM4]
251984186Sjake	stx	%sp, [%g2 + KTR_PARM5]
252082906Sjake9:
252182906Sjake#endif
252281380Sjake
2523105733Sjake	/*
2524105733Sjake	 * Return to usermode.
2525105733Sjake	 */
252682906Sjake	retry
252782906Sjaketl0_ret_fill_end:
252882005Sjake
252984186Sjake#if KTR_COMPILE & KTR_TRAP
253088785Sjake	CATR(KTR_TRAP, "tl0_ret: fill magic ps=%#lx ws=%#lx sp=%#lx"
253182906Sjake	    , %l0, %l1, %l2, 7, 8, 9)
253288785Sjake	rdpr	%pstate, %l1
253388785Sjake	stx	%l1, [%l0 + KTR_PARM1]
253488785Sjake	stx	%l5, [%l0 + KTR_PARM2]
253588785Sjake	stx	%sp, [%l0 + KTR_PARM3]
253682906Sjake9:
253782906Sjake#endif
253882906Sjake
253982906Sjake	/*
2540105733Sjake	 * The restore above caused a fill trap and the fill handler was
2541105733Sjake	 * unable to fill a window from the user stack.  The special fill
2542105733Sjake	 * handler recognized this and punted, sending us here.  We need
2543105733Sjake	 * to carefully undo any state that was restored before the restore
2544105733Sjake	 * was executed and call trap again.  Trap will copyin a window
2545105733Sjake	 * from the user stack which will fault in the page we need so the
2546105733Sjake	 * restore above will succeed when we try again.  If this fails
2547105733Sjake	 * the process has trashed its stack, so we kill it.
2548105733Sjake	 */
2549105733Sjake
2550105733Sjake	/*
2551105733Sjake	 * Restore the kernel window state.  This was saved in %l6 above, and
2552105733Sjake	 * since the restore failed we're back in the same window.
2553105733Sjake	 */
2554105733Sjake	wrpr	%l6, 0, %wstate
2555105733Sjake
2556105733Sjake	/*
2557105733Sjake	 * Restore the normal globals which have predefined values in the
2558105733Sjake	 * kernel.  We clobbered them above restoring the user's globals
2559105733Sjake	 * so this is very important.
2560105733Sjake	 * XXX PSTATE_ALT must already be set.
2561105733Sjake	 */
256288785Sjake	wrpr	%g0, PSTATE_ALT, %pstate
256389050Sjake	mov	PCB_REG, %o0
256489050Sjake	mov	PCPU_REG, %o1
256588785Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
256689050Sjake	mov	%o0, PCB_REG
256789050Sjake	mov	%o1, PCPU_REG
256888644Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
2569105733Sjake
2570105733Sjake	/*
2571105733Sjake	 * Simulate a fill trap and then start the whole return sequence over
2572105733Sjake	 * again.  This is special because it only copies in 1 window, not 2
2573105733Sjake	 * as we would for a normal failed fill.  This may be the first time
2574105733Sjake	 * the process has been run, so there may not be 2 windows worth of
2575105733Sjake	 * stack to copyin.
2576105733Sjake	 */
2577103784Sjake	mov	T_FILL_RET, %o0
2578105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2579103784Sjake	call	trap
2580103784Sjake	 add	%sp, SPOFF + CCFSZ, %o0
2581103784Sjake	ba,a	%xcc, tl0_ret
2582103784Sjake	 nop
258382005SjakeEND(tl0_ret)
258481380Sjake
258580709Sjake/*
258682906Sjake * Kernel trap entry point
258782906Sjake *
258891246Sjake * void tl1_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
258988644Sjake *		 u_int sfsr)
259082906Sjake *
259182906Sjake * This is easy because the stack is already setup and the windows don't need
259282906Sjake * to be split.  We build a trapframe and call trap(), the same as above, but
259382906Sjake * the outs don't need to be saved.
259480709Sjake */
ENTRY(tl1_trap)
	/*
	 * Capture the trap state registers while still at the trap %tl;
	 * lowering %tl below switches to a different set of trap
	 * registers, so these must be read first.
	 */
	rdpr	%tstate, %l0
	rdpr	%tpc, %l1
	rdpr	%tnpc, %l2
	rdpr	%pil, %l3
	rd	%y, %l4
	rdpr	%wstate, %l5

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl1_trap: td=%p type=%#lx pil=%#lx pc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	stx	%o0, [%g1 + KTR_PARM2]
	stx	%l3, [%g1 + KTR_PARM3]
	stx	%l1, [%g1 + KTR_PARM4]
	stx	%i6, [%g1 + KTR_PARM5]
9:
#endif

	/* Drop back to trap level 1 so further traps can be taken. */
	wrpr	%g0, 1, %tl

	/*
	 * Keep only the "other" bits of the saved window state and
	 * select the kernel window state.
	 */
	and	%l5, WSTATE_OTHER_MASK, %l5
	wrpr	%l5, WSTATE_KERNEL, %wstate

	/* Store the trap arguments (type, level, tar, sfar, sfsr). */
	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
	stx	%o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
	stx	%o3, [%sp + SPOFF + CCFSZ + TF_TAR]
	stx	%o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
	stx	%o5, [%sp + SPOFF + CCFSZ + TF_SFSR]

	/* Store the trap state captured above into the trapframe. */
	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
	stx	%l3, [%sp + SPOFF + CCFSZ + TF_PIL]
	stx	%l4, [%sp + SPOFF + CCFSZ + TF_Y]

	/*
	 * Carry PCB_REG/PCPU_REG across the switch to the normal
	 * globals in locals; the normal %g6/%g7 are then saved.
	 */
	mov	PCB_REG, %l0
	mov	PCPU_REG, %l1
	wrpr	%g0, PSTATE_NORMAL, %pstate

	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]

	/* Re-establish the kernel's reserved globals and pstate. */
	mov	%l0, PCB_REG
	mov	%l1, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate

	/* Save the caller's outs (our ins). */
	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]

	/* Save the remaining globals. */
	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]

	/*
	 * Call the handler whose address is in %o2 with the trapframe
	 * as argument, faking the return address so the handler
	 * returns directly to tl1_ret (jmpl target + 8).
	 */
	set	tl1_ret - 8, %o7
	jmpl	%o2, %g0
	 add	%sp, CCFSZ + SPOFF, %o0
END(tl1_trap)
266280709Sjake
ENTRY(tl1_ret)
	/* Reload the outs saved by tl1_trap. */
	ldx	[%sp + SPOFF + CCFSZ + TF_O0], %i0
	ldx	[%sp + SPOFF + CCFSZ + TF_O1], %i1
	ldx	[%sp + SPOFF + CCFSZ + TF_O2], %i2
	ldx	[%sp + SPOFF + CCFSZ + TF_O3], %i3
	ldx	[%sp + SPOFF + CCFSZ + TF_O4], %i4
	ldx	[%sp + SPOFF + CCFSZ + TF_O5], %i5
	ldx	[%sp + SPOFF + CCFSZ + TF_O6], %i6
	ldx	[%sp + SPOFF + CCFSZ + TF_O7], %i7

	/* Reload the globals saved by tl1_trap. */
	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5

	/* Reload the saved trap state into locals. */
	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l0
	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l1
	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
	ldx	[%sp + SPOFF + CCFSZ + TF_PIL], %l3
	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l4

	/*
	 * Only restore %g6/%g7 when the trap pc is at or above
	 * VM_MIN_PROM_ADDRESS; for lower trap pcs the branch below
	 * skips the normal-globals restore.
	 */
	set	VM_MIN_PROM_ADDRESS, %l5
	cmp	%l1, %l5
	bl,a,pt	%xcc, 1f
	 nop

	/* Switch to the normal globals to restore %g6/%g7. */
	wrpr	%g0, PSTATE_NORMAL, %pstate

	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6
	ldx	[%sp + SPOFF + CCFSZ + TF_G7], %g7

1:	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Stage the return state in globals so it survives the restore:
	 * %tstate minus its CWP field (re-filled from %cwp below),
	 * plus %tpc and %tnpc.
	 */
	andn	%l0, TSTATE_CWP_MASK, %g1
	mov	%l1, %g2
	mov	%l2, %g3

	/* Restore the interrupt level and the %y register. */
	wrpr	%l3, 0, %pil
	wr	%l4, 0, %y

	/* Return to the previous register window. */
	restore

	/* Go back to trap level 2 to set up the trap registers. */
	wrpr	%g0, 2, %tl

	/* Splice the current window pointer into the saved %tstate. */
	rdpr	%cwp, %g4
	wrpr	%g1, %g4, %tstate
	wrpr	%g2, 0, %tpc
	wrpr	%g3, 0, %tnpc

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl1_ret: td=%#lx pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
	    , %g2, %g3, %g4, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g3
	stx	%g3, [%g2 + KTR_PARM1]
	rdpr	%pil, %g3
	stx	%g3, [%g2 + KTR_PARM2]
	rdpr	%tstate, %g3
	stx	%g3, [%g2 + KTR_PARM3]
	rdpr	%tpc, %g3
	stx	%g3, [%g2 + KTR_PARM4]
	stx	%sp, [%g2 + KTR_PARM5]
9:
#endif

	/* Resume at the saved %tpc/%tnpc. */
	retry
END(tl1_ret)
273080709Sjake
273191246Sjake/*
273291246Sjake * void tl1_intr(u_int level, u_int mask)
273391246Sjake */
ENTRY(tl1_intr)
	/*
	 * Capture the trap state registers while still at the trap %tl;
	 * lowering %tl below switches to a different set of trap
	 * registers, so these must be read first.
	 */
	rdpr	%tstate, %l0
	rdpr	%tpc, %l1
	rdpr	%tnpc, %l2
	rdpr	%pil, %l3
	rd	%y, %l4
	rdpr	%wstate, %l5

#if KTR_COMPILE & KTR_INTR
	CATR(KTR_INTR,
	    "tl1_intr: td=%p level=%#lx pil=%#lx pc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	stx	%o0, [%g1 + KTR_PARM2]
	stx	%l3, [%g1 + KTR_PARM3]
	stx	%l1, [%g1 + KTR_PARM4]
	stx	%i6, [%g1 + KTR_PARM5]
9:
#endif

	/* Raise %pil to the interrupt level and clear its soft bit. */
	wrpr	%o0, 0, %pil
	wr	%o1, 0, %clear_softint

	/* Drop back to trap level 1 so further traps can be taken. */
	wrpr	%g0, 1, %tl

	/*
	 * Keep only the "other" bits of the saved window state and
	 * select the kernel window state.
	 */
	and	%l5, WSTATE_OTHER_MASK, %l5
	wrpr	%l5, WSTATE_KERNEL, %wstate

	/* Store the trap state captured above into the trapframe. */
	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
	stx	%l3, [%sp + SPOFF + CCFSZ + TF_PIL]
	stx	%l4, [%sp + SPOFF + CCFSZ + TF_Y]

	/* Keep the interrupt level in %l7 for the handler lookup below. */
	mov	%o0, %l7
	mov	T_INTERRUPT | T_KERNEL, %o1

	stx	%o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
	stx	%o1, [%sp + SPOFF + CCFSZ + TF_TYPE]

	/* Only %o6/%o7 need saving; the other outs are not used here. */
	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]

	/*
	 * Carry PCB_REG/PCPU_REG across the switch to the normal
	 * globals in locals, and save the normal %g1-%g5.
	 */
	mov	PCB_REG, %l4
	mov	PCPU_REG, %l5
	wrpr	%g0, PSTATE_NORMAL, %pstate

	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]

	/* Re-establish the kernel's reserved globals and pstate. */
	mov	%l4, PCB_REG
	mov	%l5, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate

	call	critical_enter
	 nop

	/* Atomically bump the interrupt counter, cnt.v_intr. */
	SET(cnt+V_INTR, %l5, %l4)
	ATOMIC_INC_INT(%l4, %l5, %l6)

	/*
	 * Look up intr_handlers[level] and call it with the trapframe
	 * as its argument.
	 */
	SET(intr_handlers, %l5, %l4)
	sllx	%l7, IH_SHIFT, %l5
	ldx	[%l4 + %l5], %l5
	KASSERT(%l5, "tl1_intr: ih null")
	call	%l5
	 add	%sp, CCFSZ + SPOFF, %o0

	call	critical_exit
	 nop

	/* Reload %y; the handler call above may have clobbered %l4. */
	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l4

	/* Restore the saved globals. */
	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5

	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Stage the return state in globals so it survives the restore:
	 * %tstate minus its CWP field (re-filled from %cwp below),
	 * plus %tpc and %tnpc.
	 */
	andn	%l0, TSTATE_CWP_MASK, %g1
	mov	%l1, %g2
	mov	%l2, %g3
	wrpr	%l3, 0, %pil
	wr	%l4, 0, %y

	/* Return to the previous register window. */
	restore

	/* Go back to trap level 2 to set up the trap registers. */
	wrpr	%g0, 2, %tl

	/* Splice the current window pointer into the saved %tstate. */
	rdpr	%cwp, %g4
	wrpr	%g1, %g4, %tstate
	wrpr	%g2, 0, %tpc
	wrpr	%g3, 0, %tnpc

#if KTR_COMPILE & KTR_INTR
	CATR(KTR_INTR, "tl1_intr: td=%#lx pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
	    , %g2, %g3, %g4, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g3
	stx	%g3, [%g2 + KTR_PARM1]
	rdpr	%pil, %g3
	stx	%g3, [%g2 + KTR_PARM2]
	rdpr	%tstate, %g3
	stx	%g3, [%g2 + KTR_PARM3]
	rdpr	%tpc, %g3
	stx	%g3, [%g2 + KTR_PARM4]
	stx	%sp, [%g2 + KTR_PARM5]
9:
#endif

	/* Resume at the saved %tpc/%tnpc. */
	retry
END(tl1_intr)
285084186Sjake
285182906Sjake/*
285282906Sjake * Freshly forked processes come here when switched to for the first time.
285382906Sjake * The arguments to fork_exit() have been setup in the locals, we must move
285482906Sjake * them to the outs.
285582906Sjake */
ENTRY(fork_trampoline)
#if KTR_COMPILE & KTR_PROC
	CATR(KTR_PROC, "fork_trampoline: td=%p (%s) cwp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	ldx	[%g2 + TD_PROC], %g2
	add	%g2, P_COMM, %g2
	stx	%g2, [%g1 + KTR_PARM2]
	rdpr	%cwp, %g2
	stx	%g2, [%g1 + KTR_PARM3]
9:
#endif
	/*
	 * Move fork_exit()'s three arguments from the locals (where
	 * they were staged, per the header comment) into the outs;
	 * the third move rides in the call's delay slot.
	 */
	mov	%l0, %o0
	mov	%l1, %o1
	call	fork_exit
	 mov	%l2, %o2
	/* Leave the kernel through the common userland return path. */
	b,a	%xcc, tl0_ret
	 nop
END(fork_trampoline)
2876