exception.S revision 108195
180709Sjake/*-
281180Sjake * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
381180Sjake *
481180Sjake * Redistribution and use in source and binary forms, with or without
581180Sjake * modification, are permitted provided that the following conditions
681180Sjake * are met:
781180Sjake * 1. Redistributions of source code must retain the above copyright
881180Sjake *    notice, this list of conditions and the following disclaimer.
981180Sjake * 2. Redistributions in binary form must reproduce the above copyright
1081180Sjake *    notice, this list of conditions and the following disclaimer in the
1181180Sjake *    documentation and/or other materials provided with the distribution.
1281180Sjake * 3. Berkeley Software Design Inc's name may not be used to endorse or
1381180Sjake *    promote products derived from this software without specific prior
1481180Sjake *    written permission.
1581180Sjake *
1681180Sjake * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
1781180Sjake * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1881180Sjake * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1981180Sjake * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
2081180Sjake * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
2181180Sjake * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2281180Sjake * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2381180Sjake * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2481180Sjake * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2581180Sjake * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2681180Sjake * SUCH DAMAGE.
2781180Sjake *
2881180Sjake *	from BSDI: locore.s,v 1.36.2.15 1999/08/23 22:34:41 cp Exp
2981180Sjake */
3081180Sjake/*-
3180709Sjake * Copyright (c) 2001 Jake Burkholder.
3280709Sjake * All rights reserved.
3380709Sjake *
3480709Sjake * Redistribution and use in source and binary forms, with or without
3580709Sjake * modification, are permitted provided that the following conditions
3680709Sjake * are met:
3780709Sjake * 1. Redistributions of source code must retain the above copyright
3880709Sjake *    notice, this list of conditions and the following disclaimer.
3980709Sjake * 2. Redistributions in binary form must reproduce the above copyright
4080709Sjake *    notice, this list of conditions and the following disclaimer in the
4180709Sjake *    documentation and/or other materials provided with the distribution.
4280709Sjake *
4381337Sobrien * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
4480709Sjake * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
4580709Sjake * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
4681337Sobrien * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
4780709Sjake * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
4880709Sjake * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
4980709Sjake * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
5080709Sjake * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
5180709Sjake * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
5280709Sjake * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
5380709Sjake * SUCH DAMAGE.
5480709Sjake *
5580709Sjake * $FreeBSD: head/sys/sparc64/sparc64/exception.S 108195 2002-12-23 02:18:45Z jake $
5680709Sjake */
5780709Sjake
58106050Sjake#include "opt_compat.h"
5980709Sjake#include "opt_ddb.h"
6080709Sjake
6180709Sjake#include <machine/asi.h>
6280709Sjake#include <machine/asmacros.h>
6382906Sjake#include <machine/ktr.h>
6482906Sjake#include <machine/pstate.h>
6580709Sjake#include <machine/trap.h>
6682906Sjake#include <machine/tstate.h>
6782906Sjake#include <machine/wstate.h>
6880709Sjake
6980709Sjake#include "assym.s"
7080709Sjake
/*
 * Kernel TSB mask and base; zero placeholders here (presumably patched
 * with the real values elsewhere during pmap bootstrap -- confirm).
 */
71101653Sjake#define	TSB_KERNEL_MASK	0x0
72101653Sjake#define	TSB_KERNEL	0x0
73101653Sjake
/*
 * Tell the assembler these global registers are used intentionally.
 */
7488644Sjake	.register %g2,#ignore
7588644Sjake	.register %g3,#ignore
7688644Sjake	.register %g6,#ignore
7788644Sjake	.register %g7,#ignore
7888644Sjake
7982005Sjake/*
8088644Sjake * Atomically set the reference bit in a tte.
8188644Sjake */
/*
 * TTE_SET_BIT(r1, r2, r3, bit): compare-and-swap loop that ors "bit" into
 * the tte's data word.  r1 is advanced to point at the data word; on exit
 * r2 holds the updated data word; r3 is scratch.
 */
8288644Sjake#define	TTE_SET_BIT(r1, r2, r3, bit) \
8388644Sjake	add	r1, TTE_DATA, r1 ; \
8488644Sjake	ldx	[r1], r2 ; \
8588644Sjake9:	or	r2, bit, r3 ; \
8688644Sjake	casxa	[r1] ASI_N, r2, r3 ; \
8788644Sjake	cmp	r2, r3 ; \
8888644Sjake	bne,pn	%xcc, 9b ; \
8988644Sjake	 mov	r3, r2
9088644Sjake
9188644Sjake#define	TTE_SET_REF(r1, r2, r3)		TTE_SET_BIT(r1, r2, r3, TD_REF)
9288644Sjake#define	TTE_SET_W(r1, r2, r3)		TTE_SET_BIT(r1, r2, r3, TD_W)
9388644Sjake
9488644Sjake/*
9582906Sjake * Macros for spilling and filling live windows.
9682906Sjake *
9782906Sjake * NOTE: These macros use exactly 16 instructions, and it is assumed that the
9882906Sjake * handler will not use more than 24 instructions total, to leave room for
9982906Sjake * resume vectors which occupy the last 8 instructions.
10082005Sjake */
10180709Sjake
/* Store the 8 local and 8 in registers of the trapped window at base. */
10282906Sjake#define	SPILL(storer, base, size, asi) \
10382906Sjake	storer	%l0, [base + (0 * size)] asi ; \
10482906Sjake	storer	%l1, [base + (1 * size)] asi ; \
10582906Sjake	storer	%l2, [base + (2 * size)] asi ; \
10682906Sjake	storer	%l3, [base + (3 * size)] asi ; \
10782906Sjake	storer	%l4, [base + (4 * size)] asi ; \
10882906Sjake	storer	%l5, [base + (5 * size)] asi ; \
10982906Sjake	storer	%l6, [base + (6 * size)] asi ; \
11082906Sjake	storer	%l7, [base + (7 * size)] asi ; \
11182906Sjake	storer	%i0, [base + (8 * size)] asi ; \
11282906Sjake	storer	%i1, [base + (9 * size)] asi ; \
11382906Sjake	storer	%i2, [base + (10 * size)] asi ; \
11482906Sjake	storer	%i3, [base + (11 * size)] asi ; \
11582906Sjake	storer	%i4, [base + (12 * size)] asi ; \
11682906Sjake	storer	%i5, [base + (13 * size)] asi ; \
11782906Sjake	storer	%i6, [base + (14 * size)] asi ; \
11882906Sjake	storer	%i7, [base + (15 * size)] asi
11980709Sjake
/* Load the 8 local and 8 in registers of the trapped window from base. */
12082906Sjake#define	FILL(loader, base, size, asi) \
12182906Sjake	loader	[base + (0 * size)] asi, %l0 ; \
12282906Sjake	loader	[base + (1 * size)] asi, %l1 ; \
12382906Sjake	loader	[base + (2 * size)] asi, %l2 ; \
12482906Sjake	loader	[base + (3 * size)] asi, %l3 ; \
12582906Sjake	loader	[base + (4 * size)] asi, %l4 ; \
12682906Sjake	loader	[base + (5 * size)] asi, %l5 ; \
12782906Sjake	loader	[base + (6 * size)] asi, %l6 ; \
12882906Sjake	loader	[base + (7 * size)] asi, %l7 ; \
12982906Sjake	loader	[base + (8 * size)] asi, %i0 ; \
13082906Sjake	loader	[base + (9 * size)] asi, %i1 ; \
13182906Sjake	loader	[base + (10 * size)] asi, %i2 ; \
13282906Sjake	loader	[base + (11 * size)] asi, %i3 ; \
13382906Sjake	loader	[base + (12 * size)] asi, %i4 ; \
13482906Sjake	loader	[base + (13 * size)] asi, %i5 ; \
13582906Sjake	loader	[base + (14 * size)] asi, %i6 ; \
13682906Sjake	loader	[base + (15 * size)] asi, %i7
13782005Sjake
/*
 * No-op register move used as a CPU erratum workaround (presumably
 * UltraSPARC erratum #50 -- confirm against the processor errata list).
 */
13882906Sjake#define	ERRATUM50(reg)	mov reg, reg
13982906Sjake
14088781Sjake#define	KSTACK_SLOP	1024
14188781Sjake
14289048Sjake/*
14389048Sjake * Sanity check the kernel stack and bail out if it's wrong.
 * Saves %g1/%g2 on the alternate stack (ASP_REG), then verifies that
 * %sp + SPOFF is properly aligned and lies within the current thread's
 * kernel stack, between kstack + KSTACK_SLOP and
 * kstack + KSTACK_PAGES * PAGE_SIZE.  On failure the saved registers are
 * popped (in the annulled delay slots) and we branch to tl1_kstack_fault.
14489048Sjake * XXX: doesn't handle being on the panic stack.
14589048Sjake */
14688781Sjake#define	KSTACK_CHECK \
14788781Sjake	dec	16, ASP_REG ; \
14888781Sjake	stx	%g1, [ASP_REG + 0] ; \
14988781Sjake	stx	%g2, [ASP_REG + 8] ; \
15088781Sjake	add	%sp, SPOFF, %g1 ; \
15188781Sjake	andcc	%g1, (1 << PTR_SHIFT) - 1, %g0 ; \
15288781Sjake	bnz,a	%xcc, tl1_kstack_fault ; \
15388781Sjake	 inc	16, ASP_REG ; \
15488781Sjake	ldx	[PCPU(CURTHREAD)], %g2 ; \
15588781Sjake	ldx	[%g2 + TD_KSTACK], %g2 ; \
15688781Sjake	add	%g2, KSTACK_SLOP, %g2 ; \
15788781Sjake	subcc	%g1, %g2, %g1 ; \
15888781Sjake	ble,a	%xcc, tl1_kstack_fault ; \
15988781Sjake	 inc	16, ASP_REG ; \
16088781Sjake	set	KSTACK_PAGES * PAGE_SIZE, %g2 ; \
16188781Sjake	cmp	%g1, %g2 ; \
16288781Sjake	bgt,a	%xcc, tl1_kstack_fault ; \
16388781Sjake	 inc	16, ASP_REG ; \
16488781Sjake	ldx	[ASP_REG + 8], %g2 ; \
16588781Sjake	ldx	[ASP_REG + 0], %g1 ; \
16688781Sjake	inc	16, ASP_REG
16788781Sjake
/*
 * Handler for a bad kernel stack detected by KSTACK_CHECK.  Unwinds the
 * trap level down to 2, logging each level if KTR is compiled in, then
 * resets the register window state, switches to the alternate stack and
 * enters tl1_trap with T_KSTACK_FAULT.
 *
 * Bug fix: the first KTR block stored tl, tpc and tnpc all into
 * KTR_PARM1, so tpc/tnpc overwrote tl and PARM2/PARM3 were left as
 * garbage even though the format string consumes three parameters.
 * Store them into KTR_PARM1..KTR_PARM3, matching the second KTR block
 * below.
 */
16888781SjakeENTRY(tl1_kstack_fault)
16988781Sjake	rdpr	%tl, %g1
17097263Sjake1:	cmp	%g1, 2
17197263Sjake	be,a	2f
17288781Sjake	 nop

17388781Sjake#if KTR_COMPILE & KTR_TRAP
17488781Sjake	CATR(KTR_TRAP, "tl1_kstack_fault: tl=%#lx tpc=%#lx tnpc=%#lx"
17597263Sjake	    , %g2, %g3, %g4, 7, 8, 9)
17697263Sjake	rdpr	%tl, %g3
17797263Sjake	stx	%g3, [%g2 + KTR_PARM1]
17897263Sjake	rdpr	%tpc, %g3
17997263Sjake	stx	%g3, [%g2 + KTR_PARM2]
18097263Sjake	rdpr	%tnpc, %g3
18197263Sjake	stx	%g3, [%g2 + KTR_PARM3]
18288781Sjake9:
18388781Sjake#endif

	/* Pop one trap level and log it too, until we reach tl 2. */
18497263Sjake	sub	%g1, 1, %g1
18597263Sjake	wrpr	%g1, 0, %tl
18697263Sjake	ba,a	%xcc, 1b
18797263Sjake	 nop

18888781Sjake2:
18988781Sjake#if KTR_COMPILE & KTR_TRAP
19088781Sjake	CATR(KTR_TRAP,
19188781Sjake	    "tl1_kstack_fault: sp=%#lx ks=%#lx cr=%#lx cs=%#lx ow=%#lx ws=%#lx"
19288781Sjake	    , %g1, %g2, %g3, 7, 8, 9)
19388781Sjake	add	%sp, SPOFF, %g2
19488781Sjake	stx	%g2, [%g1 + KTR_PARM1]
19588781Sjake	ldx	[PCPU(CURTHREAD)], %g2
19688781Sjake	ldx	[%g2 + TD_KSTACK], %g2
19788781Sjake	stx	%g2, [%g1 + KTR_PARM2]
19888781Sjake	rdpr	%canrestore, %g2
19988781Sjake	stx	%g2, [%g1 + KTR_PARM3]
20088781Sjake	rdpr	%cansave, %g2
20188781Sjake	stx	%g2, [%g1 + KTR_PARM4]
20288781Sjake	rdpr	%otherwin, %g2
20388781Sjake	stx	%g2, [%g1 + KTR_PARM5]
20488781Sjake	rdpr	%wstate, %g2
20588781Sjake	stx	%g2, [%g1 + KTR_PARM6]
20688781Sjake9:
20788781Sjake#endif

	/* The window state is untrustworthy; reset it outright. */
20888781Sjake	wrpr	%g0, 0, %canrestore
20988781Sjake	wrpr	%g0, 6, %cansave
21088781Sjake	wrpr	%g0, 0, %otherwin
21188781Sjake	wrpr	%g0, WSTATE_KERNEL, %wstate

	/* Run on the alternate stack; the kernel stack is unusable. */
21288781Sjake	sub	ASP_REG, SPOFF + CCFSZ, %sp
21388781Sjake	clr	%fp

21488781Sjake	set	trap, %o2
21588781Sjake	b	%xcc, tl1_trap
21688781Sjake	 mov	T_KSTACK_FAULT | T_KERNEL, %o0
21788781SjakeEND(tl1_kstack_fault)
22488781Sjake
22582906Sjake/*
22682906Sjake * Magic to resume from a spill or fill trap.  If we get an alignment or an
22782906Sjake * mmu fault during a spill or a fill, this macro will detect the fault and
22888644Sjake * resume at a set instruction offset in the trap handler.
22982906Sjake *
23088644Sjake * To check if the previous trap was a spill/fill we convert the trapped pc
23188644Sjake * to a trap type and verify that it is in the range of spill/fill vectors.
23282906Sjake * The spill/fill vectors are types 0x80-0xff and 0x280-0x2ff, masking off the
23382906Sjake * tl bit allows us to detect both ranges with one test.
23482906Sjake *
23582906Sjake * This is:
23688644Sjake *	0x80 <= (((%tpc - %tba) >> 5) & ~0x200) < 0x100
23782906Sjake *
23882906Sjake * To calculate the new pc we take advantage of the xor feature of wrpr.
23982906Sjake * Forcing all the low bits of the trapped pc on we can produce any offset
24082906Sjake * into the spill/fill vector.  The size of a spill/fill trap vector is 0x80.
24182906Sjake *
24282906Sjake *	0x7f ^ 0x1f == 0x60
24382906Sjake *	0x1f == (0x80 - 0x60) - 1
24482906Sjake *
24586519Sjake * Which are the offset and xor value used to resume from alignment faults.
24682906Sjake */
24782906Sjake
24882906Sjake/*
24988644Sjake * Determine if we have trapped inside of a spill/fill vector, and if so resume
25088644Sjake * at a fixed instruction offset in the trap vector.  Must be called on
25188644Sjake * alternate globals.
25282906Sjake */
/*
 * stxa_g0_sfsr is a statement to (optionally) clear the sfsr; xor is the
 * wrpr xor constant selecting the resume offset.  %g1/%g2 are preserved
 * via the alternate stack (ASP_REG).
 */
25388644Sjake#define	RESUME_SPILLFILL_MAGIC(stxa_g0_sfsr, xor) \
25488644Sjake	dec	16, ASP_REG ; \
25588644Sjake	stx	%g1, [ASP_REG + 0] ; \
25688644Sjake	stx	%g2, [ASP_REG + 8] ; \
25788644Sjake	rdpr	%tpc, %g1 ; \
25888644Sjake	ERRATUM50(%g1) ; \
25988644Sjake	rdpr	%tba, %g2 ; \
26088644Sjake	sub	%g1, %g2, %g2 ; \
26188644Sjake	srlx	%g2, 5, %g2 ; \
26288644Sjake	andn	%g2, 0x200, %g2 ; \
26388644Sjake	cmp	%g2, 0x80 ; \
26488644Sjake	blu,pt	%xcc, 9f ; \
26588644Sjake	 cmp	%g2, 0x100 ; \
26688644Sjake	bgeu,pt	%xcc, 9f ; \
26788644Sjake	 or	%g1, 0x7f, %g1 ; \
26888644Sjake	wrpr	%g1, xor, %tnpc ; \
26988644Sjake	stxa_g0_sfsr ; \
27088644Sjake	ldx	[ASP_REG + 8], %g2 ; \
27188644Sjake	ldx	[ASP_REG + 0], %g1 ; \
27288644Sjake	inc	16, ASP_REG ; \
27388644Sjake	done ; \
27488644Sjake9:	ldx	[ASP_REG + 8], %g2 ; \
27588644Sjake	ldx	[ASP_REG + 0], %g1 ; \
27688644Sjake	inc	16, ASP_REG
27782906Sjake
27888644Sjake/*
27988644Sjake * For certain faults we need to clear the sfsr mmu register before returning.
28088644Sjake */
28188644Sjake#define	RSF_CLR_SFSR \
28288644Sjake	wr	%g0, ASI_DMMU, %asi ; \
28388644Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
28488644Sjake
/* Convert a resume offset into the wrpr xor constant (see comment above). */
28582906Sjake#define	RSF_XOR(off)	((0x80 - off) - 1)
28682906Sjake
28782906Sjake/*
28882906Sjake * Instruction offsets in spill and fill trap handlers for handling certain
28982906Sjake * nested traps, and corresponding xor constants for wrpr.
29082906Sjake */
29186519Sjake#define	RSF_OFF_ALIGN	0x60
29286519Sjake#define	RSF_OFF_MMU	0x70
29382906Sjake
29488644Sjake#define	RESUME_SPILLFILL_ALIGN \
29588644Sjake	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_ALIGN))
29688644Sjake#define	RESUME_SPILLFILL_MMU \
29788644Sjake	RESUME_SPILLFILL_MAGIC(EMPTY, RSF_XOR(RSF_OFF_MMU))
29888644Sjake#define	RESUME_SPILLFILL_MMU_CLR_SFSR \
29988644Sjake	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_MMU))
30082906Sjake
30182906Sjake/*
30282906Sjake * Constant to add to %tnpc when taking a fill trap just before returning to
30388644Sjake * user mode.
30482906Sjake */
30582906Sjake#define	RSF_FILL_INC	tl0_ret_fill_end - tl0_ret_fill
30682906Sjake
30782906Sjake/*
30882906Sjake * Retry a spill or fill with a different wstate due to an alignment fault.
30982906Sjake * We may just be using the wrong stack offset.
31082906Sjake */
31182906Sjake#define	RSF_ALIGN_RETRY(ws) \
31282906Sjake	wrpr	%g0, (ws), %wstate ; \
31382906Sjake	retry ; \
31482906Sjake	.align	16
31582906Sjake
31682906Sjake/*
31782906Sjake * Generate a T_SPILL or T_FILL trap if the window operation fails.
31882906Sjake */
31982906Sjake#define	RSF_TRAP(type) \
32082906Sjake	b	%xcc, tl0_sftrap ; \
32182906Sjake	 mov	type, %g2 ; \
32282906Sjake	.align	16
32382906Sjake
32482906Sjake/*
32582906Sjake * Game over if the window operation fails.
32682906Sjake */
32782906Sjake#define	RSF_FATAL(type) \
32888781Sjake	b	%xcc, rsf_fatal ; \
32988781Sjake	 mov	type, %g2 ; \
33082906Sjake	.align	16
33182906Sjake
33282906Sjake/*
33382906Sjake * Magic to resume from a failed fill a few instructions after the corresponding
33482906Sjake * restore.  This is used on return from the kernel to usermode.
33582906Sjake */
33682906Sjake#define	RSF_FILL_MAGIC \
33782906Sjake	rdpr	%tnpc, %g1 ; \
33882906Sjake	add	%g1, RSF_FILL_INC, %g1 ; \
33982906Sjake	wrpr	%g1, 0, %tnpc ; \
34082906Sjake	done ; \
34182906Sjake	.align	16
34282906Sjake
34382906Sjake/*
34482906Sjake * Spill to the pcb if a spill to the user stack in kernel mode fails.
34582906Sjake */
34682906Sjake#define	RSF_SPILL_TOPCB \
34782906Sjake	b,a	%xcc, tl1_spill_topcb ; \
34882906Sjake	 nop ; \
34982906Sjake	.align	16
35082906Sjake
/*
 * Unrecoverable window spill/fill failure (target of RSF_FATAL).
 * %g2 holds the trap type passed by the macro.  Log the trap, sanity
 * check the kernel stack, then reset the machine with sir.
 */
35188781SjakeENTRY(rsf_fatal)
35288781Sjake#if KTR_COMPILE & KTR_TRAP
35388781Sjake	CATR(KTR_TRAP, "rsf_fatal: bad window trap tt=%#lx type=%#lx"
35488781Sjake	    , %g1, %g3, %g4, 7, 8, 9)
35588781Sjake	rdpr	%tt, %g3
35688781Sjake	stx	%g3, [%g1 + KTR_PARM1]
35788781Sjake	stx	%g2, [%g1 + KTR_PARM2]
35888781Sjake9:
35988781Sjake#endif

36088781Sjake	KSTACK_CHECK

36188781Sjake	sir
36288781SjakeEND(rsf_fatal)
36588781Sjake
/*
 * Interrupt name and count tables, one 8-byte entry per interrupt vector;
 * the eintr* symbols mark the table ends.
 */
36697265Sjake	.comm	intrnames, IV_MAX * 8
36785243Sjake	.comm	eintrnames, 0

36997265Sjake	.comm	intrcnt, IV_MAX * 8
37085243Sjake	.comm	eintrcnt, 0
37180709Sjake
37282906Sjake/*
37382906Sjake * Trap table and associated macros
37482906Sjake *
37582906Sjake * Due to its size a trap table is an inherently hard thing to represent in
37682906Sjake * code in a clean way.  There are approximately 1024 vectors, of 8 or 32
37782906Sjake * instructions each, many of which are identical.  The way that this is
37882906Sjake * laid out is the instructions (8 or 32) for the actual trap vector appear
37982906Sjake * as an AS macro.  In general this code branches to tl0_trap or tl1_trap,
38082906Sjake * but if not supporting code can be placed just after the definition of the
38182906Sjake * macro.  The macros are then instantiated in a different section (.trap),
38282906Sjake * which is setup to be placed by the linker at the beginning of .text, and the
38382906Sjake * code around the macros is moved to the end of trap table.  In this way the
38482906Sjake * code that must be sequential in memory can be split up, and located near
38582906Sjake * its supporting code so that it is easier to follow.
38682906Sjake */
38782906Sjake
38882906Sjake	/*
38982906Sjake	 * Clean window traps occur when %cleanwin is zero to ensure that data
39082906Sjake	 * is not leaked between address spaces in registers.
39182906Sjake	 */
39280709Sjake	.macro	clean_window
39380709Sjake	clr	%o0
39480709Sjake	clr	%o1
39580709Sjake	clr	%o2
39680709Sjake	clr	%o3
39780709Sjake	clr	%o4
39880709Sjake	clr	%o5
39980709Sjake	clr	%o6
40080709Sjake	clr	%o7
40180709Sjake	clr	%l0
40280709Sjake	clr	%l1
40380709Sjake	clr	%l2
40480709Sjake	clr	%l3
40580709Sjake	clr	%l4
40680709Sjake	clr	%l5
40780709Sjake	clr	%l6
	/* %l7 doubles as scratch for bumping %cleanwin, then is cleared too. */
40880709Sjake	rdpr	%cleanwin, %l7
40980709Sjake	inc	%l7
41080709Sjake	wrpr	%l7, 0, %cleanwin
41180709Sjake	clr	%l7
41280709Sjake	retry
41380709Sjake	.align	128
41480709Sjake	.endm
41580709Sjake
41681380Sjake	/*
41782906Sjake	 * Stack fixups for entry from user mode.  We are still running on the
41882906Sjake	 * user stack, and with its live registers, so we must save soon.  We
41982906Sjake	 * are on alternate globals so we do have some registers.  Set the
42088644Sjake	 * transitional window state, and do the save.  If this traps
42188644Sjake	 * we attempt to spill a window to the user stack.  If this fails,
42288644Sjake	 * we spill the window to the pcb and continue.  Spilling to the pcb
42388644Sjake	 * must not fail.
42482906Sjake	 *
42582906Sjake	 * NOTE: Must be called with alternate globals and clobbers %g1.
42682906Sjake	 */

42888644Sjake	.macro	tl0_split
42982906Sjake	rdpr	%wstate, %g1
43082906Sjake	wrpr	%g1, WSTATE_TRANSITION, %wstate
43181380Sjake	save
43281380Sjake	.endm

	/*
	 * Split from the user window state and enter the common user trap
	 * path (tl0_utrap) with the trap type in %o0 and trap() in %o2.
	 */
43482906Sjake	.macro	tl0_setup	type
43588644Sjake	tl0_split
436103921Sjake	set	trap, %o2
437103897Sjake	ba	%xcc, tl0_utrap
43882906Sjake	 mov	\type, %o0
43981380Sjake	.endm
44081380Sjake
44181380Sjake	/*
44282906Sjake	 * Generic trap type.  Call trap() with the specified type.
44381380Sjake	 */
44480709Sjake	.macro	tl0_gen		type
44582906Sjake	tl0_setup \type
44680709Sjake	.align	32
44780709Sjake	.endm

44980709Sjake	/*
45082906Sjake	 * This is used to suck up the massive swaths of reserved trap types.
45182906Sjake	 * Generates count "reserved" trap vectors.
45282906Sjake	 */
45380709Sjake	.macro	tl0_reserved	count
45480709Sjake	.rept	\count
45580709Sjake	tl0_gen	T_RESERVED
45680709Sjake	.endr
45780709Sjake	.endm
45880709Sjake
	/*
	 * Enable the fpu (FPRS_FEF) and block-load the floating point
	 * register file from the pcb's saved fp state, then retire the
	 * trapping instruction with done.
	 */
45988780Sjake	.macro	tl0_fp_restore
46088780Sjake	wr	%g0, FPRS_FEF, %fprs
46188780Sjake	wr	%g0, ASI_BLK_S, %asi
46288780Sjake	ldda	[PCB_REG + PCB_FPSTATE + FP_FB0] %asi, %f0
46388780Sjake	ldda	[PCB_REG + PCB_FPSTATE + FP_FB1] %asi, %f16
46488780Sjake	ldda	[PCB_REG + PCB_FPSTATE + FP_FB2] %asi, %f32
46588780Sjake	ldda	[PCB_REG + PCB_FPSTATE + FP_FB3] %asi, %f48
46688780Sjake	membar	#Sync
46788780Sjake	done
46888780Sjake	.align	32
46988780Sjake	.endm
47088780Sjake
	/*
	 * Instruction access exception: gather %tpc (%g3) and the I-MMU
	 * sfsr (%g4), clear the sfsr, and hand off to tl0_sfsr_trap with
	 * the trap type in %g2.
	 */
47188644Sjake	.macro	tl0_insn_excptn
472101899Sjake	wrpr	%g0, PSTATE_ALT, %pstate
47388644Sjake	wr	%g0, ASI_IMMU, %asi
47488644Sjake	rdpr	%tpc, %g3
47588644Sjake	ldxa	[%g0 + AA_IMMU_SFSR] %asi, %g4
47688644Sjake	stxa	%g0, [%g0 + AA_IMMU_SFSR] %asi
47788644Sjake	membar	#Sync
47888644Sjake	b	%xcc, tl0_sfsr_trap
47988644Sjake	 mov	T_INSTRUCTION_EXCEPTION, %g2
48088644Sjake	.align	32
48188644Sjake	.endm

	/*
	 * Data access exception: gather the D-MMU sfar (%g3) and sfsr (%g4),
	 * clear the sfsr, and hand off to tl0_sfsr_trap.
	 */
48382906Sjake	.macro	tl0_data_excptn
484101899Sjake	wrpr	%g0, PSTATE_ALT, %pstate
48582906Sjake	wr	%g0, ASI_DMMU, %asi
48682906Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
48782906Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
48888644Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
48988644Sjake	membar	#Sync
49082906Sjake	b	%xcc, tl0_sfsr_trap
49188644Sjake	 mov	T_DATA_EXCEPTION, %g2
49282906Sjake	.align	32
49382906Sjake	.endm

	/*
	 * Memory address alignment fault: same sfar/sfsr collection as the
	 * data exception, with T_MEM_ADDRESS_NOT_ALIGNED.
	 */
49582005Sjake	.macro	tl0_align
49682906Sjake	wr	%g0, ASI_DMMU, %asi
49782906Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
49882906Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
49988644Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
50088644Sjake	membar	#Sync
50182005Sjake	b	%xcc, tl0_sfsr_trap
50288644Sjake	 mov	T_MEM_ADDRESS_NOT_ALIGNED, %g2
50382005Sjake	.align	32
50482005Sjake	.endm
50582005Sjake
/*
 * Common entry for user faults that carry a fault address (%g3) and
 * sfsr (%g4); the trap type is in %g2.  Passes them to tl0_utrap in
 * %o4/%o5/%o0 with trap() in %o2.
 */
50682005SjakeENTRY(tl0_sfsr_trap)
50788644Sjake	tl0_split
508103921Sjake	set	trap, %o2
50988644Sjake	mov	%g3, %o4
51088644Sjake	mov	%g4, %o5
511103897Sjake	ba	%xcc, tl0_utrap
51288644Sjake	 mov	%g2, %o0
51382005SjakeEND(tl0_sfsr_trap)
51482005Sjake
	/*
	 * Interrupt from user mode at the given pil: branch to the common
	 * tl0_intr handler with the level in %o0 and the level mask in %o1.
	 */
51582906Sjake	.macro	tl0_intr level, mask
51688644Sjake	tl0_split
51791246Sjake	set	\mask, %o1
51884186Sjake	b	%xcc, tl0_intr
51991246Sjake	 mov	\level, %o0
52081380Sjake	.align	32
52181380Sjake	.endm

/* Instantiate one interrupt vector for the given level and trap level. */
52381380Sjake#define	INTR(level, traplvl)						\
52482906Sjake	tl ## traplvl ## _intr	level, 1 << level

/* PIL_TICK uses a mask of 1 rather than 1 << level. */
52681380Sjake#define	TICK(traplvl) \
52782906Sjake	tl ## traplvl ## _intr	PIL_TICK, 1

/* Emit the 15 interrupt level vectors for trap level tl. */
52981380Sjake#define	INTR_LEVEL(tl)							\
53081380Sjake	INTR(1, tl) ;							\
53181380Sjake	INTR(2, tl) ;							\
53281380Sjake	INTR(3, tl) ;							\
53381380Sjake	INTR(4, tl) ;							\
53481380Sjake	INTR(5, tl) ;							\
53581380Sjake	INTR(6, tl) ;							\
53681380Sjake	INTR(7, tl) ;							\
53781380Sjake	INTR(8, tl) ;							\
53881380Sjake	INTR(9, tl) ;							\
53981380Sjake	INTR(10, tl) ;							\
54081380Sjake	INTR(11, tl) ;							\
54181380Sjake	INTR(12, tl) ;							\
54281380Sjake	INTR(13, tl) ;							\
54381380Sjake	TICK(tl) ;							\
54481380Sjake	INTR(15, tl) ;

54680709Sjake	.macro	tl0_intr_level
54781380Sjake	INTR_LEVEL(0)
54880709Sjake	.endm
54980709Sjake
	/*
	 * Interrupt vector trap slot: if the interrupt receive register
	 * shows a pending vector (IRSR_BUSY), branch to the intr_vector
	 * entry point (presumably defined elsewhere -- this macro's label
	 * reference is not resolved in this file; confirm); otherwise
	 * something is wrong, reset with sir.
	 */
55097265Sjake	.macro	intr_vector
55197265Sjake	ldxa	[%g0] ASI_INTR_RECEIVE, %g1
55297265Sjake	andcc	%g1, IRSR_BUSY, %g0
553104075Sjake	bnz,a,pt %xcc, intr_vector
55497265Sjake	 nop
55597265Sjake	sir
55697265Sjake	.align	32
55797265Sjake	.endm
55880709Sjake
	/*
	 * Inline user I-TSB lookup for an instruction tlb miss.  Expects the
	 * tag access register contents in %g1 and %asi set to ASI_IMMU.
	 * Falls through if no matching tte is found for any page size.
	 */
55996207Sjake	.macro	immu_miss_user
56081380Sjake	/*
561102040Sjake	 * Initialize the page size walker.
562102040Sjake	 */
563102040Sjake	mov	TS_MIN, %g2

565102040Sjake	/*
566102040Sjake	 * Loop over all supported page sizes.
567102040Sjake	 */

569102040Sjake	/*
570102040Sjake	 * Compute the page shift for the page size we are currently looking
571102040Sjake	 * for.
572102040Sjake	 */
573102040Sjake1:	add	%g2, %g2, %g3
574102040Sjake	add	%g3, %g2, %g3
575102040Sjake	add	%g3, PAGE_SHIFT, %g3

577102040Sjake	/*
57891224Sjake	 * Extract the virtual page number from the contents of the tag
57991224Sjake	 * access register.
58081380Sjake	 */
581102040Sjake	srlx	%g1, %g3, %g3

58381380Sjake	/*
58491224Sjake	 * Compute the tte bucket address.
58581380Sjake	 */
586102040Sjake	ldxa	[%g0 + AA_IMMU_TSB] %asi, %g5
587102040Sjake	and	%g3, TSB_BUCKET_MASK, %g4
588102040Sjake	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
589102040Sjake	add	%g4, %g5, %g4

59181380Sjake	/*
592102040Sjake	 * Compute the tte tag target.
59381380Sjake	 */
594102040Sjake	sllx	%g3, TV_SIZE_BITS, %g3
595102040Sjake	or	%g3, %g2, %g3

59781380Sjake	/*
598102040Sjake	 * Loop over the ttes in this bucket
59981380Sjake	 */

60181380Sjake	/*
602102040Sjake	 * Load the tte.  Note that this instruction may fault, clobbering
603102040Sjake	 * the contents of the tag access register, %g5, %g6, and %g7.  We
604102040Sjake	 * do not use %g5, and %g6 and %g7 are not used until this instruction
605102040Sjake	 * completes successfully.
60681380Sjake	 */
607102040Sjake2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */

60981380Sjake	/*
610102040Sjake	 * Check that it's valid and executable and that the tte tags match.
61181380Sjake	 */
612102040Sjake	brgez,pn %g7, 3f
613102040Sjake	 andcc	%g7, TD_EXEC, %g0
614102040Sjake	bz,pn	%xcc, 3f
615102040Sjake	 cmp	%g3, %g6
616102040Sjake	bne,pn	%xcc, 3f
61788644Sjake	 EMPTY

61981380Sjake	/*
62081380Sjake	 * We matched a tte, load the tlb.
62181380Sjake	 */

62381380Sjake	/*
62481380Sjake	 * Set the reference bit, if it's currently clear.
62581380Sjake	 */
626102040Sjake	 andcc	%g7, TD_REF, %g0
62782906Sjake	bz,a,pn	%xcc, tl0_immu_miss_set_ref
62881380Sjake	 nop

63081380Sjake	/*
63191224Sjake	 * Load the tte tag and data into the tlb and retry the instruction.
63281380Sjake	 */
633102040Sjake	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
634102040Sjake	stxa	%g7, [%g0] ASI_ITLB_DATA_IN_REG
63581380Sjake	retry

63781380Sjake	/*
638102040Sjake	 * Advance to the next tte in this bucket, and check the low bits
639102040Sjake	 * of the bucket pointer to see if we've finished the bucket.
64081380Sjake	 */
641102040Sjake3:	add	%g4, 1 << TTE_SHIFT, %g4
642102040Sjake	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
643102040Sjake	bnz,pt	%xcc, 2b
644102040Sjake	 EMPTY

64691224Sjake	/*
647102040Sjake	 * See if we just checked the largest page size, and advance to the
648102040Sjake	 * next one if not.
64991224Sjake	 */
650102040Sjake	 cmp	%g2, TS_MAX
651102040Sjake	bne,pt	%xcc, 1b
652102040Sjake	 add	%g2, 1, %g2
653102040Sjake	.endm

655102040Sjake	.macro	tl0_immu_miss
65696207Sjake	/*
65796207Sjake	 * Load the virtual page number and context from the tag access
65896207Sjake	 * register.  We ignore the context.
65996207Sjake	 */
66096207Sjake	wr	%g0, ASI_IMMU, %asi
661102040Sjake	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g1

663102040Sjake	/*
664102040Sjake	 * Try a fast inline lookup of the user tsb.
665102040Sjake	 */
66696207Sjake	immu_miss_user

668102040Sjake	/*
669102040Sjake	 * Not in user tsb, call c code.
670102040Sjake	 */
671102040Sjake	ba,a	%xcc, tl0_immu_miss_trap
67281380Sjake	.align	128
67380709Sjake	.endm
67480709Sjake
/*
 * Slow path of the I-TSB lookup: atomically set the tte's reference bit,
 * then load the tlb and retry unless the tte went invalid under us.
 * %g4 = tte address, %g1 = tag access contents, %asi = ASI_IMMU.
 */
67582906SjakeENTRY(tl0_immu_miss_set_ref)
67681380Sjake	/*
67781380Sjake	 * Set the reference bit.
67881380Sjake	 */
679102040Sjake	TTE_SET_REF(%g4, %g2, %g3)

68181380Sjake	/*
682102040Sjake	 * May have become invalid during casxa, in which case start over.
68381380Sjake	 */
684102040Sjake	brgez,pn %g2, 1f
685102040Sjake	 nop

68781380Sjake	/*
68891224Sjake	 * Load the tte tag and data into the tlb and retry the instruction.
68981380Sjake	 */
690102040Sjake	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
691102040Sjake	stxa	%g2, [%g0] ASI_ITLB_DATA_IN_REG
69291224Sjake1:	retry
69382906SjakeEND(tl0_immu_miss_set_ref)
69481380Sjake
/*
 * The inline I-TSB lookup failed: restore the tag access register (the
 * tte load may have clobbered it), and enter trap() with
 * T_INSTRUCTION_MISS and the tag access contents in %o3.
 */
69582906SjakeENTRY(tl0_immu_miss_trap)
69681380Sjake	/*
69796207Sjake	 * Put back the contents of the tag access register, in case we
69896207Sjake	 * faulted.
69996207Sjake	 */
700102040Sjake	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
70196207Sjake	membar	#Sync

70396207Sjake	/*
70482906Sjake	 * Switch to alternate globals.
70582906Sjake	 */
70682906Sjake	wrpr	%g0, PSTATE_ALT, %pstate

70882906Sjake	/*
70991224Sjake	 * Reload the tag access register.
71081380Sjake	 */
71191224Sjake	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2

71381380Sjake	/*
71491224Sjake	 * Save the tag access register, and call common trap code.
71581380Sjake	 */
71688644Sjake	tl0_split
717103921Sjake	set	trap, %o2
71891224Sjake	mov	%g2, %o3
71982906Sjake	b	%xcc, tl0_trap
72088644Sjake	 mov	T_INSTRUCTION_MISS, %o0
72182906SjakeEND(tl0_immu_miss_trap)
72281380Sjake
	/*
	 * Inline user D-TSB lookup for a data tlb miss.  Expects the tag
	 * access register contents in %g1 and %asi set to ASI_DMMU.
	 * Falls through if no matching tte is found for any page size.
	 */
72381180Sjake	.macro	dmmu_miss_user
72481180Sjake	/*
725102040Sjake	 * Initialize the page size walker.
726102040Sjake	 */
727102040Sjake	mov	TS_MIN, %g2

729102040Sjake	/*
730102040Sjake	 * Loop over all supported page sizes.
731102040Sjake	 */

733102040Sjake	/*
734102040Sjake	 * Compute the page shift for the page size we are currently looking
735102040Sjake	 * for.
736102040Sjake	 */
737102040Sjake1:	add	%g2, %g2, %g3
738102040Sjake	add	%g3, %g2, %g3
739102040Sjake	add	%g3, PAGE_SHIFT, %g3

741102040Sjake	/*
74291224Sjake	 * Extract the virtual page number from the contents of the tag
74391224Sjake	 * access register.
74491224Sjake	 */
745102040Sjake	srlx	%g1, %g3, %g3

74791224Sjake	/*
74888644Sjake	 * Compute the tte bucket address.
74981180Sjake	 */
750102040Sjake	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g5
751102040Sjake	and	%g3, TSB_BUCKET_MASK, %g4
752102040Sjake	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
753102040Sjake	add	%g4, %g5, %g4

75581180Sjake	/*
756102040Sjake	 * Compute the tte tag target.
75781180Sjake	 */
758102040Sjake	sllx	%g3, TV_SIZE_BITS, %g3
759102040Sjake	or	%g3, %g2, %g3

76181180Sjake	/*
762102040Sjake	 * Loop over the ttes in this bucket
76381180Sjake	 */

76581180Sjake	/*
766102040Sjake	 * Load the tte.  Note that this instruction may fault, clobbering
767102040Sjake	 * the contents of the tag access register, %g5, %g6, and %g7.  We
768102040Sjake	 * do not use %g5, and %g6 and %g7 are not used until this instruction
769102040Sjake	 * completes successfully.
77081180Sjake	 */
771102040Sjake2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */

77381180Sjake	/*
77491224Sjake	 * Check that it's valid and that the virtual page numbers match.
77581180Sjake	 */
776102040Sjake	brgez,pn %g7, 3f
777102040Sjake	 cmp	%g3, %g6
778102040Sjake	bne,pn	%xcc, 3f
77988644Sjake	 EMPTY

78181180Sjake	/*
78281180Sjake	 * We matched a tte, load the tlb.
78381180Sjake	 */

78581180Sjake	/*
78681180Sjake	 * Set the reference bit, if it's currently clear.
78781180Sjake	 */
788102040Sjake	 andcc	%g7, TD_REF, %g0
78981180Sjake	bz,a,pn	%xcc, dmmu_miss_user_set_ref
79081180Sjake	 nop

79281180Sjake	/*
79391224Sjake	 * Load the tte tag and data into the tlb and retry the instruction.
79481180Sjake	 */
795102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
796102040Sjake	stxa	%g7, [%g0] ASI_DTLB_DATA_IN_REG
79781180Sjake	retry

79981180Sjake	/*
800102040Sjake	 * Advance to the next tte in this bucket, and check the low bits
801102040Sjake	 * of the bucket pointer to see if we've finished the bucket.
80281180Sjake	 */
803102040Sjake3:	add	%g4, 1 << TTE_SHIFT, %g4
804102040Sjake	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
805102040Sjake	bnz,pt	%xcc, 2b
806102040Sjake	 EMPTY

808102040Sjake	/*
809102040Sjake	 * See if we just checked the largest page size, and advance to the
810102040Sjake	 * next one if not.
811102040Sjake	 */
812102040Sjake	 cmp	%g2, TS_MAX
813102040Sjake	bne,pt	%xcc, 1b
814102040Sjake	 add	%g2, 1, %g2
81581180Sjake	.endm
81681180Sjake
/*
 * Slow path of the D-TSB lookup: atomically set the tte's reference bit,
 * then load the tlb and retry unless the tte went invalid under us.
 * %g4 = tte address, %g1 = tag access contents, %asi = ASI_DMMU.
 */
81781180SjakeENTRY(dmmu_miss_user_set_ref)
81881180Sjake	/*
81981180Sjake	 * Set the reference bit.
82081180Sjake	 */
821102040Sjake	TTE_SET_REF(%g4, %g2, %g3)

82381180Sjake	/*
824102040Sjake	 * May have become invalid during casxa, in which case start over.
82581180Sjake	 */
826102040Sjake	brgez,pn %g2, 1f
827102040Sjake	 nop

82981180Sjake	/*
83091224Sjake	 * Load the tte tag and data into the tlb and retry the instruction.
83181180Sjake	 */
832102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
833102040Sjake	stxa	%g2, [%g0] ASI_DTLB_DATA_IN_REG
83491224Sjake1:	retry
83581180SjakeEND(dmmu_miss_user_set_ref)
83681180Sjake
83780709Sjake	.macro	tl0_dmmu_miss
83881180Sjake	/*
83996207Sjake	 * Load the virtual page number and context from the tag access
84096207Sjake	 * register.  We ignore the context.
84196207Sjake	 */
84296207Sjake	wr	%g0, ASI_DMMU, %asi
843102040Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1

84596207Sjake	/*
84681180Sjake	 * Try a fast inline lookup of the primary tsb.
84781180Sjake	 */
84881180Sjake	dmmu_miss_user

85081180Sjake	/*
851102040Sjake	 * Not in user tsb, call c code.
85281180Sjake	 */
853102040Sjake	ba,a	%xcc, tl0_dmmu_miss_trap
85481180Sjake	.align	128
85580709Sjake	.endm
85680709Sjake
/*
 * Slow path for a TL0 data MMU miss that was not satisfied by the tsb:
 * restore the tag access register (the inline lookup's ldda may have
 * faulted and clobbered it), switch to the alternate globals, and enter
 * common trap code with T_DATA_MISS and the tag access value in %o3.
 */
85781180SjakeENTRY(tl0_dmmu_miss_trap)
85882005Sjake	/*
85996207Sjake	 * Put back the contents of the tag access register, in case we
86096207Sjake	 * faulted.
86196207Sjake	 */
862102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
86396207Sjake	membar	#Sync
86496207Sjake
86596207Sjake	/*
86682906Sjake	 * Switch to alternate globals.
86782005Sjake	 */
86882906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
86982005Sjake
87082005Sjake	/*
87191224Sjake	 * Reload the tag access register.
87282005Sjake	 */
87391224Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
87481180Sjake
87581180Sjake	/*
87691224Sjake	 * Save the tag access register and call common trap code.
87781180Sjake	 */
87888644Sjake	tl0_split
879103921Sjake	set	trap, %o2
88091224Sjake	mov	%g2, %o3
88182906Sjake	b	%xcc, tl0_trap
88288644Sjake	 mov	T_DATA_MISS, %o0
88382906SjakeEND(tl0_dmmu_miss_trap)
88481180Sjake
/*
 * Inline fast path for a TL0 data protection fault: walk each supported
 * page size (TS_MIN..TS_MAX) and each tte in the matching tsb bucket
 * looking for a valid, software-writable (TD_SW) tte for the faulting
 * page.  On a hit, set the hardware write bit, demap the stale TLB
 * entry, and retry; on a miss, fall through to the invoking code.
 * Expects %g1 = tag access register contents; uses %g2-%g7 as scratch.
 */
88588644Sjake	.macro	dmmu_prot_user
88688644Sjake	/*
887102040Sjake	 * Initialize the page size walker.
888102040Sjake	 */
889102040Sjake	mov	TS_MIN, %g2
890102040Sjake
891102040Sjake	/*
892102040Sjake	 * Loop over all supported page sizes.
893102040Sjake	 */
894102040Sjake
895102040Sjake	/*
896102040Sjake	 * Compute the page shift for the page size we are currently looking
897102040Sjake	 * for.
898102040Sjake	 */
	/* %g3 = %g2 * 3 + PAGE_SHIFT; page sizes grow in 3-bit shift steps. */
899102040Sjake1:	add	%g2, %g2, %g3
900102040Sjake	add	%g3, %g2, %g3
901102040Sjake	add	%g3, PAGE_SHIFT, %g3
902102040Sjake
903102040Sjake	/*
90491224Sjake	 * Extract the virtual page number from the contents of the tag
90591224Sjake	 * access register.
90691224Sjake	 */
907102040Sjake	srlx	%g1, %g3, %g3
90891224Sjake
90991224Sjake	/*
91088644Sjake	 * Compute the tte bucket address.
91188644Sjake	 */
912102040Sjake	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g5
913102040Sjake	and	%g3, TSB_BUCKET_MASK, %g4
914102040Sjake	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
915102040Sjake	add	%g4, %g5, %g4
91688644Sjake
91788644Sjake	/*
918102040Sjake	 * Compute the tte tag target.
91988644Sjake	 */
920102040Sjake	sllx	%g3, TV_SIZE_BITS, %g3
921102040Sjake	or	%g3, %g2, %g3
92288644Sjake
92388644Sjake	/*
924102040Sjake	 * Loop over the ttes in this bucket
92588644Sjake	 */
92688644Sjake
92788644Sjake	/*
928102040Sjake	 * Load the tte.  Note that this instruction may fault, clobbering
929102040Sjake	 * the contents of the tag access register, %g5, %g6, and %g7.  We
930102040Sjake	 * do not use %g5, and %g6 and %g7 are not used until this instruction
931102040Sjake	 * completes successfully.
93288644Sjake	 */
933102040Sjake2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
93488644Sjake
93588644Sjake	/*
93691224Sjake	 * Check that its valid and writable and that the virtual page
93791224Sjake	 * numbers match.
93888644Sjake	 */
939102040Sjake	brgez,pn %g7, 4f
940102040Sjake	 andcc	%g7, TD_SW, %g0
941102040Sjake	bz,pn	%xcc, 4f
942102040Sjake	 cmp	%g3, %g6
943102040Sjake	bne,pn	%xcc, 4f
94488644Sjake	 nop
94588644Sjake
94691224Sjake	/*
94791224Sjake	 * Set the hardware write bit.
94891224Sjake	 */
949102040Sjake	TTE_SET_W(%g4, %g2, %g3)
95088644Sjake
95188644Sjake	/*
952102040Sjake	 * Delete the old TLB entry and clear the sfsr.
95388644Sjake	 */
954102040Sjake	srlx	%g1, PAGE_SHIFT, %g3
955102040Sjake	sllx	%g3, PAGE_SHIFT, %g3
956102040Sjake	stxa	%g0, [%g3] ASI_DMMU_DEMAP
957102040Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
958102040Sjake	membar	#Sync
95988644Sjake
96081180Sjake	/*
961102040Sjake	 * May have become invalid during casxa, in which case start over.
96288644Sjake	 */
963102040Sjake	brgez,pn %g2, 3f
964102040Sjake	 or	%g2, TD_W, %g2
96588644Sjake
96688644Sjake	/*
967102040Sjake	 * Load the tte data into the tlb and retry the instruction.
96896207Sjake	 */
969102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
970102040Sjake	stxa	%g2, [%g0] ASI_DTLB_DATA_IN_REG
971102040Sjake3:	retry
97296207Sjake
97396207Sjake	/*
974102040Sjake	 * Check the low bits to see if we've finished the bucket.
97588644Sjake	 */
976102040Sjake4:	add	%g4, 1 << TTE_SHIFT, %g4
977102040Sjake	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
978102040Sjake	bnz,pt	%xcc, 2b
979102040Sjake	 EMPTY
98088644Sjake
98188644Sjake	/*
982102040Sjake	 * See if we just checked the largest page size, and advance to the
983102040Sjake	 * next one if not.
98488644Sjake	 */
985102040Sjake	 cmp	%g2, TS_MAX
986102040Sjake	bne,pt	%xcc, 1b
987102040Sjake	 add	%g2, 1, %g2
988102040Sjake	.endm
989102040Sjake
/*
 * TL0 data protection trap table entry: just branch to the out-of-line
 * handler so the table slot stays small.  Padded to one 128-byte slot.
 */
989102040Sjake	.macro	tl0_dmmu_prot
990102040Sjake	ba,a	%xcc, tl0_dmmu_prot_1
99188644Sjake	 nop
99288644Sjake	.align	128
99388644Sjake	.endm
99588644Sjake
/*
 * Out-of-line body of the TL0 data protection handler: load the tag
 * access register into %g1, try the inline user tsb fast path, and fall
 * back to the C-level trap on failure.
 */
996102040SjakeENTRY(tl0_dmmu_prot_1)
99788644Sjake	/*
998102040Sjake	 * Load the virtual page number and context from the tag access
999102040Sjake	 * register.  We ignore the context.
100088644Sjake	 */
1001102040Sjake	wr	%g0, ASI_DMMU, %asi
1002102040Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1
100388644Sjake
100488644Sjake	/*
1005102040Sjake	 * Try a fast inline lookup of the tsb.
100688644Sjake	 */
1007102040Sjake	dmmu_prot_user
100888644Sjake
100988644Sjake	/*
1010102040Sjake	 * Not in user tsb, call c code.
101191224Sjake	 */
1012102040Sjake	b,a	%xcc, tl0_dmmu_prot_trap
1013102040Sjake	 nop
1014102040SjakeEND(tl0_dmmu_prot_1)
101591224Sjake
/*
 * Slow path for a TL0 data protection fault: restore the tag access
 * register (the inline lookup may have faulted), switch to alternate
 * globals, collect tar/sfar/sfsr, clear the sfsr, and enter common
 * trap code with T_DATA_PROTECTION and the mmu registers in %o3-%o5.
 */
101688644SjakeENTRY(tl0_dmmu_prot_trap)
101788644Sjake	/*
101896207Sjake	 * Put back the contents of the tag access register, in case we
101996207Sjake	 * faulted.
102096207Sjake	 */
1021102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
102296207Sjake	membar	#Sync
102396207Sjake
102496207Sjake	/*
102582906Sjake	 * Switch to alternate globals.
102681180Sjake	 */
102782906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
102881180Sjake
102981180Sjake	/*
103082005Sjake	 * Load the tar, sfar and sfsr.
103182005Sjake	 */
103288644Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
103388644Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
103488644Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
103585243Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
103682005Sjake	membar	#Sync
103782005Sjake
103882005Sjake	/*
103991224Sjake	 * Save the mmu registers and call common trap code.
104082005Sjake	 */
104188644Sjake	tl0_split
1042103921Sjake	set	trap, %o2
104388644Sjake	mov	%g2, %o3
104488644Sjake	mov	%g3, %o4
104588644Sjake	mov	%g4, %o5
	/* NOTE: enters via tl0_utrap, unlike the miss path's tl0_trap. */
1046103897Sjake	ba	%xcc, tl0_utrap
104788644Sjake	 mov	T_DATA_PROTECTION, %o0
104888644SjakeEND(tl0_dmmu_prot_trap)
104981180Sjake
/*
 * TL0 register window spill/fill handlers.  The _0_ variants handle
 * 64-bit frames (stxa/ldxa at %sp + SPOFF, 8-byte slots), the _1_
 * variants 32-bit frames (stwa/lduwa at %sp, 4-byte slots).  All go
 * through ASI_AIUP (as-if-user, primary) so user memory is accessed
 * with user permissions; faults resume at the RSF_TRAP entries.
 */
105080709Sjake	.macro	tl0_spill_0_n
105191246Sjake	wr	%g0, ASI_AIUP, %asi
105291246Sjake	SPILL(stxa, %sp + SPOFF, 8, %asi)
105380709Sjake	saved
105480709Sjake	retry
105582906Sjake	.align	32
105682906Sjake	RSF_TRAP(T_SPILL)
105782906Sjake	RSF_TRAP(T_SPILL)
105880709Sjake	.endm
105980709Sjake
106082906Sjake	.macro	tl0_spill_1_n
106191246Sjake	wr	%g0, ASI_AIUP, %asi
106282906Sjake	SPILL(stwa, %sp, 4, %asi)
106382906Sjake	saved
106482906Sjake	retry
106582906Sjake	.align	32
106682906Sjake	RSF_TRAP(T_SPILL)
106782906Sjake	RSF_TRAP(T_SPILL)
106882906Sjake	.endm
106982005Sjake
107091246Sjake	.macro	tl0_fill_0_n
107182906Sjake	wr	%g0, ASI_AIUP, %asi
107291246Sjake	FILL(ldxa, %sp + SPOFF, 8, %asi)
107382906Sjake	restored
107482906Sjake	retry
107582906Sjake	.align	32
107682906Sjake	RSF_TRAP(T_FILL)
107782906Sjake	RSF_TRAP(T_FILL)
107880709Sjake	.endm
107980709Sjake
108082906Sjake	.macro	tl0_fill_1_n
108191246Sjake	wr	%g0, ASI_AIUP, %asi
108282906Sjake	FILL(lduwa, %sp, 4, %asi)
108382906Sjake	restored
108482906Sjake	retry
108582906Sjake	.align	32
108682906Sjake	RSF_TRAP(T_FILL)
108782906Sjake	RSF_TRAP(T_FILL)
108882906Sjake	.endm
108982906Sjake
/*
 * Common spill/fill trap delivery: restore the cwp saved in %tstate,
 * then enter common trap code with the trap type passed in %g2.
 */
108982906SjakeENTRY(tl0_sftrap)
109082906Sjake	rdpr	%tstate, %g1
109182906Sjake	and	%g1, TSTATE_CWP_MASK, %g1
109282906Sjake	wrpr	%g1, 0, %cwp
109388644Sjake	tl0_split
1094103921Sjake	set	trap, %o2
109582906Sjake	b	%xcc, tl0_trap
109682906Sjake	 mov	%g2, %o0
109782906SjakeEND(tl0_sftrap)
109982906Sjake
/*
 * tl0_spill_bad/tl0_fill_bad fill unused spill/fill trap table slots
 * with `sir' (software-initiated reset) so a stray vector is fatal
 * rather than silently misbehaving.  tl0_syscall enters common trap
 * code through the `syscall' C entry point with T_SYSCALL.
 */
109982906Sjake	.macro	tl0_spill_bad	count
110082906Sjake	.rept	\count
110188644Sjake	sir
110288644Sjake	.align	128
110382906Sjake	.endr
110482906Sjake	.endm
110582906Sjake
110680709Sjake	.macro	tl0_fill_bad	count
110780709Sjake	.rept	\count
110888644Sjake	sir
110988644Sjake	.align	128
111080709Sjake	.endr
111180709Sjake	.endm
111280709Sjake
111384186Sjake	.macro	tl0_syscall
111488644Sjake	tl0_split
1115103921Sjake	set	syscall, %o2
1116103921Sjake	ba	%xcc, tl0_trap
111784186Sjake	 mov	T_SYSCALL, %o0
111888784Sjake	.align	32
111984186Sjake	.endm
112184186Sjake
/*
 * tl1_split sets WSTATE_NESTED and opens a new register window with a
 * trapframe-sized stack frame for a nested (TL1) trap.  tl1_setup and
 * tl1_gen build generic TL1 trap table entries that enter tl1_trap with
 * the given type or'ed with T_KERNEL; tl1_reserved repeats tl1_gen for
 * unused vectors.
 */
112291246Sjake	.macro	tl1_split
112391246Sjake	rdpr	%wstate, %g1
112491246Sjake	wrpr	%g1, WSTATE_NESTED, %wstate
1125103916Sjake	save	%sp, -(CCFSZ + TF_SIZEOF), %sp
112682906Sjake	.endm
112782906Sjake
112882906Sjake	.macro	tl1_setup	type
112991246Sjake	tl1_split
1130103921Sjake	clr	%o1
1131103921Sjake	set	trap, %o2
113280709Sjake	b	%xcc, tl1_trap
113388644Sjake	 mov	\type | T_KERNEL, %o0
113482906Sjake	.endm
113582906Sjake
113682906Sjake	.macro	tl1_gen		type
113782906Sjake	tl1_setup \type
113880709Sjake	.align	32
113980709Sjake	.endm
114080709Sjake
114180709Sjake	.macro	tl1_reserved	count
114280709Sjake	.rept	\count
114380709Sjake	tl1_gen	T_RESERVED
114480709Sjake	.endr
114580709Sjake	.endm
114680709Sjake
/*
 * TL1 instruction exception: switch to alternate globals, capture %tpc
 * and the immu sfsr (clearing the sfsr), then deliver
 * T_INSTRUCTION_EXCEPTION | T_KERNEL to tl1_trap via the out-of-line
 * continuation with %g3 = tpc, %g4 = sfsr.
 */
114780709Sjake	.macro	tl1_insn_excptn
1148101899Sjake	wrpr	%g0, PSTATE_ALT, %pstate
114988644Sjake	wr	%g0, ASI_IMMU, %asi
115088644Sjake	rdpr	%tpc, %g3
115188644Sjake	ldxa	[%g0 + AA_IMMU_SFSR] %asi, %g4
115288644Sjake	stxa	%g0, [%g0 + AA_IMMU_SFSR] %asi
115388644Sjake	membar	#Sync
115488644Sjake	b	%xcc, tl1_insn_exceptn_trap
115588644Sjake	 mov	T_INSTRUCTION_EXCEPTION | T_KERNEL, %g2
115680709Sjake	.align	32
115780709Sjake	.endm
115880709Sjake
115988644SjakeENTRY(tl1_insn_exceptn_trap)
116091246Sjake	tl1_split
1161103921Sjake	clr	%o1
1162103921Sjake	set	trap, %o2
116388644Sjake	mov	%g3, %o4
116488644Sjake	mov	%g4, %o5
116588644Sjake	b	%xcc, tl1_trap
116688644Sjake	 mov	%g2, %o0
116788644SjakeEND(tl1_insn_exceptn_trap)
116888644Sjake
/*
 * TL1 data exception: switch to alternate globals and branch out of
 * line.  The continuation first tries to resume a faulted window
 * spill/fill (RESUME_SPILLFILL_MMU_CLR_SFSR); otherwise it reports
 * T_DATA_EXCEPTION | T_KERNEL via tl1_sfsr_trap.
 */
116982005Sjake	.macro	tl1_data_excptn
1170101899Sjake	wrpr	%g0, PSTATE_ALT, %pstate
117188644Sjake	b,a	%xcc, tl1_data_excptn_trap
117282906Sjake	 nop
117382005Sjake	.align	32
117482005Sjake	.endm
117582005Sjake
117688644SjakeENTRY(tl1_data_excptn_trap)
117788644Sjake	RESUME_SPILLFILL_MMU_CLR_SFSR
117882906Sjake	b	%xcc, tl1_sfsr_trap
117988644Sjake	 mov	T_DATA_EXCEPTION | T_KERNEL, %g2
118088644SjakeEND(tl1_data_excptn_trap)
118182906Sjake
/*
 * TL1 memory-address-not-aligned trap table entry: branch to the
 * out-of-line handler tl1_align_trap.
 */
118280709Sjake	.macro	tl1_align
118388644Sjake	b,a	%xcc, tl1_align_trap
118488644Sjake	 nop
118580709Sjake	.align	32
118680709Sjake	.endm
118780709Sjake
/*
 * Out-of-line continuation of tl1_align: try to resume a window
 * spill/fill that faulted on a misaligned address; otherwise report
 * T_MEM_ADDRESS_NOT_ALIGNED | T_KERNEL via tl1_sfsr_trap (%g2 carries
 * the trap type, per tl1_sfsr_trap's convention).
 */
118882906SjakeENTRY(tl1_align_trap)
118988644Sjake	RESUME_SPILLFILL_ALIGN
119082906Sjake	b	%xcc, tl1_sfsr_trap
119188644Sjake	 mov	T_MEM_ADDRESS_NOT_ALIGNED | T_KERNEL, %g2
	/*
	 * Bug fix: this END previously named tl1_data_excptn_trap (already
	 * closed above), leaving tl1_align_trap unterminated and ending the
	 * wrong symbol.  Close the symbol opened by the ENTRY above.
	 */
119288644SjakeEND(tl1_align_trap)
119382906Sjake
/*
 * Common TL1 delivery for faults described by the dmmu sfar/sfsr:
 * capture sfar (%g3) and sfsr (%g4), clear the sfsr, and enter tl1_trap
 * with the trap type passed in by the caller in %g2.
 */
119480709SjakeENTRY(tl1_sfsr_trap)
119588644Sjake	wr	%g0, ASI_DMMU, %asi
119688644Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
119788644Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
119880709Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
119980709Sjake	membar	#Sync
120082005Sjake
120191246Sjake	tl1_split
1202103921Sjake	clr	%o1
1203103921Sjake	set	trap, %o2
120488644Sjake	mov	%g3, %o4
120588644Sjake	mov	%g4, %o5
120680709Sjake	b	%xcc, tl1_trap
120788644Sjake	 mov	%g2, %o0
120888644SjakeEND(tl1_sfsr_trap)
120980709Sjake
/*
 * TL1 interrupt entries: tl1_intr enters the C-level tl1_intr handler
 * with the interrupt level in %o0 and its pil mask in %o1;
 * tl1_intr_level expands one INTR_LEVEL entry per interrupt level.
 */
121084186Sjake	.macro	tl1_intr level, mask
121191246Sjake	tl1_split
121291246Sjake	set	\mask, %o1
121384186Sjake	b	%xcc, tl1_intr
121491246Sjake	 mov	\level, %o0
121581380Sjake	.align	32
121681380Sjake	.endm
121781380Sjake
121880709Sjake	.macro	tl1_intr_level
121981380Sjake	INTR_LEVEL(1)
122080709Sjake	.endm
122180709Sjake
/*
 * TL1 (kernel) instruction MMU miss: look the faulting page up in the
 * kernel tsb.  The tsb mask/base instructions are runtime-patched (see
 * the tl1_immu_miss_patch_1 label).  On a valid, executable, matching
 * tte the entry is loaded directly into the ITLB; otherwise control
 * goes to tl1_immu_miss_trap.  Uses %g5-%g7; padded to a 128-byte slot.
 */
122280709Sjake	.macro	tl1_immu_miss
122391224Sjake	/*
122491224Sjake	 * Load the context and the virtual page number from the tag access
122591224Sjake	 * register.  We ignore the context.
122691224Sjake	 */
122791224Sjake	wr	%g0, ASI_IMMU, %asi
1228102040Sjake	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g5
122985585Sjake
123091224Sjake	/*
1231102040Sjake	 * Compute the address of the tte.  The tsb mask and address of the
1232102040Sjake	 * tsb are patched at startup.
123391224Sjake	 */
1234102040Sjake	.globl	tl1_immu_miss_patch_1
1235102040Sjaketl1_immu_miss_patch_1:
1236102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1237102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
1238102040Sjake	sethi	%hi(TSB_KERNEL), %g7
123985585Sjake
1240102040Sjake	srlx	%g5, TAR_VPN_SHIFT, %g5
1241102040Sjake	and	%g5, %g6, %g6
1242102040Sjake	sllx	%g6, TTE_SHIFT, %g6
1243102040Sjake	add	%g6, %g7, %g6
124485585Sjake
124585585Sjake	/*
124691224Sjake	 * Load the tte.
124791224Sjake	 */
1248102040Sjake	ldda	[%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
124991224Sjake
125091224Sjake	/*
125191224Sjake	 * Check that its valid and executable and that the virtual page
125291224Sjake	 * numbers match.
125391224Sjake	 */
1254102040Sjake	brgez,pn %g7, tl1_immu_miss_trap
1255102040Sjake	 andcc	%g7, TD_EXEC, %g0
125691224Sjake	bz,pn	%xcc, tl1_immu_miss_trap
1257102040Sjake	 srlx	%g6, TV_SIZE_BITS, %g6
1258102040Sjake	cmp	%g5, %g6
125991224Sjake	bne,pn	%xcc, tl1_immu_miss_trap
126085585Sjake	 EMPTY
126185585Sjake
126285585Sjake	/*
126391224Sjake	 * Set the reference bit if its currently clear.
126485585Sjake	 */
1265102040Sjake	 andcc	%g7, TD_REF, %g0
1266102040Sjake	bz,a,pn	%xcc, tl1_immu_miss_set_ref
126791224Sjake	 nop
126885585Sjake
126991224Sjake	/*
1270102040Sjake	 * Load the tte data into the TLB and retry the instruction.
127191224Sjake	 */
1272102040Sjake	stxa	%g7, [%g0] ASI_ITLB_DATA_IN_REG
1273102040Sjake	retry
1274102040Sjake	.align	128
1275102040Sjake	.endm
127688644Sjake
/*
 * Slow path of tl1_immu_miss: the matching kernel tte had TD_REF clear.
 * Recompute the tte address (clobbered by the quad load), set the
 * reference bit with TTE_SET_REF, and reload the ITLB unless the tte
 * was invalidated under us.  Entered with %g5 = vpn.
 */
1277102040SjakeENTRY(tl1_immu_miss_set_ref)
127885585Sjake	/*
1279102040Sjake	 * Recompute the tte address, which we clobbered loading the tte.  The
1280102040Sjake	 * tsb mask and address of the tsb are patched at startup.
1281102040Sjake	 */
1282102040Sjake	.globl	tl1_immu_miss_patch_2
1283102040Sjaketl1_immu_miss_patch_2:
1284102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1285102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
1286102040Sjake	sethi	%hi(TSB_KERNEL), %g7
1287102040Sjake
1288102040Sjake	and	%g5, %g6, %g5
1289102040Sjake	sllx	%g5, TTE_SHIFT, %g5
1290102040Sjake	add	%g5, %g7, %g5
1291102040Sjake
1292102040Sjake	/*
1293102040Sjake	 * Set the reference bit.
1294102040Sjake	 */
1295102040Sjake	TTE_SET_REF(%g5, %g6, %g7)
1296102040Sjake
1297102040Sjake	/*
1298102040Sjake	 * May have become invalid during casxa, in which case start over.
1299102040Sjake	 */
1300102040Sjake	brgez,pn %g6, 1f
1301102040Sjake	 nop
1302102040Sjake
1303102040Sjake	/*
130485585Sjake	 * Load the tte data into the TLB and retry the instruction.
130585585Sjake	 */
1306102040Sjake	stxa	%g6, [%g0] ASI_ITLB_DATA_IN_REG
1307102040Sjake1:	retry
1308102040SjakeEND(tl1_immu_miss_set_ref)
130985585Sjake
/*
 * Kernel tsb lookup failed for an instruction miss at TL1: switch to
 * alternate globals, reload the immu tag access register, and enter
 * tl1_trap with T_INSTRUCTION_MISS | T_KERNEL and the tag in %o3.
 */
131091224SjakeENTRY(tl1_immu_miss_trap)
131185585Sjake	/*
131285585Sjake	 * Switch to alternate globals.
131385585Sjake	 */
131491224Sjake	wrpr	%g0, PSTATE_ALT, %pstate
131585585Sjake
131691224Sjake	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2
131785585Sjake
131891246Sjake	tl1_split
1319103921Sjake	clr	%o1
1320103921Sjake	set	trap, %o2
132191224Sjake	mov	%g2, %o3
132280709Sjake	b	%xcc, tl1_trap
132388644Sjake	 mov	T_INSTRUCTION_MISS | T_KERNEL, %o0
132491224SjakeEND(tl1_immu_miss_trap)
132591224Sjake
/*
 * TL1 (kernel) data MMU miss.  Dispatches on the faulting address:
 * non-zero context bits => user address (tl1_dmmu_miss_user, tag in
 * %g1); negative address => direct-mapped physical region
 * (tl1_dmmu_miss_direct); otherwise a kernel tsb lookup with
 * runtime-patched mask/base, loading the DTLB on a hit and falling
 * through to tl1_dmmu_miss_trap otherwise.  Padded to a 128-byte slot.
 */
132691224Sjake	.macro	tl1_dmmu_miss
132791224Sjake	/*
132891224Sjake	 * Load the context and the virtual page number from the tag access
132991224Sjake	 * register.
133091224Sjake	 */
133191224Sjake	wr	%g0, ASI_DMMU, %asi
1332102040Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g5
133380709Sjake
133491224Sjake	/*
133591224Sjake	 * Extract the context from the contents of the tag access register.
1336100771Sjake	 * If its non-zero this is a fault on a user address.  Note that the
1337108195Sjake	 * faulting address is passed in %g1.
133891224Sjake	 */
1339102040Sjake	sllx	%g5, 64 - TAR_VPN_SHIFT, %g6
1340102040Sjake	brnz,a,pn %g6, tl1_dmmu_miss_user
1341102040Sjake	 mov	%g5, %g1
134280709Sjake
134391224Sjake	/*
1344100771Sjake	 * Check for the direct mapped physical region.  These addresses have
1345100771Sjake	 * the high bit set so they are negative.
1346100771Sjake	 */
1347102040Sjake	brlz,pn %g5, tl1_dmmu_miss_direct
1348100771Sjake	 EMPTY
1349100771Sjake
1350100771Sjake	/*
1351102040Sjake	 * Compute the address of the tte.  The tsb mask and address of the
1352102040Sjake	 * tsb are patched at startup.
135391224Sjake	 */
1354102040Sjake	.globl	tl1_dmmu_miss_patch_1
1355102040Sjaketl1_dmmu_miss_patch_1:
1356102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1357102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
1358102040Sjake	sethi	%hi(TSB_KERNEL), %g7
135984186Sjake
1360102040Sjake	srlx	%g5, TAR_VPN_SHIFT, %g5
1361102040Sjake	and	%g5, %g6, %g6
1362102040Sjake	sllx	%g6, TTE_SHIFT, %g6
1363102040Sjake	add	%g6, %g7, %g6
136491224Sjake
136591224Sjake	/*
136691224Sjake	 * Load the tte.
136791224Sjake	 */
1368102040Sjake	ldda	[%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
136991224Sjake
137091224Sjake	/*
137191224Sjake	 * Check that its valid and that the virtual page numbers match.
137291224Sjake	 */
1373102040Sjake	brgez,pn %g7, tl1_dmmu_miss_trap
1374102040Sjake	 srlx	%g6, TV_SIZE_BITS, %g6
1375102040Sjake	cmp	%g5, %g6
137691224Sjake	bne,pn %xcc, tl1_dmmu_miss_trap
137780709Sjake	 EMPTY
137880709Sjake
137980709Sjake	/*
138091224Sjake	 * Set the reference bit if its currently clear.
138180709Sjake	 */
1382102040Sjake	 andcc	%g7, TD_REF, %g0
1383102040Sjake	bz,a,pt	%xcc, tl1_dmmu_miss_set_ref
138491224Sjake	 nop
138580709Sjake
138691224Sjake	/*
1387102040Sjake	 * Load the tte data into the TLB and retry the instruction.
138891224Sjake	 */
1389102040Sjake	stxa	%g7, [%g0] ASI_DTLB_DATA_IN_REG
1390102040Sjake	retry
1391102040Sjake	.align	128
1392102040Sjake	.endm
139388644Sjake
/*
 * Slow path of tl1_dmmu_miss: the matching kernel tte had TD_REF clear.
 * Recompute the tte address (clobbered by the quad load), set the
 * reference bit with TTE_SET_REF, and reload the DTLB unless the tte
 * was invalidated under us.  Entered with %g5 = vpn.
 */
1394102040SjakeENTRY(tl1_dmmu_miss_set_ref)
139580709Sjake	/*
1396102040Sjake	 * Recompute the tte address, which we clobbered loading the tte.  The
1397102040Sjake	 * tsb mask and address of the tsb are patched at startup.
1398102040Sjake	 */
1399102040Sjake	.globl	tl1_dmmu_miss_patch_2
1400102040Sjaketl1_dmmu_miss_patch_2:
1401102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1402102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
1403102040Sjake	sethi	%hi(TSB_KERNEL), %g7
1404102040Sjake
1405102040Sjake	and	%g5, %g6, %g5
1406102040Sjake	sllx	%g5, TTE_SHIFT, %g5
1407102040Sjake	add	%g5, %g7, %g5
1408102040Sjake
1409102040Sjake	/*
1410102040Sjake	 * Set the reference bit.
1411102040Sjake	 */
1412102040Sjake	TTE_SET_REF(%g5, %g6, %g7)
1413102040Sjake
1414102040Sjake	/*
1415102040Sjake	 * May have become invalid during casxa, in which case start over.
1416102040Sjake	 */
1417102040Sjake	brgez,pn %g6, 1f
1418102040Sjake	 nop
1419102040Sjake
1420102040Sjake	/*
142182906Sjake	 * Load the tte data into the TLB and retry the instruction.
142280709Sjake	 */
1423102040Sjake	stxa	%g6, [%g0] ASI_DTLB_DATA_IN_REG
1424102040Sjake1:	retry
1425102040SjakeEND(tl1_dmmu_miss_set_ref)
142680709Sjake
/*
 * Kernel tsb lookup failed for a data miss at TL1: switch to alternate
 * globals, reload the tag access register, check for kernel stack
 * overflow (KSTACK_CHECK), and enter tl1_trap with
 * T_DATA_MISS | T_KERNEL and the tag in %o3.
 */
142791224SjakeENTRY(tl1_dmmu_miss_trap)
142880709Sjake	/*
142982906Sjake	 * Switch to alternate globals.
143080709Sjake	 */
143191224Sjake	wrpr	%g0, PSTATE_ALT, %pstate
143280709Sjake
1433108195Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
1434108195Sjake
143588781Sjake	KSTACK_CHECK
143688781Sjake
143791246Sjake	tl1_split
1438103921Sjake	clr	%o1
1439103921Sjake	set	trap, %o2
144091224Sjake	mov	%g2, %o3
144182906Sjake	b	%xcc, tl1_trap
144288644Sjake	 mov	T_DATA_MISS | T_KERNEL, %o0
144388781SjakeEND(tl1_dmmu_miss_trap)
144480709Sjake
/*
 * Fault in the direct-mapped physical region: synthesize a tte from the
 * virtual address itself (no tsb lookup).  Cacheability is chosen from
 * bits of the address (TLB_DIRECT_UNCACHEABLE), the physical address is
 * recovered by masking with TLB_DIRECT_MASK, and the high bit doubles
 * as the tte valid bit.  Entered with %g5 = faulting address.
 */
1445100771SjakeENTRY(tl1_dmmu_miss_direct)
1446100771Sjake	/*
1447100771Sjake	 * Check the cache bits in the virtual address to see if this mapping
1448100771Sjake	 * is virtually cacheable.  We set this up so that the masks fit in
1449100771Sjake	 * immediates...  Note that the arithmetic shift sign extends, keeping
1450100771Sjake	 * all the top bits set.
1451100771Sjake	 */
1452102040Sjake	srax	%g5, TLB_DIRECT_SHIFT, %g5
1453102040Sjake	andcc	%g5, TLB_DIRECT_UNCACHEABLE, %g0
1454102040Sjake	mov	TD_CP | TD_CV | TD_W, %g6
1455102040Sjake	movnz	%xcc, TD_CP | TD_W, %g6
1456102040Sjake	or	%g5, %g6, %g5
1457100771Sjake
1458100771Sjake	/*
1459100771Sjake	 * Mask off the high bits of the virtual address to get the physical
1460100771Sjake	 * address, and or in the tte bits.  The high bit is left set in the
1461100771Sjake	 * physical address, which corresponds to the tte valid bit, so that
1462100771Sjake	 * we don't have to include it in the tte bits.  We ignore the cache
1463100771Sjake	 * bits, since they get shifted into the soft tte bits anyway.
1464100771Sjake	 */
1465102040Sjake	setx	TLB_DIRECT_MASK & ~TD_V, %g7, %g6
1466102040Sjake	andn	%g5, %g6, %g5
1467100771Sjake
1468100771Sjake	/*
1469100771Sjake	 * Load the tte data into the TLB and retry the instruction.
1470100771Sjake	 */
1471102040Sjake	stxa	%g5, [%g0] ASI_DTLB_DATA_IN_REG
1472100771Sjake	retry
1473100771SjakeEND(tl1_dmmu_miss_direct)
1474100771Sjake
/*
 * Data miss at TL1 on a user address (e.g. during copyin/out or window
 * operations): try the inline user tsb lookup; if it fails, restore the
 * tag access register, attempt to resume a faulted spill/fill
 * (RESUME_SPILLFILL_MMU), and otherwise deliver T_DATA_MISS | T_KERNEL.
 * Entered with %g1 = tag access register contents.
 */
147581180SjakeENTRY(tl1_dmmu_miss_user)
147681180Sjake	/*
147788644Sjake	 * Try a fast inline lookup of the user tsb.
147881180Sjake	 */
147981180Sjake	dmmu_miss_user
148081180Sjake
148181180Sjake	/*
148296207Sjake	 * Put back the contents of the tag access register, in case we
148396207Sjake	 * faulted.
148496207Sjake	 */
1485102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
148696207Sjake	membar	#Sync
148796207Sjake
148896207Sjake	/*
148982906Sjake	 * Switch to alternate globals.
149081180Sjake	 */
149182906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
149281180Sjake
149391224Sjake	/*
149491224Sjake	 * Handle faults during window spill/fill.
149591224Sjake	 */
149688644Sjake	RESUME_SPILLFILL_MMU
149788644Sjake
149891224Sjake	/*
149991224Sjake	 * Reload the tag access register.
150091224Sjake	 */
150191224Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
150291224Sjake
150391246Sjake	tl1_split
1504103921Sjake	clr	%o1
1505103921Sjake	set	trap, %o2
150691224Sjake	mov	%g2, %o3
150782906Sjake	b	%xcc, tl1_trap
150888644Sjake	 mov	T_DATA_MISS | T_KERNEL, %o0
150982906SjakeEND(tl1_dmmu_miss_user)
151081180Sjake
/*
 * TL1 data protection trap table entry: branch to the out-of-line
 * handler so the table slot stays small.  Padded to one 128-byte slot.
 */
151182906Sjake	.macro	tl1_dmmu_prot
1512102040Sjake	ba,a	%xcc, tl1_dmmu_prot_1
1513102040Sjake	 nop
1514102040Sjake	.align	128
1515102040Sjake	.endm
1516102040Sjake
/*
 * Out-of-line body of the TL1 data protection handler.  User addresses
 * (non-zero context) branch to tl1_dmmu_prot_user with the tag in %g1.
 * Kernel addresses do a kernel tsb lookup (runtime-patched mask/base);
 * on a valid, TD_SW, matching tte: demap the stale entry, clear the
 * sfsr, set the hardware write bit, and reload the DTLB.  Any failure
 * goes to tl1_dmmu_prot_trap.
 */
1517102040SjakeENTRY(tl1_dmmu_prot_1)
151891224Sjake	/*
151991224Sjake	 * Load the context and the virtual page number from the tag access
152091224Sjake	 * register.
152191224Sjake	 */
152291224Sjake	wr	%g0, ASI_DMMU, %asi
1523102040Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g5
152488644Sjake
152591224Sjake	/*
152691224Sjake	 * Extract the context from the contents of the tag access register.
1527108195Sjake	 * If its non-zero this is a fault on a user address.  Note that the
1528108195Sjake	 * faulting address is passed in %g1.
152991224Sjake	 */
1530102040Sjake	sllx	%g5, 64 - TAR_VPN_SHIFT, %g6
1531102040Sjake	brnz,a,pn %g6, tl1_dmmu_prot_user
1532102040Sjake	 mov	%g5, %g1
153388644Sjake
153491224Sjake	/*
1535102040Sjake	 * Compute the address of the tte.  The tsb mask and address of the
1536102040Sjake	 * tsb are patched at startup.
153791224Sjake	 */
1538102040Sjake	.globl	tl1_dmmu_prot_patch_1
1539102040Sjaketl1_dmmu_prot_patch_1:
1540102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1541102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
1542102040Sjake	sethi	%hi(TSB_KERNEL), %g7
154388644Sjake
1544102040Sjake	srlx	%g5, TAR_VPN_SHIFT, %g5
1545102040Sjake	and	%g5, %g6, %g6
1546102040Sjake	sllx	%g6, TTE_SHIFT, %g6
1547102040Sjake	add	%g6, %g7, %g6
154891224Sjake
154991224Sjake	/*
155091224Sjake	 * Load the tte.
155191224Sjake	 */
1552102040Sjake	ldda	[%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
155391224Sjake
155491224Sjake	/*
155591224Sjake	 * Check that its valid and writeable and that the virtual page
155691224Sjake	 * numbers match.
155791224Sjake	 */
1558102040Sjake	brgez,pn %g7, tl1_dmmu_prot_trap
1559102040Sjake	 andcc	%g7, TD_SW, %g0
156091224Sjake	bz,pn	%xcc, tl1_dmmu_prot_trap
1561102040Sjake	 srlx	%g6, TV_SIZE_BITS, %g6
1562102040Sjake	cmp	%g5, %g6
156391224Sjake	bne,pn	%xcc, tl1_dmmu_prot_trap
156488644Sjake	 EMPTY
156588644Sjake
156688644Sjake	/*
156791224Sjake	 * Delete the old TLB entry and clear the sfsr.
156888644Sjake	 */
1569102040Sjake	 sllx	%g5, TAR_VPN_SHIFT, %g6
157091224Sjake	or	%g6, TLB_DEMAP_NUCLEUS, %g6
157191224Sjake	stxa	%g0, [%g6] ASI_DMMU_DEMAP
157281180Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
157391224Sjake	membar	#Sync
157481180Sjake
1575102040Sjake	/*
1576102040Sjake	 * Recompute the tte address, which we clobbered loading the tte.  The
1577102040Sjake	 * tsb mask and address of the tsb are patched at startup.
1578102040Sjake	 */
1579102040Sjake	.globl	tl1_dmmu_prot_patch_2
1580102040Sjaketl1_dmmu_prot_patch_2:
1581102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1582102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
1583102040Sjake	sethi	%hi(TSB_KERNEL), %g7
158496207Sjake
1585102040Sjake	and	%g5, %g6, %g5
1586102040Sjake	sllx	%g5, TTE_SHIFT, %g5
1587102040Sjake	add	%g5, %g7, %g5
1588102040Sjake
158981180Sjake	/*
159091224Sjake	 * Set the hardware write bit.
159191224Sjake	 */
1592102040Sjake	TTE_SET_W(%g5, %g6, %g7)
159391224Sjake
159491224Sjake	/*
1595102040Sjake	 * May have become invalid during casxa, in which case start over.
1596102040Sjake	 */
1597102040Sjake	brgez,pn %g6, 1f
1598102040Sjake	 or	%g6, TD_W, %g6
1599102040Sjake
1600102040Sjake	/*
160188644Sjake	 * Load the tte data into the TLB and retry the instruction.
160288644Sjake	 */
1603102040Sjake	stxa	%g6, [%g0] ASI_DTLB_DATA_IN_REG
1604102040Sjake1:	retry
1605102040SjakeEND(tl1_dmmu_prot_1)
160688644Sjake
/*
 * Protection fault at TL1 on a user address: try the inline user tsb
 * fast path; if it fails, restore the tag access register, attempt to
 * resume a faulted spill/fill, and otherwise fall through to
 * tl1_dmmu_prot_trap.  Entered with %g1 = tag access register contents.
 */
160788644SjakeENTRY(tl1_dmmu_prot_user)
160888644Sjake	/*
160988644Sjake	 * Try a fast inline lookup of the user tsb.
161088644Sjake	 */
161188644Sjake	dmmu_prot_user
161288644Sjake
161388644Sjake	/*
161496207Sjake	 * Put back the contents of the tag access register, in case we
161596207Sjake	 * faulted.
161696207Sjake	 */
1617102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
161896207Sjake	membar	#Sync
161996207Sjake
162096207Sjake	/*
162182906Sjake	 * Switch to alternate globals.
162281180Sjake	 */
162388644Sjake	wrpr	%g0, PSTATE_ALT, %pstate
162480709Sjake
162588644Sjake	/* Handle faults during window spill/fill. */
162688644Sjake	RESUME_SPILLFILL_MMU_CLR_SFSR
162788644Sjake
162888644Sjake	b,a	%xcc, tl1_dmmu_prot_trap
162988644Sjake	 nop
163088644SjakeEND(tl1_dmmu_prot_user)
163188644Sjake
/*
 * Final fallback for a TL1 data protection fault: switch to alternate
 * globals, collect tar/sfar/sfsr (clearing the sfsr), and enter
 * tl1_trap with T_DATA_PROTECTION | T_KERNEL and the mmu registers in
 * %o3-%o5.
 */
163288644SjakeENTRY(tl1_dmmu_prot_trap)
163381180Sjake	/*
163491224Sjake	 * Switch to alternate globals.
163591224Sjake	 */
163691224Sjake	wrpr	%g0, PSTATE_ALT, %pstate
163791224Sjake
163891224Sjake	/*
163981180Sjake	 * Load the sfar, sfsr and tar.  Clear the sfsr.
164081180Sjake	 */
164188644Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
164288644Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
164388644Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
164481180Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
164581180Sjake	membar	#Sync
164681180Sjake
164791246Sjake	tl1_split
1648103921Sjake	clr	%o1
1649103921Sjake	set	trap, %o2
165088644Sjake	mov	%g2, %o3
165188644Sjake	mov	%g3, %o4
165288644Sjake	mov	%g4, %o5
165382906Sjake	b	%xcc, tl1_trap
165488644Sjake	 mov	T_DATA_PROTECTION | T_KERNEL, %o0
165588644SjakeEND(tl1_dmmu_prot_trap)
165681180Sjake
/*
 * TL1 register window spill/fill handlers.  The _n ("nucleus") 0
 * variants operate on kernel stacks with plain stx/ldx and are fatal on
 * fault (RSF_FATAL); the 2/3 _n and the _o ("other") variants operate
 * on user stacks through ASI_AIUP — 64-bit frames use %sp + SPOFF with
 * 8-byte slots, 32-bit frames use %sp with 4-byte slots.  Faulting user
 * spills fall back to spilling into the pcb (RSF_SPILL_TOPCB); faulting
 * user fills use RSF_FILL_MAGIC.
 */
165780709Sjake	.macro	tl1_spill_0_n
165882906Sjake	SPILL(stx, %sp + SPOFF, 8, EMPTY)
165980709Sjake	saved
166080709Sjake	retry
166182906Sjake	.align	32
166282906Sjake	RSF_FATAL(T_SPILL)
166382906Sjake	RSF_FATAL(T_SPILL)
166480709Sjake	.endm
166580709Sjake
166691246Sjake	.macro	tl1_spill_2_n
166791246Sjake	wr	%g0, ASI_AIUP, %asi
166891246Sjake	SPILL(stxa, %sp + SPOFF, 8, %asi)
166982906Sjake	saved
167082906Sjake	retry
167182906Sjake	.align	32
167282906Sjake	RSF_SPILL_TOPCB
167382906Sjake	RSF_SPILL_TOPCB
167481380Sjake	.endm
167581380Sjake
167691246Sjake	.macro	tl1_spill_3_n
167791246Sjake	wr	%g0, ASI_AIUP, %asi
167892200Sjake	SPILL(stwa, %sp, 4, %asi)
167982906Sjake	saved
168082906Sjake	retry
168182906Sjake	.align	32
168282906Sjake	RSF_SPILL_TOPCB
168382906Sjake	RSF_SPILL_TOPCB
168482906Sjake	.endm
168582906Sjake
168691246Sjake	.macro	tl1_spill_0_o
168782906Sjake	wr	%g0, ASI_AIUP, %asi
168882906Sjake	SPILL(stxa, %sp + SPOFF, 8, %asi)
168982906Sjake	saved
169082906Sjake	retry
169182906Sjake	.align	32
169282906Sjake	RSF_SPILL_TOPCB
169382906Sjake	RSF_SPILL_TOPCB
169482906Sjake	.endm
169582906Sjake
169682906Sjake	.macro	tl1_spill_1_o
169791246Sjake	wr	%g0, ASI_AIUP, %asi
169882906Sjake	SPILL(stwa, %sp, 4, %asi)
169982005Sjake	saved
170082005Sjake	retry
170182906Sjake	.align	32
170282906Sjake	RSF_SPILL_TOPCB
170382906Sjake	RSF_SPILL_TOPCB
170482906Sjake	.endm
170582005Sjake
170682906Sjake	.macro	tl1_spill_2_o
170782906Sjake	RSF_SPILL_TOPCB
170891246Sjake	.align	128
170980709Sjake	.endm
171080709Sjake
171180709Sjake	.macro	tl1_fill_0_n
171282906Sjake	FILL(ldx, %sp + SPOFF, 8, EMPTY)
171380709Sjake	restored
171480709Sjake	retry
171582906Sjake	.align	32
171682906Sjake	RSF_FATAL(T_FILL)
171782906Sjake	RSF_FATAL(T_FILL)
171880709Sjake	.endm
171980709Sjake
172091246Sjake	.macro	tl1_fill_2_n
172182906Sjake	wr	%g0, ASI_AIUP, %asi
172282906Sjake	FILL(ldxa, %sp + SPOFF, 8, %asi)
172382906Sjake	restored
172482906Sjake	retry
172582906Sjake	.align 32
172682906Sjake	RSF_FILL_MAGIC
172791246Sjake	RSF_FILL_MAGIC
172882906Sjake	.endm
172982906Sjake
173091246Sjake	.macro	tl1_fill_3_n
173182906Sjake	wr	%g0, ASI_AIUP, %asi
173282906Sjake	FILL(lduwa, %sp, 4, %asi)
173382906Sjake	restored
173482906Sjake	retry
173582906Sjake	.align 32
173682906Sjake	RSF_FILL_MAGIC
173791246Sjake	RSF_FILL_MAGIC
173882906Sjake	.endm
173982906Sjake
174082005Sjake/*
174182906Sjake * This is used to spill windows that are still occupied with user
174282906Sjake * data on kernel entry to the pcb.
 *
 * Saves the window's registers into the pcb's rw array (indexed by
 * pcb_nsaved) along with the user %sp, then increments pcb_nsaved.
 * %g1-%g3 are preserved in the ASP_REG scratch area across the spill.
174382005Sjake */
174482906SjakeENTRY(tl1_spill_topcb)
174582906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
174682906Sjake
174782005Sjake	/* Free some globals for our use. */
174888644Sjake	dec	24, ASP_REG
174988644Sjake	stx	%g1, [ASP_REG + 0]
175088644Sjake	stx	%g2, [ASP_REG + 8]
175188644Sjake	stx	%g3, [ASP_REG + 16]
175282906Sjake
	/* %g1 = number of windows already saved to the pcb. */
175388644Sjake	ldx	[PCB_REG + PCB_NSAVED], %g1
175482906Sjake
	/* Record the user %sp for this window. */
175588644Sjake	sllx	%g1, PTR_SHIFT, %g2
175688644Sjake	add	%g2, PCB_REG, %g2
175788644Sjake	stx	%sp, [%g2 + PCB_RWSP]
175882906Sjake
	/* Spill the window registers into the pcb's rw slot. */
175988644Sjake	sllx	%g1, RW_SHIFT, %g2
176088644Sjake	add	%g2, PCB_REG, %g2
176188644Sjake	SPILL(stx, %g2 + PCB_RW, 8, EMPTY)
176282906Sjake
176388644Sjake	inc	%g1
176488644Sjake	stx	%g1, [PCB_REG + PCB_NSAVED]
176582906Sjake
176685243Sjake#if KTR_COMPILE & KTR_TRAP
176788785Sjake	CATR(KTR_TRAP, "tl1_spill_topcb: pc=%#lx npc=%#lx sp=%#lx nsaved=%d"
176882906Sjake	   , %g1, %g2, %g3, 7, 8, 9)
176982906Sjake	rdpr	%tpc, %g2
177082906Sjake	stx	%g2, [%g1 + KTR_PARM1]
177188785Sjake	rdpr	%tnpc, %g2
177288785Sjake	stx	%g2, [%g1 + KTR_PARM2]
177388785Sjake	stx	%sp, [%g1 + KTR_PARM3]
177488644Sjake	ldx	[PCB_REG + PCB_NSAVED], %g2
177588785Sjake	stx	%g2, [%g1 + KTR_PARM4]
177682906Sjake9:
177782906Sjake#endif
177882906Sjake
177982906Sjake	saved
178082906Sjake
178188644Sjake	ldx	[ASP_REG + 16], %g3
178288644Sjake	ldx	[ASP_REG + 8], %g2
178388644Sjake	ldx	[ASP_REG + 0], %g1
178488644Sjake	inc	24, ASP_REG
178582005Sjake	retry
178682906SjakeEND(tl1_spill_topcb)
178782005Sjake
/*
 * tl1_spill_bad/tl1_fill_bad fill unused TL1 spill/fill trap table
 * slots with `sir' (software-initiated reset) so a stray vector is
 * fatal; tl1_soft expands generic T_SOFT | T_KERNEL entries for the
 * software trap vectors.
 */
178882906Sjake	.macro	tl1_spill_bad	count
178982906Sjake	.rept	\count
179088644Sjake	sir
179188644Sjake	.align	128
179282906Sjake	.endr
179382906Sjake	.endm
179482906Sjake
179580709Sjake	.macro	tl1_fill_bad	count
179680709Sjake	.rept	\count
179788644Sjake	sir
179888644Sjake	.align	128
179980709Sjake	.endr
180080709Sjake	.endm
180180709Sjake
180280709Sjake	.macro	tl1_soft	count
180382906Sjake	.rept	\count
180482906Sjake	tl1_gen	T_SOFT | T_KERNEL
180582906Sjake	.endr
180680709Sjake	.endm
180780709Sjake
180880709Sjake	.sect	.trap
180980709Sjake	.align	0x8000
181080709Sjake	.globl	tl0_base
181180709Sjake
	/*
	 * The trap table.  Must be aligned to 32k (it is loaded into %tba).
	 * This is the TL=0 (trap from usermode) half; normal entries are 32
	 * bytes each and spill/fill vectors occupy 128 bytes (4 entries).
	 * The TL>0 half (tl1_base) follows immediately at vector 0x200.
	 * The "! 0x.." comments give the hardware trap vector numbers.
	 */
181280709Sjaketl0_base:
181388779Sjake	tl0_reserved	8				! 0x0-0x7
181480709Sjaketl0_insn_excptn:
181588779Sjake	tl0_insn_excptn					! 0x8
181688779Sjake	tl0_reserved	1				! 0x9
181780709Sjaketl0_insn_error:
181888779Sjake	tl0_gen		T_INSTRUCTION_ERROR		! 0xa
181988779Sjake	tl0_reserved	5				! 0xb-0xf
182080709Sjaketl0_insn_illegal:
182188779Sjake	tl0_gen		T_ILLEGAL_INSTRUCTION		! 0x10
182280709Sjaketl0_priv_opcode:
182388779Sjake	tl0_gen		T_PRIVILEGED_OPCODE		! 0x11
182488779Sjake	tl0_reserved	14				! 0x12-0x1f
182580709Sjaketl0_fp_disabled:
182688779Sjake	tl0_gen		T_FP_DISABLED			! 0x20
182780709Sjaketl0_fp_ieee:
182888779Sjake	tl0_gen		T_FP_EXCEPTION_IEEE_754		! 0x21
182980709Sjaketl0_fp_other:
183088779Sjake	tl0_gen		T_FP_EXCEPTION_OTHER		! 0x22
183180709Sjaketl0_tag_ovflw:
183288779Sjake	tl0_gen		T_TAG_OFERFLOW			! 0x23 (sic, constant is spelled "OFERFLOW")
183380709Sjaketl0_clean_window:
183488779Sjake	clean_window					! 0x24
183580709Sjaketl0_divide:
183688779Sjake	tl0_gen		T_DIVISION_BY_ZERO		! 0x28
183788779Sjake	tl0_reserved	7				! 0x29-0x2f
183880709Sjaketl0_data_excptn:
183988779Sjake	tl0_data_excptn					! 0x30
184088779Sjake	tl0_reserved	1				! 0x31
184180709Sjaketl0_data_error:
184288779Sjake	tl0_gen		T_DATA_ERROR			! 0x32
184388779Sjake	tl0_reserved	1				! 0x33
184480709Sjaketl0_align:
184588779Sjake	tl0_align					! 0x34
184680709Sjaketl0_align_lddf:
184788779Sjake	tl0_gen		T_RESERVED			! 0x35
184880709Sjaketl0_align_stdf:
184988779Sjake	tl0_gen		T_RESERVED			! 0x36
185080709Sjaketl0_priv_action:
185188779Sjake	tl0_gen		T_PRIVILEGED_ACTION		! 0x37
185288779Sjake	tl0_reserved	9				! 0x38-0x40
185380709Sjaketl0_intr_level:
185488779Sjake	tl0_intr_level					! 0x41-0x4f
185588779Sjake	tl0_reserved	16				! 0x50-0x5f
185680709Sjaketl0_intr_vector:
185797265Sjake	intr_vector					! 0x60
185880709Sjaketl0_watch_phys:
185988779Sjake	tl0_gen		T_PA_WATCHPOINT			! 0x61
186080709Sjaketl0_watch_virt:
186188779Sjake	tl0_gen		T_VA_WATCHPOINT			! 0x62
186280709Sjaketl0_ecc:
186388779Sjake	tl0_gen		T_CORRECTED_ECC_ERROR		! 0x63
186480709Sjaketl0_immu_miss:
186588779Sjake	tl0_immu_miss					! 0x64
186680709Sjaketl0_dmmu_miss:
186788779Sjake	tl0_dmmu_miss					! 0x68
186880709Sjaketl0_dmmu_prot:
186988779Sjake	tl0_dmmu_prot					! 0x6c
187088779Sjake	tl0_reserved	16				! 0x70-0x7f
187180709Sjaketl0_spill_0_n:
187288779Sjake	tl0_spill_0_n					! 0x80
187382906Sjaketl0_spill_1_n:
187488779Sjake	tl0_spill_1_n					! 0x84
187591246Sjake	tl0_spill_bad	14				! 0x88-0xbf
187680709Sjaketl0_fill_0_n:
187788779Sjake	tl0_fill_0_n					! 0xc0
187882906Sjaketl0_fill_1_n:
187988779Sjake	tl0_fill_1_n					! 0xc4
188091246Sjake	tl0_fill_bad	14				! 0xc8-0xff
	/* Software traps (trap instruction numbers 0x0-0xff map to 0x100-0x1ff). */
188188644Sjaketl0_soft:
1882106050Sjake	tl0_gen		T_SYSCALL			! 0x100
188388779Sjake	tl0_gen		T_BREAKPOINT			! 0x101
188488779Sjake	tl0_gen		T_DIVISION_BY_ZERO		! 0x102
188588779Sjake	tl0_reserved	1				! 0x103
188688779Sjake	tl0_gen		T_CLEAN_WINDOW			! 0x104
188788779Sjake	tl0_gen		T_RANGE_CHECK			! 0x105
188888779Sjake	tl0_gen		T_FIX_ALIGNMENT			! 0x106
188988779Sjake	tl0_gen		T_INTEGER_OVERFLOW		! 0x107
1890106050Sjake	tl0_gen		T_SYSCALL			! 0x108
1891106050Sjake#ifdef COMPAT_FREEBSD4
189288779Sjake	tl0_syscall					! 0x109
1893106050Sjake#else
1894106050Sjake	tl0_gen		T_SYSCALL			! 0x109
1895106050Sjake#endif
189688779Sjake	tl0_fp_restore					! 0x10a
189788779Sjake	tl0_reserved	5				! 0x10b-0x10f
189888779Sjake	tl0_gen		T_TRAP_INSTRUCTION_16		! 0x110
189988779Sjake	tl0_gen		T_TRAP_INSTRUCTION_17		! 0x111
190088779Sjake	tl0_gen		T_TRAP_INSTRUCTION_18		! 0x112
190188779Sjake	tl0_gen		T_TRAP_INSTRUCTION_19		! 0x113
190288779Sjake	tl0_gen		T_TRAP_INSTRUCTION_20		! 0x114
190388779Sjake	tl0_gen		T_TRAP_INSTRUCTION_21		! 0x115
190488779Sjake	tl0_gen		T_TRAP_INSTRUCTION_22		! 0x116
190588779Sjake	tl0_gen		T_TRAP_INSTRUCTION_23		! 0x117
190688779Sjake	tl0_gen		T_TRAP_INSTRUCTION_24		! 0x118
190788779Sjake	tl0_gen		T_TRAP_INSTRUCTION_25		! 0x119
190888779Sjake	tl0_gen		T_TRAP_INSTRUCTION_26		! 0x11a
190988779Sjake	tl0_gen		T_TRAP_INSTRUCTION_27		! 0x11b
191088779Sjake	tl0_gen		T_TRAP_INSTRUCTION_28		! 0x11c
191188779Sjake	tl0_gen		T_TRAP_INSTRUCTION_29		! 0x11d
191288779Sjake	tl0_gen		T_TRAP_INSTRUCTION_30		! 0x11e
191388779Sjake	tl0_gen		T_TRAP_INSTRUCTION_31		! 0x11f
1914106050Sjake	tl0_reserved	32				! 0x120-0x13f
1915106050Sjake	tl0_gen		T_SYSCALL			! 0x140
1916106050Sjake	tl0_syscall					! 0x141
1917106050Sjake	tl0_gen		T_SYSCALL			! 0x142
1918106050Sjake	tl0_gen		T_SYSCALL			! 0x143
1919106050Sjake	tl0_reserved	188				! 0x144-0x1ff
192080709Sjake
	/*
	 * TL>0 (trap from kernel) half of the trap table, vectors 0x200-0x3ff.
	 * NOTE(review): several vector-number comments in the spill/fill area
	 * were wrong (0x29c, 0x2d0, 0x2d4, 0x2d8) and have been corrected
	 * below; each spill/fill vector is 128 bytes = 4 entries, so the
	 * sequence from 0x280/0x2c0 advances by 4 per slot.
	 */
192180709Sjaketl1_base:
192288779Sjake	tl1_reserved	8				! 0x200-0x207
192380709Sjaketl1_insn_excptn:
192488779Sjake	tl1_insn_excptn					! 0x208
192588779Sjake	tl1_reserved	1				! 0x209
192680709Sjaketl1_insn_error:
192788779Sjake	tl1_gen		T_INSTRUCTION_ERROR		! 0x20a
192888779Sjake	tl1_reserved	5				! 0x20b-0x20f
192980709Sjaketl1_insn_illegal:
193088779Sjake	tl1_gen		T_ILLEGAL_INSTRUCTION		! 0x210
193180709Sjaketl1_priv_opcode:
193288779Sjake	tl1_gen		T_PRIVILEGED_OPCODE		! 0x211
193388779Sjake	tl1_reserved	14				! 0x212-0x21f
193480709Sjaketl1_fp_disabled:
193588779Sjake	tl1_gen		T_FP_DISABLED			! 0x220
193680709Sjaketl1_fp_ieee:
193788779Sjake	tl1_gen		T_FP_EXCEPTION_IEEE_754		! 0x221
193880709Sjaketl1_fp_other:
193988779Sjake	tl1_gen		T_FP_EXCEPTION_OTHER		! 0x222
194080709Sjaketl1_tag_ovflw:
194188779Sjake	tl1_gen		T_TAG_OFERFLOW			! 0x223 (sic, constant is spelled "OFERFLOW")
194280709Sjaketl1_clean_window:
194388779Sjake	clean_window					! 0x224
194480709Sjaketl1_divide:
194588779Sjake	tl1_gen		T_DIVISION_BY_ZERO		! 0x228
194688779Sjake	tl1_reserved	7				! 0x229-0x22f
194780709Sjaketl1_data_excptn:
194888779Sjake	tl1_data_excptn					! 0x230
194988779Sjake	tl1_reserved	1				! 0x231
195080709Sjaketl1_data_error:
195188779Sjake	tl1_gen		T_DATA_ERROR			! 0x232
195288779Sjake	tl1_reserved	1				! 0x233
195380709Sjaketl1_align:
195488779Sjake	tl1_align					! 0x234
195580709Sjaketl1_align_lddf:
195688779Sjake	tl1_gen		T_RESERVED			! 0x235
195780709Sjaketl1_align_stdf:
195888779Sjake	tl1_gen		T_RESERVED			! 0x236
195980709Sjaketl1_priv_action:
196088779Sjake	tl1_gen		T_PRIVILEGED_ACTION		! 0x237
196188779Sjake	tl1_reserved	9				! 0x238-0x240
196280709Sjaketl1_intr_level:
196388779Sjake	tl1_intr_level					! 0x241-0x24f
196488779Sjake	tl1_reserved	16				! 0x250-0x25f
196580709Sjaketl1_intr_vector:
196697265Sjake	intr_vector					! 0x260
196780709Sjaketl1_watch_phys:
196888779Sjake	tl1_gen		T_PA_WATCHPOINT			! 0x261
196980709Sjaketl1_watch_virt:
197088779Sjake	tl1_gen		T_VA_WATCHPOINT			! 0x262
197180709Sjaketl1_ecc:
197288779Sjake	tl1_gen		T_CORRECTED_ECC_ERROR		! 0x263
197380709Sjaketl1_immu_miss:
197488779Sjake	tl1_immu_miss					! 0x264
197580709Sjaketl1_dmmu_miss:
197688779Sjake	tl1_dmmu_miss					! 0x268
197780709Sjaketl1_dmmu_prot:
197888779Sjake	tl1_dmmu_prot					! 0x26c
197988779Sjake	tl1_reserved	16				! 0x270-0x27f
198080709Sjaketl1_spill_0_n:
198188779Sjake	tl1_spill_0_n					! 0x280
198291246Sjake	tl1_spill_bad	1				! 0x284
198391246Sjaketl1_spill_2_n:
198491246Sjake	tl1_spill_2_n					! 0x288
198591246Sjaketl1_spill_3_n:
198691246Sjake	tl1_spill_3_n					! 0x28c
198791246Sjake	tl1_spill_bad	4				! 0x290-0x29f
198881380Sjaketl1_spill_0_o:
198988779Sjake	tl1_spill_0_o					! 0x2a0
199082906Sjaketl1_spill_1_o:
199188779Sjake	tl1_spill_1_o					! 0x2a4
199282906Sjaketl1_spill_2_o:
199388779Sjake	tl1_spill_2_o					! 0x2a8
199491246Sjake	tl1_spill_bad	5				! 0x2ac-0x2bf
199580709Sjaketl1_fill_0_n:
199688779Sjake	tl1_fill_0_n					! 0x2c0
199791246Sjake	tl1_fill_bad	1				! 0x2c4
199891246Sjaketl1_fill_2_n:
199991246Sjake	tl1_fill_2_n					! 0x2c8
200091246Sjaketl1_fill_3_n:
200191246Sjake	tl1_fill_3_n					! 0x2cc
200291246Sjake	tl1_fill_bad	12				! 0x2d0-0x2ff
200388779Sjake	tl1_reserved	1				! 0x300
200480709Sjaketl1_breakpoint:
200588779Sjake	tl1_gen		T_BREAKPOINT			! 0x301
200688779Sjake	tl1_gen		T_RSTRWP_PHYS			! 0x302
200788779Sjake	tl1_gen		T_RSTRWP_VIRT			! 0x303
200888779Sjake	tl1_reserved	252				! 0x304-0x3ff
200980709Sjake
201081380Sjake/*
201182906Sjake * User trap entry point.
201282906Sjake *
2013103897Sjake * void tl0_utrap(u_long type, u_long o1, u_long o2, u_long tar, u_long sfar,
2014103897Sjake *                u_long sfsr)
2015103897Sjake *
2016103897Sjake * This handles redirecting a trap back to usermode as a user trap.  The user
2017103897Sjake * program must have first registered a trap handler with the kernel using
2018103897Sjake * sysarch(SPARC_UTRAP_INSTALL).  The trap handler is passed enough state
2019103897Sjake * for it to return to the trapping code directly; it will not return through
2020103897Sjake * the kernel.  The trap type is passed in %o0, all out registers must be
2021103897Sjake * passed through to tl0_trap or to usermode untouched.  Note that the
2022103897Sjake * parameters passed in out registers may be used by the user trap handler.
2023103897Sjake * Do not change the registers they are passed in or you will break the ABI.
2024103897Sjake *
2025103897Sjake * If the trap type allows user traps, setup state to execute the user trap
2026103897Sjake * handler and bounce back to usermode, otherwise branch to tl0_trap.
2027103897Sjake */
2028103897SjakeENTRY(tl0_utrap)
2029103897Sjake	/*
2030103897Sjake	 * Check if the trap type allows user traps.
2031103897Sjake	 */
2032103897Sjake	cmp	%o0, UT_MAX
2033103897Sjake	bge,a,pt %xcc, tl0_trap
2034103897Sjake	 nop
2035103897Sjake
2036103897Sjake	/*
2037103897Sjake	 * Load the user trap handler from the utrap table.
2038103897Sjake	 */
2039103897Sjake	ldx	[PCPU(CURTHREAD)], %l0
2040103897Sjake	ldx	[%l0 + TD_PROC], %l0
2041103897Sjake	ldx	[%l0 + P_MD + MD_UTRAP], %l0
	/* No utrap table installed for this process: normal kernel trap. */
2042103897Sjake	brz,pt	%l0, tl0_trap
2043103897Sjake	 sllx	%o0, PTR_SHIFT, %l1
2044103897Sjake	ldx	[%l0 + %l1], %l0
	/* No handler registered for this particular trap type. */
2045103897Sjake	brz,a,pt %l0, tl0_trap
2046103897Sjake	 nop
2047103897Sjake
2048103897Sjake	/*
2049103897Sjake	 * If the save we did on entry to the kernel had to spill a window
2050103897Sjake	 * to the pcb, pretend we took a spill trap instead.  Any windows
2051103897Sjake	 * that are in the pcb must be copied out or the fill handler will
2052103897Sjake	 * not be able to find them, since the user trap handler returns
2053103897Sjake	 * directly to the trapping code.  Note that we only support precise
2054103897Sjake	 * user traps, which implies that the condition that caused the trap
2055103897Sjake	 * in the first place is still valid, so it will occur again when we
2056103897Sjake	 * re-execute the trapping instruction.
2057103897Sjake	 */
2058103897Sjake	ldx	[PCB_REG + PCB_NSAVED], %l1
2059103897Sjake	brnz,a,pn %l1, tl0_trap
2060103897Sjake	 mov	T_SPILL, %o0
2061103897Sjake
2062103897Sjake	/*
2063103897Sjake	 * Pass %fsr in %l4, %tstate in %l5, %tpc in %l6 and %tnpc in %l7.
2064103897Sjake	 * The ABI specifies only %l6 and %l7, but we need to pass %fsr or
2065103897Sjake	 * it may be clobbered by an interrupt before the user trap code
2066103897Sjake	 * can read it, and we must pass %tstate in order to restore %ccr
2067103897Sjake	 * and %asi.  The %fsr must be stored to memory, so we use the
2068103897Sjake	 * temporary stack for that.
2069103897Sjake	 */
	/* %fsr can only be accessed via memory; FP must be enabled to read it. */
2070103897Sjake	rd	%fprs, %l1
2071103897Sjake	or	%l1, FPRS_FEF, %l2
2072103897Sjake	wr	%l2, 0, %fprs
2073103897Sjake	dec	8, ASP_REG
2074103897Sjake	stx	%fsr, [ASP_REG]
2075103897Sjake	ldx	[ASP_REG], %l4
2076103897Sjake	inc	8, ASP_REG
2077103897Sjake	wr	%l1, 0, %fprs
2078103897Sjake
2079103897Sjake	rdpr	%tstate, %l5
2080103897Sjake	rdpr	%tpc, %l6
2081103897Sjake	rdpr	%tnpc, %l7
2082103897Sjake
2083103897Sjake	/*
2084103897Sjake	 * Setup %tnpc to return to.
2085103897Sjake	 */
2086103897Sjake	wrpr	%l0, 0, %tnpc
2087103897Sjake
2088103897Sjake	/*
2089103897Sjake	 * Setup %wstate for return, clear WSTATE_TRANSITION.
2090103897Sjake	 */
2091103897Sjake	rdpr	%wstate, %l1
2092103897Sjake	and	%l1, WSTATE_NORMAL_MASK, %l1
2093103897Sjake	wrpr	%l1, 0, %wstate
2094103897Sjake
2095103897Sjake	/*
2096103897Sjake	 * Setup %tstate for return, change the saved cwp to point to the
2097103897Sjake	 * current window instead of the window at the time of the trap.
2098103897Sjake	 */
2099103897Sjake	andn	%l5, TSTATE_CWP_MASK, %l1
2100103897Sjake	rdpr	%cwp, %l2
2101103897Sjake	wrpr	%l1, %l2, %tstate
2102103897Sjake
2103103897Sjake	/*
2104103897Sjake	 * Setup %sp.  Userland processes will crash if this is not setup.
2105103897Sjake	 */
2106103897Sjake	sub	%fp, CCFSZ, %sp
2107103897Sjake
2108103897Sjake	/*
2109103897Sjake	 * Execute the user trap handler.
2110103897Sjake	 */
2111103897Sjake	done
2112103897SjakeEND(tl0_utrap)
2113103897Sjake
2114103897Sjake/*
2115103897Sjake * (Real) User trap entry point.
2116103897Sjake *
211788644Sjake * void tl0_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
211888644Sjake *		 u_int sfsr)
211982906Sjake *
212082906Sjake * The following setup has been performed:
212182906Sjake *	- the windows have been split and the active user window has been saved
212282906Sjake *	  (maybe just to the pcb)
212382906Sjake *	- we are on alternate globals and interrupts are disabled
212482906Sjake *
212589050Sjake * We switch to the kernel stack, build a trapframe, switch to normal
212688644Sjake * globals, enable interrupts and call trap.
212782906Sjake *
212882906Sjake * NOTE: We must be very careful setting up the per-cpu pointer.  We know that
212982906Sjake * it has been pre-set in alternate globals, so we read it from there and setup
213082906Sjake * the normal %g7 *before* enabling interrupts.  This avoids any possibility
213187702Sjhb * of cpu migration and using the wrong pcpup.
213281380Sjake */
213382005SjakeENTRY(tl0_trap)
213482906Sjake	/*
213582906Sjake	 * Force kernel store order.
213682906Sjake	 */
213782906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
213880709Sjake
	/*
	 * Snapshot the trapped context's state before it can be clobbered.
	 */
213981380Sjake	rdpr	%tstate, %l0
214088644Sjake	rdpr	%tpc, %l1
214188644Sjake	rdpr	%tnpc, %l2
214288644Sjake	rd	%y, %l3
214388644Sjake	rd	%fprs, %l4
214488644Sjake	rdpr	%wstate, %l5
214588644Sjake
214688644Sjake#if KTR_COMPILE & KTR_TRAP
214788644Sjake	CATR(KTR_TRAP,
214888644Sjake	    "tl0_trap: td=%p type=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
214988644Sjake	    , %g1, %g2, %g3, 7, 8, 9)
215088644Sjake	ldx	[PCPU(CURTHREAD)], %g2
215188644Sjake	stx	%g2, [%g1 + KTR_PARM1]
215288644Sjake	stx	%o0, [%g1 + KTR_PARM2]
215388644Sjake	rdpr	%pil, %g2
215488644Sjake	stx	%g2, [%g1 + KTR_PARM3]
215588644Sjake	stx	%l1, [%g1 + KTR_PARM4]
215688644Sjake	stx	%l2, [%g1 + KTR_PARM5]
215788644Sjake	stx	%i6, [%g1 + KTR_PARM6]
215888644Sjake9:
215988644Sjake#endif
216088644Sjake
	/*
	 * Finish splitting the window state: the user's windows become
	 * "other" windows and the kernel starts with none restorable.
	 */
2161103897Sjake1:	and	%l5, WSTATE_NORMAL_MASK, %l5
2162103897Sjake	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
216388644Sjake	wrpr	%l5, WSTATE_KERNEL, %wstate
216488644Sjake	rdpr	%canrestore, %l6
216588644Sjake	wrpr	%l6, 0, %otherwin
216688644Sjake	wrpr	%g0, 0, %canrestore
216788644Sjake
	/*
	 * Switch to the kernel stack (just below the pcb) and build the
	 * trapframe.  Note %o2 is NOT saved here: it holds the address of
	 * the C handler, jumped to at the end of this function.
	 */
216888644Sjake	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp
216988644Sjake
2170105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2171105733Sjake	stx	%o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
217288644Sjake	stx	%o3, [%sp + SPOFF + CCFSZ + TF_TAR]
217388644Sjake	stx	%o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
2174105733Sjake	stx	%o5, [%sp + SPOFF + CCFSZ + TF_SFSR]
217588644Sjake
217681380Sjake	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
217781380Sjake	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
217881380Sjake	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2179105733Sjake	stx	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
2180105733Sjake	stx	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
2181105733Sjake	stx	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]
218281380Sjake
	/* FP must be enabled briefly to read %fsr and %gsr (%asr19). */
218388644Sjake	wr	%g0, FPRS_FEF, %fprs
218488644Sjake	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
2185105733Sjake	rd	%asr19, %l6
2186105733Sjake	stx	%l6, [%sp + SPOFF + CCFSZ + TF_GSR]
218788644Sjake	wr	%g0, 0, %fprs
218882906Sjake
	/*
	 * Save the normal globals, carrying PCB_REG/PCPU_REG across the
	 * switch from alternate to normal globals by hand.
	 */
218989050Sjake	mov	PCB_REG, %l0
219089050Sjake	mov	PCPU_REG, %l1
219182906Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
219282005Sjake
219382005Sjake	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
219482005Sjake	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
219582005Sjake	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
219682005Sjake	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
219782005Sjake	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
219882005Sjake	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
219982005Sjake	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]
220082005Sjake
220189050Sjake	mov	%l0, PCB_REG
220289050Sjake	mov	%l1, PCPU_REG
220388644Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
220484186Sjake
	/*
	 * Save the user's out registers (now our ins after the split).
	 */
220584186Sjake	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
220684186Sjake	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
220784186Sjake	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
220884186Sjake	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
220984186Sjake	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
221084186Sjake	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
221184186Sjake	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
221284186Sjake	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
221384186Sjake
	/*
	 * Call the handler whose address was passed in %o2, with the
	 * trapframe as its argument.  Fake %o7 so it "returns" to tl0_ret
	 * (return address is %o7 + 8).
	 */
2214103921Sjake	set	tl0_ret - 8, %o7
2215103921Sjake	jmpl	%o2, %g0
221684186Sjake	 add	%sp, CCFSZ + SPOFF, %o0
221784186SjakeEND(tl0_trap)
221884186Sjake
221988644Sjake/*
222091246Sjake * void tl0_intr(u_int level, u_int mask)
222191246Sjake */
222284186SjakeENTRY(tl0_intr)
222384186Sjake	/*
222484186Sjake	 * Force kernel store order.
222584186Sjake	 */
222684186Sjake	wrpr	%g0, PSTATE_ALT, %pstate
222784186Sjake
	/*
	 * Snapshot the trapped context's state before it can be clobbered.
	 */
222884186Sjake	rdpr	%tstate, %l0
222988644Sjake	rdpr	%tpc, %l1
223088644Sjake	rdpr	%tnpc, %l2
223188644Sjake	rd	%y, %l3
223288644Sjake	rd	%fprs, %l4
223388644Sjake	rdpr	%wstate, %l5
223488644Sjake
223588644Sjake#if KTR_COMPILE & KTR_INTR
223688644Sjake	CATR(KTR_INTR,
223791246Sjake	    "tl0_intr: td=%p level=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
223888644Sjake	    , %g1, %g2, %g3, 7, 8, 9)
223988644Sjake	ldx	[PCPU(CURTHREAD)], %g2
224088644Sjake	stx	%g2, [%g1 + KTR_PARM1]
224188644Sjake	stx	%o0, [%g1 + KTR_PARM2]
224288644Sjake	rdpr	%pil, %g2
224388644Sjake	stx	%g2, [%g1 + KTR_PARM3]
224488644Sjake	stx	%l1, [%g1 + KTR_PARM4]
224588644Sjake	stx	%l2, [%g1 + KTR_PARM5]
224688644Sjake	stx	%i6, [%g1 + KTR_PARM6]
224788644Sjake9:
224888644Sjake#endif
224988644Sjake
	/* Raise %pil to the interrupt level and clear its bit (%o1 mask). */
225091246Sjake	wrpr	%o0, 0, %pil
225191246Sjake	wr	%o1, 0, %asr21
225291246Sjake
	/*
	 * Finish splitting the window state: the user's windows become
	 * "other" windows and the kernel starts with none restorable.
	 */
225388644Sjake	and	%l5, WSTATE_NORMAL_MASK, %l5
225488644Sjake	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
225588644Sjake	wrpr	%l5, WSTATE_KERNEL, %wstate
225688644Sjake	rdpr	%canrestore, %l6
225788644Sjake	wrpr	%l6, 0, %otherwin
225888644Sjake	wrpr	%g0, 0, %canrestore
225988644Sjake
	/* Switch to the kernel stack and build the trapframe. */
226088644Sjake	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp
226188644Sjake
226284186Sjake	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
226384186Sjake	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
226484186Sjake	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2265105733Sjake	stx	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
2266105733Sjake	stx	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
2267105733Sjake	stx	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]
226881380Sjake
	/* FP must be enabled briefly to read %fsr and %gsr (%asr19). */
226988644Sjake	wr	%g0, FPRS_FEF, %fprs
227088644Sjake	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
2271105733Sjake	rd	%asr19, %l6
2272105733Sjake	stx	%l6, [%sp + SPOFF + CCFSZ + TF_GSR]
227388644Sjake	wr	%g0, 0, %fprs
227484186Sjake
	/* Keep the level in %l3 for the handler dispatch below. */
227591246Sjake	mov	%o0, %l3
227691246Sjake	mov	T_INTERRUPT, %o1
227789050Sjake
2278105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
2279105733Sjake	stx	%o1, [%sp + SPOFF + CCFSZ + TF_TYPE]
228088644Sjake
	/*
	 * Save the normal globals, carrying PCB_REG/PCPU_REG across the
	 * switch from alternate to normal globals by hand.
	 */
228189050Sjake	mov	PCB_REG, %l0
228289050Sjake	mov	PCPU_REG, %l1
228384186Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
228484186Sjake
228584186Sjake	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
228684186Sjake	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
228784186Sjake	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
228884186Sjake	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
228984186Sjake	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
229084186Sjake	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
229184186Sjake	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]
229284186Sjake
229389050Sjake	mov	%l0, PCB_REG
229489050Sjake	mov	%l1, PCPU_REG
229588644Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
229684186Sjake
	/* Save the user's out registers (now our ins after the split). */
229784186Sjake	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
229884186Sjake	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
229984186Sjake	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
230084186Sjake	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
230184186Sjake	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
230284186Sjake	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
230384186Sjake	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
230484186Sjake	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
230584186Sjake
230689050Sjake	call	critical_enter
230789050Sjake	 nop
230889050Sjake
	/* Count the interrupt (cnt.v_intr++). */
230986519Sjake	SET(cnt+V_INTR, %l1, %l0)
231088644Sjake	ATOMIC_INC_INT(%l0, %l1, %l2)
231184186Sjake
	/*
	 * Dispatch to the handler for this interrupt level (saved in %l3),
	 * passing the trapframe as its argument.
	 */
231286519Sjake	SET(intr_handlers, %l1, %l0)
231389050Sjake	sllx	%l3, IH_SHIFT, %l1
231488644Sjake	ldx	[%l0 + %l1], %l1
231589050Sjake	KASSERT(%l1, "tl0_intr: ih null")
231684186Sjake	call	%l1
231784186Sjake	 add	%sp, CCFSZ + SPOFF, %o0
231889050Sjake
231989050Sjake	call	critical_exit
232089050Sjake	 nop
232189050Sjake
232284186Sjake	b,a	%xcc, tl0_ret
232384186Sjake	 nop
232484186SjakeEND(tl0_intr)
232584186Sjake
2326105733Sjake/*
2327105733Sjake * Initiate return to usermode.
2328105733Sjake *
2329105733Sjake * Called with a trapframe on the stack.  The window that was setup in
2330105733Sjake * tl0_trap may have been used by "fast" trap handlers that pretend to be
2331105733Sjake * leaf functions, so all ins and locals may have been clobbered since
2332105733Sjake * then.
2333105733Sjake *
2334105733Sjake * This code is rather long and complicated.
2335105733Sjake */
233682005SjakeENTRY(tl0_ret)
233793389Sjake	/*
233893389Sjake	 * Check for pending asts atomically with returning.  We must raise
233993389Sjake	 * the pil before checking, and if no asts are found the pil must
234093389Sjake	 * remain raised until the retry is executed, or we risk missing asts
234193389Sjake	 * caused by interrupts occurring after the test.  If the pil is lowered,
234293389Sjake	 * as it is when we call ast, the check must be re-executed.
234393389Sjake	 */
2344103784Sjake	wrpr	%g0, PIL_TICK, %pil
234584186Sjake	ldx	[PCPU(CURTHREAD)], %l0
234684186Sjake	ldx	[%l0 + TD_KSE], %l1
234784186Sjake	lduw	[%l1 + KE_FLAGS], %l2
234884186Sjake	and	%l2, KEF_ASTPENDING | KEF_NEEDRESCHED, %l2
2349103784Sjake	brz,a,pt %l2, 1f
235082906Sjake	 nop
2351105733Sjake
2352105733Sjake	/*
2353105733Sjake	 * We have an ast.  Re-enable interrupts and handle it, then restart
2354105733Sjake	 * the return sequence.
2355105733Sjake	 */
235693389Sjake	wrpr	%g0, 0, %pil
235782906Sjake	call	ast
235882906Sjake	 add	%sp, CCFSZ + SPOFF, %o0
2359103784Sjake	ba,a	%xcc, tl0_ret
236093389Sjake	 nop
236182906Sjake
236293389Sjake	/*
236393389Sjake	 * Check for windows that were spilled to the pcb and need to be
236493389Sjake	 * copied out.  This must be the last thing that is done before the
236593389Sjake	 * return to usermode.  If there are still user windows in the cpu
236693389Sjake	 * and we call a nested function after this, which causes them to be
236793389Sjake	 * spilled to the pcb, they will not be copied out and the stack will
236893389Sjake	 * be inconsistent.
236993389Sjake	 */
2370103784Sjake1:	ldx	[PCB_REG + PCB_NSAVED], %l1
2371103784Sjake	brz,a,pt %l1, 2f
2372103784Sjake	 nop
2373103784Sjake	wrpr	%g0, 0, %pil
237493389Sjake	mov	T_SPILL, %o0
2375105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2376103784Sjake	call	trap
2377103784Sjake	 add	%sp, SPOFF + CCFSZ, %o0
2378103784Sjake	ba,a	%xcc, tl0_ret
2379103784Sjake	 nop
238082906Sjake
2381105733Sjake	/*
2382105733Sjake	 * Restore the out registers from the trapframe.  These are ins
2383105733Sjake	 * now, they will become the outs when we restore below.
2384105733Sjake	 */
2385103784Sjake2:	ldx	[%sp + SPOFF + CCFSZ + TF_O0], %i0
238682906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O1], %i1
238782906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O2], %i2
238882906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O3], %i3
238982906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O4], %i4
239082906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O5], %i5
239182906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O6], %i6
239282906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O7], %i7
239381380Sjake
2394105733Sjake	/*
2395105733Sjake	 * Load everything we need to restore below before disabling
2396105733Sjake	 * interrupts.
2397105733Sjake	 */
2398105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_FPRS], %l0
2399105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_GSR], %l1
240085243Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
2401105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l3
2402105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l4
2403105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l5
2404105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_WSTATE], %l6
240582906Sjake
2406105733Sjake	/*
2407105733Sjake	 * Disable interrupts to restore the globals.  We need to restore
2408105733Sjake	 * %g6 and %g7 which are used as global variables in the kernel.
2409105733Sjake	 * They are not saved and restored for kernel traps, so an interrupt
2410105733Sjake	 * at the wrong time would clobber them.
2411105733Sjake	 */
241289050Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
241389050Sjake
241489050Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
241589050Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
241689050Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
241789050Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
241889050Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
241989050Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6
242089050Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G7], %g7
242189050Sjake
2422105733Sjake	/*
2423105733Sjake	 * Switch to alternate globals.  This frees up some registers we
2424105733Sjake	 * can use after the restore changes our window.
2425105733Sjake	 */
242682906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
242782906Sjake
2428105733Sjake	/*
2429105733Sjake	 * Drop %pil to zero.  It must have been zero at the time of the
2430105733Sjake	 * trap, since we were in usermode, but it was raised above in
2431105733Sjake	 * order to check for asts atomically.  We have interrupts disabled
2432105733Sjake	 * so any interrupts will not be serviced until we complete the
2433105733Sjake	 * return to usermode.
2434105733Sjake	 */
243588644Sjake	wrpr	%g0, 0, %pil
2436105733Sjake
2437105733Sjake	/*
2438105733Sjake	 * Save %fprs in an alternate global so it can be restored after the
2439105733Sjake	 * restore instruction below.  If we restore it before the restore,
2440105733Sjake	 * and the restore traps we may run for a while with floating point
2441105733Sjake	 * enabled in the kernel, which we want to avoid.
2442105733Sjake	 */
2443105733Sjake	mov	%l0, %g1
2444105733Sjake
2445105733Sjake	/*
2446105733Sjake	 * Restore %fsr and %gsr.  These need floating point enabled in %fprs,
2447105733Sjake	 * so we set it temporarily and then clear it.
2448105733Sjake	 * XXX %asr19 should be %gsr but gas is invoked incorrectly.
2449105733Sjake	 */
2450105733Sjake	wr	%g0, FPRS_FEF, %fprs
2451105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_FSR], %fsr
2452105733Sjake	wr	%l1, 0, %asr19
2453105733Sjake	wr	%g0, 0, %fprs
2454105733Sjake
2455105733Sjake	/*
2456105733Sjake	 * Restore program counters.  This could be done after the restore
2457105733Sjake	 * but we're out of alternate globals to store them in...
2458105733Sjake	 */
245988644Sjake	wrpr	%l2, 0, %tnpc
2460105733Sjake	wrpr	%l3, 0, %tpc
246182906Sjake
2462105733Sjake	/*
2463105733Sjake	 * Save %tstate in an alternate global and clear the %cwp field.  %cwp
2464105733Sjake	 * will be affected by the restore below and we need to make sure it
2465105733Sjake	 * points to the current window at that time, not the window that was
2466105733Sjake	 * active at the time of the trap.
2467105733Sjake	 */
2468105733Sjake	andn	%l4, TSTATE_CWP_MASK, %g2
246982906Sjake
2470105733Sjake	/*
2471105733Sjake	 * Restore %y.  Could also be below if we had more alternate globals.
2472105733Sjake	 */
2473105733Sjake	wr	%l5, 0, %y
2474105733Sjake
2475105733Sjake	/*
2476105733Sjake	 * Setup %wstate for return.  We need to restore the user window state
2477105733Sjake	 * which we saved in wstate.other when we trapped.  We also need to
2478105733Sjake	 * set the transition bit so the restore will be handled specially
2479105733Sjake	 * if it traps, use the xor feature of wrpr to do that.
2480105733Sjake	 */
2481105733Sjake	srlx	%l6, WSTATE_OTHER_SHIFT, %g3
248288644Sjake	wrpr	%g3, WSTATE_TRANSITION, %wstate
2483105733Sjake
2484105733Sjake	/*
2485105733Sjake	 * Setup window management registers for return.  If not all user
2486105733Sjake	 * windows were spilled in the kernel %otherwin will be non-zero,
2487105733Sjake	 * so we need to transfer it to %canrestore to correctly restore
2488105733Sjake	 * those windows.  Otherwise everything gets set to zero and the
2489105733Sjake	 * restore below will fill a window directly from the user stack.
2490105733Sjake	 */
249188644Sjake	rdpr	%otherwin, %o0
249288644Sjake	wrpr	%o0, 0, %canrestore
249382906Sjake	wrpr	%g0, 0, %otherwin
249488644Sjake	wrpr	%o0, 0, %cleanwin
249581380Sjake
249682005Sjake	/*
2497105733Sjake	 * Now do the restore.  If this instruction causes a fill trap which
2498105733Sjake	 * fails to fill a window from the user stack, we will resume at
2499105733Sjake	 * tl0_ret_fill_end and call back into the kernel.
2500105733Sjake	 */
250182906Sjake	restore
250282906Sjaketl0_ret_fill:
250381380Sjake
2504105733Sjake	/*
2505105733Sjake	 * We made it.  We're back in the window that was active at the time
2506105733Sjake	 * of the trap, and ready to return to usermode.
2507105733Sjake	 */
2508105733Sjake
2509105733Sjake	/*
2510105733Sjake	 * Restore %fprs.  This was saved in an alternate global above.
2511105733Sjake	 */
2512105733Sjake	wr	%g1, 0, %fprs
2513105733Sjake
2514105733Sjake	/*
2515105733Sjake	 * Fixup %tstate so the saved %cwp points to the current window and
2516105733Sjake	 * restore it.
2517105733Sjake	 */
251888644Sjake	rdpr	%cwp, %g4
2519105733Sjake	wrpr	%g2, %g4, %tstate
2520105733Sjake
2521105733Sjake	/*
2522105733Sjake	 * Restore the user window state.  The transition bit was set above
2523105733Sjake	 * for special handling of the restore, this clears it.
2524105733Sjake	 */
252588644Sjake	wrpr	%g3, 0, %wstate
252685243Sjake
252784186Sjake#if KTR_COMPILE & KTR_TRAP
252888644Sjake	CATR(KTR_TRAP, "tl0_ret: td=%#lx pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
252982906Sjake	    , %g2, %g3, %g4, 7, 8, 9)
253083366Sjulian	ldx	[PCPU(CURTHREAD)], %g3
253182906Sjake	stx	%g3, [%g2 + KTR_PARM1]
253285243Sjake	rdpr	%pil, %g3
253385243Sjake	stx	%g3, [%g2 + KTR_PARM2]
253488644Sjake	rdpr	%tpc, %g3
253584186Sjake	stx	%g3, [%g2 + KTR_PARM3]
253688644Sjake	rdpr	%tnpc, %g3
253784186Sjake	stx	%g3, [%g2 + KTR_PARM4]
253884186Sjake	stx	%sp, [%g2 + KTR_PARM5]
253982906Sjake9:
254082906Sjake#endif
254181380Sjake
2542105733Sjake	/*
2543105733Sjake	 * Return to usermode.
2544105733Sjake	 */
254582906Sjake	retry
254682906Sjaketl0_ret_fill_end:
254782005Sjake
254884186Sjake#if KTR_COMPILE & KTR_TRAP
254988785Sjake	CATR(KTR_TRAP, "tl0_ret: fill magic ps=%#lx ws=%#lx sp=%#lx"
255082906Sjake	    , %l0, %l1, %l2, 7, 8, 9)
255188785Sjake	rdpr	%pstate, %l1
255288785Sjake	stx	%l1, [%l0 + KTR_PARM1]
255388785Sjake	stx	%l5, [%l0 + KTR_PARM2]
255488785Sjake	stx	%sp, [%l0 + KTR_PARM3]
255582906Sjake9:
255682906Sjake#endif
255782906Sjake
255882906Sjake	/*
2559105733Sjake	 * The restore above caused a fill trap and the fill handler was
2560105733Sjake	 * unable to fill a window from the user stack.  The special fill
2561105733Sjake	 * handler recognized this and punted, sending us here.  We need
2562105733Sjake	 * to carefully undo any state that was restored before the restore
2563105733Sjake	 * was executed and call trap again.  Trap will copyin a window
2564105733Sjake	 * from the user stack which will fault in the page we need so the
2565105733Sjake	 * restore above will succeed when we try again.  If this fails
2566105733Sjake	 * the process has trashed its stack, so we kill it.
2567105733Sjake	 */
2568105733Sjake
2569105733Sjake	/*
2570105733Sjake	 * Restore the kernel window state.  This was saved in %l6 above, and
2571105733Sjake	 * since the restore failed we're back in the same window.
2572105733Sjake	 */
2573105733Sjake	wrpr	%l6, 0, %wstate
2574105733Sjake
2575105733Sjake	/*
2576105733Sjake	 * Restore the normal globals which have predefined values in the
2577105733Sjake	 * kernel.  We clobbered them above restoring the user's globals
2578105733Sjake	 * so this is very important.
2579105733Sjake	 * XXX PSTATE_ALT must already be set.
2580105733Sjake	 */
258188785Sjake	wrpr	%g0, PSTATE_ALT, %pstate
258289050Sjake	mov	PCB_REG, %o0
258389050Sjake	mov	PCPU_REG, %o1
258488785Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
258589050Sjake	mov	%o0, PCB_REG
258689050Sjake	mov	%o1, PCPU_REG
258788644Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
2588105733Sjake
2589105733Sjake	/*
2590105733Sjake	 * Simulate a fill trap and then start the whole return sequence over
2591105733Sjake	 * again.  This is special because it only copies in 1 window, not 2
2592105733Sjake	 * as we would for a normal failed fill.  This may be the first time
2593105733Sjake	 * the process has been run, so there may not be 2 windows worth of
2594105733Sjake	 * stack to copyin.
2595105733Sjake	 */
2596103784Sjake	mov	T_FILL_RET, %o0
2597105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2598103784Sjake	call	trap
2599103784Sjake	 add	%sp, SPOFF + CCFSZ, %o0
2600103784Sjake	ba,a	%xcc, tl0_ret
2601103784Sjake	 nop
260282005SjakeEND(tl0_ret)
260381380Sjake
260480709Sjake/*
260582906Sjake * Kernel trap entry point
260682906Sjake *
260791246Sjake * void tl1_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
260888644Sjake *		 u_int sfsr)
260982906Sjake *
261082906Sjake * This is easy because the stack is already setup and the windows don't need
261182906Sjake * to be split.  We build a trapframe and call trap(), the same as above, but
261282906Sjake * the outs don't need to be saved.
261380709Sjake */
261480709SjakeENTRY(tl1_trap)
	/*
	 * Capture the trap state (%tstate, %tpc, %tnpc), the interrupt
	 * priority level, %y and the window state into locals.  This must
	 * happen before %tl is lowered below, since the t-registers belong
	 * to the current trap level.
	 */
261580709Sjake	rdpr	%tstate, %l0
261680709Sjake	rdpr	%tpc, %l1
261780709Sjake	rdpr	%tnpc, %l2
261891246Sjake	rdpr	%pil, %l3
261991316Sjake	rd	%y, %l4
262091316Sjake	rdpr	%wstate, %l5
262180709Sjake
262284186Sjake#if KTR_COMPILE & KTR_TRAP
262388644Sjake	CATR(KTR_TRAP, "tl1_trap: td=%p type=%#lx pil=%#lx pc=%#lx sp=%#lx"
262488644Sjake	    , %g1, %g2, %g3, 7, 8, 9)
262588644Sjake	ldx	[PCPU(CURTHREAD)], %g2
262688644Sjake	stx	%g2, [%g1 + KTR_PARM1]
262797265Sjake	stx	%o0, [%g1 + KTR_PARM2]
262891246Sjake	stx	%l3, [%g1 + KTR_PARM3]
262988644Sjake	stx	%l1, [%g1 + KTR_PARM4]
263088644Sjake	stx	%i6, [%g1 + KTR_PARM5]
263182906Sjake9:
263282906Sjake#endif
263382906Sjake
	/* Drop to trap level 1 so that further traps can be taken. */
263480709Sjake	wrpr	%g0, 1, %tl
263588644Sjake
	/*
	 * Keep the "other" window state bits and switch to the kernel
	 * window state.
	 */
263691316Sjake	and	%l5, WSTATE_OTHER_MASK, %l5
263791316Sjake	wrpr	%l5, WSTATE_KERNEL, %wstate
263891246Sjake
	/*
	 * Build the trap frame on the stack: trap type and the
	 * type-specific arguments passed in by our caller.
	 */
2639105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2640105733Sjake	stx	%o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
2641103919Sjake	stx	%o3, [%sp + SPOFF + CCFSZ + TF_TAR]
2642103919Sjake	stx	%o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
2643105733Sjake	stx	%o5, [%sp + SPOFF + CCFSZ + TF_SFSR]
2644103919Sjake
	/* Saved trap state, pil and %y from above. */
264588644Sjake	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
264688644Sjake	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
264788644Sjake	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2648105733Sjake	stx	%l3, [%sp + SPOFF + CCFSZ + TF_PIL]
2649105733Sjake	stx	%l4, [%sp + SPOFF + CCFSZ + TF_Y]
265088644Sjake
	/*
	 * Save the normal globals.  PCB_REG and PCPU_REG live in the
	 * globals, so carry their values across the switch to the normal
	 * global set via locals and re-establish them afterwards.
	 */
2651103919Sjake	mov	PCB_REG, %l0
2652103919Sjake	mov	PCPU_REG, %l1
265391158Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
265491158Sjake
265580709Sjake	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
265680709Sjake	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
265780709Sjake	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
265880709Sjake	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
265980709Sjake	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
266080709Sjake
2661103919Sjake	mov	%l0, PCB_REG
2662103919Sjake	mov	%l1, PCPU_REG
266391158Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
266491158Sjake
	/*
	 * Save the outs of the trapping frame, which are our ins since we
	 * are one window below it.
	 */
2665103919Sjake	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
2666103919Sjake	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
2667103919Sjake	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
2668103919Sjake	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
2669103919Sjake	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
2670103919Sjake	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
2671103919Sjake	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
2672103919Sjake	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
2673103919Sjake
	/*
	 * Jump to the handler passed in %o2 with the trap frame as its
	 * argument (delay slot), faking %o7 so that the handler's "ret"
	 * lands on tl1_ret (%o7 + 8).
	 */
2674103921Sjake	set	tl1_ret - 8, %o7
2675103921Sjake	jmpl	%o2, %g0
267680709Sjake	 add	%sp, CCFSZ + SPOFF, %o0
2677103921SjakeEND(tl1_trap)
267880709Sjake
2679103921SjakeENTRY(tl1_ret)
	/*
	 * Common return path for kernel traps dispatched by tl1_trap.
	 * Restore the register state captured in the trap frame, pop back
	 * to the trapping window and retry the trapping instruction.
	 */

	/* Outs of the trapping frame (our ins here). */
2680103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O0], %i0
2681103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O1], %i1
2682103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O2], %i2
2683103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O3], %i3
2684103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O4], %i4
2685103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O5], %i5
2686103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O6], %i6
2687103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O7], %i7
2688103919Sjake
	/* Saved trap state, pil and %y. */
268888644Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l0
268988644Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l1
269088644Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
2691105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_PIL], %l3
2692105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l4
269388644Sjake
	/* The normal globals at the time of the trap. */
269480709Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
269580709Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
269680709Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
269780709Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
269880709Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
269980709Sjake
	/* Switch to alternate globals so %g1-%g5 survive the return. */
270082906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
270180709Sjake
	/*
	 * Strip the saved window pointer out of %tstate; the current %cwp
	 * after the restore below is merged back in before the retry.
	 */
270288644Sjake	andn	%l0, TSTATE_CWP_MASK, %g1
270386519Sjake	mov	%l1, %g2
270486519Sjake	mov	%l2, %g3
270581380Sjake
270688644Sjake	wrpr	%l3, 0, %pil
270791316Sjake	wr	%l4, 0, %y
270886519Sjake
	/* Pop back to the window we trapped in. */
270986519Sjake	restore
271086519Sjake
	/* Raise the trap level so the t-registers can be written. */
271180709Sjake	wrpr	%g0, 2, %tl
271280709Sjake
271388644Sjake	rdpr	%cwp, %g4
271488644Sjake	wrpr	%g1, %g4, %tstate
271586519Sjake	wrpr	%g2, 0, %tpc
271686519Sjake	wrpr	%g3, 0, %tnpc
271786519Sjake
271884186Sjake#if KTR_COMPILE & KTR_TRAP
2719103921Sjake	CATR(KTR_TRAP, "tl1_ret: td=%#lx pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
272086519Sjake	    , %g2, %g3, %g4, 7, 8, 9)
272186519Sjake	ldx	[PCPU(CURTHREAD)], %g3
272286519Sjake	stx	%g3, [%g2 + KTR_PARM1]
272386519Sjake	rdpr	%pil, %g3
272486519Sjake	stx	%g3, [%g2 + KTR_PARM2]
272586519Sjake	rdpr	%tstate, %g3
272686519Sjake	stx	%g3, [%g2 + KTR_PARM3]
272786519Sjake	rdpr	%tpc, %g3
272886519Sjake	stx	%g3, [%g2 + KTR_PARM4]
272986519Sjake	stx	%sp, [%g2 + KTR_PARM5]
273082906Sjake9:
273182906Sjake#endif
273282906Sjake
	/* Resume execution at %tpc/%tnpc. */
273380709Sjake	retry
2734103921SjakeEND(tl1_ret)
273680709Sjake
273791246Sjake/*
273891246Sjake * void tl1_intr(u_int level, u_int mask)
273991246Sjake */
274084186SjakeENTRY(tl1_intr)
	/*
	 * Capture the trap state, pil, %y and window state before %tl is
	 * lowered below; the t-registers belong to the current trap level.
	 */
274184186Sjake	rdpr	%tstate, %l0
274284186Sjake	rdpr	%tpc, %l1
274384186Sjake	rdpr	%tnpc, %l2
274491246Sjake	rdpr	%pil, %l3
274591316Sjake	rd	%y, %l4
274691316Sjake	rdpr	%wstate, %l5
274784186Sjake
274884186Sjake#if KTR_COMPILE & KTR_INTR
274989050Sjake	CATR(KTR_INTR,
275091246Sjake	    "tl1_intr: td=%p level=%#lx pil=%#lx pc=%#lx sp=%#lx"
275188644Sjake	    , %g1, %g2, %g3, 7, 8, 9)
275288644Sjake	ldx	[PCPU(CURTHREAD)], %g2
275388644Sjake	stx	%g2, [%g1 + KTR_PARM1]
275491246Sjake	stx	%o0, [%g1 + KTR_PARM2]
275591246Sjake	stx	%l3, [%g1 + KTR_PARM3]
275691246Sjake	stx	%l1, [%g1 + KTR_PARM4]
275791246Sjake	stx	%i6, [%g1 + KTR_PARM5]
275884186Sjake9:
275984186Sjake#endif
276084186Sjake
	/*
	 * Raise %pil to the interrupt's level and acknowledge it by
	 * writing the mask to %asr21 (CLEAR_SOFTINT on UltraSPARC —
	 * NOTE(review): confirm against the softint delivery code).
	 */
276191246Sjake	wrpr	%o0, 0, %pil
276291246Sjake	wr	%o1, 0, %asr21
276391246Sjake
	/* Drop to trap level 1 so that further traps can be taken. */
276484186Sjake	wrpr	%g0, 1, %tl
276588644Sjake
	/* Keep the "other" window state bits; use kernel window state. */
276691316Sjake	and	%l5, WSTATE_OTHER_MASK, %l5
276791316Sjake	wrpr	%l5, WSTATE_KERNEL, %wstate
276891246Sjake
	/* Build the trap frame: saved trap state, pil and %y. */
276988644Sjake	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
277088644Sjake	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
277188644Sjake	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2772105733Sjake	stx	%l3, [%sp + SPOFF + CCFSZ + TF_PIL]
2773105733Sjake	stx	%l4, [%sp + SPOFF + CCFSZ + TF_Y]
277488644Sjake
	/* Keep the level in %l7 for the handler lookup below. */
277591246Sjake	mov	%o0, %l7
277691246Sjake	mov	T_INTERRUPT | T_KERNEL, %o1
277789050Sjake
2778105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
2779105733Sjake	stx	%o1, [%sp + SPOFF + CCFSZ + TF_TYPE]
278088644Sjake
	/* Only %sp and the return address of the trapping frame are saved. */
278188644Sjake	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
278288644Sjake	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
278388644Sjake
	/*
	 * Save the normal globals, carrying PCB_REG and PCPU_REG across
	 * the switch to the normal global set via locals.
	 */
278491158Sjake	mov	PCB_REG, %l4
278591158Sjake	mov	PCPU_REG, %l5
278691158Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
278791158Sjake
278884186Sjake	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
278984186Sjake	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
279084186Sjake	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
279184186Sjake	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
279284186Sjake	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
279384186Sjake
279491158Sjake	mov	%l4, PCB_REG
279591158Sjake	mov	%l5, PCPU_REG
279691158Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
279791158Sjake
	/* Run the handler inside a critical section. */
279889050Sjake	call	critical_enter
279989050Sjake	 nop
280084186Sjake
	/* Bump the interrupt statistics counter. */
280188644Sjake	SET(cnt+V_INTR, %l5, %l4)
280288644Sjake	ATOMIC_INC_INT(%l4, %l5, %l6)
280388644Sjake
	/*
	 * Look up the handler for this level in intr_handlers and call it
	 * with the trap frame as its argument (delay slot).
	 */
280488644Sjake	SET(intr_handlers, %l5, %l4)
280589050Sjake	sllx	%l7, IH_SHIFT, %l5
280688644Sjake	ldx	[%l4 + %l5], %l5
280789050Sjake	KASSERT(%l5, "tl1_intr: ih null")
280888644Sjake	call	%l5
280984186Sjake	 add	%sp, CCFSZ + SPOFF, %o0
281084186Sjake
281189050Sjake	call	critical_exit
281289050Sjake	 nop
281389050Sjake
	/* Reload %y; %l0-%l3 still hold the saved trap state and pil. */
2814105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l4
281591316Sjake
281684186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
281784186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
281884186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
281984186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
282084186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
282184186Sjake
	/* Switch to alternate globals so %g1-%g5 survive the return. */
282284186Sjake	wrpr	%g0, PSTATE_ALT, %pstate
282384186Sjake
	/*
	 * Strip the saved %cwp from %tstate; the current %cwp after the
	 * restore is merged back in before the retry.
	 */
282488644Sjake	andn	%l0, TSTATE_CWP_MASK, %g1
282586519Sjake	mov	%l1, %g2
282686519Sjake	mov	%l2, %g3
282788644Sjake	wrpr	%l3, 0, %pil
282891316Sjake	wr	%l4, 0, %y
282984186Sjake
	/* Pop back to the window we trapped in. */
283086519Sjake	restore
283186519Sjake
	/* Raise the trap level so the t-registers can be written. */
283284186Sjake	wrpr	%g0, 2, %tl
283384186Sjake
283488644Sjake	rdpr	%cwp, %g4
283588644Sjake	wrpr	%g1, %g4, %tstate
283686519Sjake	wrpr	%g2, 0, %tpc
283786519Sjake	wrpr	%g3, 0, %tnpc
283886519Sjake
283988644Sjake#if KTR_COMPILE & KTR_INTR
284088644Sjake	CATR(KTR_INTR, "tl1_intr: td=%#lx pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
284186519Sjake	    , %g2, %g3, %g4, 7, 8, 9)
284286519Sjake	ldx	[PCPU(CURTHREAD)], %g3
284386519Sjake	stx	%g3, [%g2 + KTR_PARM1]
284486519Sjake	rdpr	%pil, %g3
284586519Sjake	stx	%g3, [%g2 + KTR_PARM2]
284686519Sjake	rdpr	%tstate, %g3
284786519Sjake	stx	%g3, [%g2 + KTR_PARM3]
284886519Sjake	rdpr	%tpc, %g3
284986519Sjake	stx	%g3, [%g2 + KTR_PARM4]
285086519Sjake	stx	%sp, [%g2 + KTR_PARM5]
285184186Sjake9:
285284186Sjake#endif
285384186Sjake
	/* Resume execution at %tpc/%tnpc. */
285484186Sjake	retry
285584186SjakeEND(tl1_intr)
285684186Sjake
285782906Sjake/*
285882906Sjake * Freshly forked processes come here when switched to for the first time.
285982906Sjake * The arguments to fork_exit() have been setup in the locals, we must move
286082906Sjake * them to the outs.
286182906Sjake */
286280709SjakeENTRY(fork_trampoline)
286384186Sjake#if KTR_COMPILE & KTR_PROC
286484186Sjake	CATR(KTR_PROC, "fork_trampoline: td=%p (%s) cwp=%#lx"
286582906Sjake	    , %g1, %g2, %g3, 7, 8, 9)
286683366Sjulian	ldx	[PCPU(CURTHREAD)], %g2
286782906Sjake	stx	%g2, [%g1 + KTR_PARM1]
286884186Sjake	ldx	[%g2 + TD_PROC], %g2
286982906Sjake	add	%g2, P_COMM, %g2
287082906Sjake	stx	%g2, [%g1 + KTR_PARM2]
287182906Sjake	rdpr	%cwp, %g2
287282906Sjake	stx	%g2, [%g1 + KTR_PARM3]
287382906Sjake9:
287482906Sjake#endif
	/*
	 * The arguments for fork_exit() were staged in %l0-%l2; move
	 * them into the outs for the call (third argument in the delay
	 * slot).
	 */
287580709Sjake	mov	%l0, %o0
287680709Sjake	mov	%l1, %o1
287780709Sjake	call	fork_exit
287888644Sjake	 mov	%l2, %o2
	/* Return to usermode through the common trap return path. */
287982005Sjake	b,a	%xcc, tl0_ret
288084186Sjake	 nop
288180709SjakeEND(fork_trampoline)
2882