exception.S revision 117658
180709Sjake/*-
281180Sjake * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
381180Sjake *
481180Sjake * Redistribution and use in source and binary forms, with or without
581180Sjake * modification, are permitted provided that the following conditions
681180Sjake * are met:
781180Sjake * 1. Redistributions of source code must retain the above copyright
881180Sjake *    notice, this list of conditions and the following disclaimer.
981180Sjake * 2. Redistributions in binary form must reproduce the above copyright
1081180Sjake *    notice, this list of conditions and the following disclaimer in the
1181180Sjake *    documentation and/or other materials provided with the distribution.
1281180Sjake * 3. Berkeley Software Design Inc's name may not be used to endorse or
1381180Sjake *    promote products derived from this software without specific prior
1481180Sjake *    written permission.
1581180Sjake *
1681180Sjake * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
1781180Sjake * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1881180Sjake * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1981180Sjake * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
2081180Sjake * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
2181180Sjake * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2281180Sjake * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2381180Sjake * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2481180Sjake * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2581180Sjake * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2681180Sjake * SUCH DAMAGE.
2781180Sjake *
28114085Sobrien *	BSDI $Id: locore.s,v 1.36.2.15 1999/08/23 22:34:41 cp Exp $
2981180Sjake */
3081180Sjake/*-
3180709Sjake * Copyright (c) 2001 Jake Burkholder.
3280709Sjake * All rights reserved.
3380709Sjake *
3480709Sjake * Redistribution and use in source and binary forms, with or without
3580709Sjake * modification, are permitted provided that the following conditions
3680709Sjake * are met:
3780709Sjake * 1. Redistributions of source code must retain the above copyright
3880709Sjake *    notice, this list of conditions and the following disclaimer.
3980709Sjake * 2. Redistributions in binary form must reproduce the above copyright
4080709Sjake *    notice, this list of conditions and the following disclaimer in the
4180709Sjake *    documentation and/or other materials provided with the distribution.
4280709Sjake *
4381337Sobrien * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
4480709Sjake * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
4580709Sjake * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
4681337Sobrien * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
4780709Sjake * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
4880709Sjake * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
4980709Sjake * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
5080709Sjake * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
5180709Sjake * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
5280709Sjake * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
5380709Sjake * SUCH DAMAGE.
5480709Sjake */
5580709Sjake
56114188Sjake#include <machine/asm.h>
57114188Sjake__FBSDID("$FreeBSD: head/sys/sparc64/sparc64/exception.S 117658 2003-07-16 00:08:43Z jmg $");
58114188Sjake
59106050Sjake#include "opt_compat.h"
6080709Sjake#include "opt_ddb.h"
6180709Sjake
6280709Sjake#include <machine/asi.h>
6380709Sjake#include <machine/asmacros.h>
6482906Sjake#include <machine/ktr.h>
6582906Sjake#include <machine/pstate.h>
6680709Sjake#include <machine/trap.h>
6782906Sjake#include <machine/tstate.h>
6882906Sjake#include <machine/wstate.h>
6980709Sjake
7080709Sjake#include "assym.s"
7180709Sjake
72101653Sjake#define	TSB_KERNEL_MASK	0x0
73101653Sjake#define	TSB_KERNEL	0x0
74101653Sjake
7588644Sjake	.register %g2,#ignore
7688644Sjake	.register %g3,#ignore
7788644Sjake	.register %g6,#ignore
7888644Sjake	.register %g7,#ignore
7988644Sjake
8082005Sjake/*
8188644Sjake * Atomically set the reference bit in a tte.
8288644Sjake */
8388644Sjake#define	TTE_SET_BIT(r1, r2, r3, bit) /* r1 = tte addr (clobbered); r2, r3 scratch */ \
8488644Sjake	add	r1, TTE_DATA, r1 ; /* r1 = address of the tte data word */ \
8588644Sjake	ldx	[r1], r2 ; /* r2 = current tte data */ \
8688644Sjake9:	or	r2, bit, r3 ; /* r3 = data with the bit set */ \
8788644Sjake	casxa	[r1] ASI_N, r2, r3 ; /* store r3 iff memory still holds r2; r3 = old */ \
8888644Sjake	cmp	r2, r3 ; /* someone raced us and changed the data? */ \
8988644Sjake	bne,pn	%xcc, 9b ; /* yes - retry with the freshly read value */ \
9088644Sjake	 mov	r3, r2
9188644Sjake
9288644Sjake#define	TTE_SET_REF(r1, r2, r3)		TTE_SET_BIT(r1, r2, r3, TD_REF)
9388644Sjake#define	TTE_SET_W(r1, r2, r3)		TTE_SET_BIT(r1, r2, r3, TD_W)
9488644Sjake
9588644Sjake/*
9682906Sjake * Macros for spilling and filling live windows.
9782906Sjake *
9882906Sjake * NOTE: These macros use exactly 16 instructions, and it is assumed that the
9982906Sjake * handler will not use more than 24 instructions total, to leave room for
10082906Sjake * resume vectors which occupy the last 8 instructions.
10182005Sjake */
10280709Sjake
10382906Sjake#define	SPILL(storer, base, size, asi) /* store %l0-%l7, %i0-%i7: exactly 16 insns */ \
10482906Sjake	storer	%l0, [base + (0 * size)] asi ; \
10582906Sjake	storer	%l1, [base + (1 * size)] asi ; \
10682906Sjake	storer	%l2, [base + (2 * size)] asi ; \
10782906Sjake	storer	%l3, [base + (3 * size)] asi ; \
10882906Sjake	storer	%l4, [base + (4 * size)] asi ; \
10982906Sjake	storer	%l5, [base + (5 * size)] asi ; \
11082906Sjake	storer	%l6, [base + (6 * size)] asi ; \
11182906Sjake	storer	%l7, [base + (7 * size)] asi ; \
11282906Sjake	storer	%i0, [base + (8 * size)] asi ; \
11382906Sjake	storer	%i1, [base + (9 * size)] asi ; \
11482906Sjake	storer	%i2, [base + (10 * size)] asi ; \
11582906Sjake	storer	%i3, [base + (11 * size)] asi ; \
11682906Sjake	storer	%i4, [base + (12 * size)] asi ; \
11782906Sjake	storer	%i5, [base + (13 * size)] asi ; \
11882906Sjake	storer	%i6, [base + (14 * size)] asi ; \
11982906Sjake	storer	%i7, [base + (15 * size)] asi
12080709Sjake
12182906Sjake#define	FILL(loader, base, size, asi) /* load %l0-%l7, %i0-%i7: exactly 16 insns */ \
12282906Sjake	loader	[base + (0 * size)] asi, %l0 ; \
12382906Sjake	loader	[base + (1 * size)] asi, %l1 ; \
12482906Sjake	loader	[base + (2 * size)] asi, %l2 ; \
12582906Sjake	loader	[base + (3 * size)] asi, %l3 ; \
12682906Sjake	loader	[base + (4 * size)] asi, %l4 ; \
12782906Sjake	loader	[base + (5 * size)] asi, %l5 ; \
12882906Sjake	loader	[base + (6 * size)] asi, %l6 ; \
12982906Sjake	loader	[base + (7 * size)] asi, %l7 ; \
13082906Sjake	loader	[base + (8 * size)] asi, %i0 ; \
13182906Sjake	loader	[base + (9 * size)] asi, %i1 ; \
13282906Sjake	loader	[base + (10 * size)] asi, %i2 ; \
13382906Sjake	loader	[base + (11 * size)] asi, %i3 ; \
13482906Sjake	loader	[base + (12 * size)] asi, %i4 ; \
13582906Sjake	loader	[base + (13 * size)] asi, %i5 ; \
13682906Sjake	loader	[base + (14 * size)] asi, %i6 ; \
13782906Sjake	loader	[base + (15 * size)] asi, %i7
13882005Sjake
/*
 * NOTE(review): reg-to-reg mov inserted after reading %tpc (see
 * RESUME_SPILLFILL_MAGIC); presumably a workaround for a CPU erratum
 * (#50, given the name) -- confirm against the UltraSPARC errata list.
 */
13982906Sjake#define	ERRATUM50(reg)	mov reg, reg
14082906Sjake
/* Headroom at the base of the kernel stack treated as an overflow. */
14188781Sjake#define	KSTACK_SLOP	1024
14288781Sjake
14389048Sjake/*
14489048Sjake * Sanity check the kernel stack and bail out if its wrong.
14589048Sjake * XXX: doesn't handle being on the panic stack.
14689048Sjake */
14788781Sjake#define	KSTACK_CHECK /* clobbers nothing; uses ASP_REG scratch space */ \
14888781Sjake	dec	16, ASP_REG ; /* make room to preserve %g1/%g2 */ \
14988781Sjake	stx	%g1, [ASP_REG + 0] ; \
15088781Sjake	stx	%g2, [ASP_REG + 8] ; \
15188781Sjake	add	%sp, SPOFF, %g1 ; /* %g1 = real stack pointer */ \
15288781Sjake	andcc	%g1, (1 << PTR_SHIFT) - 1, %g0 ; /* misaligned sp? */ \
15388781Sjake	bnz,a	%xcc, tl1_kstack_fault ; \
15488781Sjake	 inc	16, ASP_REG ; \
15588781Sjake	ldx	[PCPU(CURTHREAD)], %g2 ; \
15688781Sjake	ldx	[%g2 + TD_KSTACK], %g2 ; /* %g2 = stack base + slop */ \
15788781Sjake	add	%g2, KSTACK_SLOP, %g2 ; \
15888781Sjake	subcc	%g1, %g2, %g1 ; /* below the bottom of the stack? */ \
15988781Sjake	ble,a	%xcc, tl1_kstack_fault ; \
16088781Sjake	 inc	16, ASP_REG ; \
16188781Sjake	set	KSTACK_PAGES * PAGE_SIZE, %g2 ; \
16288781Sjake	cmp	%g1, %g2 ; /* above the top of the stack? */ \
16388781Sjake	bgt,a	%xcc, tl1_kstack_fault ; \
16488781Sjake	 inc	16, ASP_REG ; \
16588781Sjake	ldx	[ASP_REG + 8], %g2 ; /* ok - restore %g1/%g2 */ \
16688781Sjake	ldx	[ASP_REG + 0], %g1 ; \
16788781Sjake	inc	16, ASP_REG
16888781Sjake
16988781SjakeENTRY(tl1_kstack_fault)
	/*
	 * The kernel stack is bad (from KSTACK_CHECK): unwind the trap
	 * level to 1, tracing each level if KTR is compiled in, then reset
	 * the window state and switch to the per-cpu alternate stack
	 * before calling into the common tl1 trap path.
	 */
17088781Sjake	rdpr	%tl, %g1
17197263Sjake1:	cmp	%g1, 2
17297263Sjake	be,a	2f
17388781Sjake	 nop
17488781Sjake
17588781Sjake#if KTR_COMPILE & KTR_TRAP
17688781Sjake	CATR(KTR_TRAP, "tl1_kstack_fault: tl=%#lx tpc=%#lx tnpc=%#lx"
17697263Sjake	    , %g2, %g3, %g4, 7, 8, 9)
17797263Sjake	rdpr	%tl, %g3
17897263Sjake	stx	%g3, [%g2 + KTR_PARM1]
17997263Sjake	rdpr	%tpc, %g3
	/*
	 * BUGFIX: tpc and tnpc were both stored to KTR_PARM1, clobbering
	 * the tl value and leaving PARM2/PARM3 garbage; the format string
	 * above expects three distinct parameters.
	 */
18097263Sjake	stx	%g3, [%g2 + KTR_PARM2]
18197263Sjake	rdpr	%tnpc, %g3
18297263Sjake	stx	%g3, [%g2 + KTR_PARM3]
18388781Sjake9:
18488781Sjake#endif
18588781Sjake
	/* Pop one trap level and re-check until we are back at tl 2. */
18697263Sjake	sub	%g1, 1, %g1
18797263Sjake	wrpr	%g1, 0, %tl
18897263Sjake	ba,a	%xcc, 1b
18997263Sjake	 nop
19097263Sjake
19188781Sjake2:
19288781Sjake#if KTR_COMPILE & KTR_TRAP
19388781Sjake	CATR(KTR_TRAP,
19488781Sjake	    "tl1_kstack_fault: sp=%#lx ks=%#lx cr=%#lx cs=%#lx ow=%#lx ws=%#lx"
19588781Sjake	    , %g1, %g2, %g3, 7, 8, 9)
19688781Sjake	add	%sp, SPOFF, %g2
19788781Sjake	stx	%g2, [%g1 + KTR_PARM1]
19888781Sjake	ldx	[PCPU(CURTHREAD)], %g2
19988781Sjake	ldx	[%g2 + TD_KSTACK], %g2
20088781Sjake	stx	%g2, [%g1 + KTR_PARM2]
20188781Sjake	rdpr	%canrestore, %g2
20288781Sjake	stx	%g2, [%g1 + KTR_PARM3]
20388781Sjake	rdpr	%cansave, %g2
20488781Sjake	stx	%g2, [%g1 + KTR_PARM4]
20588781Sjake	rdpr	%otherwin, %g2
20688781Sjake	stx	%g2, [%g1 + KTR_PARM5]
20788781Sjake	rdpr	%wstate, %g2
20888781Sjake	stx	%g2, [%g1 + KTR_PARM6]
20988781Sjake9:
21088781Sjake#endif
21188781Sjake
	/* Throw away all register windows and force a known window state. */
21288781Sjake	wrpr	%g0, 0, %canrestore
21388781Sjake	wrpr	%g0, 6, %cansave
21488781Sjake	wrpr	%g0, 0, %otherwin
21588781Sjake	wrpr	%g0, WSTATE_KERNEL, %wstate
21688781Sjake
	/* Run on the alternate stack; the kernel stack is unusable. */
21788781Sjake	sub	ASP_REG, SPOFF + CCFSZ, %sp
21888781Sjake	clr	%fp
21988781Sjake
22088781Sjake	set	trap, %o2
221116589Sjake	ba	%xcc, tl1_trap
22288781Sjake	 mov	T_KSTACK_FAULT | T_KERNEL, %o0
22388781SjakeEND(tl1_kstack_fault)
22588781Sjake
22682906Sjake/*
22782906Sjake * Magic to resume from a spill or fill trap.  If we get an alignment or an
22882906Sjake * mmu fault during a spill or a fill, this macro will detect the fault and
22988644Sjake * resume at a set instruction offset in the trap handler.
23082906Sjake *
23188644Sjake * To check if the previous trap was a spill/fill we convert the trapped pc
23288644Sjake * to a trap type and verify that it is in the range of spill/fill vectors.
23382906Sjake * The spill/fill vectors are types 0x80-0xff and 0x280-0x2ff, masking off the
23482906Sjake * tl bit allows us to detect both ranges with one test.
23582906Sjake *
23682906Sjake * This is:
23788644Sjake *	0x80 <= (((%tpc - %tba) >> 5) & ~0x200) < 0x100
23882906Sjake *
23982906Sjake * To calculate the new pc we take advantage of the xor feature of wrpr.
24082906Sjake * Forcing all the low bits of the trapped pc on we can produce any offset
24182906Sjake * into the spill/fill vector.  The size of a spill/fill trap vector is 0x80.
24282906Sjake *
24382906Sjake *	0x7f ^ 0x1f == 0x60
24482906Sjake *	0x1f == (0x80 - 0x60) - 1
24582906Sjake *
24686519Sjake * Which are the offset and xor value used to resume from alignment faults.
24782906Sjake */
24882906Sjake
24982906Sjake/*
25088644Sjake * Determine if we have trapped inside of a spill/fill vector, and if so resume
25188644Sjake * at a fixed instruction offset in the trap vector.  Must be called on
25288644Sjake * alternate globals.
25382906Sjake */
25488644Sjake#define	RESUME_SPILLFILL_MAGIC(stxa_g0_sfsr, xor) \
25588644Sjake	dec	16, ASP_REG ; /* preserve %g1/%g2 on the alternate stack */ \
25688644Sjake	stx	%g1, [ASP_REG + 0] ; \
25788644Sjake	stx	%g2, [ASP_REG + 8] ; \
25888644Sjake	rdpr	%tpc, %g1 ; \
25988644Sjake	ERRATUM50(%g1) ; \
26088644Sjake	rdpr	%tba, %g2 ; \
26188644Sjake	sub	%g1, %g2, %g2 ; /* %g2 = trap type = (tpc - tba) >> 5 */ \
26288644Sjake	srlx	%g2, 5, %g2 ; \
26388644Sjake	andn	%g2, 0x200, %g2 ; /* mask the tl bit to catch both ranges */ \
26488644Sjake	cmp	%g2, 0x80 ; /* in the spill/fill range 0x80-0xff? */ \
26588644Sjake	blu,pt	%xcc, 9f ; \
26688644Sjake	 cmp	%g2, 0x100 ; \
26788644Sjake	bgeu,pt	%xcc, 9f ; \
26888644Sjake	 or	%g1, 0x7f, %g1 ; /* force low bits; xor picks the offset */ \
26988644Sjake	wrpr	%g1, xor, %tnpc ; \
27088644Sjake	stxa_g0_sfsr ; /* optionally clear the sfsr */ \
27188644Sjake	ldx	[ASP_REG + 8], %g2 ; \
27288644Sjake	ldx	[ASP_REG + 0], %g1 ; \
27388644Sjake	inc	16, ASP_REG ; \
27488644Sjake	done ; /* resume inside the spill/fill vector */ \
27588644Sjake9:	ldx	[ASP_REG + 8], %g2 ; /* not a spill/fill; fall through */ \
27688644Sjake	ldx	[ASP_REG + 0], %g1 ; \
27788644Sjake	inc	16, ASP_REG
27882906Sjake
27988644Sjake/*
28088644Sjake * For certain faults we need to clear the sfsr mmu register before returning.
28188644Sjake */
28288644Sjake#define	RSF_CLR_SFSR \
28388644Sjake	wr	%g0, ASI_DMMU, %asi ; \
28488644Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
28588644Sjake
/* Convert a resume offset into the wrpr xor constant (see comment above). */
28682906Sjake#define	RSF_XOR(off)	((0x80 - off) - 1)
28782906Sjake
28882906Sjake/*
28982906Sjake * Instruction offsets in spill and fill trap handlers for handling certain
29082906Sjake * nested traps, and corresponding xor constants for wrpr.
29182906Sjake */
29286519Sjake#define	RSF_OFF_ALIGN	0x60
29386519Sjake#define	RSF_OFF_MMU	0x70
29482906Sjake
/* Resume variants: alignment fault (clears sfsr), mmu fault (with/without). */
29588644Sjake#define	RESUME_SPILLFILL_ALIGN \
29688644Sjake	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_ALIGN))
29788644Sjake#define	RESUME_SPILLFILL_MMU \
29888644Sjake	RESUME_SPILLFILL_MAGIC(EMPTY, RSF_XOR(RSF_OFF_MMU))
29988644Sjake#define	RESUME_SPILLFILL_MMU_CLR_SFSR \
30088644Sjake	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_MMU))
30182906Sjake
30282906Sjake/*
30382906Sjake * Constant to add to %tnpc when taking a fill trap just before returning to
30488644Sjake * user mode.
30582906Sjake */
30682906Sjake#define	RSF_FILL_INC	tl0_ret_fill_end - tl0_ret_fill
30782906Sjake
30882906Sjake/*
30982906Sjake * Retry a spill or fill with a different wstate due to an alignment fault.
31082906Sjake * We may just be using the wrong stack offset.
31182906Sjake */
31282906Sjake#define	RSF_ALIGN_RETRY(ws) \
31382906Sjake	wrpr	%g0, (ws), %wstate ; \
31482906Sjake	retry ; \
31582906Sjake	.align	16
31682906Sjake
31782906Sjake/*
31882906Sjake * Generate a T_SPILL or T_FILL trap if the window operation fails.
31982906Sjake * The trap type is passed to tl0_sftrap in %g2.
32082906Sjake */
32082906Sjake#define	RSF_TRAP(type) \
321116589Sjake	ba	%xcc, tl0_sftrap ; \
32282906Sjake	 mov	type, %g2 ; \
32382906Sjake	.align	16
32482906Sjake
32582906Sjake/*
32682906Sjake * Game over if the window operation fails.
32782906Sjake */
32882906Sjake#define	RSF_FATAL(type) \
329116589Sjake	ba	%xcc, rsf_fatal ; \
33088781Sjake	 mov	type, %g2 ; \
33182906Sjake	.align	16
33282906Sjake
33382906Sjake/*
33482906Sjake * Magic to resume from a failed fill a few instructions after the corresponding
33582906Sjake * restore.  This is used on return from the kernel to usermode.
33682906Sjake */
33782906Sjake#define	RSF_FILL_MAGIC \
33882906Sjake	rdpr	%tnpc, %g1 ; \
33982906Sjake	add	%g1, RSF_FILL_INC, %g1 ; \
34082906Sjake	wrpr	%g1, 0, %tnpc ; \
34182906Sjake	done ; \
34282906Sjake	.align	16
34382906Sjake
34482906Sjake/*
34582906Sjake * Spill to the pcb if a spill to the user stack in kernel mode fails.
34682906Sjake */
34782906Sjake#define	RSF_SPILL_TOPCB \
348116589Sjake	ba,a	%xcc, tl1_spill_topcb ; \
34982906Sjake	 nop ; \
35082906Sjake	.align	16
35182906Sjake
35288781SjakeENTRY(rsf_fatal)
	/*
	 * Unrecoverable window spill/fill failure (%g2 = trap type from
	 * RSF_FATAL): trace it, run the stack sanity check (which may
	 * divert to tl1_kstack_fault), then software-initiated reset.
	 */
35388781Sjake#if KTR_COMPILE & KTR_TRAP
35488781Sjake	CATR(KTR_TRAP, "rsf_fatal: bad window trap tt=%#lx type=%#lx"
35588781Sjake	    , %g1, %g3, %g4, 7, 8, 9)
35688781Sjake	rdpr	%tt, %g3
35788781Sjake	stx	%g3, [%g1 + KTR_PARM1]
35888781Sjake	stx	%g2, [%g1 + KTR_PARM2]
35988781Sjake9:
36088781Sjake#endif
36188781Sjake
36288781Sjake	KSTACK_CHECK
36388781Sjake
36488781Sjake	sir
36588781SjakeEND(rsf_fatal)
36688781Sjake
/*
 * Interrupt name table and counters (8 bytes per vector); the e* symbols
 * mark the ends of the arrays.
 */
367117658Sjmg	.comm	intrnames, IV_NAMLEN
36885243Sjake	.comm	eintrnames, 0
36980709Sjake
37097265Sjake	.comm	intrcnt, IV_MAX * 8
37185243Sjake	.comm	eintrcnt, 0
37280709Sjake
37382906Sjake/*
37482906Sjake * Trap table and associated macros
37582906Sjake *
37682906Sjake * Due to its size a trap table is an inherently hard thing to represent in
37782906Sjake * code in a clean way.  There are approximately 1024 vectors, of 8 or 32
37882906Sjake * instructions each, many of which are identical.  The way that this is
37882906Sjake * laid out is the instructions (8 or 32) for the actual trap vector appear
38082906Sjake * as an AS macro.  In general this code branches to tl0_trap or tl1_trap,
38182906Sjake * but if not supporting code can be placed just after the definition of the
38282906Sjake * macro.  The macros are then instantiated in a different section (.trap),
38382906Sjake * which is setup to be placed by the linker at the beginning of .text, and the
38482906Sjake * code around the macros is moved to the end of trap table.  In this way the
38582906Sjake * code that must be sequential in memory can be split up, and located near
38682906Sjake * its supporting code so that it is easier to follow.
38782906Sjake */
38882906Sjake
38982906Sjake	/*
39082906Sjake	 * Clean window traps occur when %cleanwin is zero to ensure that data
39182906Sjake	 * is not leaked between address spaces in registers.
39282906Sjake	 */
39380709Sjake	.macro	clean_window
	/* Zero all out and local registers of the window being cleaned. */
39480709Sjake	clr	%o0
39580709Sjake	clr	%o1
39680709Sjake	clr	%o2
39780709Sjake	clr	%o3
39880709Sjake	clr	%o4
39980709Sjake	clr	%o5
40080709Sjake	clr	%o6
40180709Sjake	clr	%o7
40280709Sjake	clr	%l0
40380709Sjake	clr	%l1
40480709Sjake	clr	%l2
40580709Sjake	clr	%l3
40680709Sjake	clr	%l4
40780709Sjake	clr	%l5
40880709Sjake	clr	%l6
	/* One more window is now clean; %l7 is cleared last (it was scratch). */
40980709Sjake	rdpr	%cleanwin, %l7
41080709Sjake	inc	%l7
41180709Sjake	wrpr	%l7, 0, %cleanwin
41280709Sjake	clr	%l7
41380709Sjake	retry
41480709Sjake	.align	128
41580709Sjake	.endm
41680709Sjake
41781380Sjake	/*
41882906Sjake	 * Stack fixups for entry from user mode.  We are still running on the
41982906Sjake	 * user stack, and with its live registers, so we must save soon.  We
42082906Sjake	 * are on alternate globals so we do have some registers.  Set the
42188644Sjake	 * transitional window state, and do the save.  If this traps, we
42288644Sjake	 * attempt to spill a window to the user stack.  If this fails,
42388644Sjake	 * we spill the window to the pcb and continue.  Spilling to the pcb
42488644Sjake	 * must not fail.
42582906Sjake	 *
42682906Sjake	 * NOTE: Must be called with alternate globals and clobbers %g1.
42781380Sjake	 */
42882906Sjake
42988644Sjake	.macro	tl0_split
	/*
	 * Enter the kernel from user mode: set the transitional window
	 * state and get a new window with save.  Clobbers %g1.
	 */
43082906Sjake	rdpr	%wstate, %g1
43182906Sjake	wrpr	%g1, WSTATE_TRANSITION, %wstate
43281380Sjake	save
43381380Sjake	.endm
43481380Sjake
43582906Sjake	.macro	tl0_setup	type
	/*
	 * Common user-trap setup: split off a window, then enter
	 * tl0_utrap with %o0 = trap type, %o1 = 0 (no extra argument)
	 * and %o2 = address of the C trap() handler.
	 */
43688644Sjake	tl0_split
437108374Sjake	clr	%o1
438103921Sjake	set	trap, %o2
439103897Sjake	ba	%xcc, tl0_utrap
44082906Sjake	 mov	\type, %o0
44181380Sjake	.endm
44281380Sjake
44381380Sjake	/*
44482906Sjake	 * Generic trap type.  Call trap() with the specified type.
44581380Sjake	 */
44680709Sjake	.macro	tl0_gen		type
44780709Sjake	tl0_setup \type
	/* Pad out the trap vector slot. */
44880709Sjake	.align	32
44980709Sjake	.endm
45080709Sjake
45182906Sjake	/*
45282906Sjake	 * This is used to suck up the massive swaths of reserved trap types.
45382906Sjake	 * Generates count "reserved" trap vectors.
45482906Sjake	 */
45580709Sjake	.macro	tl0_reserved	count
	/* Emit \count identical T_RESERVED vectors. */
45680709Sjake	.rept	\count
45780709Sjake	tl0_gen	T_RESERVED
45880709Sjake	.endr
45980709Sjake	.endm
46080709Sjake
461109810Sjake	.macro	tl1_split
	/*
	 * Nested (kernel-mode) trap entry: set the nested window state and
	 * allocate a trap frame on the existing kernel stack.  Clobbers %g1.
	 */
462109810Sjake	rdpr	%wstate, %g1
463109810Sjake	wrpr	%g1, WSTATE_NESTED, %wstate
464109810Sjake	save	%sp, -(CCFSZ + TF_SIZEOF), %sp
465109810Sjake	.endm
466109810Sjake
467109810Sjake	.macro	tl1_setup	type
	/*
	 * Common kernel-trap setup: %o0 = type | T_KERNEL, %o1 = 0,
	 * %o2 = address of the C trap() handler.
	 */
468109810Sjake	tl1_split
469109810Sjake	clr	%o1
470109810Sjake	set	trap, %o2
471116589Sjake	ba	%xcc, tl1_trap
472109810Sjake	 mov	\type | T_KERNEL, %o0
473109810Sjake	.endm
474109810Sjake
475109810Sjake	.macro	tl1_gen		type
476109810Sjake	tl1_setup \type
	/* Pad out the trap vector slot. */
477109810Sjake	.align	32
478109810Sjake	.endm
479109810Sjake
480109810Sjake	.macro	tl1_reserved	count
	/* Emit \count identical T_RESERVED vectors at trap level 1. */
481109810Sjake	.rept	\count
482109810Sjake	tl1_gen	T_RESERVED
483109810Sjake	.endr
484109810Sjake	.endm
485109810Sjake
48688644Sjake	.macro	tl0_insn_excptn
	/*
	 * Instruction access exception from user mode: switch to alternate
	 * globals, collect %g3 = faulting pc and %g4 = i-mmu sfsr, clear
	 * the sfsr, and join the common sfsr trap path with the type in %g2.
	 */
487101899Sjake	wrpr	%g0, PSTATE_ALT, %pstate
48888644Sjake	wr	%g0, ASI_IMMU, %asi
48988644Sjake	rdpr	%tpc, %g3
49088644Sjake	ldxa	[%g0 + AA_IMMU_SFSR] %asi, %g4
49188644Sjake	stxa	%g0, [%g0 + AA_IMMU_SFSR] %asi
49288644Sjake	membar	#Sync
493116589Sjake	ba	%xcc, tl0_sfsr_trap
49488644Sjake	 mov	T_INSTRUCTION_EXCEPTION, %g2
49588644Sjake	.align	32
49688644Sjake	.endm
49788644Sjake
49882906Sjake	.macro	tl0_data_excptn
	/*
	 * Data access exception from user mode: %g3 = fault address (sfar),
	 * %g4 = d-mmu sfsr; clear the sfsr and join the common sfsr trap
	 * path with the type in %g2.
	 */
499101899Sjake	wrpr	%g0, PSTATE_ALT, %pstate
50082906Sjake	wr	%g0, ASI_DMMU, %asi
50182906Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
50282906Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
50388644Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
50488644Sjake	membar	#Sync
505116589Sjake	ba	%xcc, tl0_sfsr_trap
50688644Sjake	 mov	T_DATA_EXCEPTION, %g2
50782906Sjake	.align	32
50882906Sjake	.endm
50982906Sjake
51082005Sjake	.macro	tl0_align
	/*
	 * Memory address not aligned, user mode: %g3 = fault address,
	 * %g4 = d-mmu sfsr; clear the sfsr and go to the common sfsr trap.
	 * Note: unlike tl0_data_excptn, globals are not switched here.
	 */
51182906Sjake	wr	%g0, ASI_DMMU, %asi
51282906Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
51382906Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
51488644Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
51588644Sjake	membar	#Sync
516116589Sjake	ba	%xcc, tl0_sfsr_trap
51788644Sjake	 mov	T_MEM_ADDRESS_NOT_ALIGNED, %g2
51882005Sjake	.align	32
51982005Sjake	.endm
52082005Sjake
52182005SjakeENTRY(tl0_sfsr_trap)
	/*
	 * Common entry for user faults that carry sfar/sfsr state:
	 * expects %g2 = trap type, %g3 = fault address/pc, %g4 = sfsr.
	 * Passes them to tl0_utrap as %o0, %o4 and %o5.
	 */
52288644Sjake	tl0_split
523108374Sjake	clr	%o1
524103921Sjake	set	trap, %o2
52588644Sjake	mov	%g3, %o4
52688644Sjake	mov	%g4, %o5
527103897Sjake	ba	%xcc, tl0_utrap
52882906Sjake	 mov	%g2, %o0
52982005SjakeEND(tl0_sfsr_trap)
53082005Sjake
53182906Sjake	.macro	tl0_intr level, mask
	/*
	 * Interrupt from user mode: %o0 = pil level, %o1 = level mask,
	 * handled by the common tl0_intr entry point.
	 */
53288644Sjake	tl0_split
53391246Sjake	set	\mask, %o1
534116589Sjake	ba	%xcc, tl0_intr
53591246Sjake	 mov	\level, %o0
53681380Sjake	.align	32
53781380Sjake	.endm
53881380Sjake
/* Instantiate one interrupt vector at the given trap level. */
53981380Sjake#define	INTR(level, traplvl)						\
54082906Sjake	tl ## traplvl ## _intr	level, 1 << level
54181380Sjake
/* The tick interrupt (PIL_TICK) uses mask 1 rather than 1 << level. */
54281380Sjake#define	TICK(traplvl) \
54382906Sjake	tl ## traplvl ## _intr	PIL_TICK, 1
54481380Sjake
/* All 15 interrupt-level vectors; level 14 is the tick interrupt. */
54581380Sjake#define	INTR_LEVEL(tl)							\
54681380Sjake	INTR(1, tl) ;							\
54781380Sjake	INTR(2, tl) ;							\
54881380Sjake	INTR(3, tl) ;							\
54981380Sjake	INTR(4, tl) ;							\
55081380Sjake	INTR(5, tl) ;							\
55181380Sjake	INTR(6, tl) ;							\
55281380Sjake	INTR(7, tl) ;							\
55381380Sjake	INTR(8, tl) ;							\
55481380Sjake	INTR(9, tl) ;							\
55581380Sjake	INTR(10, tl) ;							\
55681380Sjake	INTR(11, tl) ;							\
55781380Sjake	INTR(12, tl) ;							\
55881380Sjake	INTR(13, tl) ;							\
55981380Sjake	TICK(tl) ;							\
56081380Sjake	INTR(15, tl) ;
56181380Sjake
56280709Sjake	.macro	tl0_intr_level
	/* Emit the 15 interrupt-level trap vectors for trap level 0. */
56381380Sjake	INTR_LEVEL(0)
56480709Sjake	.endm
56580709Sjake
56697265Sjake	.macro	intr_vector
	/*
	 * Vectored interrupt: if the receive register is busy, branch to
	 * the global intr_vector handler (defined elsewhere); otherwise
	 * something is badly wrong, so reset.
	 */
56797265Sjake	ldxa	[%g0] ASI_INTR_RECEIVE, %g1
56897265Sjake	andcc	%g1, IRSR_BUSY, %g0
569104075Sjake	bnz,a,pt %xcc, intr_vector
57097265Sjake	 nop
57197265Sjake	sir
57281380Sjake	.align	32
57380709Sjake	.endm
57480709Sjake
575109860Sjake	.macro	tl0_immu_miss
	/*
	 * Register usage: %g1 = tag access, %g2 = page size (TS_MIN..TS_MAX),
	 * %g3 = page shift / tag target, %g4 = tte bucket address,
	 * %g6/%g7 = tte tag/data.  On a miss falls back to tl0_immu_miss_trap.
	 */
57681380Sjake	/*
577109860Sjake	 * Load the virtual page number and context from the tag access
578109860Sjake	 * register.  We ignore the context.
579109860Sjake	 */
580109860Sjake	wr	%g0, ASI_IMMU, %asi
581109860Sjake	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g1
582109860Sjake
583109860Sjake	/*
584102040Sjake	 * Initialize the page size walker.
585102040Sjake	 */
586102040Sjake	mov	TS_MIN, %g2
587102040Sjake
588102040Sjake	/*
589102040Sjake	 * Loop over all supported page sizes.
590102040Sjake	 */
591102040Sjake
592102040Sjake	/*
593102040Sjake	 * Compute the page shift for the page size we are currently looking
594102040Sjake	 * for.
595102040Sjake	 */
596102040Sjake1:	add	%g2, %g2, %g3
597102040Sjake	add	%g3, %g2, %g3
598102040Sjake	add	%g3, PAGE_SHIFT, %g3
599102040Sjake
600102040Sjake	/*
60191224Sjake	 * Extract the virtual page number from the contents of the tag
60291224Sjake	 * access register.
60381380Sjake	 */
604102040Sjake	srlx	%g1, %g3, %g3
60581380Sjake
60681380Sjake	/*
60791224Sjake	 * Compute the tte bucket address.
60881380Sjake	 */
609102040Sjake	ldxa	[%g0 + AA_IMMU_TSB] %asi, %g5
610102040Sjake	and	%g3, TSB_BUCKET_MASK, %g4
611102040Sjake	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
612102040Sjake	add	%g4, %g5, %g4
61381380Sjake
61481380Sjake	/*
615102040Sjake	 * Compute the tte tag target.
61681380Sjake	 */
617102040Sjake	sllx	%g3, TV_SIZE_BITS, %g3
618102040Sjake	or	%g3, %g2, %g3
61981380Sjake
62081380Sjake	/*
621102040Sjake	 * Loop over the ttes in this bucket
62281380Sjake	 */
62381380Sjake
62481380Sjake	/*
625102040Sjake	 * Load the tte.  Note that this instruction may fault, clobbering
626102040Sjake	 * the contents of the tag access register, %g5, %g6, and %g7.  We
627102040Sjake	 * do not use %g5, and %g6 and %g7 are not used until this instruction
628102040Sjake	 * completes successfully.
62981380Sjake	 */
630102040Sjake2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
63181380Sjake
63281380Sjake	/*
633102040Sjake	 * Check that it's valid and executable and that the tte tags match.
63481380Sjake	 */
635102040Sjake	brgez,pn %g7, 3f
636102040Sjake	 andcc	%g7, TD_EXEC, %g0
637102040Sjake	bz,pn	%xcc, 3f
638102040Sjake	 cmp	%g3, %g6
639102040Sjake	bne,pn	%xcc, 3f
64088644Sjake	 EMPTY
64181380Sjake
64281380Sjake	/*
64381380Sjake	 * We matched a tte, load the tlb.
64481380Sjake	 */
64581380Sjake
64681380Sjake	/*
64781380Sjake	 * Set the reference bit, if it's currently clear.
64881380Sjake	 */
649102040Sjake	 andcc	%g7, TD_REF, %g0
65082906Sjake	bz,a,pn	%xcc, tl0_immu_miss_set_ref
65181380Sjake	 nop
65281380Sjake
65381380Sjake	/*
65491224Sjake	 * Load the tte tag and data into the tlb and retry the instruction.
65581380Sjake	 */
656102040Sjake	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
657102040Sjake	stxa	%g7, [%g0] ASI_ITLB_DATA_IN_REG
65881380Sjake	retry
65981380Sjake
66081380Sjake	/*
661102040Sjake	 * Advance to the next tte in this bucket, and check the low bits
662102040Sjake	 * of the bucket pointer to see if we've finished the bucket.
66381380Sjake	 */
664102040Sjake3:	add	%g4, 1 << TTE_SHIFT, %g4
665102040Sjake	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
666102040Sjake	bnz,pt	%xcc, 2b
667102040Sjake	 EMPTY
66891224Sjake
66991224Sjake	/*
670102040Sjake	 * See if we just checked the largest page size, and advance to the
671102040Sjake	 * next one if not.
67291224Sjake	 */
673102040Sjake	 cmp	%g2, TS_MAX
674102040Sjake	bne,pt	%xcc, 1b
675102040Sjake	 add	%g2, 1, %g2
67691224Sjake
67796207Sjake	/*
678102040Sjake	 * Not in user tsb, call c code.
679102040Sjake	 */
680102040Sjake	ba,a	%xcc, tl0_immu_miss_trap
68181380Sjake	.align	128
68280709Sjake	.endm
68380709Sjake
68482906SjakeENTRY(tl0_immu_miss_set_ref)
68581380Sjake	/*
68681380Sjake	 * Set the reference bit.
68781380Sjake	 */
688102040Sjake	TTE_SET_REF(%g4, %g2, %g3)
68981380Sjake
69081380Sjake	/*
691102040Sjake	 * May have become invalid during casxa, in which case start over.
	 * (The valid bit is the sign bit of the tte data: negative = valid.)
69281380Sjake	 */
693102040Sjake	brgez,pn %g2, 1f
694102040Sjake	 nop
69581380Sjake
69681380Sjake	/*
69791224Sjake	 * Load the tte tag and data into the tlb and retry the instruction.
69881380Sjake	 */
699102040Sjake	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
700102040Sjake	stxa	%g2, [%g0] ASI_ITLB_DATA_IN_REG
70191224Sjake1:	retry
70282906SjakeEND(tl0_immu_miss_set_ref)
70381380Sjake
70482906SjakeENTRY(tl0_immu_miss_trap)
70581380Sjake	/*
70696207Sjake	 * Put back the contents of the tag access register, in case we
70796207Sjake	 * faulted.
70896207Sjake	 */
709102040Sjake	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
71096207Sjake	membar	#Sync
71196207Sjake
71296207Sjake	/*
71382906Sjake	 * Switch to alternate globals.
71482906Sjake	 */
71582906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
71682906Sjake
71782906Sjake	/*
71891224Sjake	 * Reload the tag access register.
71981380Sjake	 */
72091224Sjake	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2
72181380Sjake
72281380Sjake	/*
72391224Sjake	 * Save the tag access register, and call common trap code.
	 * %o3 carries the tag access value to trap().
72481380Sjake	 */
72588644Sjake	tl0_split
726108374Sjake	clr	%o1
727103921Sjake	set	trap, %o2
72891224Sjake	mov	%g2, %o3
729114257Sjake	ba	%xcc, tl0_utrap
73088644Sjake	 mov	T_INSTRUCTION_MISS, %o0
73182906SjakeEND(tl0_immu_miss_trap)
73281380Sjake
733109860Sjake	.macro	tl0_dmmu_miss
	/*
	 * Register usage mirrors tl0_immu_miss: %g1 = tag access, %g2 = page
	 * size, %g3 = shift/tag target, %g4 = bucket, %g6/%g7 = tte tag/data.
	 * The tl1_dmmu_miss_user label below is an alternate entry point
	 * reached from elsewhere in the file (after %g1 is already loaded).
	 */
73481180Sjake	/*
735109860Sjake	 * Load the virtual page number and context from the tag access
736109860Sjake	 * register.  We ignore the context.
737109860Sjake	 */
738109860Sjake	wr	%g0, ASI_DMMU, %asi
739109860Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1
740109860Sjake
741109860Sjake	/*
742102040Sjake	 * Initialize the page size walker.
743102040Sjake	 */
744109860Sjaketl1_dmmu_miss_user:
745102040Sjake	mov	TS_MIN, %g2
746102040Sjake
747102040Sjake	/*
748102040Sjake	 * Loop over all supported page sizes.
749102040Sjake	 */
750102040Sjake
751102040Sjake	/*
752102040Sjake	 * Compute the page shift for the page size we are currently looking
753102040Sjake	 * for.
754102040Sjake	 */
755102040Sjake1:	add	%g2, %g2, %g3
756102040Sjake	add	%g3, %g2, %g3
757102040Sjake	add	%g3, PAGE_SHIFT, %g3
758102040Sjake
759102040Sjake	/*
76091224Sjake	 * Extract the virtual page number from the contents of the tag
76191224Sjake	 * access register.
76291224Sjake	 */
763102040Sjake	srlx	%g1, %g3, %g3
76491224Sjake
76591224Sjake	/*
76688644Sjake	 * Compute the tte bucket address.
76781180Sjake	 */
768102040Sjake	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g5
769102040Sjake	and	%g3, TSB_BUCKET_MASK, %g4
770102040Sjake	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
771102040Sjake	add	%g4, %g5, %g4
77281180Sjake
77381180Sjake	/*
774102040Sjake	 * Compute the tte tag target.
77581180Sjake	 */
776102040Sjake	sllx	%g3, TV_SIZE_BITS, %g3
777102040Sjake	or	%g3, %g2, %g3
77881180Sjake
77981180Sjake	/*
780102040Sjake	 * Loop over the ttes in this bucket
78181180Sjake	 */
78281180Sjake
78381180Sjake	/*
784102040Sjake	 * Load the tte.  Note that this instruction may fault, clobbering
785102040Sjake	 * the contents of the tag access register, %g5, %g6, and %g7.  We
786102040Sjake	 * do not use %g5, and %g6 and %g7 are not used until this instruction
787102040Sjake	 * completes successfully.
78881180Sjake	 */
789102040Sjake2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
79081180Sjake
79181180Sjake	/*
79291224Sjake	 * Check that it's valid and that the virtual page numbers match.
79381180Sjake	 */
794102040Sjake	brgez,pn %g7, 3f
795102040Sjake	 cmp	%g3, %g6
796102040Sjake	bne,pn	%xcc, 3f
79788644Sjake	 EMPTY
79881180Sjake
79981180Sjake	/*
80081180Sjake	 * We matched a tte, load the tlb.
80181180Sjake	 */
80281180Sjake
80381180Sjake	/*
80481180Sjake	 * Set the reference bit, if it's currently clear.
80581180Sjake	 */
806102040Sjake	 andcc	%g7, TD_REF, %g0
807109860Sjake	bz,a,pn	%xcc, tl0_dmmu_miss_set_ref
80881180Sjake	 nop
80981180Sjake
81081180Sjake	/*
81191224Sjake	 * Load the tte tag and data into the tlb and retry the instruction.
81281180Sjake	 */
813102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
814102040Sjake	stxa	%g7, [%g0] ASI_DTLB_DATA_IN_REG
81581180Sjake	retry
81681180Sjake
81781180Sjake	/*
818102040Sjake	 * Advance to the next tte in this bucket, and check the low bits
819102040Sjake	 * of the bucket pointer to see if we've finished the bucket.
82081180Sjake	 */
821102040Sjake3:	add	%g4, 1 << TTE_SHIFT, %g4
822102040Sjake	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
823102040Sjake	bnz,pt	%xcc, 2b
824102040Sjake	 EMPTY
825102040Sjake
826102040Sjake	/*
827102040Sjake	 * See if we just checked the largest page size, and advance to the
828102040Sjake	 * next one if not.
829102040Sjake	 */
830102040Sjake	 cmp	%g2, TS_MAX
831102040Sjake	bne,pt	%xcc, 1b
832102040Sjake	 add	%g2, 1, %g2
833109860Sjake
834109860Sjake	/*
835109860Sjake	 * Not in user tsb, call c code.
836109860Sjake	 */
837109860Sjake	ba,a	%xcc, tl0_dmmu_miss_trap
838109860Sjake	.align	128
83981180Sjake	.endm
84081180Sjake
841109860SjakeENTRY(tl0_dmmu_miss_set_ref)
84281180Sjake	/*
84381180Sjake	 * Set the reference bit.
84481180Sjake	 */
845102040Sjake	TTE_SET_REF(%g4, %g2, %g3)
84681180Sjake
84781180Sjake	/*
848102040Sjake	 * May have become invalid during casxa, in which case start over.
	 * (The valid bit is the sign bit of the tte data: negative = valid.)
84981180Sjake	 */
850102040Sjake	brgez,pn %g2, 1f
851102040Sjake	 nop
85281180Sjake
85381180Sjake	/*
85491224Sjake	 * Load the tte tag and data into the tlb and retry the instruction.
85581180Sjake	 */
856102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
857102040Sjake	stxa	%g2, [%g0] ASI_DTLB_DATA_IN_REG
85891224Sjake1:	retry
859109860SjakeEND(tl0_dmmu_miss_set_ref)
86081180Sjake
/*
 * Slow path for a user dtlb miss that was not satisfied from the tsb:
 * hand off to the C trap code.  Distinguishes a miss taken at TL1 during
 * a window spill/fill (resumed via RESUME_SPILLFILL_MMU and reported as a
 * kernel trap) from a plain TL0 miss (reported via tl0_utrap).
 * In: %g1 = tag access register contents (restored below in case the tsb
 * probe itself faulted and clobbered the hardware register).
 */
86181180SjakeENTRY(tl0_dmmu_miss_trap)
86282005Sjake	/*
86396207Sjake	 * Put back the contents of the tag access register, in case we
86496207Sjake	 * faulted.
86596207Sjake	 */
866102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
86796207Sjake	membar	#Sync
86896207Sjake
86996207Sjake	/*
87082906Sjake	 * Switch to alternate globals.
87182005Sjake	 */
87282906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
87382005Sjake
87482005Sjake	/*
875109860Sjake	 * Check if we actually came from the kernel.
876109860Sjake	 */
877109860Sjake	rdpr	%tl, %g1
878109860Sjake	cmp	%g1, 1
879109860Sjake	bgt,a,pn %xcc, 1f
880109860Sjake	 nop
881109860Sjake
882109860Sjake	/*
88391224Sjake	 * Reload the tag access register.
88482005Sjake	 */
88591224Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
88681180Sjake
88781180Sjake	/*
88891224Sjake	 * Save the tag access register and call common trap code.
88981180Sjake	 */
89088644Sjake	tl0_split
891108374Sjake	clr	%o1
892103921Sjake	set	trap, %o2
89391224Sjake	mov	%g2, %o3
894114257Sjake	ba	%xcc, tl0_utrap
89588644Sjake	 mov	T_DATA_MISS, %o0
896109860Sjake
897109860Sjake	/*
898109860Sjake	 * Handle faults during window spill/fill.
899109860Sjake	 */
900109860Sjake1:	RESUME_SPILLFILL_MMU
901109860Sjake
902109860Sjake	/*
903109860Sjake	 * Reload the tag access register.
904109860Sjake	 */
905109860Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
906109860Sjake
907109860Sjake	tl1_split
908109860Sjake	clr	%o1
909109860Sjake	set	trap, %o2
910109860Sjake	mov	%g2, %o3
911116589Sjake	ba	%xcc, tl1_trap
912109860Sjake	 mov	T_DATA_MISS | T_KERNEL, %o0
91382906SjakeEND(tl0_dmmu_miss_trap)
91481180Sjake
/*
 * Trap table entry for a TL0 dtlb protection fault.  The handler proper
 * (tl0_dmmu_prot_1) is too large for the 128 byte trap vector slot, so
 * the vector just branches out of line.
 */
915109860Sjake	.macro	tl0_dmmu_prot
916109860Sjake	ba,a	%xcc, tl0_dmmu_prot_1
917109860Sjake	 nop
918109860Sjake	.align	128
919109860Sjake	.endm
920109860Sjake
/*
 * TL0 dtlb protection fault handler: walk the user tsb looking for a
 * writable (TD_SW) tte matching the faulting virtual page; if found, set
 * the hardware write bit, demap the stale read-only tlb entry and load
 * the updated tte.  Falls through to tl0_dmmu_prot_trap (C code) if no
 * match is found for any supported page size.
 * Also entered at tl1_dmmu_prot_user for protection faults on user
 * addresses taken at TL1 (spill/fill to user stack).
 * Register use: %g1 = tag access, %g2 = page size walker / tte data,
 * %g3 = tag target, %g4 = tte address, %g6:%g7 = tte tag:data pair.
 */
921109860SjakeENTRY(tl0_dmmu_prot_1)
92288644Sjake	/*
923109860Sjake	 * Load the virtual page number and context from the tag access
924109860Sjake	 * register.  We ignore the context.
925109860Sjake	 */
926109860Sjake	wr	%g0, ASI_DMMU, %asi
927109860Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1
928109860Sjake
929109860Sjake	/*
930102040Sjake	 * Initialize the page size walker.
931102040Sjake	 */
932109860Sjaketl1_dmmu_prot_user:
933102040Sjake	mov	TS_MIN, %g2
934102040Sjake
935102040Sjake	/*
936102040Sjake	 * Loop over all supported page sizes.
937102040Sjake	 */
938102040Sjake
939102040Sjake	/*
940102040Sjake	 * Compute the page shift for the page size we are currently looking
941102040Sjake	 * for.
942102040Sjake	 */
/* page shift = PAGE_SHIFT + 3 * page size index (each size is 8x larger) */
943102040Sjake1:	add	%g2, %g2, %g3
944102040Sjake	add	%g3, %g2, %g3
945102040Sjake	add	%g3, PAGE_SHIFT, %g3
946102040Sjake
947102040Sjake	/*
94891224Sjake	 * Extract the virtual page number from the contents of the tag
94991224Sjake	 * access register.
95091224Sjake	 */
951102040Sjake	srlx	%g1, %g3, %g3
95291224Sjake
95391224Sjake	/*
95488644Sjake	 * Compute the tte bucket address.
95588644Sjake	 */
956102040Sjake	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g5
957102040Sjake	and	%g3, TSB_BUCKET_MASK, %g4
958102040Sjake	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
959102040Sjake	add	%g4, %g5, %g4
96088644Sjake
96188644Sjake	/*
962102040Sjake	 * Compute the tte tag target.
96388644Sjake	 */
964102040Sjake	sllx	%g3, TV_SIZE_BITS, %g3
965102040Sjake	or	%g3, %g2, %g3
96688644Sjake
96788644Sjake	/*
968102040Sjake	 * Loop over the ttes in this bucket
96988644Sjake	 */
97088644Sjake
97188644Sjake	/*
972102040Sjake	 * Load the tte.  Note that this instruction may fault, clobbering
973102040Sjake	 * the contents of the tag access register, %g5, %g6, and %g7.  We
974102040Sjake	 * do not use %g5, and %g6 and %g7 are not used until this instruction
975102040Sjake	 * completes successfully.
97688644Sjake	 */
977102040Sjake2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
97888644Sjake
97988644Sjake	/*
98091224Sjake	 * Check that its valid and writable and that the virtual page
98191224Sjake	 * numbers match.
98288644Sjake	 */
983102040Sjake	brgez,pn %g7, 4f
984102040Sjake	 andcc	%g7, TD_SW, %g0
985102040Sjake	bz,pn	%xcc, 4f
986102040Sjake	 cmp	%g3, %g6
987102040Sjake	bne,pn	%xcc, 4f
98888644Sjake	 nop
98988644Sjake
99091224Sjake	/*
99191224Sjake	 * Set the hardware write bit.
99291224Sjake	 */
993102040Sjake	TTE_SET_W(%g4, %g2, %g3)
99488644Sjake
99588644Sjake	/*
996102040Sjake	 * Delete the old TLB entry and clear the sfsr.
99788644Sjake	 */
998102040Sjake	srlx	%g1, PAGE_SHIFT, %g3
999102040Sjake	sllx	%g3, PAGE_SHIFT, %g3
1000102040Sjake	stxa	%g0, [%g3] ASI_DMMU_DEMAP
1001102040Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
1002102040Sjake	membar	#Sync
100388644Sjake
100481180Sjake	/*
1005102040Sjake	 * May have become invalid during casxa, in which case start over.
100688644Sjake	 */
1007102040Sjake	brgez,pn %g2, 3f
1008102040Sjake	 or	%g2, TD_W, %g2
100988644Sjake
101088644Sjake	/*
1011102040Sjake	 * Load the tte data into the tlb and retry the instruction.
101296207Sjake	 */
1013102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
1014102040Sjake	stxa	%g2, [%g0] ASI_DTLB_DATA_IN_REG
1015102040Sjake3:	retry
101696207Sjake
101796207Sjake	/*
1018102040Sjake	 * Check the low bits to see if we've finished the bucket.
101988644Sjake	 */
1020102040Sjake4:	add	%g4, 1 << TTE_SHIFT, %g4
1021102040Sjake	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
1022102040Sjake	bnz,pt	%xcc, 2b
1023102040Sjake	 EMPTY
102488644Sjake
102588644Sjake	/*
1026102040Sjake	 * See if we just checked the largest page size, and advance to the
1027102040Sjake	 * next one if not.
102888644Sjake	 */
1029102040Sjake	 cmp	%g2, TS_MAX
1030102040Sjake	bne,pt	%xcc, 1b
1031102040Sjake	 add	%g2, 1, %g2
1032102040Sjake
103388644Sjake	/*
1034102040Sjake	 * Not in user tsb, call c code.
103591224Sjake	 */
1036116589Sjake	ba,a	%xcc, tl0_dmmu_prot_trap
1037102040Sjake	 nop
1038102040SjakeEND(tl0_dmmu_prot_1)
103991224Sjake
/*
 * Slow path for a user dtlb protection fault not satisfied from the tsb:
 * collect the mmu fault registers (tar, sfar, sfsr), clear the sfsr, and
 * hand off to the C trap code.  As with tl0_dmmu_miss_trap, a fault taken
 * at TL1 during window spill/fill is resumed via
 * RESUME_SPILLFILL_MMU_CLR_SFSR and reported as a kernel trap instead.
 * In: %g1 = tag access register contents.
 */
104088644SjakeENTRY(tl0_dmmu_prot_trap)
104188644Sjake	/*
104296207Sjake	 * Put back the contents of the tag access register, in case we
104396207Sjake	 * faulted.
104496207Sjake	 */
1045102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
104696207Sjake	membar	#Sync
104796207Sjake
104896207Sjake	/*
104982906Sjake	 * Switch to alternate globals.
105081180Sjake	 */
105182906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
105281180Sjake
105381180Sjake	/*
1054109860Sjake	 * Check if we actually came from the kernel.
1055109860Sjake	 */
1056109860Sjake	rdpr	%tl, %g1
1057109860Sjake	cmp	%g1, 1
1058109860Sjake	bgt,a,pn %xcc, 1f
1059109860Sjake	 nop
1060109860Sjake
1061109860Sjake	/*
106282005Sjake	 * Load the tar, sfar and sfsr.
106382005Sjake	 */
106488644Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
106588644Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
106688644Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
106785243Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
106882005Sjake	membar	#Sync
106982005Sjake
107082005Sjake	/*
107191224Sjake	 * Save the mmu registers and call common trap code.
107282005Sjake	 */
107388644Sjake	tl0_split
1074108374Sjake	clr	%o1
1075103921Sjake	set	trap, %o2
107688644Sjake	mov	%g2, %o3
107788644Sjake	mov	%g3, %o4
107888644Sjake	mov	%g4, %o5
1079103897Sjake	ba	%xcc, tl0_utrap
108088644Sjake	 mov	T_DATA_PROTECTION, %o0
1081109860Sjake
1082109860Sjake	/*
1083109860Sjake	 * Handle faults during window spill/fill.
1084109860Sjake	 */
1085109860Sjake1:	RESUME_SPILLFILL_MMU_CLR_SFSR
1086109860Sjake
1087109860Sjake	/*
1088109860Sjake	 * Load the sfar, sfsr and tar.  Clear the sfsr.
1089109860Sjake	 */
1090109860Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
1091109860Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
1092109860Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
1093109860Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
1094109860Sjake	membar	#Sync
1095109860Sjake
1096109860Sjake	tl1_split
1097109860Sjake	clr	%o1
1098109860Sjake	set	trap, %o2
1099109860Sjake	mov	%g2, %o3
1100109860Sjake	mov	%g3, %o4
1101109860Sjake	mov	%g4, %o5
1102116589Sjake	ba	%xcc, tl1_trap
1103109860Sjake	 mov	T_DATA_PROTECTION | T_KERNEL, %o0
110488644SjakeEND(tl0_dmmu_prot_trap)
110581180Sjake
/*
 * Spill a 64-bit user register window directly to the user stack
 * (ASI_AIUP).  The RSF_TRAP entries handle faults on the user stack by
 * converting them to a T_SPILL trap.
 */
110680709Sjake	.macro	tl0_spill_0_n
110791246Sjake	wr	%g0, ASI_AIUP, %asi
110891246Sjake	SPILL(stxa, %sp + SPOFF, 8, %asi)
110980709Sjake	saved
111080709Sjake	retry
111182906Sjake	.align	32
111282906Sjake	RSF_TRAP(T_SPILL)
111382906Sjake	RSF_TRAP(T_SPILL)
111480709Sjake	.endm
111580709Sjake
/*
 * Spill a 32-bit (ilp32 process) user register window to the user stack;
 * same structure as tl0_spill_0_n but word-sized stores and no stack bias.
 */
111682906Sjake	.macro	tl0_spill_1_n
111791246Sjake	wr	%g0, ASI_AIUP, %asi
111882906Sjake	SPILL(stwa, %sp, 4, %asi)
111982906Sjake	saved
112082906Sjake	retry
112182906Sjake	.align	32
112282906Sjake	RSF_TRAP(T_SPILL)
112382906Sjake	RSF_TRAP(T_SPILL)
112482906Sjake	.endm
112582005Sjake
/*
 * Fill a 64-bit user register window from the user stack (ASI_AIUP);
 * faults are converted to a T_FILL trap by the RSF_TRAP entries.
 */
112691246Sjake	.macro	tl0_fill_0_n
112782906Sjake	wr	%g0, ASI_AIUP, %asi
112891246Sjake	FILL(ldxa, %sp + SPOFF, 8, %asi)
112982906Sjake	restored
113082906Sjake	retry
113182906Sjake	.align	32
113282906Sjake	RSF_TRAP(T_FILL)
113382906Sjake	RSF_TRAP(T_FILL)
113480709Sjake	.endm
113580709Sjake
/*
 * Fill a 32-bit (ilp32 process) user register window from the user stack;
 * word-sized loads, no stack bias.
 */
113682906Sjake	.macro	tl0_fill_1_n
113791246Sjake	wr	%g0, ASI_AIUP, %asi
113882906Sjake	FILL(lduwa, %sp, 4, %asi)
113982906Sjake	restored
114082906Sjake	retry
114182906Sjake	.align	32
114282906Sjake	RSF_TRAP(T_FILL)
114382906Sjake	RSF_TRAP(T_FILL)
114482906Sjake	.endm
114582906Sjake
/*
 * Deliver a spill/fill software trap to the C trap code.  Restores the
 * cwp from the saved tstate so the trapping window is current, then calls
 * tl0_trap with the trap type passed in %g2.
 */
114682906SjakeENTRY(tl0_sftrap)
114782906Sjake	rdpr	%tstate, %g1
114882906Sjake	and	%g1, TSTATE_CWP_MASK, %g1
114982906Sjake	wrpr	%g1, 0, %cwp
115088644Sjake	tl0_split
1151108374Sjake	clr	%o1
1152103921Sjake	set	trap, %o2
1153116589Sjake	ba	%xcc, tl0_trap
115482906Sjake	 mov	%g2, %o0
115582906SjakeEND(tl0_sftrap)
115682906Sjake
/*
 * Filler for unused spill vector slots: reset the machine (sir) if one is
 * ever taken.  Each slot is one 128 byte trap table entry.
 */
115782906Sjake	.macro	tl0_spill_bad	count
115882906Sjake	.rept	\count
115988644Sjake	sir
116088644Sjake	.align	128
116182906Sjake	.endr
116282906Sjake	.endm
116382906Sjake
/*
 * Filler for unused fill vector slots; see tl0_spill_bad.
 */
116480709Sjake	.macro	tl0_fill_bad	count
116580709Sjake	.rept	\count
116688644Sjake	sir
116788644Sjake	.align	128
116880709Sjake	.endr
116980709Sjake	.endm
117080709Sjake
/*
 * System call trap vector: split off a kernel frame and call the C
 * syscall() handler with trap type T_SYSCALL.
 */
117184186Sjake	.macro	tl0_syscall
117288644Sjake	tl0_split
1173108374Sjake	clr	%o1
1174103921Sjake	set	syscall, %o2
1175103921Sjake	ba	%xcc, tl0_trap
117684186Sjake	 mov	T_SYSCALL, %o0
117788784Sjake	.align	32
117884186Sjake	.endm
117984186Sjake
/*
 * Trap vector stub: branch to the out of line user fp restore handler.
 */
1180112920Sjake	.macro	tl0_fp_restore
1181112920Sjake	ba,a	%xcc, tl0_fp_restore
1182112920Sjake	 nop
1183112920Sjake	.align	32
1184112920Sjake	.endm
1185112920Sjake
/*
 * Restore the user floating point state saved in the pcb and clear
 * PCB_FEF so the restore is not repeated, then retry the faulting fp
 * instruction (done skips the emulated instruction sequence).
 */
1186112920SjakeENTRY(tl0_fp_restore)
1187112924Sjake	ldx	[PCB_REG + PCB_FLAGS], %g1
1188112924Sjake	andn	%g1, PCB_FEF, %g1
1189112924Sjake	stx	%g1, [PCB_REG + PCB_FLAGS]
1190112924Sjake
1191112920Sjake	wr	%g0, FPRS_FEF, %fprs
1192112920Sjake	wr	%g0, ASI_BLK_S, %asi
1193112920Sjake	ldda	[PCB_REG + PCB_UFP + (0 * 64)] %asi, %f0
1194112920Sjake	ldda	[PCB_REG + PCB_UFP + (1 * 64)] %asi, %f16
1195112920Sjake	ldda	[PCB_REG + PCB_UFP + (2 * 64)] %asi, %f32
1196112920Sjake	ldda	[PCB_REG + PCB_UFP + (3 * 64)] %asi, %f48
1197112920Sjake	membar	#Sync
1198112920Sjake	done
1199112920SjakeEND(tl0_fp_restore)
1200112920Sjake
/*
 * Kernel instruction access exception: collect %tpc and the immu sfsr
 * (clearing the latter), then branch to the out of line trap code with
 * the trap type in %g2.
 */
120180709Sjake	.macro	tl1_insn_excptn
1202101899Sjake	wrpr	%g0, PSTATE_ALT, %pstate
120388644Sjake	wr	%g0, ASI_IMMU, %asi
120488644Sjake	rdpr	%tpc, %g3
120588644Sjake	ldxa	[%g0 + AA_IMMU_SFSR] %asi, %g4
120688644Sjake	stxa	%g0, [%g0 + AA_IMMU_SFSR] %asi
120788644Sjake	membar	#Sync
1208116589Sjake	ba	%xcc, tl1_insn_exceptn_trap
120988644Sjake	 mov	T_INSTRUCTION_EXCEPTION | T_KERNEL, %g2
121080709Sjake	.align	32
121180709Sjake	.endm
121280709Sjake
/*
 * Out of line continuation of tl1_insn_excptn: call the common TL1 trap
 * code.  In: %g2 = trap type, %g3 = faulting %tpc, %g4 = immu sfsr.
 */
121388644SjakeENTRY(tl1_insn_exceptn_trap)
121491246Sjake	tl1_split
1215103921Sjake	clr	%o1
1216103921Sjake	set	trap, %o2
121788644Sjake	mov	%g3, %o4
121888644Sjake	mov	%g4, %o5
1219116589Sjake	ba	%xcc, tl1_trap
122088644Sjake	 mov	%g2, %o0
122188644SjakeEND(tl1_insn_exceptn_trap)
122288644Sjake
/*
 * Trap vector stub for fp disabled at TL1; real work is out of line.
 */
1223113024Sjake	.macro	tl1_fp_disabled
1224113024Sjake	ba,a	%xcc, tl1_fp_disabled_1
1225113024Sjake	 nop
1226113024Sjake	.align	32
1227113024Sjake	.endm
1228113024Sjake
/*
 * Handle an fp disabled trap taken in the kernel.  If the trapping pc
 * lies inside the fpu_fault_begin..fpu_fault_begin+fpu_fault_size region,
 * the kernel fp state saved in the pcb (PCB_KFP) is reloaded and the
 * instruction retried; any other kernel fp use is a fatal T_FP_DISABLED
 * kernel trap handed to C code.
 */
1229113024SjakeENTRY(tl1_fp_disabled_1)
1230113024Sjake	rdpr	%tpc, %g1
1231113024Sjake	set	fpu_fault_begin, %g2
1232113024Sjake	sub	%g1, %g2, %g1
1233113024Sjake	cmp	%g1, fpu_fault_size
1234113024Sjake	bgeu,a,pn %xcc, 1f
1235113024Sjake	 nop
1236113024Sjake
1237113024Sjake	wr	%g0, FPRS_FEF, %fprs
1238113024Sjake	wr	%g0, ASI_BLK_S, %asi
1239113024Sjake	ldda	[PCB_REG + PCB_KFP + (0 * 64)] %asi, %f0
1240113024Sjake	ldda	[PCB_REG + PCB_KFP + (1 * 64)] %asi, %f16
1241113024Sjake	ldda	[PCB_REG + PCB_KFP + (2 * 64)] %asi, %f32
1242113024Sjake	ldda	[PCB_REG + PCB_KFP + (3 * 64)] %asi, %f48
1243113024Sjake	membar	#Sync
1244113024Sjake	retry
1245113024Sjake
1246113024Sjake1:	tl1_split
1247113024Sjake	clr	%o1
1248113024Sjake	set	trap, %o2
1249113024Sjake	ba	%xcc, tl1_trap
1250113024Sjake	 mov	T_FP_DISABLED | T_KERNEL, %o0
1251113024SjakeEND(tl1_fp_disabled_1)
1252113024Sjake
/*
 * Kernel data access exception vector: switch to alternate globals and
 * branch out of line (vector slot is only 32 bytes).
 */
125382005Sjake	.macro	tl1_data_excptn
1254101899Sjake	wrpr	%g0, PSTATE_ALT, %pstate
1255116589Sjake	ba,a	%xcc, tl1_data_excptn_trap
125682906Sjake	 nop
125782005Sjake	.align	32
125882005Sjake	.endm
125982005Sjake
/*
 * Out of line continuation of tl1_data_excptn: try to resume a faulted
 * window spill/fill, otherwise report via the common sfsr trap code.
 */
126088644SjakeENTRY(tl1_data_excptn_trap)
126188644Sjake	RESUME_SPILLFILL_MMU_CLR_SFSR
1262116589Sjake	ba	%xcc, tl1_sfsr_trap
126388644Sjake	 mov	T_DATA_EXCEPTION | T_KERNEL, %g2
126488644SjakeEND(tl1_data_excptn_trap)
126582906Sjake
/*
 * Kernel memory alignment trap vector stub; real work is out of line.
 */
126680709Sjake	.macro	tl1_align
1267116589Sjake	ba,a	%xcc, tl1_align_trap
126888644Sjake	 nop
126980709Sjake	.align	32
127080709Sjake	.endm
127180709Sjake
/*
 * Out of line continuation of tl1_align: try to resume a window
 * spill/fill that faulted on a misaligned user stack, otherwise report
 * via the common sfsr trap code with an alignment trap type in %g2.
 *
 * Fix: the function was closed with END(tl1_data_excptn_trap), which
 * mismatches this ENTRY and corrupts the symbol size/end annotations of
 * both functions; close it with the matching END(tl1_align_trap).
 */
127282906SjakeENTRY(tl1_align_trap)
127388644Sjake	RESUME_SPILLFILL_ALIGN
1274116589Sjake	ba	%xcc, tl1_sfsr_trap
127588644Sjake	 mov	T_MEM_ADDRESS_NOT_ALIGNED | T_KERNEL, %g2
127688644SjakeEND(tl1_align_trap)
127782906Sjake
/*
 * Common TL1 mmu fault reporting: read the dmmu sfar and sfsr, clear the
 * sfsr, and call the C trap code.  In: %g2 = trap type.
 */
127880709SjakeENTRY(tl1_sfsr_trap)
127988644Sjake	wr	%g0, ASI_DMMU, %asi
128088644Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
128188644Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
128280709Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
128380709Sjake	membar	#Sync
128482005Sjake
128591246Sjake	tl1_split
1286103921Sjake	clr	%o1
1287103921Sjake	set	trap, %o2
128888644Sjake	mov	%g3, %o4
128988644Sjake	mov	%g4, %o5
1290116589Sjake	ba	%xcc, tl1_trap
129188644Sjake	 mov	%g2, %o0
129288644SjakeEND(tl1_sfsr_trap)
129380709Sjake
/*
 * Kernel interrupt vector: call the common TL1 interrupt code with the
 * interrupt level and its pil mask.
 */
129484186Sjake	.macro	tl1_intr level, mask
129591246Sjake	tl1_split
129691246Sjake	set	\mask, %o1
1297116589Sjake	ba	%xcc, tl1_intr
129891246Sjake	 mov	\level, %o0
129981380Sjake	.align	32
130081380Sjake	.endm
130181380Sjake
/*
 * Expand the per-level interrupt vector entries for TL1.
 */
130280709Sjake	.macro	tl1_intr_level
130381380Sjake	INTR_LEVEL(1)
130480709Sjake	.endm
130580709Sjake
/*
 * Kernel itlb miss handler.  Looks the faulting virtual page up in the
 * kernel tsb (whose mask and base address are patched into the code at
 * startup) and, on a valid executable match, loads the tte into the itlb.
 * Misses fall through to tl1_immu_miss_trap; a clear referenced bit is
 * handled out of line in tl1_immu_miss_set_ref.
 * Register use: %g5 = tag access / vpn, %g6:%g7 = tte tag:data.
 */
130680709Sjake	.macro	tl1_immu_miss
130791224Sjake	/*
130891224Sjake	 * Load the context and the virtual page number from the tag access
130991224Sjake	 * register.  We ignore the context.
131091224Sjake	 */
131191224Sjake	wr	%g0, ASI_IMMU, %asi
1312102040Sjake	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g5
131385585Sjake
131491224Sjake	/*
1315102040Sjake	 * Compute the address of the tte.  The tsb mask and address of the
1316102040Sjake	 * tsb are patched at startup.
131791224Sjake	 */
1318102040Sjake	.globl	tl1_immu_miss_patch_1
1319102040Sjaketl1_immu_miss_patch_1:
1320102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1321102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
1322102040Sjake	sethi	%hi(TSB_KERNEL), %g7
132385585Sjake
1324102040Sjake	srlx	%g5, TAR_VPN_SHIFT, %g5
1325102040Sjake	and	%g5, %g6, %g6
1326102040Sjake	sllx	%g6, TTE_SHIFT, %g6
1327102040Sjake	add	%g6, %g7, %g6
132885585Sjake
132985585Sjake	/*
133091224Sjake	 * Load the tte.
133191224Sjake	 */
1332102040Sjake	ldda	[%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
133391224Sjake
133491224Sjake	/*
133591224Sjake	 * Check that its valid and executable and that the virtual page
133691224Sjake	 * numbers match.
133791224Sjake	 */
1338102040Sjake	brgez,pn %g7, tl1_immu_miss_trap
1339102040Sjake	 andcc	%g7, TD_EXEC, %g0
134091224Sjake	bz,pn	%xcc, tl1_immu_miss_trap
1341102040Sjake	 srlx	%g6, TV_SIZE_BITS, %g6
1342102040Sjake	cmp	%g5, %g6
134391224Sjake	bne,pn	%xcc, tl1_immu_miss_trap
134485585Sjake	 EMPTY
134585585Sjake
134685585Sjake	/*
134791224Sjake	 * Set the reference bit if its currently clear.
134885585Sjake	 */
1349102040Sjake	 andcc	%g7, TD_REF, %g0
1350102040Sjake	bz,a,pn	%xcc, tl1_immu_miss_set_ref
135191224Sjake	 nop
135285585Sjake
135391224Sjake	/*
1354102040Sjake	 * Load the tte data into the TLB and retry the instruction.
135591224Sjake	 */
1356102040Sjake	stxa	%g7, [%g0] ASI_ITLB_DATA_IN_REG
1357102040Sjake	retry
1358102040Sjake	.align	128
1359102040Sjake	.endm
136088644Sjake
/*
 * Out of line continuation of tl1_immu_miss: set the referenced bit in
 * the kernel tsb tte and load it into the itlb.
 * In: %g5 = faulting virtual page number.
 */
1361102040SjakeENTRY(tl1_immu_miss_set_ref)
136285585Sjake	/*
1363102040Sjake	 * Recompute the tte address, which we clobbered loading the tte.  The
1364102040Sjake	 * tsb mask and address of the tsb are patched at startup.
1365102040Sjake	 */
1366102040Sjake	.globl	tl1_immu_miss_patch_2
1367102040Sjaketl1_immu_miss_patch_2:
1368102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1369102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
1370102040Sjake	sethi	%hi(TSB_KERNEL), %g7
1371102040Sjake
1372102040Sjake	and	%g5, %g6, %g5
1373102040Sjake	sllx	%g5, TTE_SHIFT, %g5
1374102040Sjake	add	%g5, %g7, %g5
1375102040Sjake
1376102040Sjake	/*
1377102040Sjake	 * Set the reference bit.
1378102040Sjake	 */
1379102040Sjake	TTE_SET_REF(%g5, %g6, %g7)
1380102040Sjake
1381102040Sjake	/*
1382102040Sjake	 * May have become invalid during casxa, in which case start over.
1383102040Sjake	 */
1384102040Sjake	brgez,pn %g6, 1f
1385102040Sjake	 nop
1386102040Sjake
1387102040Sjake	/*
138885585Sjake	 * Load the tte data into the TLB and retry the instruction.
138985585Sjake	 */
1390102040Sjake	stxa	%g6, [%g0] ASI_ITLB_DATA_IN_REG
1391102040Sjake1:	retry
1392102040SjakeEND(tl1_immu_miss_set_ref)
139385585Sjake
/*
 * Kernel itlb miss not satisfied from the kernel tsb: pass the tag
 * access register to the C trap code as a kernel T_INSTRUCTION_MISS.
 */
139491224SjakeENTRY(tl1_immu_miss_trap)
139585585Sjake	/*
139685585Sjake	 * Switch to alternate globals.
139785585Sjake	 */
139891224Sjake	wrpr	%g0, PSTATE_ALT, %pstate
139985585Sjake
140091224Sjake	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2
140185585Sjake
140291246Sjake	tl1_split
1403103921Sjake	clr	%o1
1404103921Sjake	set	trap, %o2
140591224Sjake	mov	%g2, %o3
1406116589Sjake	ba	%xcc, tl1_trap
140788644Sjake	 mov	T_INSTRUCTION_MISS | T_KERNEL, %o0
140891224SjakeEND(tl1_immu_miss_trap)
140991224Sjake
/*
 * Kernel dtlb miss handler.  Dispatches user addresses (non-zero
 * context) to tl1_dmmu_miss_user and direct mapped physical addresses
 * (negative virtual addresses) to tl1_dmmu_miss_direct; otherwise probes
 * the kernel tsb (mask/base patched at startup) and loads a matching tte
 * into the dtlb, falling back to tl1_dmmu_miss_trap.
 * Register use: %g5 = tag access / vpn, %g6:%g7 = tte tag:data.
 */
141091224Sjake	.macro	tl1_dmmu_miss
141191224Sjake	/*
141291224Sjake	 * Load the context and the virtual page number from the tag access
141391224Sjake	 * register.
141491224Sjake	 */
141591224Sjake	wr	%g0, ASI_DMMU, %asi
1416102040Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g5
141780709Sjake
141891224Sjake	/*
141991224Sjake	 * Extract the context from the contents of the tag access register.
1420100771Sjake	 * If its non-zero this is a fault on a user address.  Note that the
1421108195Sjake	 * faulting address is passed in %g1.
142291224Sjake	 */
1423102040Sjake	sllx	%g5, 64 - TAR_VPN_SHIFT, %g6
1424102040Sjake	brnz,a,pn %g6, tl1_dmmu_miss_user
1425102040Sjake	 mov	%g5, %g1
142680709Sjake
142791224Sjake	/*
1428100771Sjake	 * Check for the direct mapped physical region.  These addresses have
1429100771Sjake	 * the high bit set so they are negative.
1430100771Sjake	 */
1431102040Sjake	brlz,pn %g5, tl1_dmmu_miss_direct
1432100771Sjake	 EMPTY
1433100771Sjake
1434100771Sjake	/*
1435102040Sjake	 * Compute the address of the tte.  The tsb mask and address of the
1436102040Sjake	 * tsb are patched at startup.
143791224Sjake	 */
1438102040Sjake	.globl	tl1_dmmu_miss_patch_1
1439102040Sjaketl1_dmmu_miss_patch_1:
1440102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1441102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
1442102040Sjake	sethi	%hi(TSB_KERNEL), %g7
144384186Sjake
1444102040Sjake	srlx	%g5, TAR_VPN_SHIFT, %g5
1445102040Sjake	and	%g5, %g6, %g6
1446102040Sjake	sllx	%g6, TTE_SHIFT, %g6
1447102040Sjake	add	%g6, %g7, %g6
144891224Sjake
144991224Sjake	/*
145091224Sjake	 * Load the tte.
145191224Sjake	 */
1452102040Sjake	ldda	[%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
145391224Sjake
145491224Sjake	/*
145591224Sjake	 * Check that its valid and that the virtual page numbers match.
145691224Sjake	 */
1457102040Sjake	brgez,pn %g7, tl1_dmmu_miss_trap
1458102040Sjake	 srlx	%g6, TV_SIZE_BITS, %g6
1459102040Sjake	cmp	%g5, %g6
146091224Sjake	bne,pn %xcc, tl1_dmmu_miss_trap
146180709Sjake	 EMPTY
146280709Sjake
146380709Sjake	/*
146491224Sjake	 * Set the reference bit if its currently clear.
146580709Sjake	 */
1466102040Sjake	 andcc	%g7, TD_REF, %g0
1467102040Sjake	bz,a,pt	%xcc, tl1_dmmu_miss_set_ref
146891224Sjake	 nop
146980709Sjake
147091224Sjake	/*
1471102040Sjake	 * Load the tte data into the TLB and retry the instruction.
147291224Sjake	 */
1473102040Sjake	stxa	%g7, [%g0] ASI_DTLB_DATA_IN_REG
1474102040Sjake	retry
1475102040Sjake	.align	128
1476102040Sjake	.endm
147788644Sjake
/*
 * Out of line continuation of tl1_dmmu_miss: set the referenced bit in
 * the kernel tsb tte and load it into the dtlb.
 * In: %g5 = faulting virtual page number.
 */
1478102040SjakeENTRY(tl1_dmmu_miss_set_ref)
147980709Sjake	/*
1480102040Sjake	 * Recompute the tte address, which we clobbered loading the tte.  The
1481102040Sjake	 * tsb mask and address of the tsb are patched at startup.
1482102040Sjake	 */
1483102040Sjake	.globl	tl1_dmmu_miss_patch_2
1484102040Sjaketl1_dmmu_miss_patch_2:
1485102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1486102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
1487102040Sjake	sethi	%hi(TSB_KERNEL), %g7
1488102040Sjake
1489102040Sjake	and	%g5, %g6, %g5
1490102040Sjake	sllx	%g5, TTE_SHIFT, %g5
1491102040Sjake	add	%g5, %g7, %g5
1492102040Sjake
1493102040Sjake	/*
1494102040Sjake	 * Set the reference bit.
1495102040Sjake	 */
1496102040Sjake	TTE_SET_REF(%g5, %g6, %g7)
1497102040Sjake
1498102040Sjake	/*
1499102040Sjake	 * May have become invalid during casxa, in which case start over.
1500102040Sjake	 */
1501102040Sjake	brgez,pn %g6, 1f
1502102040Sjake	 nop
1503102040Sjake
1504102040Sjake	/*
150582906Sjake	 * Load the tte data into the TLB and retry the instruction.
150680709Sjake	 */
1507102040Sjake	stxa	%g6, [%g0] ASI_DTLB_DATA_IN_REG
1508102040Sjake1:	retry
1509102040SjakeEND(tl1_dmmu_miss_set_ref)
151080709Sjake
/*
 * Kernel dtlb miss not satisfied from the kernel tsb: check for kernel
 * stack overflow (KSTACK_CHECK) and report a kernel T_DATA_MISS to the C
 * trap code with the tag access register as argument.
 */
151191224SjakeENTRY(tl1_dmmu_miss_trap)
151280709Sjake	/*
151382906Sjake	 * Switch to alternate globals.
151480709Sjake	 */
151591224Sjake	wrpr	%g0, PSTATE_ALT, %pstate
151680709Sjake
1517108195Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
1518108195Sjake
151988781Sjake	KSTACK_CHECK
152088781Sjake
152191246Sjake	tl1_split
1522103921Sjake	clr	%o1
1523103921Sjake	set	trap, %o2
152491224Sjake	mov	%g2, %o3
1525116589Sjake	ba	%xcc, tl1_trap
152688644Sjake	 mov	T_DATA_MISS | T_KERNEL, %o0
152788781SjakeEND(tl1_dmmu_miss_trap)
152880709Sjake
/*
 * Handle a kernel dtlb miss in the direct mapped physical region:
 * synthesize a tte from the virtual address itself (no tsb lookup) and
 * load it into the dtlb.  In: %g5 = faulting virtual address.
 */
1529100771SjakeENTRY(tl1_dmmu_miss_direct)
1530100771Sjake	/*
1531100771Sjake	 * Mask off the high bits of the virtual address to get the physical
1532108245Sjake	 * address, and or in the tte bits.  The virtual address bits that
1533108245Sjake	 * correspond to the tte valid and page size bits are left set, so
1534108245Sjake	 * they don't have to be included in the tte bits below.  We know they
1535108245Sjake	 * are set because the virtual address is in the upper va hole.
1536100771Sjake	 */
1537108245Sjake	setx	TLB_DIRECT_TO_TTE_MASK, %g7, %g6
1538108245Sjake	and	%g5, %g6, %g5
1539108245Sjake	or	%g5, TD_CP | TD_CV | TD_W, %g5
1540100771Sjake
1541100771Sjake	/*
1542100771Sjake	 * Load the tte data into the TLB and retry the instruction.
1543100771Sjake	 */
1544102040Sjake	stxa	%g5, [%g0] ASI_DTLB_DATA_IN_REG
1545100771Sjake	retry
1546100771SjakeEND(tl1_dmmu_miss_direct)
1547100771Sjake
/*
 * Kernel dtlb protection fault vector stub; handler is out of line.
 */
154882906Sjake	.macro	tl1_dmmu_prot
1549102040Sjake	ba,a	%xcc, tl1_dmmu_prot_1
1550102040Sjake	 nop
1551102040Sjake	.align	128
1552102040Sjake	.endm
1553102040Sjake
/*
 * Kernel dtlb protection fault handler.  User addresses (non-zero
 * context) are redirected to tl1_dmmu_prot_user; kernel addresses are
 * looked up in the kernel tsb (mask/base patched at startup).  On a
 * valid writable (TD_SW) match the stale tlb entry is demapped, the
 * hardware write bit set, and the updated tte loaded; otherwise control
 * falls through to tl1_dmmu_prot_trap.
 * Register use: %g5 = tag access / vpn, %g6:%g7 = tte tag:data.
 */
1554102040SjakeENTRY(tl1_dmmu_prot_1)
155591224Sjake	/*
155691224Sjake	 * Load the context and the virtual page number from the tag access
155791224Sjake	 * register.
155891224Sjake	 */
155991224Sjake	wr	%g0, ASI_DMMU, %asi
1560102040Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g5
156188644Sjake
156291224Sjake	/*
156391224Sjake	 * Extract the context from the contents of the tag access register.
1564108195Sjake	 * If its non-zero this is a fault on a user address.  Note that the
1565108195Sjake	 * faulting address is passed in %g1.
156691224Sjake	 */
1567102040Sjake	sllx	%g5, 64 - TAR_VPN_SHIFT, %g6
1568102040Sjake	brnz,a,pn %g6, tl1_dmmu_prot_user
1569102040Sjake	 mov	%g5, %g1
157088644Sjake
157191224Sjake	/*
1572102040Sjake	 * Compute the address of the tte.  The tsb mask and address of the
1573102040Sjake	 * tsb are patched at startup.
157491224Sjake	 */
1575102040Sjake	.globl	tl1_dmmu_prot_patch_1
1576102040Sjaketl1_dmmu_prot_patch_1:
1577102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1578102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
1579102040Sjake	sethi	%hi(TSB_KERNEL), %g7
158088644Sjake
1581102040Sjake	srlx	%g5, TAR_VPN_SHIFT, %g5
1582102040Sjake	and	%g5, %g6, %g6
1583102040Sjake	sllx	%g6, TTE_SHIFT, %g6
1584102040Sjake	add	%g6, %g7, %g6
158591224Sjake
158691224Sjake	/*
158791224Sjake	 * Load the tte.
158891224Sjake	 */
1589102040Sjake	ldda	[%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
159091224Sjake
159191224Sjake	/*
159291224Sjake	 * Check that its valid and writeable and that the virtual page
159391224Sjake	 * numbers match.
159491224Sjake	 */
1595102040Sjake	brgez,pn %g7, tl1_dmmu_prot_trap
1596102040Sjake	 andcc	%g7, TD_SW, %g0
159791224Sjake	bz,pn	%xcc, tl1_dmmu_prot_trap
1598102040Sjake	 srlx	%g6, TV_SIZE_BITS, %g6
1599102040Sjake	cmp	%g5, %g6
160091224Sjake	bne,pn	%xcc, tl1_dmmu_prot_trap
160188644Sjake	 EMPTY
160288644Sjake
160388644Sjake	/*
160491224Sjake	 * Delete the old TLB entry and clear the sfsr.
160588644Sjake	 */
1606102040Sjake	 sllx	%g5, TAR_VPN_SHIFT, %g6
160791224Sjake	or	%g6, TLB_DEMAP_NUCLEUS, %g6
160891224Sjake	stxa	%g0, [%g6] ASI_DMMU_DEMAP
160981180Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
161091224Sjake	membar	#Sync
161181180Sjake
1612102040Sjake	/*
1613102040Sjake	 * Recompute the tte address, which we clobbered loading the tte.  The
1614102040Sjake	 * tsb mask and address of the tsb are patched at startup.
1615102040Sjake	 */
1616102040Sjake	.globl	tl1_dmmu_prot_patch_2
1617102040Sjaketl1_dmmu_prot_patch_2:
1618102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1619102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
1620102040Sjake	sethi	%hi(TSB_KERNEL), %g7
162196207Sjake
1622102040Sjake	and	%g5, %g6, %g5
1623102040Sjake	sllx	%g5, TTE_SHIFT, %g5
1624102040Sjake	add	%g5, %g7, %g5
1625102040Sjake
162681180Sjake	/*
162791224Sjake	 * Set the hardware write bit.
162891224Sjake	 */
1629102040Sjake	TTE_SET_W(%g5, %g6, %g7)
163091224Sjake
163191224Sjake	/*
1632102040Sjake	 * May have become invalid during casxa, in which case start over.
1633102040Sjake	 */
1634102040Sjake	brgez,pn %g6, 1f
1635102040Sjake	 or	%g6, TD_W, %g6
1636102040Sjake
1637102040Sjake	/*
163888644Sjake	 * Load the tte data into the TLB and retry the instruction.
163988644Sjake	 */
1640102040Sjake	stxa	%g6, [%g0] ASI_DTLB_DATA_IN_REG
1641102040Sjake1:	retry
1642102040SjakeEND(tl1_dmmu_prot_1)
164388644Sjake
/*
 * Kernel dtlb protection fault not satisfied from the kernel tsb:
 * collect tar, sfar and sfsr (clearing the sfsr) and report a kernel
 * T_DATA_PROTECTION to the C trap code.
 */
164488644SjakeENTRY(tl1_dmmu_prot_trap)
164581180Sjake	/*
164691224Sjake	 * Switch to alternate globals.
164791224Sjake	 */
164891224Sjake	wrpr	%g0, PSTATE_ALT, %pstate
164991224Sjake
165091224Sjake	/*
165181180Sjake	 * Load the sfar, sfsr and tar.  Clear the sfsr.
165281180Sjake	 */
165388644Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
165488644Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
165588644Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
165681180Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
165781180Sjake	membar	#Sync
165881180Sjake
165991246Sjake	tl1_split
1660103921Sjake	clr	%o1
1661103921Sjake	set	trap, %o2
166288644Sjake	mov	%g2, %o3
166388644Sjake	mov	%g3, %o4
166488644Sjake	mov	%g4, %o5
1665116589Sjake	ba	%xcc, tl1_trap
166688644Sjake	 mov	T_DATA_PROTECTION | T_KERNEL, %o0
166788644SjakeEND(tl1_dmmu_prot_trap)
166881180Sjake
/*
 * Spill a 64-bit kernel register window to the kernel stack.  Faults
 * here are fatal (RSF_FATAL).
 */
166980709Sjake	.macro	tl1_spill_0_n
167082906Sjake	SPILL(stx, %sp + SPOFF, 8, EMPTY)
167180709Sjake	saved
167280709Sjake	retry
167382906Sjake	.align	32
167482906Sjake	RSF_FATAL(T_SPILL)
167582906Sjake	RSF_FATAL(T_SPILL)
167680709Sjake	.endm
167780709Sjake
/*
 * Spill a 64-bit user window from TL1 to the user stack (ASI_AIUP);
 * on fault the window is saved to the pcb instead (RSF_SPILL_TOPCB).
 */
167891246Sjake	.macro	tl1_spill_2_n
167991246Sjake	wr	%g0, ASI_AIUP, %asi
168091246Sjake	SPILL(stxa, %sp + SPOFF, 8, %asi)
168182906Sjake	saved
168282906Sjake	retry
168382906Sjake	.align	32
168482906Sjake	RSF_SPILL_TOPCB
168582906Sjake	RSF_SPILL_TOPCB
168681380Sjake	.endm
168781380Sjake
/*
 * Spill a 32-bit user window from TL1 to the user stack; falls back to
 * the pcb on fault.
 */
168891246Sjake	.macro	tl1_spill_3_n
168991246Sjake	wr	%g0, ASI_AIUP, %asi
169092200Sjake	SPILL(stwa, %sp, 4, %asi)
169182906Sjake	saved
169282906Sjake	retry
169382906Sjake	.align	32
169482906Sjake	RSF_SPILL_TOPCB
169582906Sjake	RSF_SPILL_TOPCB
169682906Sjake	.endm
169782906Sjake
/*
 * Spill a 64-bit user window (other-window state) to the user stack;
 * falls back to the pcb on fault.
 */
169891246Sjake	.macro	tl1_spill_0_o
169982906Sjake	wr	%g0, ASI_AIUP, %asi
170082906Sjake	SPILL(stxa, %sp + SPOFF, 8, %asi)
170182906Sjake	saved
170282906Sjake	retry
170382906Sjake	.align	32
170482906Sjake	RSF_SPILL_TOPCB
170582906Sjake	RSF_SPILL_TOPCB
170682906Sjake	.endm
170782906Sjake
/*
 * Spill a 32-bit user window (other-window state) to the user stack;
 * falls back to the pcb on fault.
 */
170882906Sjake	.macro	tl1_spill_1_o
170991246Sjake	wr	%g0, ASI_AIUP, %asi
171082906Sjake	SPILL(stwa, %sp, 4, %asi)
171182005Sjake	saved
171282005Sjake	retry
171382906Sjake	.align	32
171482906Sjake	RSF_SPILL_TOPCB
171582906Sjake	RSF_SPILL_TOPCB
171682906Sjake	.endm
171782005Sjake
/*
 * Other-window spill that always goes straight to the pcb.
 */
171882906Sjake	.macro	tl1_spill_2_o
171982906Sjake	RSF_SPILL_TOPCB
172091246Sjake	.align	128
172180709Sjake	.endm
172280709Sjake
/*
 * Fill a 64-bit kernel register window from the kernel stack; faults are
 * fatal.
 */
172380709Sjake	.macro	tl1_fill_0_n
172482906Sjake	FILL(ldx, %sp + SPOFF, 8, EMPTY)
172580709Sjake	restored
172680709Sjake	retry
172782906Sjake	.align	32
172882906Sjake	RSF_FATAL(T_FILL)
172982906Sjake	RSF_FATAL(T_FILL)
173080709Sjake	.endm
173180709Sjake
/*
 * Fill a 64-bit user window from TL1 from the user stack; on fault the
 * magic fill sequence (RSF_FILL_MAGIC) is used.
 */
173291246Sjake	.macro	tl1_fill_2_n
173382906Sjake	wr	%g0, ASI_AIUP, %asi
173482906Sjake	FILL(ldxa, %sp + SPOFF, 8, %asi)
173582906Sjake	restored
173682906Sjake	retry
173782906Sjake	.align 32
173882906Sjake	RSF_FILL_MAGIC
173991246Sjake	RSF_FILL_MAGIC
174082906Sjake	.endm
174182906Sjake
/*
 * Fill a 32-bit user window from TL1 from the user stack; uses the
 * magic fill sequence on fault.
 */
174291246Sjake	.macro	tl1_fill_3_n
174382906Sjake	wr	%g0, ASI_AIUP, %asi
174482906Sjake	FILL(lduwa, %sp, 4, %asi)
174582906Sjake	restored
174682906Sjake	retry
174782906Sjake	.align 32
174882906Sjake	RSF_FILL_MAGIC
174991246Sjake	RSF_FILL_MAGIC
175082906Sjake	.endm
175182906Sjake
175282005Sjake/*
175382906Sjake * This is used to spill windows that are still occupied with user
175482906Sjake * data on kernel entry to the pcb.
175582005Sjake */
/*
 * Save the current register window into the next free pcb save area
 * (pcb_rw[pcb_nsaved]), record the window's %sp in pcb_rwsp, and bump
 * pcb_nsaved.  %g1-%g3 are preserved on the alternate stack (ASP_REG)
 * because the normal globals are live here.
 */
175682906SjakeENTRY(tl1_spill_topcb)
175782906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
175882906Sjake
175982005Sjake	/* Free some globals for our use. */
176088644Sjake	dec	24, ASP_REG
176188644Sjake	stx	%g1, [ASP_REG + 0]
176288644Sjake	stx	%g2, [ASP_REG + 8]
176388644Sjake	stx	%g3, [ASP_REG + 16]
176482906Sjake
176588644Sjake	ldx	[PCB_REG + PCB_NSAVED], %g1
176682906Sjake
176788644Sjake	sllx	%g1, PTR_SHIFT, %g2
176888644Sjake	add	%g2, PCB_REG, %g2
176988644Sjake	stx	%sp, [%g2 + PCB_RWSP]
177082906Sjake
177188644Sjake	sllx	%g1, RW_SHIFT, %g2
177288644Sjake	add	%g2, PCB_REG, %g2
177388644Sjake	SPILL(stx, %g2 + PCB_RW, 8, EMPTY)
177482906Sjake
177588644Sjake	inc	%g1
177688644Sjake	stx	%g1, [PCB_REG + PCB_NSAVED]
177782906Sjake
177885243Sjake#if KTR_COMPILE & KTR_TRAP
177988785Sjake	CATR(KTR_TRAP, "tl1_spill_topcb: pc=%#lx npc=%#lx sp=%#lx nsaved=%d"
178082906Sjake	   , %g1, %g2, %g3, 7, 8, 9)
178182906Sjake	rdpr	%tpc, %g2
178282906Sjake	stx	%g2, [%g1 + KTR_PARM1]
178388785Sjake	rdpr	%tnpc, %g2
178488785Sjake	stx	%g2, [%g1 + KTR_PARM2]
178588785Sjake	stx	%sp, [%g1 + KTR_PARM3]
178688644Sjake	ldx	[PCB_REG + PCB_NSAVED], %g2
178788785Sjake	stx	%g2, [%g1 + KTR_PARM4]
178882906Sjake9:
178982906Sjake#endif
179082906Sjake
179182906Sjake	saved
179282906Sjake
179388644Sjake	ldx	[ASP_REG + 16], %g3
179488644Sjake	ldx	[ASP_REG + 8], %g2
179588644Sjake	ldx	[ASP_REG + 0], %g1
179688644Sjake	inc	24, ASP_REG
179782005Sjake	retry
179882906SjakeEND(tl1_spill_topcb)
179982005Sjake
/*
 * Filler for unused TL1 spill vector slots: reset (sir) if ever taken.
 */
180082906Sjake	.macro	tl1_spill_bad	count
180182906Sjake	.rept	\count
180288644Sjake	sir
180388644Sjake	.align	128
180482906Sjake	.endr
180582906Sjake	.endm
180682906Sjake
/*
 * Emit \count unusable fill trap table entries.  As with tl1_spill_bad,
 * each repetition is one 128-byte (4 trap vector) fill slot beginning
 * with sir (software-initiated reset) so a stray fill trap is fatal by
 * design.
 */
180780709Sjake	.macro	tl1_fill_bad	count
180880709Sjake	.rept	\count
180988644Sjake	sir
181088644Sjake	.align	128
181180709Sjake	.endr
181280709Sjake	.endm
181380709Sjake
/*
 * Emit \count software trap (trap instruction) entries for TL1; each
 * vectors into the generic handler with trap type T_SOFT | T_KERNEL.
 */
181480709Sjake	.macro	tl1_soft	count
181582906Sjake	.rept	\count
181682906Sjake	tl1_gen	T_SOFT | T_KERNEL
181782906Sjake	.endr
181880709Sjake	.endm
181980709Sjake
/*
 * The trap table.  ".sect .trap" places it in its own section, and the
 * .align 0x8000 provides the 32K alignment the SPARC V9 trap base
 * address (%tba) register requires.  This first half (tl0_base,
 * vectors 0x000-0x1ff) is used for traps taken at trap level 0, i.e.
 * from usermode.  The "! 0xNN" comments give the trap vector number(s)
 * each entry occupies.
 */
182080709Sjake	.sect	.trap
182180709Sjake	.align	0x8000
182280709Sjake	.globl	tl0_base
182380709Sjake
182480709Sjaketl0_base:
182588779Sjake	tl0_reserved	8				! 0x0-0x7
182680709Sjaketl0_insn_excptn:
182788779Sjake	tl0_insn_excptn					! 0x8
182888779Sjake	tl0_reserved	1				! 0x9
182980709Sjaketl0_insn_error:
183088779Sjake	tl0_gen		T_INSTRUCTION_ERROR		! 0xa
183188779Sjake	tl0_reserved	5				! 0xb-0xf
183280709Sjaketl0_insn_illegal:
183388779Sjake	tl0_gen		T_ILLEGAL_INSTRUCTION		! 0x10
183480709Sjaketl0_priv_opcode:
183588779Sjake	tl0_gen		T_PRIVILEGED_OPCODE		! 0x11
183688779Sjake	tl0_reserved	14				! 0x12-0x1f
183780709Sjaketl0_fp_disabled:
183888779Sjake	tl0_gen		T_FP_DISABLED			! 0x20
183980709Sjaketl0_fp_ieee:
184088779Sjake	tl0_gen		T_FP_EXCEPTION_IEEE_754		! 0x21
184180709Sjaketl0_fp_other:
184288779Sjake	tl0_gen		T_FP_EXCEPTION_OTHER		! 0x22
184380709Sjaketl0_tag_ovflw:
184488779Sjake	tl0_gen		T_TAG_OFERFLOW			! 0x23
184580709Sjaketl0_clean_window:
184688779Sjake	clean_window					! 0x24
184780709Sjaketl0_divide:
184888779Sjake	tl0_gen		T_DIVISION_BY_ZERO		! 0x28
184988779Sjake	tl0_reserved	7				! 0x29-0x2f
185080709Sjaketl0_data_excptn:
185188779Sjake	tl0_data_excptn					! 0x30
185288779Sjake	tl0_reserved	1				! 0x31
185380709Sjaketl0_data_error:
185488779Sjake	tl0_gen		T_DATA_ERROR			! 0x32
185588779Sjake	tl0_reserved	1				! 0x33
185680709Sjaketl0_align:
185788779Sjake	tl0_align					! 0x34
185880709Sjaketl0_align_lddf:
185988779Sjake	tl0_gen		T_RESERVED			! 0x35
186080709Sjaketl0_align_stdf:
186188779Sjake	tl0_gen		T_RESERVED			! 0x36
186280709Sjaketl0_priv_action:
186388779Sjake	tl0_gen		T_PRIVILEGED_ACTION		! 0x37
186488779Sjake	tl0_reserved	9				! 0x38-0x40
186580709Sjaketl0_intr_level:
186688779Sjake	tl0_intr_level					! 0x41-0x4f
186788779Sjake	tl0_reserved	16				! 0x50-0x5f
186880709Sjaketl0_intr_vector:
186997265Sjake	intr_vector					! 0x60
187080709Sjaketl0_watch_phys:
187188779Sjake	tl0_gen		T_PA_WATCHPOINT			! 0x61
187280709Sjaketl0_watch_virt:
187388779Sjake	tl0_gen		T_VA_WATCHPOINT			! 0x62
187480709Sjaketl0_ecc:
187588779Sjake	tl0_gen		T_CORRECTED_ECC_ERROR		! 0x63
187680709Sjaketl0_immu_miss:
187788779Sjake	tl0_immu_miss					! 0x64
187880709Sjaketl0_dmmu_miss:
187988779Sjake	tl0_dmmu_miss					! 0x68
188080709Sjaketl0_dmmu_prot:
188188779Sjake	tl0_dmmu_prot					! 0x6c
188288779Sjake	tl0_reserved	16				! 0x70-0x7f
188380709Sjaketl0_spill_0_n:
188488779Sjake	tl0_spill_0_n					! 0x80
188582906Sjaketl0_spill_1_n:
188688779Sjake	tl0_spill_1_n					! 0x84
188791246Sjake	tl0_spill_bad	14				! 0x88-0xbf
188880709Sjaketl0_fill_0_n:
188988779Sjake	tl0_fill_0_n					! 0xc0
189082906Sjaketl0_fill_1_n:
189188779Sjake	tl0_fill_1_n					! 0xc4
189291246Sjake	tl0_fill_bad	14				! 0xc8-0xff
	/* Software traps (trap instruction, vectors 0x100-0x1ff). */
189388644Sjaketl0_soft:
1894106050Sjake	tl0_gen		T_SYSCALL			! 0x100
189588779Sjake	tl0_gen		T_BREAKPOINT			! 0x101
189688779Sjake	tl0_gen		T_DIVISION_BY_ZERO		! 0x102
189788779Sjake	tl0_reserved	1				! 0x103
189888779Sjake	tl0_gen		T_CLEAN_WINDOW			! 0x104
189988779Sjake	tl0_gen		T_RANGE_CHECK			! 0x105
190088779Sjake	tl0_gen		T_FIX_ALIGNMENT			! 0x106
190188779Sjake	tl0_gen		T_INTEGER_OVERFLOW		! 0x107
1902106050Sjake	tl0_gen		T_SYSCALL			! 0x108
1903106050Sjake#ifdef COMPAT_FREEBSD4
190488779Sjake	tl0_syscall					! 0x109
1905106050Sjake#else
1906106050Sjake	tl0_gen		T_SYSCALL			! 0x109
1907106050Sjake#endif
190888779Sjake	tl0_fp_restore					! 0x10a
190988779Sjake	tl0_reserved	5				! 0x10b-0x10f
191088779Sjake	tl0_gen		T_TRAP_INSTRUCTION_16		! 0x110
191188779Sjake	tl0_gen		T_TRAP_INSTRUCTION_17		! 0x111
191288779Sjake	tl0_gen		T_TRAP_INSTRUCTION_18		! 0x112
191388779Sjake	tl0_gen		T_TRAP_INSTRUCTION_19		! 0x113
191488779Sjake	tl0_gen		T_TRAP_INSTRUCTION_20		! 0x114
191588779Sjake	tl0_gen		T_TRAP_INSTRUCTION_21		! 0x115
191688779Sjake	tl0_gen		T_TRAP_INSTRUCTION_22		! 0x116
191788779Sjake	tl0_gen		T_TRAP_INSTRUCTION_23		! 0x117
191888779Sjake	tl0_gen		T_TRAP_INSTRUCTION_24		! 0x118
191988779Sjake	tl0_gen		T_TRAP_INSTRUCTION_25		! 0x119
192088779Sjake	tl0_gen		T_TRAP_INSTRUCTION_26		! 0x11a
192188779Sjake	tl0_gen		T_TRAP_INSTRUCTION_27		! 0x11b
192288779Sjake	tl0_gen		T_TRAP_INSTRUCTION_28		! 0x11c
192388779Sjake	tl0_gen		T_TRAP_INSTRUCTION_29		! 0x11d
192488779Sjake	tl0_gen		T_TRAP_INSTRUCTION_30		! 0x11e
192588779Sjake	tl0_gen		T_TRAP_INSTRUCTION_31		! 0x11f
1926106050Sjake	tl0_reserved	32				! 0x120-0x13f
1927106050Sjake	tl0_gen		T_SYSCALL			! 0x140
1928106050Sjake	tl0_syscall					! 0x141
1929106050Sjake	tl0_gen		T_SYSCALL			! 0x142
1930106050Sjake	tl0_gen		T_SYSCALL			! 0x143
1931106050Sjake	tl0_reserved	188				! 0x144-0x1ff
193280709Sjake
/*
 * Second half of the trap table: vectors 0x200-0x3ff are used when a
 * trap is taken at trap level >= 1, i.e. from within the kernel.
 * NOTE: several of the "! 0xNNN" vector-number comments in the spill/
 * fill region were wrong and have been corrected to match the actual
 * slot layout (each named entry occupies 4 vectors).
 */
193380709Sjaketl1_base:
193488779Sjake	tl1_reserved	8				! 0x200-0x207
193580709Sjaketl1_insn_excptn:
193688779Sjake	tl1_insn_excptn					! 0x208
193788779Sjake	tl1_reserved	1				! 0x209
193880709Sjaketl1_insn_error:
193988779Sjake	tl1_gen		T_INSTRUCTION_ERROR		! 0x20a
194088779Sjake	tl1_reserved	5				! 0x20b-0x20f
194180709Sjaketl1_insn_illegal:
194288779Sjake	tl1_gen		T_ILLEGAL_INSTRUCTION		! 0x210
194380709Sjaketl1_priv_opcode:
194488779Sjake	tl1_gen		T_PRIVILEGED_OPCODE		! 0x211
194588779Sjake	tl1_reserved	14				! 0x212-0x21f
194680709Sjaketl1_fp_disabled:
1947113024Sjake	tl1_fp_disabled					! 0x220
194880709Sjaketl1_fp_ieee:
194988779Sjake	tl1_gen		T_FP_EXCEPTION_IEEE_754		! 0x221
195080709Sjaketl1_fp_other:
195188779Sjake	tl1_gen		T_FP_EXCEPTION_OTHER		! 0x222
195280709Sjaketl1_tag_ovflw:
195388779Sjake	tl1_gen		T_TAG_OFERFLOW			! 0x223
195480709Sjaketl1_clean_window:
195588779Sjake	clean_window					! 0x224
195680709Sjaketl1_divide:
195788779Sjake	tl1_gen		T_DIVISION_BY_ZERO		! 0x228
195888779Sjake	tl1_reserved	7				! 0x229-0x22f
195980709Sjaketl1_data_excptn:
196088779Sjake	tl1_data_excptn					! 0x230
196188779Sjake	tl1_reserved	1				! 0x231
196280709Sjaketl1_data_error:
196388779Sjake	tl1_gen		T_DATA_ERROR			! 0x232
196488779Sjake	tl1_reserved	1				! 0x233
196580709Sjaketl1_align:
196688779Sjake	tl1_align					! 0x234
196780709Sjaketl1_align_lddf:
196888779Sjake	tl1_gen		T_RESERVED			! 0x235
196980709Sjaketl1_align_stdf:
197088779Sjake	tl1_gen		T_RESERVED			! 0x236
197180709Sjaketl1_priv_action:
197288779Sjake	tl1_gen		T_PRIVILEGED_ACTION		! 0x237
197388779Sjake	tl1_reserved	9				! 0x238-0x240
197480709Sjaketl1_intr_level:
197588779Sjake	tl1_intr_level					! 0x241-0x24f
197688779Sjake	tl1_reserved	16				! 0x250-0x25f
197780709Sjaketl1_intr_vector:
197897265Sjake	intr_vector					! 0x260
197980709Sjaketl1_watch_phys:
198088779Sjake	tl1_gen		T_PA_WATCHPOINT			! 0x261
198180709Sjaketl1_watch_virt:
198288779Sjake	tl1_gen		T_VA_WATCHPOINT			! 0x262
198380709Sjaketl1_ecc:
198488779Sjake	tl1_gen		T_CORRECTED_ECC_ERROR		! 0x263
198580709Sjaketl1_immu_miss:
198688779Sjake	tl1_immu_miss					! 0x264
198780709Sjaketl1_dmmu_miss:
198888779Sjake	tl1_dmmu_miss					! 0x268
198980709Sjaketl1_dmmu_prot:
199088779Sjake	tl1_dmmu_prot					! 0x26c
199188779Sjake	tl1_reserved	16				! 0x270-0x27f
199280709Sjaketl1_spill_0_n:
199388779Sjake	tl1_spill_0_n					! 0x280
199491246Sjake	tl1_spill_bad	1				! 0x284
199591246Sjaketl1_spill_2_n:
199691246Sjake	tl1_spill_2_n					! 0x288
199791246Sjaketl1_spill_3_n:
199891246Sjake	tl1_spill_3_n					! 0x28c
199991246Sjake	tl1_spill_bad	4				! 0x290-0x29f
200081380Sjaketl1_spill_0_o:
200188779Sjake	tl1_spill_0_o					! 0x2a0
200282906Sjaketl1_spill_1_o:
200388779Sjake	tl1_spill_1_o					! 0x2a4
200482906Sjaketl1_spill_2_o:
200588779Sjake	tl1_spill_2_o					! 0x2a8
200691246Sjake	tl1_spill_bad	5				! 0x2ac-0x2bf
200780709Sjaketl1_fill_0_n:
200888779Sjake	tl1_fill_0_n					! 0x2c0
200991246Sjake	tl1_fill_bad	1				! 0x2c4
201091246Sjaketl1_fill_2_n:
201191246Sjake	tl1_fill_2_n					! 0x2c8
201291246Sjaketl1_fill_3_n:
201391246Sjake	tl1_fill_3_n					! 0x2cc
201491246Sjake	tl1_fill_bad	12				! 0x2d0-0x2ff
201588779Sjake	tl1_reserved	1				! 0x300
201680709Sjaketl1_breakpoint:
201788779Sjake	tl1_gen		T_BREAKPOINT			! 0x301
201888779Sjake	tl1_gen		T_RSTRWP_PHYS			! 0x302
201988779Sjake	tl1_gen		T_RSTRWP_VIRT			! 0x303
202088779Sjake	tl1_reserved	252				! 0x304-0x3ff
202180709Sjake
202281380Sjake/*
202382906Sjake * User trap entry point.
202482906Sjake *
2025103897Sjake * void tl0_utrap(u_long type, u_long o1, u_long o2, u_long tar, u_long sfar,
2026103897Sjake *                u_long sfsr)
2027103897Sjake *
2028103897Sjake * This handles redirecting a trap back to usermode as a user trap.  The user
2029103897Sjake * program must have first registered a trap handler with the kernel using
2030103897Sjake * sysarch(SPARC_UTRAP_INSTALL).  The trap handler is passed enough state
2031103897Sjake * for it to return to the trapping code directly, it will not return through
2032103897Sjake * the kernel.  The trap type is passed in %o0, all out registers must be
2033103897Sjake * passed through to tl0_trap or to usermode untouched.  Note that the
2034103897Sjake * parameters passed in out registers may be used by the user trap handler.
2035103897Sjake * Do not change the registers they are passed in or you will break the ABI.
2036103897Sjake *
2037103897Sjake * If the trap type allows user traps, setup state to execute the user trap
2038103897Sjake * handler and bounce back to usermode, otherwise branch to tl0_trap.
2039103897Sjake */
2040103897SjakeENTRY(tl0_utrap)
2041103897Sjake	/*
2042103897Sjake	 * Check if the trap type allows user traps.
2043103897Sjake	 */
2044103897Sjake	cmp	%o0, UT_MAX
2045103897Sjake	bge,a,pt %xcc, tl0_trap
2046103897Sjake	 nop
2047103897Sjake
2048103897Sjake	/*
2049103897Sjake	 * Load the user trap handler from the utrap table.
2050103897Sjake	 */
2051103897Sjake	ldx	[PCPU(CURTHREAD)], %l0
2052103897Sjake	ldx	[%l0 + TD_PROC], %l0
2053103897Sjake	ldx	[%l0 + P_MD + MD_UTRAP], %l0
	/*
	 * The sllx in the delay slot is not annulled, so the table index
	 * is computed even when the branch to tl0_trap is taken (harmless).
	 */
2054103897Sjake	brz,pt	%l0, tl0_trap
2055103897Sjake	 sllx	%o0, PTR_SHIFT, %l1
2056103897Sjake	ldx	[%l0 + %l1], %l0
2057103897Sjake	brz,a,pt %l0, tl0_trap
2058103897Sjake	 nop
2059103897Sjake
2060103897Sjake	/*
2061103897Sjake	 * If the save we did on entry to the kernel had to spill a window
2062103897Sjake	 * to the pcb, pretend we took a spill trap instead.  Any windows
2063103897Sjake	 * that are in the pcb must be copied out or the fill handler will
2064103897Sjake	 * not be able to find them, since the user trap handler returns
2065103897Sjake	 * directly to the trapping code.  Note that we only support precise
2066103897Sjake	 * user traps, which implies that the condition that caused the trap
2067103897Sjake	 * in the first place is still valid, so it will occur again when we
2068103897Sjake	 * re-execute the trapping instruction.
2069103897Sjake	 */
2070103897Sjake	ldx	[PCB_REG + PCB_NSAVED], %l1
2071103897Sjake	brnz,a,pn %l1, tl0_trap
2072103897Sjake	 mov	T_SPILL, %o0
2073103897Sjake
2074103897Sjake	/*
2075103897Sjake	 * Pass %fsr in %l4, %tstate in %l5, %tpc in %l6 and %tnpc in %l7.
2076103897Sjake	 * The ABI specifies only %l6 and %l7, but we need to pass %fsr or
2077103897Sjake	 * it may be clobbered by an interrupt before the user trap code
2078103897Sjake	 * can read it, and we must pass %tstate in order to restore %ccr
2079103897Sjake	 * and %asi.  The %fsr must be stored to memory, so we use the
2080103897Sjake	 * temporary stack for that.
2081103897Sjake	 */
2082103897Sjake	rd	%fprs, %l1
2083103897Sjake	or	%l1, FPRS_FEF, %l2
2084103897Sjake	wr	%l2, 0, %fprs
2085103897Sjake	dec	8, ASP_REG
2086103897Sjake	stx	%fsr, [ASP_REG]
2087103897Sjake	ldx	[ASP_REG], %l4
2088103897Sjake	inc	8, ASP_REG
2089103897Sjake	wr	%l1, 0, %fprs
2090103897Sjake
2091103897Sjake	rdpr	%tstate, %l5
2092103897Sjake	rdpr	%tpc, %l6
2093103897Sjake	rdpr	%tnpc, %l7
2094103897Sjake
2095103897Sjake	/*
2096103897Sjake	 * Setup %tnpc to return to.
2097103897Sjake	 */
2098103897Sjake	wrpr	%l0, 0, %tnpc
2099103897Sjake
2100103897Sjake	/*
2101103897Sjake	 * Setup %wstate for return, clear WSTATE_TRANSITION.
2102103897Sjake	 */
2103103897Sjake	rdpr	%wstate, %l1
2104103897Sjake	and	%l1, WSTATE_NORMAL_MASK, %l1
2105103897Sjake	wrpr	%l1, 0, %wstate
2106103897Sjake
2107103897Sjake	/*
2108103897Sjake	 * Setup %tstate for return, change the saved cwp to point to the
2109103897Sjake	 * current window instead of the window at the time of the trap.
2110103897Sjake	 */
2111103897Sjake	andn	%l5, TSTATE_CWP_MASK, %l1
2112103897Sjake	rdpr	%cwp, %l2
2113103897Sjake	wrpr	%l1, %l2, %tstate
2114103897Sjake
2115103897Sjake	/*
2116103897Sjake	 * Setup %sp.  Userland processes will crash if this is not setup.
2117103897Sjake	 */
2118103897Sjake	sub	%fp, CCFSZ, %sp
2119103897Sjake
2120103897Sjake	/*
2121103897Sjake	 * Execute the user trap handler.
2122103897Sjake	 */
2123103897Sjake	done
2124103897SjakeEND(tl0_utrap)
2125103897Sjake
2126103897Sjake/*
2127103897Sjake * (Real) User trap entry point.
2128103897Sjake *
212988644Sjake * void tl0_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
213088644Sjake *		 u_int sfsr)
213182906Sjake *
213282906Sjake * The following setup has been performed:
213382906Sjake *	- the windows have been split and the active user window has been saved
213482906Sjake *	  (maybe just to the pcb)
213582906Sjake *	- we are on alternate globals and interrupts are disabled
213682906Sjake *
213789050Sjake * We switch to the kernel stack, build a trapframe, switch to normal
213888644Sjake * globals, enable interrupts and call trap.
213982906Sjake *
214082906Sjake * NOTE: We must be very careful setting up the per-cpu pointer.  We know that
214182906Sjake * it has been pre-set in alternate globals, so we read it from there and setup
214282906Sjake * the normal %g7 *before* enabling interrupts.  This avoids any possibility
214387702Sjhb * of cpu migration and using the wrong pcpup.
214481380Sjake */
214582005SjakeENTRY(tl0_trap)
214682906Sjake	/*
214782906Sjake	 * Force kernel store order.
214882906Sjake	 */
214982906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
215080709Sjake
	/*
	 * Save the trap state registers; they are clobbered if another
	 * trap is taken once traps are re-enabled.
	 */
215181380Sjake	rdpr	%tstate, %l0
215288644Sjake	rdpr	%tpc, %l1
215388644Sjake	rdpr	%tnpc, %l2
215488644Sjake	rd	%y, %l3
215588644Sjake	rd	%fprs, %l4
215688644Sjake	rdpr	%wstate, %l5
215788644Sjake
215888644Sjake#if KTR_COMPILE & KTR_TRAP
215988644Sjake	CATR(KTR_TRAP,
216088644Sjake	    "tl0_trap: td=%p type=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
216188644Sjake	    , %g1, %g2, %g3, 7, 8, 9)
216288644Sjake	ldx	[PCPU(CURTHREAD)], %g2
216388644Sjake	stx	%g2, [%g1 + KTR_PARM1]
216488644Sjake	stx	%o0, [%g1 + KTR_PARM2]
216588644Sjake	rdpr	%pil, %g2
216688644Sjake	stx	%g2, [%g1 + KTR_PARM3]
216788644Sjake	stx	%l1, [%g1 + KTR_PARM4]
216888644Sjake	stx	%l2, [%g1 + KTR_PARM5]
216988644Sjake	stx	%i6, [%g1 + KTR_PARM6]
217088644Sjake9:
217188644Sjake#endif
217288644Sjake
	/*
	 * Finish splitting the windows: the remaining user windows are
	 * moved from %canrestore to %otherwin so spills/fills of them use
	 * the "other" (user) handlers while we run on the kernel stack.
	 */
2173103897Sjake1:	and	%l5, WSTATE_NORMAL_MASK, %l5
2174103897Sjake	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
217588644Sjake	wrpr	%l5, WSTATE_KERNEL, %wstate
217688644Sjake	rdpr	%canrestore, %l6
217788644Sjake	wrpr	%l6, 0, %otherwin
217888644Sjake	wrpr	%g0, 0, %canrestore
217988644Sjake
	/* Switch to the kernel stack and build the trapframe there. */
218088644Sjake	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp
218188644Sjake
218288644Sjake
2182105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2183105733Sjake	stx	%o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
218488644Sjake	stx	%o3, [%sp + SPOFF + CCFSZ + TF_TAR]
218588644Sjake	stx	%o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
2186105733Sjake	stx	%o5, [%sp + SPOFF + CCFSZ + TF_SFSR]
218788644Sjake
218881380Sjake	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
218981380Sjake	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
219081380Sjake	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2191105733Sjake	stx	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
2192105733Sjake	stx	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
2193105733Sjake	stx	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]
219481380Sjake
	/* %fsr and %gsr need the FPU enabled (FPRS_FEF) to be read. */
219588644Sjake	wr	%g0, FPRS_FEF, %fprs
219688644Sjake	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
2197108379Sjake	rd	%gsr, %l6
2198105733Sjake	stx	%l6, [%sp + SPOFF + CCFSZ + TF_GSR]
219988644Sjake	wr	%g0, 0, %fprs
220082906Sjake
	/*
	 * Carry PCB_REG/PCPU_REG across the switch to normal globals,
	 * then save the normal %g6/%g7 and re-establish the pointers.
	 */
220189050Sjake	mov	PCB_REG, %l0
220289050Sjake	mov	PCPU_REG, %l1
220382906Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
220482005Sjake
220582005Sjake	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
220682005Sjake	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]
220782005Sjake
220889050Sjake	mov	%l0, PCB_REG
220989050Sjake	mov	%l1, PCPU_REG
221088644Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
221184186Sjake
	/* The trapped outs are our ins after the window split. */
221284186Sjake	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
221384186Sjake	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
221484186Sjake	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
221584186Sjake	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
221684186Sjake	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
221784186Sjake	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
221884186Sjake	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
221984186Sjake	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
222084186Sjake
222184186Sjake	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
2222108377Sjake	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
2223108377Sjake	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
2224108377Sjake	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
2225108377Sjake	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
2226108377Sjake
	/*
	 * Tail-call the handler in %o2 with %o7 forged so that its
	 * "ret" returns to tl0_ret (%o7 + 8).  The trapframe pointer is
	 * passed in %o0 from the delay slot.
	 */
2227103921Sjake	set	tl0_ret - 8, %o7
2228103921Sjake	jmpl	%o2, %g0
222984186Sjake	 add	%sp, CCFSZ + SPOFF, %o0
223084186SjakeEND(tl0_trap)
223184186Sjake
223288644Sjake/*
223391246Sjake * void tl0_intr(u_int level, u_int mask)
223491246Sjake */
	/*
	 * Interrupt-level trap entry from usermode.  Mirrors tl0_trap:
	 * builds a trapframe on the kernel stack, but also raises %pil to
	 * the interrupt's level, acknowledges the soft interrupt, bumps
	 * the interrupt statistics and dispatches directly to the handler
	 * for this level, then joins the common return path at tl0_ret.
	 */
223584186SjakeENTRY(tl0_intr)
223684186Sjake	/*
223784186Sjake	 * Force kernel store order.
223884186Sjake	 */
223984186Sjake	wrpr	%g0, PSTATE_ALT, %pstate
224084186Sjake
	/* Save the trap state before traps can be taken again. */
224184186Sjake	rdpr	%tstate, %l0
224288644Sjake	rdpr	%tpc, %l1
224388644Sjake	rdpr	%tnpc, %l2
224488644Sjake	rd	%y, %l3
224588644Sjake	rd	%fprs, %l4
224688644Sjake	rdpr	%wstate, %l5
224788644Sjake
224888644Sjake#if KTR_COMPILE & KTR_INTR
224988644Sjake	CATR(KTR_INTR,
225091246Sjake	    "tl0_intr: td=%p level=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
225188644Sjake	    , %g1, %g2, %g3, 7, 8, 9)
225288644Sjake	ldx	[PCPU(CURTHREAD)], %g2
225388644Sjake	stx	%g2, [%g1 + KTR_PARM1]
225488644Sjake	stx	%o0, [%g1 + KTR_PARM2]
225588644Sjake	rdpr	%pil, %g2
225688644Sjake	stx	%g2, [%g1 + KTR_PARM3]
225788644Sjake	stx	%l1, [%g1 + KTR_PARM4]
225888644Sjake	stx	%l2, [%g1 + KTR_PARM5]
225988644Sjake	stx	%i6, [%g1 + KTR_PARM6]
226088644Sjake9:
226188644Sjake#endif
226288644Sjake
	/* Block further interrupts at this level and ack the soft interrupt. */
226391246Sjake	wrpr	%o0, 0, %pil
2264108379Sjake	wr	%o1, 0, %clear_softint
226591246Sjake
	/* Split the windows, as in tl0_trap. */
226688644Sjake	and	%l5, WSTATE_NORMAL_MASK, %l5
226788644Sjake	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
226888644Sjake	wrpr	%l5, WSTATE_KERNEL, %wstate
226988644Sjake	rdpr	%canrestore, %l6
227088644Sjake	wrpr	%l6, 0, %otherwin
227188644Sjake	wrpr	%g0, 0, %canrestore
227288644Sjake
	/* Switch to the kernel stack and build the trapframe. */
227388644Sjake	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp
227488644Sjake
227584186Sjake	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
227684186Sjake	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
227784186Sjake	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2278105733Sjake	stx	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
2279105733Sjake	stx	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
2280105733Sjake	stx	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]
228181380Sjake
	/* %fsr and %gsr need the FPU enabled (FPRS_FEF) to be read. */
228288644Sjake	wr	%g0, FPRS_FEF, %fprs
228388644Sjake	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
2284108379Sjake	rd	%gsr, %l6
2285105733Sjake	stx	%l6, [%sp + SPOFF + CCFSZ + TF_GSR]
228688644Sjake	wr	%g0, 0, %fprs
228784186Sjake
	/* Keep the level in %l3; it survives the calls below. */
228891246Sjake	mov	%o0, %l3
228991246Sjake	mov	T_INTERRUPT, %o1
229089050Sjake
2291105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
2292105733Sjake	stx	%o1, [%sp + SPOFF + CCFSZ + TF_TYPE]
229388644Sjake
	/* Carry PCB_REG/PCPU_REG across the switch to normal globals. */
229489050Sjake	mov	PCB_REG, %l0
229589050Sjake	mov	PCPU_REG, %l1
229684186Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
229784186Sjake
229884186Sjake	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
229984186Sjake	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
230084186Sjake	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
230184186Sjake	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
230284186Sjake	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
230384186Sjake	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
230484186Sjake	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]
230584186Sjake
230689050Sjake	mov	%l0, PCB_REG
230789050Sjake	mov	%l1, PCPU_REG
230888644Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
230984186Sjake
	/* The trapped outs are our ins after the window split. */
231084186Sjake	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
231184186Sjake	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
231284186Sjake	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
231384186Sjake	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
231484186Sjake	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
231584186Sjake	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
231684186Sjake	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
231784186Sjake	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
231884186Sjake
	/* Atomically bump the per-PIL count: intrcnt[pil_countp[pil]]. */
2319117658Sjmg	/* %l3 contains PIL */
2320117658Sjmg	SET(intrcnt, %l1, %l2)
2321117658Sjmg	prefetcha [%l2] ASI_N, 1
2322117658Sjmg	SET(pil_countp, %l1, %l0)
2323117658Sjmg	sllx	%l3, 1, %l1
2324117658Sjmg	lduh	[%l0 + %l1], %l0
2325117658Sjmg	sllx	%l0, 3, %l0
2326117658Sjmg	add	%l0, %l2, %l0
2327117658Sjmg
2328117658Sjmg	ATOMIC_INC_ULONG(%l0, %l1, %l2)
2329117658Sjmg
233089050Sjake	call	critical_enter
233189050Sjake	 nop
233289050Sjake
	/* Update the global interrupt statistics counter. */
233386519Sjake	SET(cnt+V_INTR, %l1, %l0)
233488644Sjake	ATOMIC_INC_INT(%l0, %l1, %l2)
233584186Sjake
	/* Dispatch to intr_handlers[level]; panic if there is none. */
233686519Sjake	SET(intr_handlers, %l1, %l0)
233789050Sjake	sllx	%l3, IH_SHIFT, %l1
233888644Sjake	ldx	[%l0 + %l1], %l1
233989050Sjake	KASSERT(%l1, "tl0_intr: ih null")
234084186Sjake	call	%l1
234184186Sjake	 add	%sp, CCFSZ + SPOFF, %o0
234289050Sjake
234389050Sjake	call	critical_exit
234489050Sjake	 nop
234589050Sjake
	/* Join the common return-to-usermode path. */
2346116589Sjake	ba,a	%xcc, tl0_ret
234784186Sjake	 nop
234884186SjakeEND(tl0_intr)
234984186Sjake
2350105733Sjake/*
2351105733Sjake * Initiate return to usermode.
2352105733Sjake *
2353105733Sjake * Called with a trapframe on the stack.  The window that was setup in
2354105733Sjake * tl0_trap may have been used by "fast" trap handlers that pretend to be
2355105733Sjake * leaf functions, so all ins and locals may have been clobbered since
2356105733Sjake * then.
2357105733Sjake *
2358105733Sjake * This code is rather long and complicated.
2359105733Sjake */
236082005SjakeENTRY(tl0_ret)
236193389Sjake	/*
236293389Sjake	 * Check for pending asts atomically with returning.  We must raise
236393389Sjake	 * the pil before checking, and if no asts are found the pil must
236493389Sjake	 * remain raised until the retry is executed, or we risk missing asts
236593389Sjake	 * caused by interrupts occuring after the test.  If the pil is lowered,
236693389Sjake	 * as it is when we call ast, the check must be re-executed.
236793389Sjake	 */
2368103784Sjake	wrpr	%g0, PIL_TICK, %pil
236984186Sjake	ldx	[PCPU(CURTHREAD)], %l0
2370111032Sjulian	lduw	[%l0 + TD_FLAGS], %l1
2371111032Sjulian	set	TDF_ASTPENDING | TDF_NEEDRESCHED, %l2
2372111032Sjulian	and	%l1, %l2, %l1
2373111032Sjulian	brz,a,pt %l1, 1f
237482906Sjake	 nop
2375105733Sjake
2376105733Sjake	/*
2377105733Sjake	 * We have an ast.  Re-enable interrupts and handle it, then restart
2378105733Sjake	 * the return sequence.
2379105733Sjake	 */
238093389Sjake	wrpr	%g0, 0, %pil
238182906Sjake	call	ast
238282906Sjake	 add	%sp, CCFSZ + SPOFF, %o0
2383103784Sjake	ba,a	%xcc, tl0_ret
238493389Sjake	 nop
238582906Sjake
238693389Sjake	/*
238793389Sjake	 * Check for windows that were spilled to the pcb and need to be
238893389Sjake	 * copied out.  This must be the last thing that is done before the
238993389Sjake	 * return to usermode.  If there are still user windows in the cpu
239093389Sjake	 * and we call a nested function after this, which causes them to be
239193389Sjake	 * spilled to the pcb, they will not be copied out and the stack will
239293389Sjake	 * be inconsistent.
239393389Sjake	 */
2394103784Sjake1:	ldx	[PCB_REG + PCB_NSAVED], %l1
2395103784Sjake	brz,a,pt %l1, 2f
2396103784Sjake	 nop
2397103784Sjake	wrpr	%g0, 0, %pil
239893389Sjake	mov	T_SPILL, %o0
2399105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2400103784Sjake	call	trap
2401103784Sjake	 add	%sp, SPOFF + CCFSZ, %o0
2402103784Sjake	ba,a	%xcc, tl0_ret
2403103784Sjake	 nop
240482906Sjake
2405105733Sjake	/*
2406108377Sjake	 * Restore the out and most global registers from the trapframe.
2407108377Sjake	 * The ins will become the outs when we restore below.
2408105733Sjake	 */
2409103784Sjake2:	ldx	[%sp + SPOFF + CCFSZ + TF_O0], %i0
241082906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O1], %i1
241182906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O2], %i2
241282906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O3], %i3
241382906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O4], %i4
241482906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O5], %i5
241582906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O6], %i6
241682906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O7], %i7
241781380Sjake
2418108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
2419108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
2420108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
2421108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
2422108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
2423108377Sjake
2424105733Sjake	/*
2425105733Sjake	 * Load everything we need to restore below before disabling
2426105733Sjake	 * interrupts.
2427105733Sjake	 */
2428105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_FPRS], %l0
2429105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_GSR], %l1
243085243Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
2431105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l3
2432105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l4
2433105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l5
2434105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_WSTATE], %l6
243582906Sjake
2436105733Sjake	/*
2437108377Sjake	 * Disable interrupts to restore the special globals.  They are not
2438108377Sjake	 * saved and restored for all kernel traps, so an interrupt at the
2439108377Sjake	 * wrong time would clobber them.
2440105733Sjake	 */
244189050Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
244289050Sjake
244389050Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6
244489050Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G7], %g7
244589050Sjake
2446105733Sjake	/*
2447105733Sjake	 * Switch to alternate globals.  This frees up some registers we
2448105733Sjake	 * can use after the restore changes our window.
2449105733Sjake	 */
245082906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
245182906Sjake
2452105733Sjake	/*
2453105733Sjake	 * Drop %pil to zero.  It must have been zero at the time of the
2454105733Sjake	 * trap, since we were in usermode, but it was raised above in
2455105733Sjake	 * order to check for asts atomically.  We have interrupts disabled
2456105733Sjake	 * so any interrupts will not be serviced until we complete the
2457105733Sjake	 * return to usermode.
2458105733Sjake	 */
245988644Sjake	wrpr	%g0, 0, %pil
2460105733Sjake
2461105733Sjake	/*
2462105733Sjake	 * Save %fprs in an alternate global so it can be restored after the
2463105733Sjake	 * restore instruction below.  If we restore it before the restore,
2464105733Sjake	 * and the restore traps we may run for a while with floating point
2465105733Sjake	 * enabled in the kernel, which we want to avoid.
2466105733Sjake	 */
2467105733Sjake	mov	%l0, %g1
2468105733Sjake
2469105733Sjake	/*
2470105733Sjake	 * Restore %fsr and %gsr.  These need floating point enabled in %fprs,
2471105733Sjake	 * so we set it temporarily and then clear it.
2472105733Sjake	 */
2473105733Sjake	wr	%g0, FPRS_FEF, %fprs
2474105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_FSR], %fsr
2475108379Sjake	wr	%l1, 0, %gsr
2476105733Sjake	wr	%g0, 0, %fprs
2477105733Sjake
2478105733Sjake	/*
2479105733Sjake	 * Restore program counters.  This could be done after the restore
2480105733Sjake	 * but we're out of alternate globals to store them in...
2481105733Sjake	 */
248288644Sjake	wrpr	%l2, 0, %tnpc
2483105733Sjake	wrpr	%l3, 0, %tpc
248482906Sjake
2485105733Sjake	/*
2486105733Sjake	 * Save %tstate in an alternate global and clear the %cwp field.  %cwp
2487105733Sjake	 * will be affected by the restore below and we need to make sure it
2488105733Sjake	 * points to the current window at that time, not the window that was
2489105733Sjake	 * active at the time of the trap.
2490105733Sjake	 */
2491105733Sjake	andn	%l4, TSTATE_CWP_MASK, %g2
249282906Sjake
2493105733Sjake	/*
2494105733Sjake	 * Restore %y.  Could also be below if we had more alternate globals.
2495105733Sjake	 */
2496105733Sjake	wr	%l5, 0, %y
2497105733Sjake
2498105733Sjake	/*
2499105733Sjake	 * Setup %wstate for return.  We need to restore the user window state
2500105733Sjake	 * which we saved in wstate.other when we trapped.  We also need to
2501105733Sjake	 * set the transition bit so the restore will be handled specially
2502105733Sjake	 * if it traps, use the xor feature of wrpr to do that.
2503105733Sjake	 */
2504105733Sjake	srlx	%l6, WSTATE_OTHER_SHIFT, %g3
250588644Sjake	wrpr	%g3, WSTATE_TRANSITION, %wstate
2506105733Sjake
2507105733Sjake	/*
2508105733Sjake	 * Setup window management registers for return.  If not all user
2509105733Sjake	 * windows were spilled in the kernel %otherwin will be non-zero,
2510105733Sjake	 * so we need to transfer it to %canrestore to correctly restore
2511105733Sjake	 * those windows.  Otherwise everything gets set to zero and the
2512105733Sjake	 * restore below will fill a window directly from the user stack.
2513105733Sjake	 */
251488644Sjake	rdpr	%otherwin, %o0
251588644Sjake	wrpr	%o0, 0, %canrestore
251682906Sjake	wrpr	%g0, 0, %otherwin
251788644Sjake	wrpr	%o0, 0, %cleanwin
251881380Sjake
251982005Sjake	/*
2520105733Sjake	 * Now do the restore.  If this instruction causes a fill trap which
2521105733Sjake	 * fails to fill a window from the user stack, we will resume at
2522105733Sjake	 * tl0_ret_fill_end and call back into the kernel.
252382005Sjake	 */
252482906Sjake	restore
252582906Sjaketl0_ret_fill:
252681380Sjake
2527105733Sjake	/*
2528105733Sjake	 * We made it.  We're back in the window that was active at the time
2529105733Sjake	 * of the trap, and ready to return to usermode.
2530105733Sjake	 */
2531105733Sjake
2532105733Sjake	/*
2533105733Sjake	 * Restore %frps.  This was saved in an alternate global above.
2534105733Sjake	 */
2535105733Sjake	wr	%g1, 0, %fprs
2536105733Sjake
2537105733Sjake	/*
2538105733Sjake	 * Fixup %tstate so the saved %cwp points to the current window and
2539105733Sjake	 * restore it.
2540105733Sjake	 */
254188644Sjake	rdpr	%cwp, %g4
2542105733Sjake	wrpr	%g2, %g4, %tstate
2543105733Sjake
2544105733Sjake	/*
2545105733Sjake	 * Restore the user window state.  The transition bit was set above
2546105733Sjake	 * for special handling of the restore, this clears it.
2547105733Sjake	 */
254888644Sjake	wrpr	%g3, 0, %wstate
254985243Sjake
255084186Sjake#if KTR_COMPILE & KTR_TRAP
255188644Sjake	CATR(KTR_TRAP, "tl0_ret: td=%#lx pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
255282906Sjake	    , %g2, %g3, %g4, 7, 8, 9)
255383366Sjulian	ldx	[PCPU(CURTHREAD)], %g3
255482906Sjake	stx	%g3, [%g2 + KTR_PARM1]
255585243Sjake	rdpr	%pil, %g3
255685243Sjake	stx	%g3, [%g2 + KTR_PARM2]
255788644Sjake	rdpr	%tpc, %g3
255884186Sjake	stx	%g3, [%g2 + KTR_PARM3]
255988644Sjake	rdpr	%tnpc, %g3
256084186Sjake	stx	%g3, [%g2 + KTR_PARM4]
256184186Sjake	stx	%sp, [%g2 + KTR_PARM5]
256282906Sjake9:
256382906Sjake#endif
256481380Sjake
2565105733Sjake	/*
2566105733Sjake	 * Return to usermode.
2567105733Sjake	 */
256882906Sjake	retry
256982906Sjaketl0_ret_fill_end:
257082005Sjake
257184186Sjake#if KTR_COMPILE & KTR_TRAP
257288785Sjake	CATR(KTR_TRAP, "tl0_ret: fill magic ps=%#lx ws=%#lx sp=%#lx"
257382906Sjake	    , %l0, %l1, %l2, 7, 8, 9)
257488785Sjake	rdpr	%pstate, %l1
257588785Sjake	stx	%l1, [%l0 + KTR_PARM1]
257688785Sjake	stx	%l5, [%l0 + KTR_PARM2]
257788785Sjake	stx	%sp, [%l0 + KTR_PARM3]
257882906Sjake9:
257982906Sjake#endif
258082906Sjake
258182906Sjake	/*
2582105733Sjake	 * The restore above caused a fill trap and the fill handler was
2583105733Sjake	 * unable to fill a window from the user stack.  The special fill
2584105733Sjake	 * handler recognized this and punted, sending us here.  We need
2585105733Sjake	 * to carefully undo any state that was restored before the restore
2586105733Sjake	 * was executed and call trap again.  Trap will copyin a window
2587105733Sjake	 * from the user stack which will fault in the page we need so the
2588105733Sjake	 * restore above will succeed when we try again.  If this fails
2589105733Sjake	 * the process has trashed its stack, so we kill it.
259082906Sjake	 */
2591105733Sjake
2592105733Sjake	/*
2593105733Sjake	 * Restore the kernel window state.  This was saved in %l6 above, and
2594105733Sjake	 * since the restore failed we're back in the same window.
2595105733Sjake	 */
2596105733Sjake	wrpr	%l6, 0, %wstate
2597105733Sjake
2598105733Sjake	/*
2599105733Sjake	 * Restore the normal globals which have predefined values in the
2600105733Sjake	 * kernel.  We clobbered them above restoring the user's globals
2601105733Sjake	 * so this is very important.
2602105733Sjake	 * XXX PSTATE_ALT must already be set.
2603105733Sjake	 */
260488785Sjake	wrpr	%g0, PSTATE_ALT, %pstate
260589050Sjake	mov	PCB_REG, %o0
260689050Sjake	mov	PCPU_REG, %o1
260788785Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
260889050Sjake	mov	%o0, PCB_REG
260989050Sjake	mov	%o1, PCPU_REG
261088644Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
2611105733Sjake
2612105733Sjake	/*
2613105733Sjake	 * Simulate a fill trap and then start the whole return sequence over
2614105733Sjake	 * again.  This is special because it only copies in 1 window, not 2
2615105733Sjake	 * as we would for a normal failed fill.  This may be the first time
2616105733Sjake	 * the process has been run, so there may not be 2 windows worth of
2617105733Sjake	 * stack to copyin.
2618105733Sjake	 */
2619103784Sjake	mov	T_FILL_RET, %o0
2620105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2621103784Sjake	call	trap
2622103784Sjake	 add	%sp, SPOFF + CCFSZ, %o0
2623103784Sjake	ba,a	%xcc, tl0_ret
2624103784Sjake	 nop
262582005SjakeEND(tl0_ret)
262681380Sjake
262780709Sjake/*
262882906Sjake * Kernel trap entry point
262982906Sjake *
263091246Sjake * void tl1_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
263188644Sjake *		 u_int sfsr)
263282906Sjake *
263382906Sjake * This is easy because the stack is already setup and the windows don't need
263482906Sjake * to be split.  We build a trapframe and call trap(), the same as above, but
263582906Sjake * the outs don't need to be saved.
263680709Sjake */
263780709SjakeENTRY(tl1_trap)
	/*
	 * Capture the trap registers, %pil, %y and %wstate into locals
	 * while %tl is still raised and the trap registers are valid.
	 */
263880709Sjake	rdpr	%tstate, %l0
263980709Sjake	rdpr	%tpc, %l1
264080709Sjake	rdpr	%tnpc, %l2
264191246Sjake	rdpr	%pil, %l3
264291316Sjake	rd	%y, %l4
264391316Sjake	rdpr	%wstate, %l5
264480709Sjake
264584186Sjake#if KTR_COMPILE & KTR_TRAP
264688644Sjake	CATR(KTR_TRAP, "tl1_trap: td=%p type=%#lx pil=%#lx pc=%#lx sp=%#lx"
264788644Sjake	    , %g1, %g2, %g3, 7, 8, 9)
264888644Sjake	ldx	[PCPU(CURTHREAD)], %g2
264988644Sjake	stx	%g2, [%g1 + KTR_PARM1]
265097265Sjake	stx	%o0, [%g1 + KTR_PARM2]
265191246Sjake	stx	%l3, [%g1 + KTR_PARM3]
265288644Sjake	stx	%l1, [%g1 + KTR_PARM4]
265388644Sjake	stx	%i6, [%g1 + KTR_PARM5]
265482906Sjake9:
265582906Sjake#endif
265682906Sjake
	/* Trap state is saved in locals; drop to %tl 1 so traps may nest. */
265780709Sjake	wrpr	%g0, 1, %tl
265888644Sjake
	/* Keep the "other" window state bits, select kernel spill/fill. */
265991316Sjake	and	%l5, WSTATE_OTHER_MASK, %l5
266091316Sjake	wrpr	%l5, WSTATE_KERNEL, %wstate
266191246Sjake
	/*
	 * Build the trap frame: trap type/level and MMU fault info come in
	 * as arguments (%o0, %o1, %o3-%o5; %o2 is the handler, used below).
	 */
2662105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2663105733Sjake	stx	%o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
2664103919Sjake	stx	%o3, [%sp + SPOFF + CCFSZ + TF_TAR]
2665103919Sjake	stx	%o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
2666105733Sjake	stx	%o5, [%sp + SPOFF + CCFSZ + TF_SFSR]
2667103919Sjake
266888644Sjake	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
266988644Sjake	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
267088644Sjake	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2671105733Sjake	stx	%l3, [%sp + SPOFF + CCFSZ + TF_PIL]
2672105733Sjake	stx	%l4, [%sp + SPOFF + CCFSZ + TF_Y]
267388644Sjake
	/*
	 * PCB_REG/PCPU_REG do not survive the switch to the normal global
	 * set, so park them in locals, save the normal %g6/%g7 into the
	 * frame, and re-establish them before returning to PSTATE_KERNEL.
	 */
2674103919Sjake	mov	PCB_REG, %l0
2675103919Sjake	mov	PCPU_REG, %l1
267691158Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
267791158Sjake
2678108377Sjake	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
2679108377Sjake	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]
268080709Sjake
2681103919Sjake	mov	%l0, PCB_REG
2682103919Sjake	mov	%l1, PCPU_REG
268391158Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
268491158Sjake
	/* Our ins are the interrupted code's outs; save them in the frame. */
2685103919Sjake	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
2686103919Sjake	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
2687103919Sjake	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
2688103919Sjake	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
2689103919Sjake	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
2690103919Sjake	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
2691103919Sjake	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
2692103919Sjake	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
2693103919Sjake
2694108377Sjake	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
2695108377Sjake	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
2696108377Sjake	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
2697108377Sjake	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
2698108377Sjake	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
2699108377Sjake
	/*
	 * Call the handler passed in %o2 with the trap frame as argument,
	 * faking the return address (%o7) so that it returns to tl1_ret.
	 */
2700103921Sjake	set	tl1_ret - 8, %o7
2701103921Sjake	jmpl	%o2, %g0
270280709Sjake	 add	%sp, CCFSZ + SPOFF, %o0
2703103921SjakeEND(tl1_trap)
270480709Sjake
2705103921SjakeENTRY(tl1_ret)
	/*
	 * Return path for nested (kernel) traps: reload the interrupted
	 * state from the trap frame, re-raise %tl, rebuild the trap
	 * registers and retry the trapping instruction.
	 */
2706103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O0], %i0
2707103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O1], %i1
2708103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O2], %i2
2709103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O3], %i3
2710103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O4], %i4
2711103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O5], %i5
2712103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O6], %i6
2713103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O7], %i7
2714103919Sjake
2715108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
2716108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
2717108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
2718108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
2719108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
2720108377Sjake
272188644Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l0
272288644Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l1
272388644Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
2724105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_PIL], %l3
2725105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l4
272688644Sjake
	/*
	 * Only restore the saved normal-set %g6/%g7 when the trapped %tpc
	 * is at or above VM_MIN_PROM_ADDRESS; below that the current
	 * values are kept.  NOTE(review): presumably this distinguishes a
	 * return into firmware (PROM) code from a return into the kernel
	 * proper -- confirm against the platform address-space layout.
	 */
2727108377Sjake	set	VM_MIN_PROM_ADDRESS, %l5
2728108377Sjake	cmp	%l1, %l5
2729108377Sjake	bl,a,pt	%xcc, 1f
2730108377Sjake	 nop
273180709Sjake
2732108377Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
273380709Sjake
2734108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6
2735108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G7], %g7
2736108377Sjake
2737108377Sjake1:	wrpr	%g0, PSTATE_ALT, %pstate
2738108377Sjake
	/*
	 * Stash %tstate (minus its now-stale %cwp field), %tpc and %tnpc
	 * in alternate globals so they survive the restore below.
	 */
273988644Sjake	andn	%l0, TSTATE_CWP_MASK, %g1
274086519Sjake	mov	%l1, %g2
274186519Sjake	mov	%l2, %g3
274281380Sjake
274388644Sjake	wrpr	%l3, 0, %pil
274491316Sjake	wr	%l4, 0, %y
274586519Sjake
	/* Rotate back to the window that was active at the trap. */
274686519Sjake	restore
274786519Sjake
	/* Re-raise %tl and rebuild the trap registers for the retry. */
274880709Sjake	wrpr	%g0, 2, %tl
274980709Sjake
	/* Merge the current %cwp into the saved %tstate. */
275088644Sjake	rdpr	%cwp, %g4
275188644Sjake	wrpr	%g1, %g4, %tstate
275286519Sjake	wrpr	%g2, 0, %tpc
275386519Sjake	wrpr	%g3, 0, %tnpc
275486519Sjake
275584186Sjake#if KTR_COMPILE & KTR_TRAP
2756103921Sjake	CATR(KTR_TRAP, "tl1_ret: td=%#lx pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
275786519Sjake	    , %g2, %g3, %g4, 7, 8, 9)
275886519Sjake	ldx	[PCPU(CURTHREAD)], %g3
275986519Sjake	stx	%g3, [%g2 + KTR_PARM1]
276086519Sjake	rdpr	%pil, %g3
276186519Sjake	stx	%g3, [%g2 + KTR_PARM2]
276286519Sjake	rdpr	%tstate, %g3
276386519Sjake	stx	%g3, [%g2 + KTR_PARM3]
276486519Sjake	rdpr	%tpc, %g3
276586519Sjake	stx	%g3, [%g2 + KTR_PARM4]
276686519Sjake	stx	%sp, [%g2 + KTR_PARM5]
276782906Sjake9:
276882906Sjake#endif
276982906Sjake
277080709Sjake	retry
2771103921SjakeEND(tl1_ret)
277280709Sjake
277391246Sjake/*
277491246Sjake * void tl1_intr(u_int level, u_int mask)
277591246Sjake */
277684186SjakeENTRY(tl1_intr)
	/*
	 * Kernel (tl>0) interrupt entry: %o0 = level, %o1 = softint mask.
	 * Capture the trap registers and volatile state into locals while
	 * %tl is still raised.
	 */
277784186Sjake	rdpr	%tstate, %l0
277884186Sjake	rdpr	%tpc, %l1
277984186Sjake	rdpr	%tnpc, %l2
278091246Sjake	rdpr	%pil, %l3
278191316Sjake	rd	%y, %l4
278291316Sjake	rdpr	%wstate, %l5
278384186Sjake
278484186Sjake#if KTR_COMPILE & KTR_INTR
278589050Sjake	CATR(KTR_INTR,
278691246Sjake	    "tl1_intr: td=%p level=%#lx pil=%#lx pc=%#lx sp=%#lx"
278788644Sjake	    , %g1, %g2, %g3, 7, 8, 9)
278888644Sjake	ldx	[PCPU(CURTHREAD)], %g2
278988644Sjake	stx	%g2, [%g1 + KTR_PARM1]
279091246Sjake	stx	%o0, [%g1 + KTR_PARM2]
279191246Sjake	stx	%l3, [%g1 + KTR_PARM3]
279291246Sjake	stx	%l1, [%g1 + KTR_PARM4]
279391246Sjake	stx	%i6, [%g1 + KTR_PARM5]
279484186Sjake9:
279584186Sjake#endif
279684186Sjake
	/* Raise %pil to the interrupt level and ack the soft interrupt. */
279791246Sjake	wrpr	%o0, 0, %pil
2798108379Sjake	wr	%o1, 0, %clear_softint
279991246Sjake
280084186Sjake	wrpr	%g0, 1, %tl
280188644Sjake
	/* Keep the "other" window state bits, select kernel spill/fill. */
280291316Sjake	and	%l5, WSTATE_OTHER_MASK, %l5
280391316Sjake	wrpr	%l5, WSTATE_KERNEL, %wstate
280491246Sjake
280588644Sjake	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
280688644Sjake	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
280788644Sjake	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2808105733Sjake	stx	%l3, [%sp + SPOFF + CCFSZ + TF_PIL]
2809105733Sjake	stx	%l4, [%sp + SPOFF + CCFSZ + TF_Y]
281088644Sjake
	/* Remember the level in %l7; it survives the calls below. */
281191246Sjake	mov	%o0, %l7
281291246Sjake	mov	T_INTERRUPT | T_KERNEL, %o1
281389050Sjake
2814105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
2815105733Sjake	stx	%o1, [%sp + SPOFF + CCFSZ + TF_TYPE]
281688644Sjake
281788644Sjake	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
281888644Sjake	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
281988644Sjake
	/*
	 * Save %g1-%g5 under the normal global set; carry PCB_REG and
	 * PCPU_REG across the pstate switch in locals, as in tl1_trap.
	 */
282091158Sjake	mov	PCB_REG, %l4
282191158Sjake	mov	PCPU_REG, %l5
282291158Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
282391158Sjake
282484186Sjake	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
282584186Sjake	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
282684186Sjake	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
282784186Sjake	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
282884186Sjake	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
282984186Sjake
283091158Sjake	mov	%l4, PCB_REG
283191158Sjake	mov	%l5, PCPU_REG
283291158Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
283391158Sjake
	/*
	 * Bump the per-level interrupt counter: pil_countp is a table of
	 * u16 indices (one per level, %l7) into the intrcnt[] array of
	 * u_longs.
	 */
2834117658Sjmg	/* %l3 contains PIL */
2835117658Sjmg	SET(intrcnt, %l5, %l4)
2836117658Sjmg	prefetcha [%l4] ASI_N, 1
2837117658Sjmg	SET(pil_countp, %l5, %l6)
2838117658Sjmg	sllx	%l7, 1, %l5
2839117658Sjmg	lduh	[%l5 + %l6], %l5
2840117658Sjmg	sllx	%l5, 3, %l5
2841117658Sjmg	add	%l5, %l4, %l4
2842117658Sjmg
2843117658Sjmg	ATOMIC_INC_ULONG(%l4, %l5, %l6)
2844117658Sjmg
284589050Sjake	call	critical_enter
284689050Sjake	 nop
284784186Sjake
	/* Account the interrupt in the vmmeter statistics. */
284888644Sjake	SET(cnt+V_INTR, %l5, %l4)
284988644Sjake	ATOMIC_INC_INT(%l4, %l5, %l6)
285088644Sjake
	/* Dispatch to the handler registered for this level (%l7). */
285188644Sjake	SET(intr_handlers, %l5, %l4)
285289050Sjake	sllx	%l7, IH_SHIFT, %l5
285388644Sjake	ldx	[%l4 + %l5], %l5
285489050Sjake	KASSERT(%l5, "tl1_intr: ih null")
285588644Sjake	call	%l5
285684186Sjake	 add	%sp, CCFSZ + SPOFF, %o0
285784186Sjake
285889050Sjake	call	critical_exit
285989050Sjake	 nop
286089050Sjake
	/* %l4 was clobbered above; reload %y from the frame. */
2861105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l4
286291316Sjake
286384186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
286484186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
286584186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
286684186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
286784186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
286884186Sjake
286984186Sjake	wrpr	%g0, PSTATE_ALT, %pstate
287084186Sjake
	/*
	 * Stash %tstate (minus the stale %cwp field), %tpc and %tnpc in
	 * alternate globals, restore the pre-interrupt %pil and %y, then
	 * rotate back to the interrupted window.
	 */
287188644Sjake	andn	%l0, TSTATE_CWP_MASK, %g1
287286519Sjake	mov	%l1, %g2
287386519Sjake	mov	%l2, %g3
287488644Sjake	wrpr	%l3, 0, %pil
287591316Sjake	wr	%l4, 0, %y
287684186Sjake
287786519Sjake	restore
287886519Sjake
	/* Re-raise %tl and rebuild the trap registers for the retry. */
287984186Sjake	wrpr	%g0, 2, %tl
288084186Sjake
288188644Sjake	rdpr	%cwp, %g4
288288644Sjake	wrpr	%g1, %g4, %tstate
288386519Sjake	wrpr	%g2, 0, %tpc
288486519Sjake	wrpr	%g3, 0, %tnpc
288586519Sjake
288688644Sjake#if KTR_COMPILE & KTR_INTR
288788644Sjake	CATR(KTR_INTR, "tl1_intr: td=%#lx pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
288886519Sjake	    , %g2, %g3, %g4, 7, 8, 9)
288986519Sjake	ldx	[PCPU(CURTHREAD)], %g3
289086519Sjake	stx	%g3, [%g2 + KTR_PARM1]
289186519Sjake	rdpr	%pil, %g3
289286519Sjake	stx	%g3, [%g2 + KTR_PARM2]
289386519Sjake	rdpr	%tstate, %g3
289486519Sjake	stx	%g3, [%g2 + KTR_PARM3]
289586519Sjake	rdpr	%tpc, %g3
289686519Sjake	stx	%g3, [%g2 + KTR_PARM4]
289786519Sjake	stx	%sp, [%g2 + KTR_PARM5]
289884186Sjake9:
289984186Sjake#endif
290084186Sjake
290184186Sjake	retry
290284186SjakeEND(tl1_intr)
290384186Sjake
290482906Sjake/*
290582906Sjake * Freshly forked processes come here when switched to for the first time.
290682906Sjake * The arguments to fork_exit() have been setup in the locals, we must move
290782906Sjake * them to the outs.
290882906Sjake */
290980709SjakeENTRY(fork_trampoline)
291084186Sjake#if KTR_COMPILE & KTR_PROC
291184186Sjake	CATR(KTR_PROC, "fork_trampoline: td=%p (%s) cwp=%#lx"
291282906Sjake	    , %g1, %g2, %g3, 7, 8, 9)
291383366Sjulian	ldx	[PCPU(CURTHREAD)], %g2
291482906Sjake	stx	%g2, [%g1 + KTR_PARM1]
291584186Sjake	ldx	[%g2 + TD_PROC], %g2
291682906Sjake	add	%g2, P_COMM, %g2
291782906Sjake	stx	%g2, [%g1 + KTR_PARM2]
291882906Sjake	rdpr	%cwp, %g2
291982906Sjake	stx	%g2, [%g1 + KTR_PARM3]
292082906Sjake9:
292182906Sjake#endif
	/* fork_exit()'s three arguments were staged in %l0-%l2; move them
	 * to the outs (%l2 goes via the delay slot). */
292280709Sjake	mov	%l0, %o0
292380709Sjake	mov	%l1, %o1
292480709Sjake	call	fork_exit
292588644Sjake	 mov	%l2, %o2
	/* fork_exit() returned: take the common user-return path. */
2926116589Sjake	ba,a	%xcc, tl0_ret
292784186Sjake	 nop
292880709SjakeEND(fork_trampoline)
2929