exception.S revision 182774
180709Sjake/*-
281180Sjake * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
381180Sjake *
481180Sjake * Redistribution and use in source and binary forms, with or without
581180Sjake * modification, are permitted provided that the following conditions
681180Sjake * are met:
781180Sjake * 1. Redistributions of source code must retain the above copyright
881180Sjake *    notice, this list of conditions and the following disclaimer.
981180Sjake * 2. Redistributions in binary form must reproduce the above copyright
1081180Sjake *    notice, this list of conditions and the following disclaimer in the
1181180Sjake *    documentation and/or other materials provided with the distribution.
1281180Sjake * 3. Berkeley Software Design Inc's name may not be used to endorse or
1381180Sjake *    promote products derived from this software without specific prior
1481180Sjake *    written permission.
1581180Sjake *
1681180Sjake * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
1781180Sjake * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1881180Sjake * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1981180Sjake * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
2081180Sjake * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
2181180Sjake * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2281180Sjake * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2381180Sjake * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2481180Sjake * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2581180Sjake * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2681180Sjake * SUCH DAMAGE.
2781180Sjake *
28114085Sobrien *	BSDI $Id: locore.s,v 1.36.2.15 1999/08/23 22:34:41 cp Exp $
2981180Sjake */
3081180Sjake/*-
3180709Sjake * Copyright (c) 2001 Jake Burkholder.
3280709Sjake * All rights reserved.
3380709Sjake *
3480709Sjake * Redistribution and use in source and binary forms, with or without
3580709Sjake * modification, are permitted provided that the following conditions
3680709Sjake * are met:
3780709Sjake * 1. Redistributions of source code must retain the above copyright
3880709Sjake *    notice, this list of conditions and the following disclaimer.
3980709Sjake * 2. Redistributions in binary form must reproduce the above copyright
4080709Sjake *    notice, this list of conditions and the following disclaimer in the
4180709Sjake *    documentation and/or other materials provided with the distribution.
4280709Sjake *
4381337Sobrien * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
4480709Sjake * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
4580709Sjake * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
4681337Sobrien * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
4780709Sjake * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
4880709Sjake * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
4980709Sjake * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
5080709Sjake * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
5180709Sjake * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
5280709Sjake * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
5380709Sjake * SUCH DAMAGE.
5480709Sjake */
5580709Sjake
56114188Sjake#include <machine/asm.h>
57114188Sjake__FBSDID("$FreeBSD: head/sys/sparc64/sparc64/exception.S 182774 2008-09-04 21:06:09Z marius $");
58114188Sjake
59106050Sjake#include "opt_compat.h"
6080709Sjake#include "opt_ddb.h"
6180709Sjake
6280709Sjake#include <machine/asi.h>
6380709Sjake#include <machine/asmacros.h>
64166105Smarius#include <machine/frame.h>
65166105Smarius#include <machine/fsr.h>
66166105Smarius#include <machine/intr_machdep.h>
6782906Sjake#include <machine/ktr.h>
68166105Smarius#include <machine/pcb.h>
6982906Sjake#include <machine/pstate.h>
7080709Sjake#include <machine/trap.h>
71166105Smarius#include <machine/tsb.h>
7282906Sjake#include <machine/tstate.h>
73166105Smarius#include <machine/utrap.h>
7482906Sjake#include <machine/wstate.h>
7580709Sjake
7680709Sjake#include "assym.s"
7780709Sjake
/*
 * Kernel TSB parameters.  The 0x0 values are placeholders; presumably they
 * are patched with the real mask/base where the kernel TSB miss handler is
 * instantiated — TODO: confirm against the code that uses these.
 */
78101653Sjake#define	TSB_KERNEL_MASK	0x0
79101653Sjake#define	TSB_KERNEL	0x0
80101653Sjake
/*
 * Declare our use of these global registers so the assembler does not
 * complain about them (V9 ABI reserves %g2/%g3/%g6/%g7 for applications).
 */
8188644Sjake	.register %g2,#ignore
8288644Sjake	.register %g3,#ignore
8388644Sjake	.register %g6,#ignore
8488644Sjake	.register %g7,#ignore
8588644Sjake
8682005Sjake/*
87181701Smarius * Atomically set the reference bit in a TTE.
8888644Sjake */
/*
 * r1 = TTE pointer (advanced to the data word, i.e. clobbered), r2/r3 are
 * scratch.  Loops on casxa until the compare-and-swap succeeds; on exit r2
 * holds the TTE data word as last observed in memory, so the caller can
 * re-check the valid bit after the update.
 */
8988644Sjake#define	TTE_SET_BIT(r1, r2, r3, bit) \
9088644Sjake	add	r1, TTE_DATA, r1 ; \
9188644Sjake	ldx	[r1], r2 ; \
9288644Sjake9:	or	r2, bit, r3 ; \
9388644Sjake	casxa	[r1] ASI_N, r2, r3 ; \
9488644Sjake	cmp	r2, r3 ; \
9588644Sjake	bne,pn	%xcc, 9b ; \
9688644Sjake	 mov	r3, r2
9788644Sjake
9888644Sjake#define	TTE_SET_REF(r1, r2, r3)		TTE_SET_BIT(r1, r2, r3, TD_REF)
9988644Sjake#define	TTE_SET_W(r1, r2, r3)		TTE_SET_BIT(r1, r2, r3, TD_W)
10088644Sjake
10188644Sjake/*
10282906Sjake * Macros for spilling and filling live windows.
10382906Sjake *
10482906Sjake * NOTE: These macros use exactly 16 instructions, and it is assumed that the
10582906Sjake * handler will not use more than 24 instructions total, to leave room for
10682906Sjake * resume vectors which occupy the last 8 instructions.
10782005Sjake */
10880709Sjake
/*
 * Store the 8 local and 8 input registers of the current window to 16
 * consecutive slots at "base", using the given store instruction, slot
 * size and ASI.
 */
10982906Sjake#define	SPILL(storer, base, size, asi) \
11082906Sjake	storer	%l0, [base + (0 * size)] asi ; \
11182906Sjake	storer	%l1, [base + (1 * size)] asi ; \
11282906Sjake	storer	%l2, [base + (2 * size)] asi ; \
11382906Sjake	storer	%l3, [base + (3 * size)] asi ; \
11482906Sjake	storer	%l4, [base + (4 * size)] asi ; \
11582906Sjake	storer	%l5, [base + (5 * size)] asi ; \
11682906Sjake	storer	%l6, [base + (6 * size)] asi ; \
11782906Sjake	storer	%l7, [base + (7 * size)] asi ; \
11882906Sjake	storer	%i0, [base + (8 * size)] asi ; \
11982906Sjake	storer	%i1, [base + (9 * size)] asi ; \
12082906Sjake	storer	%i2, [base + (10 * size)] asi ; \
12182906Sjake	storer	%i3, [base + (11 * size)] asi ; \
12282906Sjake	storer	%i4, [base + (12 * size)] asi ; \
12382906Sjake	storer	%i5, [base + (13 * size)] asi ; \
12482906Sjake	storer	%i6, [base + (14 * size)] asi ; \
12582906Sjake	storer	%i7, [base + (15 * size)] asi
12680709Sjake
/*
 * Inverse of SPILL: reload the 16 window registers from "base".
 */
12782906Sjake#define	FILL(loader, base, size, asi) \
12882906Sjake	loader	[base + (0 * size)] asi, %l0 ; \
12982906Sjake	loader	[base + (1 * size)] asi, %l1 ; \
13082906Sjake	loader	[base + (2 * size)] asi, %l2 ; \
13182906Sjake	loader	[base + (3 * size)] asi, %l3 ; \
13282906Sjake	loader	[base + (4 * size)] asi, %l4 ; \
13382906Sjake	loader	[base + (5 * size)] asi, %l5 ; \
13482906Sjake	loader	[base + (6 * size)] asi, %l6 ; \
13582906Sjake	loader	[base + (7 * size)] asi, %l7 ; \
13682906Sjake	loader	[base + (8 * size)] asi, %i0 ; \
13782906Sjake	loader	[base + (9 * size)] asi, %i1 ; \
13882906Sjake	loader	[base + (10 * size)] asi, %i2 ; \
13982906Sjake	loader	[base + (11 * size)] asi, %i3 ; \
14082906Sjake	loader	[base + (12 * size)] asi, %i4 ; \
14182906Sjake	loader	[base + (13 * size)] asi, %i5 ; \
14282906Sjake	loader	[base + (14 * size)] asi, %i6 ; \
14382906Sjake	loader	[base + (15 * size)] asi, %i7
14482005Sjake
/*
 * Harmless register-to-register move used as a CPU erratum workaround
 * (macro name refers to erratum #50; exact erratum semantics not documented
 * here — see the processor errata list).
 */
14582906Sjake#define	ERRATUM50(reg)	mov reg, reg
14682906Sjake
/* Slack allowed at the low end of the kernel stack before faulting. */
14788781Sjake#define	KSTACK_SLOP	1024
14888781Sjake
14989048Sjake/*
150181701Smarius * Sanity check the kernel stack and bail out if it's wrong.
15189048Sjake * XXX: doesn't handle being on the panic stack.
15289048Sjake */
/*
 * Verifies that %sp+SPOFF is properly aligned and lies within
 * [kstack + KSTACK_SLOP, kstack + KSTACK_PAGES * PAGE_SIZE).  Two 8-byte
 * scratch slots on the alternate stack (ASP_REG) hold %g1/%g2; note the
 * annulled branch delay slots restore ASP_REG before entering
 * tl1_kstack_fault, which therefore sees a balanced alternate stack.
 */
15388781Sjake#define	KSTACK_CHECK \
15488781Sjake	dec	16, ASP_REG ; \
15588781Sjake	stx	%g1, [ASP_REG + 0] ; \
15688781Sjake	stx	%g2, [ASP_REG + 8] ; \
15788781Sjake	add	%sp, SPOFF, %g1 ; \
15888781Sjake	andcc	%g1, (1 << PTR_SHIFT) - 1, %g0 ; \
15988781Sjake	bnz,a	%xcc, tl1_kstack_fault ; \
16088781Sjake	 inc	16, ASP_REG ; \
16188781Sjake	ldx	[PCPU(CURTHREAD)], %g2 ; \
16288781Sjake	ldx	[%g2 + TD_KSTACK], %g2 ; \
16388781Sjake	add	%g2, KSTACK_SLOP, %g2 ; \
16488781Sjake	subcc	%g1, %g2, %g1 ; \
16588781Sjake	ble,a	%xcc, tl1_kstack_fault ; \
16688781Sjake	 inc	16, ASP_REG ; \
16788781Sjake	set	KSTACK_PAGES * PAGE_SIZE, %g2 ; \
16888781Sjake	cmp	%g1, %g2 ; \
16988781Sjake	bgt,a	%xcc, tl1_kstack_fault ; \
17088781Sjake	 inc	16, ASP_REG ; \
17188781Sjake	ldx	[ASP_REG + 8], %g2 ; \
17288781Sjake	ldx	[ASP_REG + 0], %g1 ; \
17388781Sjake	inc	16, ASP_REG
17488781Sjake
/*
 * Marks the beginning of the trap-handler text; presumably paired with a
 * tl_text_end marker elsewhere in the file (not visible in this chunk) so
 * other code can bound the trap-table region — TODO confirm.
 */
175155839Smarius	.globl	tl_text_begin
176155839Smariustl_text_begin:
177155839Smarius	nop
178155839Smarius
/*
 * Handle a kernel stack fault detected by KSTACK_CHECK.  First unwind the
 * trap level down to 2, logging each nested trap level as we go, then reset
 * the register-window state to a sane configuration, switch onto the
 * per-CPU alternate stack (ASP_REG) and call trap() with
 * T_KSTACK_FAULT | T_KERNEL via the common tl1_trap path.
 */
17988781SjakeENTRY(tl1_kstack_fault)
18088781Sjake	rdpr	%tl, %g1
18197263Sjake1:	cmp	%g1, 2
18297263Sjake	be,a	2f
18388781Sjake	 nop

18588781Sjake#if KTR_COMPILE & KTR_TRAP
18688781Sjake	CATR(KTR_TRAP, "tl1_kstack_fault: tl=%#lx tpc=%#lx tnpc=%#lx"
18797263Sjake	    , %g2, %g3, %g4, 7, 8, 9)
18897263Sjake	rdpr	%tl, %g3
18997263Sjake	stx	%g3, [%g2 + KTR_PARM1]
19097263Sjake	rdpr	%tpc, %g3
	/*
	 * Fix: %tpc and %tnpc were previously both stored to KTR_PARM1,
	 * clobbering the logged %tl; the format string above names three
	 * distinct parameters, so they belong in PARM2 and PARM3.
	 */
19197263Sjake	stx	%g3, [%g2 + KTR_PARM2]
19297263Sjake	rdpr	%tnpc, %g3
19397263Sjake	stx	%g3, [%g2 + KTR_PARM3]
19488781Sjake9:
19588781Sjake#endif

	/* Pop one trap level and re-check against TL 2. */
19797263Sjake	sub	%g1, 1, %g1
19897263Sjake	wrpr	%g1, 0, %tl
19997263Sjake	ba,a	%xcc, 1b
20097263Sjake	 nop

20288781Sjake2:
20388781Sjake#if KTR_COMPILE & KTR_TRAP
20488781Sjake	CATR(KTR_TRAP,
20588781Sjake	    "tl1_kstack_fault: sp=%#lx ks=%#lx cr=%#lx cs=%#lx ow=%#lx ws=%#lx"
20688781Sjake	    , %g1, %g2, %g3, 7, 8, 9)
20788781Sjake	add	%sp, SPOFF, %g2
20888781Sjake	stx	%g2, [%g1 + KTR_PARM1]
20988781Sjake	ldx	[PCPU(CURTHREAD)], %g2
21088781Sjake	ldx	[%g2 + TD_KSTACK], %g2
21188781Sjake	stx	%g2, [%g1 + KTR_PARM2]
21288781Sjake	rdpr	%canrestore, %g2
21388781Sjake	stx	%g2, [%g1 + KTR_PARM3]
21488781Sjake	rdpr	%cansave, %g2
21588781Sjake	stx	%g2, [%g1 + KTR_PARM4]
21688781Sjake	rdpr	%otherwin, %g2
21788781Sjake	stx	%g2, [%g1 + KTR_PARM5]
21888781Sjake	rdpr	%wstate, %g2
21988781Sjake	stx	%g2, [%g1 + KTR_PARM6]
22088781Sjake9:
22188781Sjake#endif

	/*
	 * Forcibly reset the window state: nothing restorable, six windows
	 * free to save into, no other-world windows, kernel wstate.
	 */
22388781Sjake	wrpr	%g0, 0, %canrestore
22488781Sjake	wrpr	%g0, 6, %cansave
22588781Sjake	wrpr	%g0, 0, %otherwin
22688781Sjake	wrpr	%g0, WSTATE_KERNEL, %wstate

	/* Run on the alternate stack; the old frame pointer is untrusted. */
22889048Sjake	sub	ASP_REG, SPOFF + CCFSZ, %sp
22988781Sjake	clr	%fp

231103921Sjake	set	trap, %o2
232116589Sjake	ba	%xcc, tl1_trap
23388781Sjake	 mov	T_KSTACK_FAULT | T_KERNEL, %o0
23488781SjakeEND(tl1_kstack_fault)
23588781Sjake
23682906Sjake/*
23782906Sjake * Magic to resume from a spill or fill trap.  If we get an alignment or an
238182020Smarius * MMU fault during a spill or a fill, this macro will detect the fault and
23988644Sjake * resume at a set instruction offset in the trap handler.
24082906Sjake *
24188644Sjake * To check if the previous trap was a spill/fill we convert the trapped pc
24288644Sjake * to a trap type and verify that it is in the range of spill/fill vectors.
24382906Sjake * The spill/fill vectors are types 0x80-0xff and 0x280-0x2ff, masking off the
24482906Sjake * tl bit allows us to detect both ranges with one test.
24582906Sjake *
24682906Sjake * This is:
24788644Sjake *	0x80 <= (((%tpc - %tba) >> 5) & ~0x200) < 0x100
24882906Sjake *
24982906Sjake * To calculate the new pc we take advantage of the xor feature of wrpr.
25082906Sjake * Forcing all the low bits of the trapped pc on we can produce any offset
25182906Sjake * into the spill/fill vector.  The size of a spill/fill trap vector is 0x80.
25282906Sjake *
25382906Sjake *	0x7f ^ 0x1f == 0x60
25482906Sjake *	0x1f == (0x80 - 0x60) - 1
25582906Sjake *
25686519Sjake * Which are the offset and xor value used to resume from alignment faults.
25782906Sjake */
25882906Sjake
25982906Sjake/*
26088644Sjake * Determine if we have trapped inside of a spill/fill vector, and if so resume
26188644Sjake * at a fixed instruction offset in the trap vector.  Must be called on
26288644Sjake * alternate globals.
26382906Sjake */
/*
 * stxa_g0_sfsr: statement to clear the SFSR (or EMPTY); xor: value xor'ed
 * into (%tpc | 0x7f) by wrpr to form the resume %tnpc inside the trapped
 * spill/fill vector.  Falls through to 9: (restoring %g1/%g2) when the
 * trapped pc was not inside a spill/fill vector.
 */
26488644Sjake#define	RESUME_SPILLFILL_MAGIC(stxa_g0_sfsr, xor) \
26588644Sjake	dec	16, ASP_REG ; \
26688644Sjake	stx	%g1, [ASP_REG + 0] ; \
26788644Sjake	stx	%g2, [ASP_REG + 8] ; \
26888644Sjake	rdpr	%tpc, %g1 ; \
26988644Sjake	ERRATUM50(%g1) ; \
27088644Sjake	rdpr	%tba, %g2 ; \
27188644Sjake	sub	%g1, %g2, %g2 ; \
27288644Sjake	srlx	%g2, 5, %g2 ; \
27388644Sjake	andn	%g2, 0x200, %g2 ; \
27488644Sjake	cmp	%g2, 0x80 ; \
27588644Sjake	blu,pt	%xcc, 9f ; \
27688644Sjake	 cmp	%g2, 0x100 ; \
27788644Sjake	bgeu,pt	%xcc, 9f ; \
27888644Sjake	 or	%g1, 0x7f, %g1 ; \
27988644Sjake	wrpr	%g1, xor, %tnpc ; \
28088644Sjake	stxa_g0_sfsr ; \
28188644Sjake	ldx	[ASP_REG + 8], %g2 ; \
28288644Sjake	ldx	[ASP_REG + 0], %g1 ; \
28388644Sjake	inc	16, ASP_REG ; \
28488644Sjake	done ; \
28588644Sjake9:	ldx	[ASP_REG + 8], %g2 ; \
28688644Sjake	ldx	[ASP_REG + 0], %g1 ; \
28788644Sjake	inc	16, ASP_REG
28882906Sjake
28988644Sjake/*
290182020Smarius * For certain faults we need to clear the SFSR MMU register before returning.
29188644Sjake */
29288644Sjake#define	RSF_CLR_SFSR \
29388644Sjake	wr	%g0, ASI_DMMU, %asi ; \
29488644Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
29588644Sjake
/* Convert an instruction offset into the wrpr xor constant (see above). */
29682906Sjake#define	RSF_XOR(off)	((0x80 - off) - 1)
29782906Sjake
29882906Sjake/*
29982906Sjake * Instruction offsets in spill and fill trap handlers for handling certain
30082906Sjake * nested traps, and corresponding xor constants for wrpr.
30182906Sjake */
30286519Sjake#define	RSF_OFF_ALIGN	0x60
30386519Sjake#define	RSF_OFF_MMU	0x70
30482906Sjake
30588644Sjake#define	RESUME_SPILLFILL_ALIGN \
30688644Sjake	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_ALIGN))
30788644Sjake#define	RESUME_SPILLFILL_MMU \
30888644Sjake	RESUME_SPILLFILL_MAGIC(EMPTY, RSF_XOR(RSF_OFF_MMU))
30988644Sjake#define	RESUME_SPILLFILL_MMU_CLR_SFSR \
31088644Sjake	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_MMU))
31282906Sjake/*
31382906Sjake * Constant to add to %tnpc when taking a fill trap just before returning to
31488644Sjake * user mode.
31582906Sjake */
31682906Sjake#define	RSF_FILL_INC	tl0_ret_fill_end - tl0_ret_fill
31782906Sjake
31882906Sjake/*
31982906Sjake * Retry a spill or fill with a different wstate due to an alignment fault.
32082906Sjake * We may just be using the wrong stack offset.
32182906Sjake */
32282906Sjake#define	RSF_ALIGN_RETRY(ws) \
32382906Sjake	wrpr	%g0, (ws), %wstate ; \
32482906Sjake	retry ; \
32582906Sjake	.align	16
32682906Sjake
32782906Sjake/*
32882906Sjake * Generate a T_SPILL or T_FILL trap if the window operation fails.
32982906Sjake */
33082906Sjake#define	RSF_TRAP(type) \
331116589Sjake	ba	%xcc, tl0_sftrap ; \
33282906Sjake	 mov	type, %g2 ; \
33382906Sjake	.align	16
33482906Sjake
33582906Sjake/*
33682906Sjake * Game over if the window operation fails.
33782906Sjake */
33882906Sjake#define	RSF_FATAL(type) \
339116589Sjake	ba	%xcc, rsf_fatal ; \
34088781Sjake	 mov	type, %g2 ; \
34182906Sjake	.align	16
34282906Sjake
34382906Sjake/*
 * Magic to resume from a failed fill a few instructions after the
 * corresponding restore.  This is used on return from the kernel to
 * usermode.
34682906Sjake */
34782906Sjake#define	RSF_FILL_MAGIC \
34882906Sjake	rdpr	%tnpc, %g1 ; \
34982906Sjake	add	%g1, RSF_FILL_INC, %g1 ; \
35082906Sjake	wrpr	%g1, 0, %tnpc ; \
35182906Sjake	done ; \
35282906Sjake	.align	16
35382906Sjake
35482906Sjake/*
35582906Sjake * Spill to the pcb if a spill to the user stack in kernel mode fails.
35682906Sjake */
35782906Sjake#define	RSF_SPILL_TOPCB \
358116589Sjake	ba,a	%xcc, tl1_spill_topcb ; \
35982906Sjake	 nop ; \
36082906Sjake	.align	16
36182906Sjake
/*
 * Unrecoverable window spill/fill failure (reached via RSF_FATAL with the
 * trap type in %g2).  Log the trap, run the stack sanity check and reset
 * the machine with sir.
 */
36288781SjakeENTRY(rsf_fatal)
36388781Sjake#if KTR_COMPILE & KTR_TRAP
36488781Sjake	CATR(KTR_TRAP, "rsf_fatal: bad window trap tt=%#lx type=%#lx"
36588781Sjake	    , %g1, %g3, %g4, 7, 8, 9)
36688781Sjake	rdpr	%tt, %g3
36788781Sjake	stx	%g3, [%g1 + KTR_PARM1]
36888781Sjake	stx	%g2, [%g1 + KTR_PARM2]
36988781Sjake9:
37088781Sjake#endif

37288781Sjake	KSTACK_CHECK

	/* sir: software-initiated reset. */
37488781Sjake	sir
37588781SjakeEND(rsf_fatal)
37688781Sjake
/*
 * Interrupt name strings and 64-bit interrupt counters.  The zero-sized
 * eintrnames/eintrcnt symbols mark the ends of the arrays; presumably
 * consumed by userland statistics tools — TODO confirm.
 */
377117658Sjmg	.comm	intrnames, IV_NAMLEN
37885243Sjake	.comm	eintrnames, 0
37980709Sjake
38097265Sjake	.comm	intrcnt, IV_MAX * 8
38185243Sjake	.comm	eintrcnt, 0
38280709Sjake
38382906Sjake/*
38482906Sjake * Trap table and associated macros
38582906Sjake *
38682906Sjake * Due to its size a trap table is an inherently hard thing to represent in
38782906Sjake * code in a clean way.  There are approximately 1024 vectors, of 8 or 32
38882906Sjake * instructions each, many of which are identical.  The way that this is
38982906Sjake * layed out is the instructions (8 or 32) for the actual trap vector appear
39082906Sjake * as an AS macro.  In general this code branches to tl0_trap or tl1_trap,
39182906Sjake * but if not supporting code can be placed just after the definition of the
39282906Sjake * macro.  The macros are then instantiated in a different section (.trap),
39382906Sjake * which is setup to be placed by the linker at the beginning of .text, and the
39482906Sjake * code around the macros is moved to the end of trap table.  In this way the
39582906Sjake * code that must be sequential in memory can be split up, and located near
39682906Sjake * its supporting code so that it is easier to follow.
39782906Sjake */
39882906Sjake
39982906Sjake	/*
40082906Sjake	 * Clean window traps occur when %cleanwin is zero to ensure that data
40182906Sjake	 * is not leaked between address spaces in registers.
40282906Sjake	 */
40380709Sjake	.macro	clean_window
	/*
	 * Only the %o and %l registers are cleared; the %i registers of this
	 * window overlap the previous window's %o registers (SPARC register
	 * window architecture), so they are not ours to zero.
	 */
40480709Sjake	clr	%o0
40580709Sjake	clr	%o1
40680709Sjake	clr	%o2
40780709Sjake	clr	%o3
40880709Sjake	clr	%o4
40980709Sjake	clr	%o5
41080709Sjake	clr	%o6
41180709Sjake	clr	%o7
41280709Sjake	clr	%l0
41380709Sjake	clr	%l1
41480709Sjake	clr	%l2
41580709Sjake	clr	%l3
41680709Sjake	clr	%l4
41780709Sjake	clr	%l5
41880709Sjake	clr	%l6
	/* Bump %cleanwin so this window is counted as clean, then retry. */
41980709Sjake	rdpr	%cleanwin, %l7
42080709Sjake	inc	%l7
42180709Sjake	wrpr	%l7, 0, %cleanwin
42280709Sjake	clr	%l7
42380709Sjake	retry
42480709Sjake	.align	128
42580709Sjake	.endm
42680709Sjake
42781380Sjake	/*
42882906Sjake	 * Stack fixups for entry from user mode.  We are still running on the
42982906Sjake	 * user stack, and with its live registers, so we must save soon.  We
43082906Sjake	 * are on alternate globals so we do have some registers.  Set the
43188644Sjake	 * transitional window state, and do the save.  If this traps we
432181701Smarius	 * attempt to spill a window to the user stack.  If this fails, we
433181701Smarius	 * spill the window to the pcb and continue.  Spilling to the pcb
43488644Sjake	 * must not fail.
43582906Sjake	 *
43682906Sjake	 * NOTE: Must be called with alternate globals and clobbers %g1.
43781380Sjake	 */
43882906Sjake
43988644Sjake	.macro	tl0_split
	/*
	 * Two-operand wrpr xors the immediate into the register, so this
	 * toggles the TRANSITION bits into the current %wstate (see the
	 * "xor feature of wrpr" note earlier in this file).
	 */
44082906Sjake	rdpr	%wstate, %g1
44182906Sjake	wrpr	%g1, WSTATE_TRANSITION, %wstate
44281380Sjake	save
44381380Sjake	.endm
44481380Sjake
	/*
	 * Enter the common user-mode trap path: %o0 = trap type, %o1 = 0,
	 * %o2 = trap() function pointer, branch to tl0_utrap.
	 */
44582906Sjake	.macro	tl0_setup	type
44688644Sjake	tl0_split
447108374Sjake	clr	%o1
448103921Sjake	set	trap, %o2
449103897Sjake	ba	%xcc, tl0_utrap
45082906Sjake	 mov	\type, %o0
45181380Sjake	.endm
45281380Sjake
45381380Sjake	/*
45482906Sjake	 * Generic trap type.  Call trap() with the specified type.
45581380Sjake	 */
45680709Sjake	.macro	tl0_gen		type
45782906Sjake	tl0_setup \type
45880709Sjake	.align	32
45980709Sjake	.endm
46080709Sjake
46182906Sjake	/*
46282906Sjake	 * This is used to suck up the massive swaths of reserved trap types.
46382906Sjake	 * Generates count "reserved" trap vectors.
46482906Sjake	 */
46580709Sjake	.macro	tl0_reserved	count
46680709Sjake	.rept	\count
46780709Sjake	tl0_gen	T_RESERVED
46880709Sjake	.endr
46980709Sjake	.endm
47080709Sjake
	/*
	 * TL1 (kernel-mode) analogue of tl0_split: toggle the NESTED window
	 * state and save, reserving space on the kernel stack for a call
	 * frame plus a trap frame.
	 */
471109810Sjake	.macro	tl1_split
472109810Sjake	rdpr	%wstate, %g1
473109810Sjake	wrpr	%g1, WSTATE_NESTED, %wstate
474109810Sjake	save	%sp, -(CCFSZ + TF_SIZEOF), %sp
475109810Sjake	.endm
476109810Sjake
477109810Sjake	.macro	tl1_setup	type
478109810Sjake	tl1_split
479109810Sjake	clr	%o1
480109810Sjake	set	trap, %o2
481116589Sjake	ba	%xcc, tl1_trap
	/* T_KERNEL flags the trap as having come from kernel mode. */
482109810Sjake	 mov	\type | T_KERNEL, %o0
483109810Sjake	.endm
484109810Sjake
485109810Sjake	.macro	tl1_gen		type
486109810Sjake	tl1_setup \type
487109810Sjake	.align	32
488109810Sjake	.endm
489109810Sjake
490109810Sjake	.macro	tl1_reserved	count
491109810Sjake	.rept	\count
492109810Sjake	tl1_gen	T_RESERVED
493109810Sjake	.endr
494109810Sjake	.endm
495109810Sjake
	/*
	 * Instruction access exception: collect %tpc and the IMMU SFSR
	 * (clearing the latter), then hand off to tl0_sfsr_trap with the
	 * trap type in %g2, fault address in %g3, SFSR in %g4.
	 */
49688644Sjake	.macro	tl0_insn_excptn
497101899Sjake	wrpr	%g0, PSTATE_ALT, %pstate
49888644Sjake	wr	%g0, ASI_IMMU, %asi
49988644Sjake	rdpr	%tpc, %g3
50088644Sjake	ldxa	[%g0 + AA_IMMU_SFSR] %asi, %g4
50188644Sjake	stxa	%g0, [%g0 + AA_IMMU_SFSR] %asi
50288644Sjake	membar	#Sync
503116589Sjake	ba	%xcc, tl0_sfsr_trap
50488644Sjake	 mov	T_INSTRUCTION_EXCEPTION, %g2
50588644Sjake	.align	32
50688644Sjake	.endm
50788644Sjake
	/*
	 * Data access exception: like above but the fault address comes from
	 * the DMMU SFAR rather than %tpc.
	 */
50882906Sjake	.macro	tl0_data_excptn
509101899Sjake	wrpr	%g0, PSTATE_ALT, %pstate
51082906Sjake	wr	%g0, ASI_DMMU, %asi
51182906Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
51282906Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
51388644Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
51488644Sjake	membar	#Sync
515116589Sjake	ba	%xcc, tl0_sfsr_trap
51688644Sjake	 mov	T_DATA_EXCEPTION, %g2
51782906Sjake	.align	32
51882906Sjake	.endm
51982906Sjake
	/*
	 * Memory address not aligned.  NOTE(review): unlike the two macros
	 * above this one does not switch to PSTATE_ALT first — presumably the
	 * alignment trap already runs on the right globals; confirm against
	 * the trap-table instantiation.
	 */
52082005Sjake	.macro	tl0_align
52182906Sjake	wr	%g0, ASI_DMMU, %asi
52282906Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
52382906Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
52488644Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
52588644Sjake	membar	#Sync
526116589Sjake	ba	%xcc, tl0_sfsr_trap
52788644Sjake	 mov	T_MEM_ADDRESS_NOT_ALIGNED, %g2
52882005Sjake	.align	32
52982005Sjake	.endm
53082005Sjake
/*
 * Common tail for the SFSR-collecting trap macros above.  On entry:
 * %g2 = trap type, %g3 = fault address (SFAR or %tpc), %g4 = SFSR.
 * They are forwarded to tl0_utrap as %o0, %o4 and %o5 respectively.
 */
53182005SjakeENTRY(tl0_sfsr_trap)
53288644Sjake	tl0_split
533108374Sjake	clr	%o1
534103921Sjake	set	trap, %o2
53588644Sjake	mov	%g3, %o4
53688644Sjake	mov	%g4, %o5
537103897Sjake	ba	%xcc, tl0_utrap
53882906Sjake	 mov	%g2, %o0
53982005SjakeEND(tl0_sfsr_trap)
54082005Sjake
	/*
	 * Interrupt vector: %o0 = PIL level, %o1 = softint clear mask,
	 * handled by the common tl0_intr entry point.
	 */
54182906Sjake	.macro	tl0_intr level, mask
54288644Sjake	tl0_split
54391246Sjake	set	\mask, %o1
544116589Sjake	ba	%xcc, tl0_intr
54591246Sjake	 mov	\level, %o0
54681380Sjake	.align	32
54781380Sjake	.endm
54881380Sjake
54981380Sjake#define	INTR(level, traplvl)						\
55082906Sjake	tl ## traplvl ## _intr	level, 1 << level
55181380Sjake
/*
 * The tick interrupt uses mask 0x10001, i.e. softint bits 0 and 16 —
 * presumably the TICK and STICK (system tick) compare interrupts; TODO
 * confirm against intr_machdep.h.
 */
55281380Sjake#define	TICK(traplvl) \
553182743Smarius	tl ## traplvl ## _intr	PIL_TICK, 0x10001
55481380Sjake
55581380Sjake#define	INTR_LEVEL(tl)							\
55681380Sjake	INTR(1, tl) ;							\
55781380Sjake	INTR(2, tl) ;							\
55881380Sjake	INTR(3, tl) ;							\
55981380Sjake	INTR(4, tl) ;							\
56081380Sjake	INTR(5, tl) ;							\
56181380Sjake	INTR(6, tl) ;							\
56281380Sjake	INTR(7, tl) ;							\
56381380Sjake	INTR(8, tl) ;							\
56481380Sjake	INTR(9, tl) ;							\
56581380Sjake	INTR(10, tl) ;							\
56681380Sjake	INTR(11, tl) ;							\
56781380Sjake	INTR(12, tl) ;							\
56881380Sjake	INTR(13, tl) ;							\
56981380Sjake	TICK(tl) ;							\
57081380Sjake	INTR(15, tl) ;
57181380Sjake
57280709Sjake	.macro	tl0_intr_level
57381380Sjake	INTR_LEVEL(0)
57480709Sjake	.endm
57580709Sjake
	/*
	 * Interrupt vector trap: if the interrupt receive register shows a
	 * pending (busy) vector, branch to the real intr_vector handler
	 * (an ENTRY of the same name defined later in this file, not visible
	 * in this chunk); otherwise a vector trap with no data is fatal, so
	 * reset via sir.
	 */
57697265Sjake	.macro	intr_vector
57797265Sjake	ldxa	[%g0] ASI_INTR_RECEIVE, %g1
57897265Sjake	andcc	%g1, IRSR_BUSY, %g0
579104075Sjake	bnz,a,pt %xcc, intr_vector
58097265Sjake	 nop
58197265Sjake	sir
58281380Sjake	.align	32
58380709Sjake	.endm
58480709Sjake
585109860Sjake	.macro	tl0_immu_miss
	/*
	 * Register usage throughout this handler: %g1 = tag access register
	 * contents, %g2 = current page-size index (TS_MIN..TS_MAX),
	 * %g3 = page shift / tag target, %g4 = TSB bucket pointer,
	 * %g6/%g7 = TTE tag/data pair loaded with ldda.
	 */
58681380Sjake	/*
587181701Smarius	 * Load the context and the virtual page number from the tag access
588109860Sjake	 * register.  We ignore the context.
589109860Sjake	 */
590109860Sjake	wr	%g0, ASI_IMMU, %asi
591109860Sjake	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g1
592109860Sjake
593102040Sjake	/*
594102040Sjake	 * Initialize the page size walker.
595102040Sjake	 */
596102040Sjake	mov	TS_MIN, %g2
597102040Sjake
598102040Sjake	/*
599102040Sjake	 * Loop over all supported page sizes.
600102040Sjake	 */
601102040Sjake
602102040Sjake	/*
	 * Compute the page shift for the page size we are currently looking
	 * for: shift = 3 * %g2 + PAGE_SHIFT (three adds implement the
	 * multiply by 3).
605102040Sjake	 */
606102040Sjake1:	add	%g2, %g2, %g3
607102040Sjake	add	%g3, %g2, %g3
608102040Sjake	add	%g3, PAGE_SHIFT, %g3
609102040Sjake
61091224Sjake	/*
61191224Sjake	 * Extract the virtual page number from the contents of the tag
61291224Sjake	 * access register.
61381380Sjake	 */
614102040Sjake	srlx	%g1, %g3, %g3
61581380Sjake
61681380Sjake	/*
617181701Smarius	 * Compute the TTE bucket address.
61881380Sjake	 */
619102040Sjake	ldxa	[%g0 + AA_IMMU_TSB] %asi, %g5
620102040Sjake	and	%g3, TSB_BUCKET_MASK, %g4
621102040Sjake	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
622102040Sjake	add	%g4, %g5, %g4
62381380Sjake
62481380Sjake	/*
625181701Smarius	 * Compute the TTE tag target.
62681380Sjake	 */
627102040Sjake	sllx	%g3, TV_SIZE_BITS, %g3
628102040Sjake	or	%g3, %g2, %g3
62981380Sjake
63081380Sjake	/*
631181701Smarius	 * Loop over the TTEs in this bucket.
63281380Sjake	 */
63381380Sjake
63481380Sjake	/*
635181701Smarius	 * Load the TTE.  Note that this instruction may fault, clobbering
636102040Sjake	 * the contents of the tag access register, %g5, %g6, and %g7.  We
637102040Sjake	 * do not use %g5, and %g6 and %g7 are not used until this instruction
638102040Sjake	 * completes successfully.
63981380Sjake	 */
640102040Sjake2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
64181380Sjake
64281380Sjake	/*
	 * Check that it's valid and executable and that the TTE tags match.
	 * brgez tests bit 63 of the data word: non-negative means invalid.
64481380Sjake	 */
645102040Sjake	brgez,pn %g7, 3f
646102040Sjake	 andcc	%g7, TD_EXEC, %g0
647102040Sjake	bz,pn	%xcc, 3f
648102040Sjake	 cmp	%g3, %g6
649102040Sjake	bne,pn	%xcc, 3f
65088644Sjake	 EMPTY
65181380Sjake
65281380Sjake	/*
653181701Smarius	 * We matched a TTE, load the TLB.
65481380Sjake	 */
65581380Sjake
65681380Sjake	/*
65781380Sjake	 * Set the reference bit, if it's currently clear.
65881380Sjake	 */
659102040Sjake	 andcc	%g7, TD_REF, %g0
66082906Sjake	bz,a,pn	%xcc, tl0_immu_miss_set_ref
66181380Sjake	 nop
66281380Sjake
66381380Sjake	/*
664181701Smarius	 * Load the TTE tag and data into the TLB and retry the instruction.
66581380Sjake	 */
666102040Sjake	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
667102040Sjake	stxa	%g7, [%g0] ASI_ITLB_DATA_IN_REG
66881380Sjake	retry
66981380Sjake
67081380Sjake	/*
671181701Smarius	 * Advance to the next TTE in this bucket, and check the low bits
672102040Sjake	 * of the bucket pointer to see if we've finished the bucket.
67381380Sjake	 */
674102040Sjake3:	add	%g4, 1 << TTE_SHIFT, %g4
675102040Sjake	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
676102040Sjake	bnz,pt	%xcc, 2b
677102040Sjake	 EMPTY
67891224Sjake
67991224Sjake	/*
680102040Sjake	 * See if we just checked the largest page size, and advance to the
681102040Sjake	 * next one if not.
68291224Sjake	 */
683102040Sjake	 cmp	%g2, TS_MAX
684102040Sjake	bne,pt	%xcc, 1b
685102040Sjake	 add	%g2, 1, %g2
68691224Sjake
68796207Sjake	/*
688181701Smarius	 * Not in user TSB, call C code.
689102040Sjake	 */
690102040Sjake	ba,a	%xcc, tl0_immu_miss_trap
69181380Sjake	.align	128
69280709Sjake	.endm
69380709Sjake
/*
 * Out-of-line slow path of tl0_immu_miss: atomically set TD_REF in the
 * matched TTE.  On entry %g1 = tag access contents, %g4 = TTE pointer.
 */
69482906SjakeENTRY(tl0_immu_miss_set_ref)
69581380Sjake	/*
69681380Sjake	 * Set the reference bit.
69781380Sjake	 */
698102040Sjake	TTE_SET_REF(%g4, %g2, %g3)
69981380Sjake
70081380Sjake	/*
	 * May have become invalid during casxa, in which case start over.
	 * (%g2 now holds the TTE data as last read; brgez tests the valid
	 * bit in bit 63.)
70281380Sjake	 */
703102040Sjake	brgez,pn %g2, 1f
704102040Sjake	 nop
70581380Sjake
70681380Sjake	/*
707181701Smarius	 * Load the TTE tag and data into the TLB and retry the instruction.
70881380Sjake	 */
709102040Sjake	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
710102040Sjake	stxa	%g2, [%g0] ASI_ITLB_DATA_IN_REG
71191224Sjake1:	retry
71282906SjakeEND(tl0_immu_miss_set_ref)
71381380Sjake
/*
 * TSB lookup failed: restore the IMMU tag access register, switch to the
 * alternate globals and call trap() with T_INSTRUCTION_MISS, passing the
 * tag access value in %o3.
 */
71482906SjakeENTRY(tl0_immu_miss_trap)
71581380Sjake	/*
71696207Sjake	 * Put back the contents of the tag access register, in case we
71796207Sjake	 * faulted.
71896207Sjake	 */
719102040Sjake	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
72096207Sjake	membar	#Sync
72196207Sjake
72296207Sjake	/*
72382906Sjake	 * Switch to alternate globals.
72482906Sjake	 */
72582906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
72682906Sjake
72782906Sjake	/*
72891224Sjake	 * Reload the tag access register.
72981380Sjake	 */
73091224Sjake	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2
73181380Sjake
73281380Sjake	/*
73391224Sjake	 * Save the tag access register, and call common trap code.
73481380Sjake	 */
73588644Sjake	tl0_split
736108374Sjake	clr	%o1
737103921Sjake	set	trap, %o2
73891224Sjake	mov	%g2, %o3
739114257Sjake	ba	%xcc, tl0_utrap
74088644Sjake	 mov	T_INSTRUCTION_MISS, %o0
74182906SjakeEND(tl0_immu_miss_trap)
74281380Sjake
743109860Sjake	.macro	tl0_dmmu_miss
	/*
	 * Data-side twin of tl0_immu_miss; same register usage:
	 * %g1 = tag access, %g2 = page-size index, %g3 = shift/tag target,
	 * %g4 = bucket pointer, %g6/%g7 = TTE tag/data.
	 */
74481180Sjake	/*
745181701Smarius	 * Load the context and the virtual page number from the tag access
746109860Sjake	 * register.  We ignore the context.
747109860Sjake	 */
748109860Sjake	wr	%g0, ASI_DMMU, %asi
749109860Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1
750109860Sjake
751102040Sjake	/*
752102040Sjake	 * Initialize the page size walker.
753102040Sjake	 */
	/*
	 * NOTE(review): tl1_dmmu_miss_user is presumably a side entry used
	 * by the TL1 data-miss handler for misses against user addresses;
	 * the caller is not visible in this chunk — confirm before changing.
	 */
754109860Sjaketl1_dmmu_miss_user:
755102040Sjake	mov	TS_MIN, %g2
756102040Sjake
757102040Sjake	/*
758102040Sjake	 * Loop over all supported page sizes.
759102040Sjake	 */
760102040Sjake
761102040Sjake	/*
	 * Compute the page shift for the page size we are currently looking
	 * for: shift = 3 * %g2 + PAGE_SHIFT.
764102040Sjake	 */
765102040Sjake1:	add	%g2, %g2, %g3
766102040Sjake	add	%g3, %g2, %g3
767102040Sjake	add	%g3, PAGE_SHIFT, %g3
768102040Sjake
76991224Sjake	/*
77091224Sjake	 * Extract the virtual page number from the contents of the tag
77191224Sjake	 * access register.
77291224Sjake	 */
773102040Sjake	srlx	%g1, %g3, %g3
77491224Sjake
77591224Sjake	/*
776181701Smarius	 * Compute the TTE bucket address.
77781180Sjake	 */
778102040Sjake	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g5
779102040Sjake	and	%g3, TSB_BUCKET_MASK, %g4
780102040Sjake	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
781102040Sjake	add	%g4, %g5, %g4
78281180Sjake
78381180Sjake	/*
784181701Smarius	 * Compute the TTE tag target.
78581180Sjake	 */
786102040Sjake	sllx	%g3, TV_SIZE_BITS, %g3
787102040Sjake	or	%g3, %g2, %g3
78881180Sjake
78981180Sjake	/*
790181701Smarius	 * Loop over the TTEs in this bucket.
79181180Sjake	 */
79281180Sjake
79381180Sjake	/*
794181701Smarius	 * Load the TTE.  Note that this instruction may fault, clobbering
795102040Sjake	 * the contents of the tag access register, %g5, %g6, and %g7.  We
796102040Sjake	 * do not use %g5, and %g6 and %g7 are not used until this instruction
797102040Sjake	 * completes successfully.
79881180Sjake	 */
799102040Sjake2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
80081180Sjake
80181180Sjake	/*
	 * Check that it's valid and that the virtual page numbers match.
	 * (No TD_EXEC check here, unlike the instruction-side handler.)
80381180Sjake	 */
804102040Sjake	brgez,pn %g7, 3f
805102040Sjake	 cmp	%g3, %g6
806102040Sjake	bne,pn	%xcc, 3f
80788644Sjake	 EMPTY
80881180Sjake
80981180Sjake	/*
810181701Smarius	 * We matched a TTE, load the TLB.
81181180Sjake	 */
81281180Sjake
81381180Sjake	/*
81481180Sjake	 * Set the reference bit, if it's currently clear.
81581180Sjake	 */
816102040Sjake	 andcc	%g7, TD_REF, %g0
817109860Sjake	bz,a,pn	%xcc, tl0_dmmu_miss_set_ref
81881180Sjake	 nop
81981180Sjake
82081180Sjake	/*
821181701Smarius	 * Load the TTE tag and data into the TLB and retry the instruction.
82281180Sjake	 */
823102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
824102040Sjake	stxa	%g7, [%g0] ASI_DTLB_DATA_IN_REG
82581180Sjake	retry
82681180Sjake
82781180Sjake	/*
828181701Smarius	 * Advance to the next TTE in this bucket, and check the low bits
829102040Sjake	 * of the bucket pointer to see if we've finished the bucket.
83081180Sjake	 */
831102040Sjake3:	add	%g4, 1 << TTE_SHIFT, %g4
832102040Sjake	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
833102040Sjake	bnz,pt	%xcc, 2b
834102040Sjake	 EMPTY
835102040Sjake
836102040Sjake	/*
837102040Sjake	 * See if we just checked the largest page size, and advance to the
838102040Sjake	 * next one if not.
839102040Sjake	 */
840102040Sjake	 cmp	%g2, TS_MAX
841102040Sjake	bne,pt	%xcc, 1b
842102040Sjake	 add	%g2, 1, %g2
843109860Sjake
844109860Sjake	/*
845181701Smarius	 * Not in user TSB, call C code.
846109860Sjake	 */
847109860Sjake	ba,a	%xcc, tl0_dmmu_miss_trap
848109860Sjake	.align	128
84981180Sjake	.endm
85081180Sjake
/*
 * Out-of-line slow path of the TL0 DMMU miss handler: atomically set the
 * reference bit in the matched TTE, then reload the DTLB and retry.
 * On entry %g1 holds the tag access value and %g4 the address of the
 * matched TTE (presumably set up by the in-table miss macro — confirm).
 */
851109860SjakeENTRY(tl0_dmmu_miss_set_ref)
85281180Sjake	/*
85381180Sjake	 * Set the reference bit.
85481180Sjake	 */
855102040Sjake	TTE_SET_REF(%g4, %g2, %g3)
85681180Sjake
85781180Sjake	/*
858102040Sjake	 * May have become invalid during casxa, in which case start over.
85981180Sjake	 */
860102040Sjake	brgez,pn %g2, 1f
861102040Sjake	 nop
86281180Sjake
86381180Sjake	/*
864181701Smarius	 * Load the TTE tag and data into the TLB and retry the instruction.
86581180Sjake	 */
866102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
867102040Sjake	stxa	%g2, [%g0] ASI_DTLB_DATA_IN_REG
86891224Sjake1:	retry
869109860SjakeEND(tl0_dmmu_miss_set_ref)
87081180Sjake
/*
 * C-code fallback for the TL0 DMMU miss handler: the TSB lookup failed,
 * so restore the tag access register, switch to the alternate globals
 * and hand off to the common trap code.  If %tl > 1 the miss happened
 * during a window spill/fill, which is resumed and reported as a kernel
 * T_DATA_MISS via tl1_trap instead.
 */
87181180SjakeENTRY(tl0_dmmu_miss_trap)
87282005Sjake	/*
87396207Sjake	 * Put back the contents of the tag access register, in case we
87496207Sjake	 * faulted.
87596207Sjake	 */
876102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
87796207Sjake	membar	#Sync
87896207Sjake
87996207Sjake	/*
88082906Sjake	 * Switch to alternate globals.
88182005Sjake	 */
88282906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
88382005Sjake
88482005Sjake	/*
885109860Sjake	 * Check if we actually came from the kernel.
886109860Sjake	 */
887109860Sjake	rdpr	%tl, %g1
888109860Sjake	cmp	%g1, 1
889109860Sjake	bgt,a,pn %xcc, 1f
890109860Sjake	 nop
891109860Sjake
892109860Sjake	/*
89391224Sjake	 * Reload the tag access register.
89482005Sjake	 */
89591224Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
89681180Sjake
89781180Sjake	/*
89891224Sjake	 * Save the tag access register and call common trap code.
89981180Sjake	 */
90088644Sjake	tl0_split
901108374Sjake	clr	%o1
902103921Sjake	set	trap, %o2
90391224Sjake	mov	%g2, %o3
904114257Sjake	ba	%xcc, tl0_utrap
90588644Sjake	 mov	T_DATA_MISS, %o0
906109860Sjake
907109860Sjake	/*
908109860Sjake	 * Handle faults during window spill/fill.
909109860Sjake	 */
910109860Sjake1:	RESUME_SPILLFILL_MMU
911109860Sjake
912109860Sjake	/*
913109860Sjake	 * Reload the tag access register.
914109860Sjake	 */
915109860Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
916109860Sjake
917109860Sjake	tl1_split
918109860Sjake	clr	%o1
919109860Sjake	set	trap, %o2
920109860Sjake	mov	%g2, %o3
921116589Sjake	ba	%xcc, tl1_trap
922109860Sjake	 mov	T_DATA_MISS | T_KERNEL, %o0
92382906SjakeEND(tl0_dmmu_miss_trap)
92481180Sjake
/*
 * In-table stub for the TL0 DMMU protection fault vector: the real
 * handler is too large for the vector slot, so just branch out of line
 * to tl0_dmmu_prot_1 and pad the slot to 128 bytes.
 */
925109860Sjake	.macro	tl0_dmmu_prot
926109860Sjake	ba,a	%xcc, tl0_dmmu_prot_1
927109860Sjake	 nop
928109860Sjake	.align	128
929109860Sjake	.endm
930109860Sjake
/*
 * Out-of-line body of the TL0 DMMU protection fault handler: walk the
 * user TSB once per supported page size (TS_MIN..TS_MAX) looking for a
 * valid, software-writable (TD_SW) TTE for the faulting address; if one
 * is found, set its hardware write bit, demap the stale TLB entry and
 * reload the DTLB.  Falls through to tl0_dmmu_prot_trap when nothing
 * matches.
 */
931109860SjakeENTRY(tl0_dmmu_prot_1)
93288644Sjake	/*
933181701Smarius	 * Load the context and the virtual page number from the tag access
934109860Sjake	 * register.  We ignore the context.
935109860Sjake	 */
936109860Sjake	wr	%g0, ASI_DMMU, %asi
937109860Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1
938109860Sjake
939109860Sjake	/*
940102040Sjake	 * Initialize the page size walker.
941102040Sjake	 */
	/*
	 * NOTE: tl1_dmmu_prot_user is also entered from tl1_dmmu_prot_1 when
	 * a TL1 protection fault hits a user address (non-zero context);
	 * in that case %g1 already holds the tag access value.
	 */
942109860Sjaketl1_dmmu_prot_user:
943102040Sjake	mov	TS_MIN, %g2
944102040Sjake
945102040Sjake	/*
946102040Sjake	 * Loop over all supported page sizes.
947102040Sjake	 */
948102040Sjake
949102040Sjake	/*
950102040Sjake	 * Compute the page shift for the page size we are currently looking
951102040Sjake	 * for.
952102040Sjake	 */
953102040Sjake1:	add	%g2, %g2, %g3
954102040Sjake	add	%g3, %g2, %g3
955102040Sjake	add	%g3, PAGE_SHIFT, %g3
956102040Sjake
957102040Sjake	/*
95891224Sjake	 * Extract the virtual page number from the contents of the tag
95991224Sjake	 * access register.
96091224Sjake	 */
961102040Sjake	srlx	%g1, %g3, %g3
96291224Sjake
96391224Sjake	/*
964181701Smarius	 * Compute the TTE bucket address.
96588644Sjake	 */
966102040Sjake	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g5
967102040Sjake	and	%g3, TSB_BUCKET_MASK, %g4
968102040Sjake	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
969102040Sjake	add	%g4, %g5, %g4
97088644Sjake
97188644Sjake	/*
972181701Smarius	 * Compute the TTE tag target.
97388644Sjake	 */
974102040Sjake	sllx	%g3, TV_SIZE_BITS, %g3
975102040Sjake	or	%g3, %g2, %g3
97688644Sjake
97788644Sjake	/*
978181701Smarius	 * Loop over the TTEs in this bucket.
97988644Sjake	 */
98088644Sjake
98188644Sjake	/*
982181701Smarius	 * Load the TTE.  Note that this instruction may fault, clobbering
983102040Sjake	 * the contents of the tag access register, %g5, %g6, and %g7.  We
984102040Sjake	 * do not use %g5, and %g6 and %g7 are not used until this instruction
985102040Sjake	 * completes successfully.
98688644Sjake	 */
987102040Sjake2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
98888644Sjake
98988644Sjake	/*
990181701Smarius	 * Check that it's valid and writable and that the virtual page
99191224Sjake	 * numbers match.
99288644Sjake	 */
993102040Sjake	brgez,pn %g7, 4f
994102040Sjake	 andcc	%g7, TD_SW, %g0
995102040Sjake	bz,pn	%xcc, 4f
996102040Sjake	 cmp	%g3, %g6
997102040Sjake	bne,pn	%xcc, 4f
99888644Sjake	 nop
99988644Sjake
100091224Sjake	/*
100191224Sjake	 * Set the hardware write bit.
100291224Sjake	 */
1003102040Sjake	TTE_SET_W(%g4, %g2, %g3)
100488644Sjake
100588644Sjake	/*
1006181701Smarius	 * Delete the old TLB entry and clear the SFSR.
100788644Sjake	 */
1008102040Sjake	srlx	%g1, PAGE_SHIFT, %g3
1009102040Sjake	sllx	%g3, PAGE_SHIFT, %g3
1010102040Sjake	stxa	%g0, [%g3] ASI_DMMU_DEMAP
1011102040Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
1012102040Sjake	membar	#Sync
101388644Sjake
101481180Sjake	/*
1015102040Sjake	 * May have become invalid during casxa, in which case start over.
101688644Sjake	 */
1017102040Sjake	brgez,pn %g2, 3f
1018102040Sjake	 or	%g2, TD_W, %g2
101988644Sjake
102088644Sjake	/*
1021181701Smarius	 * Load the TTE data into the TLB and retry the instruction.
102296207Sjake	 */
1023102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
1024102040Sjake	stxa	%g2, [%g0] ASI_DTLB_DATA_IN_REG
1025102040Sjake3:	retry
102696207Sjake
102796207Sjake	/*
1028102040Sjake	 * Check the low bits to see if we've finished the bucket.
102988644Sjake	 */
1030102040Sjake4:	add	%g4, 1 << TTE_SHIFT, %g4
1031102040Sjake	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
1032102040Sjake	bnz,pt	%xcc, 2b
1033102040Sjake	 EMPTY
103488644Sjake
103588644Sjake	/*
1036102040Sjake	 * See if we just checked the largest page size, and advance to the
1037102040Sjake	 * next one if not.
103888644Sjake	 */
1039102040Sjake	 cmp	%g2, TS_MAX
1040102040Sjake	bne,pt	%xcc, 1b
1041102040Sjake	 add	%g2, 1, %g2
1042102040Sjake
104388644Sjake	/*
1044181701Smarius	 * Not in user TSB, call C code.
104591224Sjake	 */
1046116589Sjake	ba,a	%xcc, tl0_dmmu_prot_trap
1047102040Sjake	 nop
1048102040SjakeEND(tl0_dmmu_prot_1)
104991224Sjake
/*
 * C-code fallback for the TL0 DMMU protection fault handler: restore
 * the tag access register, capture the SFAR/SFSR/TAR MMU state, clear
 * the SFSR and call the common trap code with T_DATA_PROTECTION.  If
 * %tl > 1 the fault happened during a window spill/fill, which is
 * resumed and reported via tl1_trap instead.
 */
105088644SjakeENTRY(tl0_dmmu_prot_trap)
105188644Sjake	/*
105296207Sjake	 * Put back the contents of the tag access register, in case we
105396207Sjake	 * faulted.
105496207Sjake	 */
1055102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
105696207Sjake	membar	#Sync
105796207Sjake
105896207Sjake	/*
105982906Sjake	 * Switch to alternate globals.
106081180Sjake	 */
106182906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
106281180Sjake
106381180Sjake	/*
1064109860Sjake	 * Check if we actually came from the kernel.
1065109860Sjake	 */
1066109860Sjake	rdpr	%tl, %g1
1067109860Sjake	cmp	%g1, 1
1068109860Sjake	bgt,a,pn %xcc, 1f
1069109860Sjake	 nop
1070109860Sjake
1071109860Sjake	/*
1072181701Smarius	 * Load the SFAR, SFSR and TAR.
107382005Sjake	 */
107488644Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
107588644Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
107688644Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
107785243Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
107882005Sjake	membar	#Sync
107982005Sjake
108082005Sjake	/*
1081182020Smarius	 * Save the MMU registers and call common trap code.
108282005Sjake	 */
108388644Sjake	tl0_split
1084108374Sjake	clr	%o1
1085103921Sjake	set	trap, %o2
108688644Sjake	mov	%g2, %o3
108788644Sjake	mov	%g3, %o4
108888644Sjake	mov	%g4, %o5
1089103897Sjake	ba	%xcc, tl0_utrap
109088644Sjake	 mov	T_DATA_PROTECTION, %o0
1091109860Sjake
1092109860Sjake	/*
1093109860Sjake	 * Handle faults during window spill/fill.
1094109860Sjake	 */
1095109860Sjake1:	RESUME_SPILLFILL_MMU_CLR_SFSR
1096109860Sjake
1097109860Sjake	/*
1098181701Smarius	 * Load the SFAR, SFSR and TAR.  Clear the SFSR.
1099109860Sjake	 */
1100109860Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
1101109860Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
1102109860Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
1103109860Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
1104109860Sjake	membar	#Sync
1105109860Sjake
1106109860Sjake	tl1_split
1107109860Sjake	clr	%o1
1108109860Sjake	set	trap, %o2
1109109860Sjake	mov	%g2, %o3
1110109860Sjake	mov	%g3, %o4
1111109860Sjake	mov	%g4, %o5
1112116589Sjake	ba	%xcc, tl1_trap
1113109860Sjake	 mov	T_DATA_PROTECTION | T_KERNEL, %o0
111488644SjakeEND(tl0_dmmu_prot_trap)
111581180Sjake
/*
 * TL0 register window spill/fill vectors.  The _0 variants use 8-byte
 * accesses at %sp + SPOFF (64-bit stack layout); the _1 variants use
 * 4-byte accesses at the unbiased %sp (32-bit stack layout).  All go
 * through the user's address space via ASI_AIUP; a fault during the
 * SPILL/FILL is caught by the RSF_TRAP continuations, which report
 * T_SPILL/T_FILL.
 */
111680709Sjake	.macro	tl0_spill_0_n
111791246Sjake	wr	%g0, ASI_AIUP, %asi
111891246Sjake	SPILL(stxa, %sp + SPOFF, 8, %asi)
111980709Sjake	saved
112080709Sjake	retry
112182906Sjake	.align	32
112282906Sjake	RSF_TRAP(T_SPILL)
112382906Sjake	RSF_TRAP(T_SPILL)
112480709Sjake	.endm
112580709Sjake
112682906Sjake	.macro	tl0_spill_1_n
112791246Sjake	wr	%g0, ASI_AIUP, %asi
112882906Sjake	SPILL(stwa, %sp, 4, %asi)
112982906Sjake	saved
113082906Sjake	retry
113182906Sjake	.align	32
113282906Sjake	RSF_TRAP(T_SPILL)
113382906Sjake	RSF_TRAP(T_SPILL)
113482906Sjake	.endm
113582005Sjake
113691246Sjake	.macro	tl0_fill_0_n
113782906Sjake	wr	%g0, ASI_AIUP, %asi
113891246Sjake	FILL(ldxa, %sp + SPOFF, 8, %asi)
113982906Sjake	restored
114082906Sjake	retry
114182906Sjake	.align	32
114282906Sjake	RSF_TRAP(T_FILL)
114382906Sjake	RSF_TRAP(T_FILL)
114480709Sjake	.endm
114580709Sjake
114682906Sjake	.macro	tl0_fill_1_n
114791246Sjake	wr	%g0, ASI_AIUP, %asi
114882906Sjake	FILL(lduwa, %sp, 4, %asi)
114982906Sjake	restored
115082906Sjake	retry
115182906Sjake	.align	32
115282906Sjake	RSF_TRAP(T_FILL)
115382906Sjake	RSF_TRAP(T_FILL)
115482906Sjake	.endm
115582906Sjake
/*
 * Deliver a spill/fill trap to C code: restore the trapping window's
 * CWP from %tstate, then call the common trap code with the trap type
 * that the caller left in %g2.
 */
115682906SjakeENTRY(tl0_sftrap)
115782906Sjake	rdpr	%tstate, %g1
115882906Sjake	and	%g1, TSTATE_CWP_MASK, %g1
115982906Sjake	wrpr	%g1, 0, %cwp
116088644Sjake	tl0_split
1161108374Sjake	clr	%o1
1162103921Sjake	set	trap, %o2
1163116589Sjake	ba	%xcc, tl0_trap
116482906Sjake	 mov	%g2, %o0
116582906SjakeEND(tl0_sftrap)
116682906Sjake
/*
 * tl0_spill_bad/tl0_fill_bad fill unused spill/fill vector slots with
 * `sir` (software-initiated reset) so that reaching one is immediately
 * fatal rather than silently corrupting state.  tl0_syscall vectors
 * system call traps to the common trap code with the `syscall` handler.
 */
116682906Sjake	.macro	tl0_spill_bad	count
116782906Sjake	.rept	\count
116888644Sjake	sir
116988644Sjake	.align	128
117082906Sjake	.endr
117182906Sjake	.endm
117282906Sjake
117380709Sjake	.macro	tl0_fill_bad	count
117480709Sjake	.rept	\count
117588644Sjake	sir
117688644Sjake	.align	128
117780709Sjake	.endr
117880709Sjake	.endm
117980709Sjake
118084186Sjake	.macro	tl0_syscall
118188644Sjake	tl0_split
1182108374Sjake	clr	%o1
1183103921Sjake	set	syscall, %o2
1184103921Sjake	ba	%xcc, tl0_trap
118584186Sjake	 mov	T_SYSCALL, %o0
118688784Sjake	.align	32
118784186Sjake	.endm
118984186Sjake
/*
 * In-table stub for the TL0 FP-disabled fast path: branch to the
 * out-of-line ENTRY(tl0_fp_restore) below, padding the slot to 32 bytes.
 */
1190112920Sjake	.macro	tl0_fp_restore
1191112920Sjake	ba,a	%xcc, tl0_fp_restore
1192112920Sjake	 nop
1193112920Sjake	.align	32
1194112920Sjake	.endm
1195112920Sjake
/*
 * Restore the user FPU state from the pcb: clear PCB_FEF so the restore
 * is not repeated, enable the FPU via %fprs, block-load all 64 FP
 * registers from PCB_UFP and resume with `done`.
 */
1196112920SjakeENTRY(tl0_fp_restore)
1197112924Sjake	ldx	[PCB_REG + PCB_FLAGS], %g1
1198112924Sjake	andn	%g1, PCB_FEF, %g1
1199112924Sjake	stx	%g1, [PCB_REG + PCB_FLAGS]
1200112924Sjake
1201112920Sjake	wr	%g0, FPRS_FEF, %fprs
1202112920Sjake	wr	%g0, ASI_BLK_S, %asi
1203112920Sjake	ldda	[PCB_REG + PCB_UFP + (0 * 64)] %asi, %f0
1204112920Sjake	ldda	[PCB_REG + PCB_UFP + (1 * 64)] %asi, %f16
1205112920Sjake	ldda	[PCB_REG + PCB_UFP + (2 * 64)] %asi, %f32
1206112920Sjake	ldda	[PCB_REG + PCB_UFP + (3 * 64)] %asi, %f48
1207112920Sjake	membar	#Sync
1208112920Sjake	done
1209112920SjakeEND(tl0_fp_restore)
1210112920Sjake
/*
 * TL1 instruction exception vector: capture %tpc and the IMMU SFSR,
 * clear the SFSR, and branch to the out-of-line trap code with the
 * trap type in %g2.
 */
121080709Sjake	.macro	tl1_insn_excptn
1211101899Sjake	wrpr	%g0, PSTATE_ALT, %pstate
121288644Sjake	wr	%g0, ASI_IMMU, %asi
121388644Sjake	rdpr	%tpc, %g3
121488644Sjake	ldxa	[%g0 + AA_IMMU_SFSR] %asi, %g4
121588644Sjake	stxa	%g0, [%g0 + AA_IMMU_SFSR] %asi
121688644Sjake	membar	#Sync
1217116589Sjake	ba	%xcc, tl1_insn_exceptn_trap
121888644Sjake	 mov	T_INSTRUCTION_EXCEPTION | T_KERNEL, %g2
121980709Sjake	.align	32
122080709Sjake	.endm
122280709Sjake
/*
 * Out-of-line tail of tl1_insn_excptn: call the common TL1 trap code
 * with the trap type from %g2, %tpc in %o4 and the SFSR in %o5.
 */
122388644SjakeENTRY(tl1_insn_exceptn_trap)
122491246Sjake	tl1_split
1225103921Sjake	clr	%o1
1226103921Sjake	set	trap, %o2
122788644Sjake	mov	%g3, %o4
122888644Sjake	mov	%g4, %o5
1229116589Sjake	ba	%xcc, tl1_trap
123088644Sjake	 mov	%g2, %o0
123188644SjakeEND(tl1_insn_exceptn_trap)
123288644Sjake
/*
 * In-table stub for the TL1 FP-disabled vector: branch out of line to
 * tl1_fp_disabled_1, padding the slot to 32 bytes.
 */
1233113024Sjake	.macro	tl1_fp_disabled
1234113024Sjake	ba,a	%xcc, tl1_fp_disabled_1
1235113024Sjake	 nop
1236113024Sjake	.align	32
1237113024Sjake	.endm
1238113024Sjake
/*
 * TL1 FP-disabled handler.  If %tpc lies inside the kernel's
 * fpu_fault_begin..+fpu_fault_size region, this is an expected fault:
 * enable the FPU, block-load the kernel FP state from PCB_KFP and
 * retry.  Otherwise report T_FP_DISABLED | T_KERNEL to the common
 * trap code.
 */
1239113024SjakeENTRY(tl1_fp_disabled_1)
1240113024Sjake	rdpr	%tpc, %g1
1241113024Sjake	set	fpu_fault_begin, %g2
1242113024Sjake	sub	%g1, %g2, %g1
1243113024Sjake	cmp	%g1, fpu_fault_size
1244113024Sjake	bgeu,a,pn %xcc, 1f
1245113024Sjake	 nop
1246113024Sjake
1247113024Sjake	wr	%g0, FPRS_FEF, %fprs
1248113024Sjake	wr	%g0, ASI_BLK_S, %asi
1249113024Sjake	ldda	[PCB_REG + PCB_KFP + (0 * 64)] %asi, %f0
1250113024Sjake	ldda	[PCB_REG + PCB_KFP + (1 * 64)] %asi, %f16
1251113024Sjake	ldda	[PCB_REG + PCB_KFP + (2 * 64)] %asi, %f32
1252113024Sjake	ldda	[PCB_REG + PCB_KFP + (3 * 64)] %asi, %f48
1253113024Sjake	membar	#Sync
1254113024Sjake	retry
1255113024Sjake
1256113024Sjake1:	tl1_split
1257113024Sjake	clr	%o1
1258113024Sjake	set	trap, %o2
1259113024Sjake	ba	%xcc, tl1_trap
1260113024Sjake	 mov	T_FP_DISABLED | T_KERNEL, %o0
1261113024SjakeEND(tl1_fp_disabled_1)
1262113024Sjake
/*
 * TL1 data exception: the in-table macro switches to alternate globals
 * and branches out of line; the trap entry first tries to resume a
 * faulted window spill/fill, then falls back to tl1_sfsr_trap with
 * T_DATA_EXCEPTION | T_KERNEL in %g2.
 */
126382005Sjake	.macro	tl1_data_excptn
1264101899Sjake	wrpr	%g0, PSTATE_ALT, %pstate
1265116589Sjake	ba,a	%xcc, tl1_data_excptn_trap
126682906Sjake	 nop
126782005Sjake	.align	32
126882005Sjake	.endm
126982005Sjake
127088644SjakeENTRY(tl1_data_excptn_trap)
127188644Sjake	RESUME_SPILLFILL_MMU_CLR_SFSR
1272116589Sjake	ba	%xcc, tl1_sfsr_trap
127388644Sjake	 mov	T_DATA_EXCEPTION | T_KERNEL, %g2
127488644SjakeEND(tl1_data_excptn_trap)
127582906Sjake
/*
 * In-table stub for the TL1 memory-alignment trap vector: branch out of
 * line to tl1_align_trap, padding the slot to 32 bytes.
 */
127680709Sjake	.macro	tl1_align
1277116589Sjake	ba,a	%xcc, tl1_align_trap
127888644Sjake	 nop
127980709Sjake	.align	32
128080709Sjake	.endm
128180709Sjake
/*
 * Out-of-line TL1 alignment trap handler: try to resume a faulted
 * window spill/fill first, otherwise report the fault through
 * tl1_sfsr_trap with T_MEM_ADDRESS_NOT_ALIGNED | T_KERNEL in %g2.
 *
 * Fix: the closing END() named tl1_data_excptn_trap (a copy/paste slip
 * from the handler above), which mis-ends this symbol and gives both
 * functions wrong size/end markers.  It must match the ENTRY() name.
 */
128282906SjakeENTRY(tl1_align_trap)
128388644Sjake	RESUME_SPILLFILL_ALIGN
1284116589Sjake	ba	%xcc, tl1_sfsr_trap
128588644Sjake	 mov	T_MEM_ADDRESS_NOT_ALIGNED | T_KERNEL, %g2
128688644SjakeEND(tl1_align_trap)
128782906Sjake
/*
 * Common TL1 tail for faults that carry DMMU fault state: read the
 * SFAR and SFSR, clear the SFSR, and call the common trap code with
 * the trap type that the caller left in %g2.
 */
128880709SjakeENTRY(tl1_sfsr_trap)
128988644Sjake	wr	%g0, ASI_DMMU, %asi
129088644Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
129188644Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
129280709Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
129380709Sjake	membar	#Sync
129482005Sjake
129591246Sjake	tl1_split
1296103921Sjake	clr	%o1
1297103921Sjake	set	trap, %o2
129888644Sjake	mov	%g3, %o4
129988644Sjake	mov	%g4, %o5
1300116589Sjake	ba	%xcc, tl1_trap
130188644Sjake	 mov	%g2, %o0
130288644SjakeEND(tl1_sfsr_trap)
130380709Sjake
/*
 * TL1 interrupt vectors: tl1_intr passes the fixed level and mask of
 * its vector slot to the common tl1_intr code; tl1_intr_level expands
 * the shared INTR_LEVEL sequence for trap level 1.
 */
130384186Sjake	.macro	tl1_intr level, mask
130491246Sjake	tl1_split
130591246Sjake	set	\mask, %o1
1306116589Sjake	ba	%xcc, tl1_intr
130791246Sjake	 mov	\level, %o0
130881380Sjake	.align	32
130981380Sjake	.endm
131081380Sjake
131180709Sjake	.macro	tl1_intr_level
131281380Sjake	INTR_LEVEL(1)
131380709Sjake	.endm
131580709Sjake
/*
 * TL1 IMMU miss vector: look the faulting virtual page up in the kernel
 * TSB (whose mask and base are patched into the sethi/or sequence at
 * startup), and on a valid, executable, tag-matching TTE load the ITLB
 * and retry.  Anything else goes to tl1_immu_miss_trap; a clear
 * reference bit goes to the tl1_immu_miss_set_ref slow path.
 */
131680709Sjake	.macro	tl1_immu_miss
131791224Sjake	/*
131891224Sjake	 * Load the context and the virtual page number from the tag access
131991224Sjake	 * register.  We ignore the context.
132091224Sjake	 */
132191224Sjake	wr	%g0, ASI_IMMU, %asi
1322102040Sjake	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g5
132385585Sjake
132491224Sjake	/*
1325181701Smarius	 * Compute the address of the TTE.  The TSB mask and address of the
1326181701Smarius	 * TSB are patched at startup.
132791224Sjake	 */
1328102040Sjake	.globl	tl1_immu_miss_patch_1
1329102040Sjaketl1_immu_miss_patch_1:
1330102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1331102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
1332102040Sjake	sethi	%hi(TSB_KERNEL), %g7
133385585Sjake
1334102040Sjake	srlx	%g5, TAR_VPN_SHIFT, %g5
1335102040Sjake	and	%g5, %g6, %g6
1336102040Sjake	sllx	%g6, TTE_SHIFT, %g6
1337102040Sjake	add	%g6, %g7, %g6
133885585Sjake
133985585Sjake	/*
1340181701Smarius	 * Load the TTE.
134191224Sjake	 */
1342102040Sjake	ldda	[%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
134391224Sjake
134491224Sjake	/*
1345181701Smarius	 * Check that it's valid and executable and that the virtual page
134691224Sjake	 * numbers match.
134791224Sjake	 */
1348102040Sjake	brgez,pn %g7, tl1_immu_miss_trap
1349102040Sjake	 andcc	%g7, TD_EXEC, %g0
135091224Sjake	bz,pn	%xcc, tl1_immu_miss_trap
1351102040Sjake	 srlx	%g6, TV_SIZE_BITS, %g6
1352102040Sjake	cmp	%g5, %g6
135391224Sjake	bne,pn	%xcc, tl1_immu_miss_trap
135485585Sjake	 EMPTY
135585585Sjake
135685585Sjake	/*
1357181701Smarius	 * Set the reference bit if it's currently clear.
135885585Sjake	 */
1359102040Sjake	 andcc	%g7, TD_REF, %g0
1360102040Sjake	bz,a,pn	%xcc, tl1_immu_miss_set_ref
136191224Sjake	 nop
136285585Sjake
136391224Sjake	/*
1364181701Smarius	 * Load the TTE data into the TLB and retry the instruction.
136591224Sjake	 */
1366102040Sjake	stxa	%g7, [%g0] ASI_ITLB_DATA_IN_REG
1367102040Sjake	retry
1368102040Sjake	.align	128
1369102040Sjake	.endm
137088644Sjake
/*
 * Slow path of the TL1 IMMU miss handler: recompute the kernel-TSB TTE
 * address (clobbered by the quad load), atomically set the reference
 * bit and, if the TTE is still valid, load the ITLB and retry.
 */
1371102040SjakeENTRY(tl1_immu_miss_set_ref)
137285585Sjake	/*
1373181701Smarius	 * Recompute the TTE address, which we clobbered loading the TTE.
1374181701Smarius	 * The TSB mask and address of the TSB are patched at startup.
1375102040Sjake	 */
1376102040Sjake	.globl	tl1_immu_miss_patch_2
1377102040Sjaketl1_immu_miss_patch_2:
1378102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1379102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
1380102040Sjake	sethi	%hi(TSB_KERNEL), %g7
1381102040Sjake
1382102040Sjake	and	%g5, %g6, %g5
1383102040Sjake	sllx	%g5, TTE_SHIFT, %g5
1384102040Sjake	add	%g5, %g7, %g5
1385102040Sjake
1386102040Sjake	/*
1387102040Sjake	 * Set the reference bit.
1388102040Sjake	 */
1389102040Sjake	TTE_SET_REF(%g5, %g6, %g7)
1390102040Sjake
1391102040Sjake	/*
1392102040Sjake	 * May have become invalid during casxa, in which case start over.
1393102040Sjake	 */
1394102040Sjake	brgez,pn %g6, 1f
1395102040Sjake	 nop
1396102040Sjake
1397102040Sjake	/*
1398181701Smarius	 * Load the TTE data into the TLB and retry the instruction.
139985585Sjake	 */
1400102040Sjake	stxa	%g6, [%g0] ASI_ITLB_DATA_IN_REG
1401102040Sjake1:	retry
1402102040SjakeEND(tl1_immu_miss_set_ref)
140385585Sjake
/*
 * C-code fallback for the TL1 IMMU miss handler: reread the tag access
 * register and report T_INSTRUCTION_MISS | T_KERNEL to the common trap
 * code.
 */
140491224SjakeENTRY(tl1_immu_miss_trap)
140585585Sjake	/*
140685585Sjake	 * Switch to alternate globals.
140785585Sjake	 */
140891224Sjake	wrpr	%g0, PSTATE_ALT, %pstate
140985585Sjake
141091224Sjake	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2
141185585Sjake
141291246Sjake	tl1_split
1413103921Sjake	clr	%o1
1414103921Sjake	set	trap, %o2
141591224Sjake	mov	%g2, %o3
1416116589Sjake	ba	%xcc, tl1_trap
141788644Sjake	 mov	T_INSTRUCTION_MISS | T_KERNEL, %o0
141891224SjakeEND(tl1_immu_miss_trap)
141991224Sjake
/*
 * TL1 DMMU miss vector.  User addresses (non-zero context) are diverted
 * to tl1_dmmu_miss_user with the tag access value in %g1; negative
 * (direct-mapped) addresses go to tl1_dmmu_miss_direct.  Kernel
 * addresses are looked up in the kernel TSB (mask/base patched at
 * startup); on a valid, tag-matching TTE the DTLB is loaded and the
 * instruction retried, otherwise we fall out to tl1_dmmu_miss_trap.
 */
142091224Sjake	.macro	tl1_dmmu_miss
142191224Sjake	/*
142291224Sjake	 * Load the context and the virtual page number from the tag access
142391224Sjake	 * register.
142491224Sjake	 */
142591224Sjake	wr	%g0, ASI_DMMU, %asi
1426102040Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g5
142780709Sjake
142891224Sjake	/*
142991224Sjake	 * Extract the context from the contents of the tag access register.
1430181701Smarius	 * If it's non-zero this is a fault on a user address.  Note that the
1431108195Sjake	 * faulting address is passed in %g1.
143291224Sjake	 */
1433102040Sjake	sllx	%g5, 64 - TAR_VPN_SHIFT, %g6
1434102040Sjake	brnz,a,pn %g6, tl1_dmmu_miss_user
1435102040Sjake	 mov	%g5, %g1
143680709Sjake
143791224Sjake	/*
1438100771Sjake	 * Check for the direct mapped physical region.  These addresses have
1439100771Sjake	 * the high bit set so they are negative.
1440100771Sjake	 */
1441102040Sjake	brlz,pn %g5, tl1_dmmu_miss_direct
1442100771Sjake	 EMPTY
1443100771Sjake
1444100771Sjake	/*
1445181701Smarius	 * Compute the address of the TTE.  The TSB mask and address of the
1446181701Smarius	 * TSB are patched at startup.
144791224Sjake	 */
1448102040Sjake	.globl	tl1_dmmu_miss_patch_1
1449102040Sjaketl1_dmmu_miss_patch_1:
1450102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1451102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
1452102040Sjake	sethi	%hi(TSB_KERNEL), %g7
145384186Sjake
1454102040Sjake	srlx	%g5, TAR_VPN_SHIFT, %g5
1455102040Sjake	and	%g5, %g6, %g6
1456102040Sjake	sllx	%g6, TTE_SHIFT, %g6
1457102040Sjake	add	%g6, %g7, %g6
145891224Sjake
145991224Sjake	/*
1460181701Smarius	 * Load the TTE.
146191224Sjake	 */
1462102040Sjake	ldda	[%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
146391224Sjake
146491224Sjake	/*
1465181701Smarius	 * Check that it's valid and that the virtual page numbers match.
146691224Sjake	 */
1467102040Sjake	brgez,pn %g7, tl1_dmmu_miss_trap
1468102040Sjake	 srlx	%g6, TV_SIZE_BITS, %g6
1469102040Sjake	cmp	%g5, %g6
147091224Sjake	bne,pn %xcc, tl1_dmmu_miss_trap
147180709Sjake	 EMPTY
147280709Sjake
147380709Sjake	/*
1474181701Smarius	 * Set the reference bit if it's currently clear.
147580709Sjake	 */
1476102040Sjake	 andcc	%g7, TD_REF, %g0
1477102040Sjake	bz,a,pt	%xcc, tl1_dmmu_miss_set_ref
147891224Sjake	 nop
147980709Sjake
148091224Sjake	/*
1481181701Smarius	 * Load the TTE data into the TLB and retry the instruction.
148291224Sjake	 */
1483102040Sjake	stxa	%g7, [%g0] ASI_DTLB_DATA_IN_REG
1484102040Sjake	retry
1485102040Sjake	.align	128
1486102040Sjake	.endm
148788644Sjake
/*
 * Slow path of the TL1 DMMU miss handler: recompute the kernel-TSB TTE
 * address (clobbered by the quad load), atomically set the reference
 * bit and, if the TTE is still valid, load the DTLB and retry.
 */
1488102040SjakeENTRY(tl1_dmmu_miss_set_ref)
148980709Sjake	/*
1490181701Smarius	 * Recompute the TTE address, which we clobbered loading the TTE.
1491181701Smarius	 * The TSB mask and address of the TSB are patched at startup.
1492102040Sjake	 */
1493102040Sjake	.globl	tl1_dmmu_miss_patch_2
1494102040Sjaketl1_dmmu_miss_patch_2:
1495102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1496102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
1497102040Sjake	sethi	%hi(TSB_KERNEL), %g7
1498102040Sjake
1499102040Sjake	and	%g5, %g6, %g5
1500102040Sjake	sllx	%g5, TTE_SHIFT, %g5
1501102040Sjake	add	%g5, %g7, %g5
1502102040Sjake
1503102040Sjake	/*
1504102040Sjake	 * Set the reference bit.
1505102040Sjake	 */
1506102040Sjake	TTE_SET_REF(%g5, %g6, %g7)
1507102040Sjake
1508102040Sjake	/*
1509102040Sjake	 * May have become invalid during casxa, in which case start over.
1510102040Sjake	 */
1511102040Sjake	brgez,pn %g6, 1f
1512102040Sjake	 nop
1513102040Sjake
1514102040Sjake	/*
1515181701Smarius	 * Load the TTE data into the TLB and retry the instruction.
151680709Sjake	 */
1517102040Sjake	stxa	%g6, [%g0] ASI_DTLB_DATA_IN_REG
1518102040Sjake1:	retry
1519102040SjakeEND(tl1_dmmu_miss_set_ref)
152080709Sjake
/*
 * C-code fallback for the TL1 DMMU miss handler: reread the tag access
 * register, run KSTACK_CHECK (guards against a blown kernel stack) and
 * report T_DATA_MISS | T_KERNEL to the common trap code.
 */
152191224SjakeENTRY(tl1_dmmu_miss_trap)
152280709Sjake	/*
152382906Sjake	 * Switch to alternate globals.
152480709Sjake	 */
152591224Sjake	wrpr	%g0, PSTATE_ALT, %pstate
152680709Sjake
1527108195Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
1528108195Sjake
152988781Sjake	KSTACK_CHECK
153088781Sjake
153191246Sjake	tl1_split
1532103921Sjake	clr	%o1
1533103921Sjake	set	trap, %o2
153491224Sjake	mov	%g2, %o3
1535116589Sjake	ba	%xcc, tl1_trap
153688644Sjake	 mov	T_DATA_MISS | T_KERNEL, %o0
153788781SjakeEND(tl1_dmmu_miss_trap)
153880709Sjake
/*
 * Handle a TL1 DMMU miss in the direct-mapped region: synthesize a TTE
 * from the virtual address itself (no TSB lookup) with the cacheable
 * and writable bits set, load the DTLB and retry.  %g5 holds the tag
 * access value on entry.
 */
1539100771SjakeENTRY(tl1_dmmu_miss_direct)
1540100771Sjake	/*
1541100771Sjake	 * Mask off the high bits of the virtual address to get the physical
1542181701Smarius	 * address, and or in the TTE bits.  The virtual address bits that
1543181701Smarius	 * correspond to the TTE valid and page size bits are left set, so
1544181701Smarius	 * they don't have to be included in the TTE bits below.  We know they
1545108245Sjake	 * are set because the virtual address is in the upper va hole.
1546100771Sjake	 */
1547108245Sjake	setx	TLB_DIRECT_TO_TTE_MASK, %g7, %g6
1548108245Sjake	and	%g5, %g6, %g5
1549108245Sjake	or	%g5, TD_CP | TD_CV | TD_W, %g5
1550100771Sjake
1551100771Sjake	/*
1552181701Smarius	 * Load the TTE data into the TLB and retry the instruction.
1553100771Sjake	 */
1554102040Sjake	stxa	%g5, [%g0] ASI_DTLB_DATA_IN_REG
1555100771Sjake	retry
1556100771SjakeEND(tl1_dmmu_miss_direct)
1557100771Sjake
/*
 * In-table stub for the TL1 DMMU protection fault vector: branch out of
 * line to tl1_dmmu_prot_1, padding the slot to 128 bytes.
 */
155882906Sjake	.macro	tl1_dmmu_prot
1559102040Sjake	ba,a	%xcc, tl1_dmmu_prot_1
1560102040Sjake	 nop
1561102040Sjake	.align	128
1562102040Sjake	.endm
1563102040Sjake
/*
 * Out-of-line body of the TL1 DMMU protection fault handler.  Faults on
 * user addresses (non-zero context) are diverted to tl1_dmmu_prot_user
 * with the tag access value in %g1.  For kernel addresses, look the
 * page up in the kernel TSB; on a valid, software-writable, matching
 * TTE, demap the stale entry, set the hardware write bit and reload
 * the DTLB.  Anything else goes to tl1_dmmu_prot_trap.
 */
1564102040SjakeENTRY(tl1_dmmu_prot_1)
156591224Sjake	/*
156691224Sjake	 * Load the context and the virtual page number from the tag access
156791224Sjake	 * register.
156891224Sjake	 */
156991224Sjake	wr	%g0, ASI_DMMU, %asi
1570102040Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g5
157188644Sjake
157291224Sjake	/*
157391224Sjake	 * Extract the context from the contents of the tag access register.
1574181701Smarius	 * If it's non-zero this is a fault on a user address.  Note that the
1575108195Sjake	 * faulting address is passed in %g1.
157691224Sjake	 */
1577102040Sjake	sllx	%g5, 64 - TAR_VPN_SHIFT, %g6
1578102040Sjake	brnz,a,pn %g6, tl1_dmmu_prot_user
1579102040Sjake	 mov	%g5, %g1
158088644Sjake
158191224Sjake	/*
1582181701Smarius	 * Compute the address of the TTE.  The TSB mask and address of the
1583181701Smarius	 * TSB are patched at startup.
158491224Sjake	 */
1585102040Sjake	.globl	tl1_dmmu_prot_patch_1
1586102040Sjaketl1_dmmu_prot_patch_1:
1587102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1588102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
1589102040Sjake	sethi	%hi(TSB_KERNEL), %g7
159088644Sjake
1591102040Sjake	srlx	%g5, TAR_VPN_SHIFT, %g5
1592102040Sjake	and	%g5, %g6, %g6
1593102040Sjake	sllx	%g6, TTE_SHIFT, %g6
1594102040Sjake	add	%g6, %g7, %g6
159591224Sjake
159691224Sjake	/*
1597181701Smarius	 * Load the TTE.
159891224Sjake	 */
1599102040Sjake	ldda	[%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
160091224Sjake
160191224Sjake	/*
1602181701Smarius	 * Check that it's valid and writeable and that the virtual page
160391224Sjake	 * numbers match.
160491224Sjake	 */
1605102040Sjake	brgez,pn %g7, tl1_dmmu_prot_trap
1606102040Sjake	 andcc	%g7, TD_SW, %g0
160791224Sjake	bz,pn	%xcc, tl1_dmmu_prot_trap
1608102040Sjake	 srlx	%g6, TV_SIZE_BITS, %g6
1609102040Sjake	cmp	%g5, %g6
161091224Sjake	bne,pn	%xcc, tl1_dmmu_prot_trap
161188644Sjake	 EMPTY
161288644Sjake
161388644Sjake	/*
1614181701Smarius	 * Delete the old TLB entry and clear the SFSR.
161588644Sjake	 */
1616102040Sjake	 sllx	%g5, TAR_VPN_SHIFT, %g6
161791224Sjake	or	%g6, TLB_DEMAP_NUCLEUS, %g6
161891224Sjake	stxa	%g0, [%g6] ASI_DMMU_DEMAP
161981180Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
162091224Sjake	membar	#Sync
162181180Sjake
1622102040Sjake	/*
1623181701Smarius	 * Recompute the TTE address, which we clobbered loading the TTE.
1624181701Smarius	 * The TSB mask and address of the TSB are patched at startup.
1625102040Sjake	 */
1626102040Sjake	.globl	tl1_dmmu_prot_patch_2
1627102040Sjaketl1_dmmu_prot_patch_2:
1628102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1629102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
1630102040Sjake	sethi	%hi(TSB_KERNEL), %g7
163196207Sjake
1632102040Sjake	and	%g5, %g6, %g5
1633102040Sjake	sllx	%g5, TTE_SHIFT, %g5
1634102040Sjake	add	%g5, %g7, %g5
1635102040Sjake
163681180Sjake	/*
163791224Sjake	 * Set the hardware write bit.
163891224Sjake	 */
1639102040Sjake	TTE_SET_W(%g5, %g6, %g7)
164091224Sjake
164191224Sjake	/*
1642102040Sjake	 * May have become invalid during casxa, in which case start over.
1643102040Sjake	 */
1644102040Sjake	brgez,pn %g6, 1f
1645102040Sjake	 or	%g6, TD_W, %g6
1646102040Sjake
1647102040Sjake	/*
1648181701Smarius	 * Load the TTE data into the TLB and retry the instruction.
164988644Sjake	 */
1650102040Sjake	stxa	%g6, [%g0] ASI_DTLB_DATA_IN_REG
1651102040Sjake1:	retry
1652102040SjakeEND(tl1_dmmu_prot_1)
165388644Sjake
/*
 * C-code fallback for the TL1 DMMU protection fault handler: capture
 * the TAR/SFAR/SFSR, clear the SFSR and report
 * T_DATA_PROTECTION | T_KERNEL to the common trap code.
 */
165488644SjakeENTRY(tl1_dmmu_prot_trap)
165581180Sjake	/*
165691224Sjake	 * Switch to alternate globals.
165791224Sjake	 */
165891224Sjake	wrpr	%g0, PSTATE_ALT, %pstate
165991224Sjake
166091224Sjake	/*
1661181701Smarius	 * Load the SFAR, SFSR and TAR.  Clear the SFSR.
166281180Sjake	 */
166388644Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
166488644Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
166588644Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
166681180Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
166781180Sjake	membar	#Sync
166881180Sjake
166991246Sjake	tl1_split
1670103921Sjake	clr	%o1
1671103921Sjake	set	trap, %o2
167288644Sjake	mov	%g2, %o3
167388644Sjake	mov	%g3, %o4
167488644Sjake	mov	%g4, %o5
1675116589Sjake	ba	%xcc, tl1_trap
167688644Sjake	 mov	T_DATA_PROTECTION | T_KERNEL, %o0
167788644SjakeEND(tl1_dmmu_prot_trap)
167881180Sjake
/*
 * TL1 register window spill/fill vectors.  The _n variants with plain
 * stx/ldx operate on kernel stacks (a fault here is fatal: RSF_FATAL);
 * the ASI_AIUP variants touch user windows from kernel context and on
 * fault fall back to RSF_SPILL_TOPCB (save the window into the pcb) or
 * RSF_FILL_MAGIC.  The _o variants handle the "other" window state;
 * 8-byte accesses use %sp + SPOFF, 4-byte accesses the unbiased %sp
 * for 32-bit stack layouts.
 */
167980709Sjake	.macro	tl1_spill_0_n
168082906Sjake	SPILL(stx, %sp + SPOFF, 8, EMPTY)
168180709Sjake	saved
168280709Sjake	retry
168382906Sjake	.align	32
168482906Sjake	RSF_FATAL(T_SPILL)
168582906Sjake	RSF_FATAL(T_SPILL)
168680709Sjake	.endm
168780709Sjake
168891246Sjake	.macro	tl1_spill_2_n
168991246Sjake	wr	%g0, ASI_AIUP, %asi
169091246Sjake	SPILL(stxa, %sp + SPOFF, 8, %asi)
169182906Sjake	saved
169282906Sjake	retry
169382906Sjake	.align	32
169482906Sjake	RSF_SPILL_TOPCB
169582906Sjake	RSF_SPILL_TOPCB
169681380Sjake	.endm
169781380Sjake
169891246Sjake	.macro	tl1_spill_3_n
169991246Sjake	wr	%g0, ASI_AIUP, %asi
170092200Sjake	SPILL(stwa, %sp, 4, %asi)
170182906Sjake	saved
170282906Sjake	retry
170382906Sjake	.align	32
170482906Sjake	RSF_SPILL_TOPCB
170582906Sjake	RSF_SPILL_TOPCB
170682906Sjake	.endm
170782906Sjake
170891246Sjake	.macro	tl1_spill_0_o
170982906Sjake	wr	%g0, ASI_AIUP, %asi
171082906Sjake	SPILL(stxa, %sp + SPOFF, 8, %asi)
171182906Sjake	saved
171282906Sjake	retry
171382906Sjake	.align	32
171482906Sjake	RSF_SPILL_TOPCB
171582906Sjake	RSF_SPILL_TOPCB
171682906Sjake	.endm
171782906Sjake
171882906Sjake	.macro	tl1_spill_1_o
171991246Sjake	wr	%g0, ASI_AIUP, %asi
172082906Sjake	SPILL(stwa, %sp, 4, %asi)
172182005Sjake	saved
172282005Sjake	retry
172382906Sjake	.align	32
172482906Sjake	RSF_SPILL_TOPCB
172582906Sjake	RSF_SPILL_TOPCB
172682906Sjake	.endm
172782005Sjake
172882906Sjake	.macro	tl1_spill_2_o
172982906Sjake	RSF_SPILL_TOPCB
173091246Sjake	.align	128
173180709Sjake	.endm
173280709Sjake
173380709Sjake	.macro	tl1_fill_0_n
173482906Sjake	FILL(ldx, %sp + SPOFF, 8, EMPTY)
173580709Sjake	restored
173680709Sjake	retry
173782906Sjake	.align	32
173882906Sjake	RSF_FATAL(T_FILL)
173982906Sjake	RSF_FATAL(T_FILL)
174080709Sjake	.endm
174180709Sjake
174291246Sjake	.macro	tl1_fill_2_n
174382906Sjake	wr	%g0, ASI_AIUP, %asi
174482906Sjake	FILL(ldxa, %sp + SPOFF, 8, %asi)
174582906Sjake	restored
174682906Sjake	retry
174782906Sjake	.align 32
174882906Sjake	RSF_FILL_MAGIC
174991246Sjake	RSF_FILL_MAGIC
175082906Sjake	.endm
175182906Sjake
175291246Sjake	.macro	tl1_fill_3_n
175382906Sjake	wr	%g0, ASI_AIUP, %asi
175482906Sjake	FILL(lduwa, %sp, 4, %asi)
175582906Sjake	restored
175682906Sjake	retry
175782906Sjake	.align 32
175882906Sjake	RSF_FILL_MAGIC
175991246Sjake	RSF_FILL_MAGIC
176082906Sjake	.endm
176182906Sjake
176282005Sjake/*
176382906Sjake * This is used to spill windows that are still occupied with user
176482906Sjake * data on kernel entry to the pcb.
176582005Sjake */
176682906SjakeENTRY(tl1_spill_topcb)
176782906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
176882906Sjake
176982005Sjake	/* Free some globals for our use. */
177088644Sjake	dec	24, ASP_REG
177188644Sjake	stx	%g1, [ASP_REG + 0]
177288644Sjake	stx	%g2, [ASP_REG + 8]
177388644Sjake	stx	%g3, [ASP_REG + 16]
177482906Sjake
	/* %g1 = index of the next free pcb save slot. */
177588644Sjake	ldx	[PCB_REG + PCB_NSAVED], %g1
177682906Sjake
	/* Record the user %sp this window belongs to. */
177788644Sjake	sllx	%g1, PTR_SHIFT, %g2
177888644Sjake	add	%g2, PCB_REG, %g2
177988644Sjake	stx	%sp, [%g2 + PCB_RWSP]
178082906Sjake
	/* Store the window's registers into the pcb save area. */
178188644Sjake	sllx	%g1, RW_SHIFT, %g2
178288644Sjake	add	%g2, PCB_REG, %g2
178388644Sjake	SPILL(stx, %g2 + PCB_RW, 8, EMPTY)
178482906Sjake
178588644Sjake	inc	%g1
178688644Sjake	stx	%g1, [PCB_REG + PCB_NSAVED]
178782906Sjake
178885243Sjake#if KTR_COMPILE & KTR_TRAP
178988785Sjake	CATR(KTR_TRAP, "tl1_spill_topcb: pc=%#lx npc=%#lx sp=%#lx nsaved=%d"
179082906Sjake	   , %g1, %g2, %g3, 7, 8, 9)
179182906Sjake	rdpr	%tpc, %g2
179282906Sjake	stx	%g2, [%g1 + KTR_PARM1]
179388785Sjake	rdpr	%tnpc, %g2
179488785Sjake	stx	%g2, [%g1 + KTR_PARM2]
179588785Sjake	stx	%sp, [%g1 + KTR_PARM3]
179688644Sjake	ldx	[PCB_REG + PCB_NSAVED], %g2
179788785Sjake	stx	%g2, [%g1 + KTR_PARM4]
179882906Sjake9:
179982906Sjake#endif
180082906Sjake
180182906Sjake	saved
180282906Sjake
180388644Sjake	ldx	[ASP_REG + 16], %g3
180488644Sjake	ldx	[ASP_REG + 8], %g2
180588644Sjake	ldx	[ASP_REG + 0], %g1
180688644Sjake	inc	24, ASP_REG
180782906Sjake	retry
180882906SjakeEND(tl1_spill_topcb)
180982005Sjake
/*
 * Generate \count unusable spill trap-table slots.  Reaching one of
 * these indicates a window configuration the kernel does not support,
 * so each slot issues a software-initiated reset (sir).  The
 * .align 128 pads every entry out to a full 128-byte spill/fill slot.
 */
181082906Sjake	.macro	tl1_spill_bad	count
181182906Sjake	.rept	\count
181288644Sjake	sir
181388644Sjake	.align	128
181482906Sjake	.endr
181582906Sjake	.endm
181682906Sjake
/*
 * Generate \count unusable fill trap-table slots; the counterpart of
 * tl1_spill_bad above.  Each issues a software-initiated reset (sir)
 * and is padded to a full 128-byte spill/fill slot.
 */
181780709Sjake	.macro	tl1_fill_bad	count
181880709Sjake	.rept	\count
181988644Sjake	sir
182088644Sjake	.align	128
182180709Sjake	.endr
182280709Sjake	.endm
182380709Sjake
/*
 * Generate \count software-trap entries for TL1.  Each is routed
 * through the generic tl1_gen path with type T_SOFT | T_KERNEL.
 */
182480709Sjake	.macro	tl1_soft	count
182582906Sjake	.rept	\count
182682906Sjake	tl1_gen	T_SOFT | T_KERNEL
182782906Sjake	.endr
182880709Sjake	.endm
182980709Sjake
/*
 * The trap table proper.  It is aligned to 32KB (.align 0x8000 below);
 * the `! 0xNN' comments on each entry give the trap vector number(s)
 * the entry services.  Entries 0x000-0x1ff are taken from TL0, i.e.
 * from usermode.
 */
183080709Sjake	.sect	.trap
1831155839Smarius	.globl	tl_trap_begin
1832155839Smariustl_trap_begin:
1833155839Smarius	nop
1834155839Smarius
183580709Sjake	.align	0x8000
183680709Sjake	.globl	tl0_base
183780709Sjake
183880709Sjaketl0_base:
183988779Sjake	tl0_reserved	8				! 0x0-0x7
184080709Sjaketl0_insn_excptn:
184188779Sjake	tl0_insn_excptn					! 0x8
184288779Sjake	tl0_reserved	1				! 0x9
184380709Sjaketl0_insn_error:
184488779Sjake	tl0_gen		T_INSTRUCTION_ERROR		! 0xa
184588779Sjake	tl0_reserved	5				! 0xb-0xf
184680709Sjaketl0_insn_illegal:
184788779Sjake	tl0_gen		T_ILLEGAL_INSTRUCTION		! 0x10
184880709Sjaketl0_priv_opcode:
184988779Sjake	tl0_gen		T_PRIVILEGED_OPCODE		! 0x11
185088779Sjake	tl0_reserved	14				! 0x12-0x1f
185180709Sjaketl0_fp_disabled:
185288779Sjake	tl0_gen		T_FP_DISABLED			! 0x20
185380709Sjaketl0_fp_ieee:
185488779Sjake	tl0_gen		T_FP_EXCEPTION_IEEE_754		! 0x21
185580709Sjaketl0_fp_other:
185688779Sjake	tl0_gen		T_FP_EXCEPTION_OTHER		! 0x22
185780709Sjaketl0_tag_ovflw:
1858154419Skris	tl0_gen		T_TAG_OVERFLOW			! 0x23
185980709Sjaketl0_clean_window:
186088779Sjake	clean_window					! 0x24
186180709Sjaketl0_divide:
186288779Sjake	tl0_gen		T_DIVISION_BY_ZERO		! 0x28
186388779Sjake	tl0_reserved	7				! 0x29-0x2f
186480709Sjaketl0_data_excptn:
186588779Sjake	tl0_data_excptn					! 0x30
186688779Sjake	tl0_reserved	1				! 0x31
186780709Sjaketl0_data_error:
186888779Sjake	tl0_gen		T_DATA_ERROR			! 0x32
186988779Sjake	tl0_reserved	1				! 0x33
187080709Sjaketl0_align:
187188779Sjake	tl0_align					! 0x34
187280709Sjaketl0_align_lddf:
187388779Sjake	tl0_gen		T_RESERVED			! 0x35
187480709Sjaketl0_align_stdf:
187588779Sjake	tl0_gen		T_RESERVED			! 0x36
187680709Sjaketl0_priv_action:
187788779Sjake	tl0_gen		T_PRIVILEGED_ACTION		! 0x37
187888779Sjake	tl0_reserved	9				! 0x38-0x40
187980709Sjaketl0_intr_level:
188088779Sjake	tl0_intr_level					! 0x41-0x4f
188188779Sjake	tl0_reserved	16				! 0x50-0x5f
188280709Sjaketl0_intr_vector:
188397265Sjake	intr_vector					! 0x60
188480709Sjaketl0_watch_phys:
188588779Sjake	tl0_gen		T_PA_WATCHPOINT			! 0x61
188680709Sjaketl0_watch_virt:
188788779Sjake	tl0_gen		T_VA_WATCHPOINT			! 0x62
188880709Sjaketl0_ecc:
188988779Sjake	tl0_gen		T_CORRECTED_ECC_ERROR		! 0x63
189080709Sjaketl0_immu_miss:
189188779Sjake	tl0_immu_miss					! 0x64
189280709Sjaketl0_dmmu_miss:
189388779Sjake	tl0_dmmu_miss					! 0x68
189480709Sjaketl0_dmmu_prot:
189588779Sjake	tl0_dmmu_prot					! 0x6c
189688779Sjake	tl0_reserved	16				! 0x70-0x7f
189780709Sjaketl0_spill_0_n:
189888779Sjake	tl0_spill_0_n					! 0x80
189982906Sjaketl0_spill_1_n:
190088779Sjake	tl0_spill_1_n					! 0x84
190191246Sjake	tl0_spill_bad	14				! 0x88-0xbf
190280709Sjaketl0_fill_0_n:
190388779Sjake	tl0_fill_0_n					! 0xc0
190482906Sjaketl0_fill_1_n:
190588779Sjake	tl0_fill_1_n					! 0xc4
190691246Sjake	tl0_fill_bad	14				! 0xc8-0xff
190788644Sjaketl0_soft:
1908106050Sjake	tl0_gen		T_SYSCALL			! 0x100
190988779Sjake	tl0_gen		T_BREAKPOINT			! 0x101
191088779Sjake	tl0_gen		T_DIVISION_BY_ZERO		! 0x102
191188779Sjake	tl0_reserved	1				! 0x103
191288779Sjake	tl0_gen		T_CLEAN_WINDOW			! 0x104
191388779Sjake	tl0_gen		T_RANGE_CHECK			! 0x105
191488779Sjake	tl0_gen		T_FIX_ALIGNMENT			! 0x106
191588779Sjake	tl0_gen		T_INTEGER_OVERFLOW		! 0x107
1916106050Sjake	tl0_gen		T_SYSCALL			! 0x108
1917106050Sjake	tl0_gen		T_SYSCALL			! 0x109
191888779Sjake	tl0_fp_restore					! 0x10a
191988779Sjake	tl0_reserved	5				! 0x10b-0x10f
192088779Sjake	tl0_gen		T_TRAP_INSTRUCTION_16		! 0x110
192188779Sjake	tl0_gen		T_TRAP_INSTRUCTION_17		! 0x111
192288779Sjake	tl0_gen		T_TRAP_INSTRUCTION_18		! 0x112
192388779Sjake	tl0_gen		T_TRAP_INSTRUCTION_19		! 0x113
192488779Sjake	tl0_gen		T_TRAP_INSTRUCTION_20		! 0x114
192588779Sjake	tl0_gen		T_TRAP_INSTRUCTION_21		! 0x115
192688779Sjake	tl0_gen		T_TRAP_INSTRUCTION_22		! 0x116
192788779Sjake	tl0_gen		T_TRAP_INSTRUCTION_23		! 0x117
192888779Sjake	tl0_gen		T_TRAP_INSTRUCTION_24		! 0x118
192988779Sjake	tl0_gen		T_TRAP_INSTRUCTION_25		! 0x119
193088779Sjake	tl0_gen		T_TRAP_INSTRUCTION_26		! 0x11a
193188779Sjake	tl0_gen		T_TRAP_INSTRUCTION_27		! 0x11b
193288779Sjake	tl0_gen		T_TRAP_INSTRUCTION_28		! 0x11c
193388779Sjake	tl0_gen		T_TRAP_INSTRUCTION_29		! 0x11d
193488779Sjake	tl0_gen		T_TRAP_INSTRUCTION_30		! 0x11e
193588779Sjake	tl0_gen		T_TRAP_INSTRUCTION_31		! 0x11f
1936106050Sjake	tl0_reserved	32				! 0x120-0x13f
1937106050Sjake	tl0_gen		T_SYSCALL			! 0x140
1938106050Sjake	tl0_syscall					! 0x141
1939106050Sjake	tl0_gen		T_SYSCALL			! 0x142
1940106050Sjake	tl0_gen		T_SYSCALL			! 0x143
1941106050Sjake	tl0_reserved	188				! 0x144-0x1ff
194280709Sjake
/*
 * Traps taken from TL > 0, i.e. from within the kernel; the vector
 * numbers are the corresponding TL0 numbers + 0x200.  Note that each
 * spill/fill slot spans 4 trap vectors (0x10 table bytes apart in the
 * numbering here).
 */
194380709Sjaketl1_base:
194488779Sjake	tl1_reserved	8				! 0x200-0x207
194580709Sjaketl1_insn_excptn:
194688779Sjake	tl1_insn_excptn					! 0x208
194788779Sjake	tl1_reserved	1				! 0x209
194880709Sjaketl1_insn_error:
194988779Sjake	tl1_gen		T_INSTRUCTION_ERROR		! 0x20a
195088779Sjake	tl1_reserved	5				! 0x20b-0x20f
195180709Sjaketl1_insn_illegal:
195288779Sjake	tl1_gen		T_ILLEGAL_INSTRUCTION		! 0x210
195380709Sjaketl1_priv_opcode:
195488779Sjake	tl1_gen		T_PRIVILEGED_OPCODE		! 0x211
195588779Sjake	tl1_reserved	14				! 0x212-0x21f
195680709Sjaketl1_fp_disabled:
1957113024Sjake	tl1_fp_disabled					! 0x220
195880709Sjaketl1_fp_ieee:
195988779Sjake	tl1_gen		T_FP_EXCEPTION_IEEE_754		! 0x221
196080709Sjaketl1_fp_other:
196188779Sjake	tl1_gen		T_FP_EXCEPTION_OTHER		! 0x222
196280709Sjaketl1_tag_ovflw:
1963154419Skris	tl1_gen		T_TAG_OVERFLOW			! 0x223
196480709Sjaketl1_clean_window:
196588779Sjake	clean_window					! 0x224
196680709Sjaketl1_divide:
196788779Sjake	tl1_gen		T_DIVISION_BY_ZERO		! 0x228
196888779Sjake	tl1_reserved	7				! 0x229-0x22f
196980709Sjaketl1_data_excptn:
197088779Sjake	tl1_data_excptn					! 0x230
197188779Sjake	tl1_reserved	1				! 0x231
197280709Sjaketl1_data_error:
197388779Sjake	tl1_gen		T_DATA_ERROR			! 0x232
197488779Sjake	tl1_reserved	1				! 0x233
197580709Sjaketl1_align:
197688779Sjake	tl1_align					! 0x234
197780709Sjaketl1_align_lddf:
197888779Sjake	tl1_gen		T_RESERVED			! 0x235
197980709Sjaketl1_align_stdf:
198088779Sjake	tl1_gen		T_RESERVED			! 0x236
198180709Sjaketl1_priv_action:
198288779Sjake	tl1_gen		T_PRIVILEGED_ACTION		! 0x237
198388779Sjake	tl1_reserved	9				! 0x238-0x240
198480709Sjaketl1_intr_level:
198588779Sjake	tl1_intr_level					! 0x241-0x24f
198688779Sjake	tl1_reserved	16				! 0x250-0x25f
198780709Sjaketl1_intr_vector:
198897265Sjake	intr_vector					! 0x260
198980709Sjaketl1_watch_phys:
199088779Sjake	tl1_gen		T_PA_WATCHPOINT			! 0x261
199180709Sjaketl1_watch_virt:
199288779Sjake	tl1_gen		T_VA_WATCHPOINT			! 0x262
199380709Sjaketl1_ecc:
199488779Sjake	tl1_gen		T_CORRECTED_ECC_ERROR		! 0x263
199580709Sjaketl1_immu_miss:
199688779Sjake	tl1_immu_miss					! 0x264
199780709Sjaketl1_dmmu_miss:
199888779Sjake	tl1_dmmu_miss					! 0x268
199980709Sjaketl1_dmmu_prot:
200088779Sjake	tl1_dmmu_prot					! 0x26c
200188779Sjake	tl1_reserved	16				! 0x270-0x27f
200280709Sjaketl1_spill_0_n:
200388779Sjake	tl1_spill_0_n					! 0x280
200491246Sjake	tl1_spill_bad	1				! 0x284
200591246Sjaketl1_spill_2_n:
200691246Sjake	tl1_spill_2_n					! 0x288
200791246Sjaketl1_spill_3_n:
200891246Sjake	tl1_spill_3_n					! 0x28c
200991246Sjake	tl1_spill_bad	4				! 0x290-0x29f
201081380Sjaketl1_spill_0_o:
201188779Sjake	tl1_spill_0_o					! 0x2a0
201282906Sjaketl1_spill_1_o:
201388779Sjake	tl1_spill_1_o					! 0x2a4
201482906Sjaketl1_spill_2_o:
201588779Sjake	tl1_spill_2_o					! 0x2a8
201691246Sjake	tl1_spill_bad	5				! 0x2ac-0x2bf
201780709Sjaketl1_fill_0_n:
201888779Sjake	tl1_fill_0_n					! 0x2c0
201991246Sjake	tl1_fill_bad	1				! 0x2c4
202091246Sjaketl1_fill_2_n:
202191246Sjake	tl1_fill_2_n					! 0x2c8
202291246Sjaketl1_fill_3_n:
202391246Sjake	tl1_fill_3_n					! 0x2cc
202491246Sjake	tl1_fill_bad	12				! 0x2d0-0x2ff
202588779Sjake	tl1_reserved	1				! 0x300
202680709Sjaketl1_breakpoint:
202788779Sjake	tl1_gen		T_BREAKPOINT			! 0x301
202888779Sjake	tl1_gen		T_RSTRWP_PHYS			! 0x302
202988779Sjake	tl1_gen		T_RSTRWP_VIRT			! 0x303
203088779Sjake	tl1_reserved	252				! 0x304-0x3ff
203180709Sjake
2032155839Smarius	.globl	tl_trap_end
2033155839Smariustl_trap_end:
2034155839Smarius	nop
2035155839Smarius
203681380Sjake/*
2037181701Smarius * User trap entry point
203882906Sjake *
2039103897Sjake * void tl0_utrap(u_long type, u_long o1, u_long o2, u_long tar, u_long sfar,
2040181701Smarius *     u_long sfsr)
2041103897Sjake *
2042103897Sjake * This handles redirecting a trap back to usermode as a user trap.  The user
2043103897Sjake * program must have first registered a trap handler with the kernel using
2044103897Sjake * sysarch(SPARC_UTRAP_INSTALL).  The trap handler is passed enough state
2045103897Sjake * for it to return to the trapping code directly, it will not return through
2046103897Sjake * the kernel.  The trap type is passed in %o0, all out registers must be
2047103897Sjake * passed through to tl0_trap or to usermode untouched.  Note that the
2048103897Sjake * parameters passed in out registers may be used by the user trap handler.
2049103897Sjake * Do not change the registers they are passed in or you will break the ABI.
2050103897Sjake *
2051103897Sjake * If the trap type allows user traps, setup state to execute the user trap
2052103897Sjake * handler and bounce back to usermode, otherwise branch to tl0_trap.
2053103897Sjake */
2054103897SjakeENTRY(tl0_utrap)
2055103897Sjake	/*
2056103897Sjake	 * Check if the trap type allows user traps.
2057103897Sjake	 */
2058103897Sjake	cmp	%o0, UT_MAX
2059103897Sjake	bge,a,pt %xcc, tl0_trap			! out of range: normal path
2060103897Sjake	 nop
2061103897Sjake
2062103897Sjake	/*
2063103897Sjake	 * Load the user trap handler from the utrap table.
2064103897Sjake	 */
2065103897Sjake	ldx	[PCPU(CURTHREAD)], %l0		! %l0 = curthread
2066103897Sjake	ldx	[%l0 + TD_PROC], %l0		! %l0 = curthread->td_proc
2067103897Sjake	ldx	[%l0 + P_MD + MD_UTRAP], %l0	! %l0 = utrap table, if any
2068103897Sjake	brz,pt	%l0, tl0_trap			! no table installed
2069103897Sjake	 sllx	%o0, PTR_SHIFT, %l1		! %l1 = table offset of type
2070103897Sjake	ldx	[%l0 + %l1], %l0		! %l0 = handler for this type
2071103897Sjake	brz,a,pt %l0, tl0_trap			! no handler registered
2072103897Sjake	 nop
2073103897Sjake
2074103897Sjake	/*
2075103897Sjake	 * If the save we did on entry to the kernel had to spill a window
2076103897Sjake	 * to the pcb, pretend we took a spill trap instead.  Any windows
2077103897Sjake	 * that are in the pcb must be copied out or the fill handler will
2078103897Sjake	 * not be able to find them, since the user trap handler returns
2079103897Sjake	 * directly to the trapping code.  Note that we only support precise
2080103897Sjake	 * user traps, which implies that the condition that caused the trap
2081103897Sjake	 * in the first place is still valid, so it will occur again when we
2082103897Sjake	 * re-execute the trapping instruction.
2083181701Smarius	 */
2084103897Sjake	ldx	[PCB_REG + PCB_NSAVED], %l1
2085103897Sjake	brnz,a,pn %l1, tl0_trap
2086103897Sjake	 mov	T_SPILL, %o0			! annulled unless taken
2087103897Sjake
2088103897Sjake	/*
2089103897Sjake	 * Pass %fsr in %l4, %tstate in %l5, %tpc in %l6 and %tnpc in %l7.
2090103897Sjake	 * The ABI specifies only %l6 and %l7, but we need to pass %fsr or
2091103897Sjake	 * it may be clobbered by an interrupt before the user trap code
2092103897Sjake	 * can read it, and we must pass %tstate in order to restore %ccr
2093103897Sjake	 * and %asi.  The %fsr must be stored to memory, so we use the
2094103897Sjake	 * temporary stack for that.
2095103897Sjake	 */
2096103897Sjake	rd	%fprs, %l1
2097103897Sjake	or	%l1, FPRS_FEF, %l2
2098103897Sjake	wr	%l2, 0, %fprs			! enable FP to read %fsr
2099103897Sjake	dec	8, ASP_REG
2100103897Sjake	stx	%fsr, [ASP_REG]
2101103897Sjake	ldx	[ASP_REG], %l4
2102103897Sjake	inc	8, ASP_REG
2103103897Sjake	wr	%l1, 0, %fprs			! restore the original %fprs
2104103897Sjake
2105103897Sjake	rdpr	%tstate, %l5
2106103897Sjake	rdpr	%tpc, %l6
2107103897Sjake	rdpr	%tnpc, %l7
2108103897Sjake
2109103897Sjake	/*
2110103897Sjake	 * Setup %tnpc to return to.
2111103897Sjake	 */
2112103897Sjake	wrpr	%l0, 0, %tnpc
2113103897Sjake
2114103897Sjake	/*
2115103897Sjake	 * Setup %wstate for return, clear WSTATE_TRANSITION.
2116103897Sjake	 */
2117103897Sjake	rdpr	%wstate, %l1
2118103897Sjake	and	%l1, WSTATE_NORMAL_MASK, %l1
2119103897Sjake	wrpr	%l1, 0, %wstate
2120103897Sjake
2121103897Sjake	/*
2122103897Sjake	 * Setup %tstate for return, change the saved cwp to point to the
2123103897Sjake	 * current window instead of the window at the time of the trap.
2124103897Sjake	 */
2125103897Sjake	andn	%l5, TSTATE_CWP_MASK, %l1
2126103897Sjake	rdpr	%cwp, %l2
2127103897Sjake	wrpr	%l1, %l2, %tstate
2128103897Sjake
2129103897Sjake	/*
2130103897Sjake	 * Setup %sp.  Userland processes will crash if this is not setup.
2131103897Sjake	 */
2132103897Sjake	sub	%fp, CCFSZ, %sp
2133103897Sjake
2134103897Sjake	/*
2135103897Sjake	 * Execute the user trap handler.
2136103897Sjake	 */
2137103897Sjake	done					! retire to %tnpc (the handler)
2138103897SjakeEND(tl0_utrap)
2139103897Sjake
2140103897Sjake/*
2141181701Smarius * (Real) User trap entry point
2142103897Sjake *
2143181701Smarius * void tl0_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
2144181701Smarius *     u_int sfsr)
214582906Sjake *
214682906Sjake * The following setup has been performed:
214782906Sjake *	- the windows have been split and the active user window has been saved
214882906Sjake *	  (maybe just to the pcb)
214982906Sjake *	- we are on alternate globals and interrupts are disabled
215082906Sjake *
215189050Sjake * We switch to the kernel stack, build a trapframe, switch to normal
215288644Sjake * globals, enable interrupts and call trap.
215382906Sjake *
215482906Sjake * NOTE: We must be very careful setting up the per-cpu pointer.  We know that
215582906Sjake * it has been pre-set in alternate globals, so we read it from there and setup
215682906Sjake * the normal %g7 *before* enabling interrupts.  This avoids any possibility
215787702Sjhb * of cpu migration and using the wrong pcpup.
215881380Sjake */
215982005SjakeENTRY(tl0_trap)
216082906Sjake	/*
216182906Sjake	 * Force kernel store order.
216282906Sjake	 */
216382906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
216480709Sjake
	/* Save the trap state registers into locals before they can be
	 * overwritten. */
216581380Sjake	rdpr	%tstate, %l0
216688644Sjake	rdpr	%tpc, %l1
216788644Sjake	rdpr	%tnpc, %l2
216888644Sjake	rd	%y, %l3
216988644Sjake	rd	%fprs, %l4
217088644Sjake	rdpr	%wstate, %l5
217188644Sjake
217288644Sjake#if KTR_COMPILE & KTR_TRAP
217388644Sjake	CATR(KTR_TRAP,
217488644Sjake	    "tl0_trap: td=%p type=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
217588644Sjake	    , %g1, %g2, %g3, 7, 8, 9)
217688644Sjake	ldx	[PCPU(CURTHREAD)], %g2
217788644Sjake	stx	%g2, [%g1 + KTR_PARM1]
217888644Sjake	stx	%o0, [%g1 + KTR_PARM2]
217988644Sjake	rdpr	%pil, %g2
218088644Sjake	stx	%g2, [%g1 + KTR_PARM3]
218188644Sjake	stx	%l1, [%g1 + KTR_PARM4]
218288644Sjake	stx	%l2, [%g1 + KTR_PARM5]
218388644Sjake	stx	%i6, [%g1 + KTR_PARM6]
218488644Sjake9:
218588644Sjake#endif
218688644Sjake
	/*
	 * Make the user's windows "other" windows: move %canrestore to
	 * %otherwin and put the user's window state into the other field
	 * of %wstate, so that user windows spill/fill via the "other"
	 * handlers while we run on the kernel stack.
	 */
2187103897Sjake1:	and	%l5, WSTATE_NORMAL_MASK, %l5
2188103897Sjake	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
218988644Sjake	wrpr	%l5, WSTATE_KERNEL, %wstate
219088644Sjake	rdpr	%canrestore, %l6
219188644Sjake	wrpr	%l6, 0, %otherwin
219288644Sjake	wrpr	%g0, 0, %canrestore
219388644Sjake
	/* Switch to the kernel stack; the trapframe sits just below the pcb. */
219488644Sjake	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp
219588644Sjake
	/* Store the trap arguments (type, level, tar, sfar, sfsr). */
2196105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2197105733Sjake	stx	%o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
219888644Sjake	stx	%o3, [%sp + SPOFF + CCFSZ + TF_TAR]
219988644Sjake	stx	%o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
2200105733Sjake	stx	%o5, [%sp + SPOFF + CCFSZ + TF_SFSR]
220188644Sjake
	/* Store the saved trap state. */
220281380Sjake	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
220381380Sjake	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
220481380Sjake	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2205105733Sjake	stx	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
2206105733Sjake	stx	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
2207105733Sjake	stx	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]
220881380Sjake
	/* Save %fsr and %gsr; FP must be enabled in %fprs to access them. */
220988644Sjake	wr	%g0, FPRS_FEF, %fprs
221088644Sjake	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
2211108379Sjake	rd	%gsr, %l6
2212105733Sjake	stx	%l6, [%sp + SPOFF + CCFSZ + TF_GSR]
221388644Sjake	wr	%g0, 0, %fprs
221482906Sjake
	/*
	 * Switch to normal globals, keeping PCB_REG/PCPU_REG in locals so
	 * they can be re-established before interrupts are enabled (see
	 * the NOTE in the header comment above this function).
	 */
221589050Sjake	mov	PCB_REG, %l0
221689050Sjake	mov	PCPU_REG, %l1
221782906Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
221882005Sjake
	/* Save the user's %g6/%g7 before reusing them below. */
221982005Sjake	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
222082005Sjake	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]
222182005Sjake
222289050Sjake	mov	%l0, PCB_REG
222389050Sjake	mov	%l1, PCPU_REG
222488644Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
222584186Sjake
	/* Save the user's outs; they are our ins after the save on entry. */
222684186Sjake	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
222784186Sjake	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
222884186Sjake	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
222984186Sjake	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
223084186Sjake	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
223184186Sjake	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
223284186Sjake	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
223384186Sjake	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
223484186Sjake
	/* Save the remaining user globals. */
2235108377Sjake	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
2236108377Sjake	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
2237108377Sjake	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
2238108377Sjake	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
2239108377Sjake	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
2240108377Sjake
	/*
	 * Call the handler passed in %o2 with the trapframe as its
	 * argument, with %o7 preset so that the handler's return goes
	 * to tl0_ret (a `ret' targets %o7 + 8).
	 */
2241103921Sjake	set	tl0_ret - 8, %o7
2242103921Sjake	jmpl	%o2, %g0
224384186Sjake	 add	%sp, CCFSZ + SPOFF, %o0		! arg0 = trapframe pointer
224484186SjakeEND(tl0_trap)
224584186Sjake
224688644Sjake/*
224791246Sjake * void tl0_intr(u_int level, u_int mask)
224891246Sjake */
224984186SjakeENTRY(tl0_intr)
225084186Sjake	/*
225184186Sjake	 * Force kernel store order.
225284186Sjake	 */
225384186Sjake	wrpr	%g0, PSTATE_ALT, %pstate
225484186Sjake
	/* Save the trap state registers into locals before they can be
	 * overwritten. */
225584186Sjake	rdpr	%tstate, %l0
225688644Sjake	rdpr	%tpc, %l1
225788644Sjake	rdpr	%tnpc, %l2
225888644Sjake	rd	%y, %l3
225988644Sjake	rd	%fprs, %l4
226088644Sjake	rdpr	%wstate, %l5
226188644Sjake
226288644Sjake#if KTR_COMPILE & KTR_INTR
226388644Sjake	CATR(KTR_INTR,
226491246Sjake	    "tl0_intr: td=%p level=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
226588644Sjake	    , %g1, %g2, %g3, 7, 8, 9)
226688644Sjake	ldx	[PCPU(CURTHREAD)], %g2
226788644Sjake	stx	%g2, [%g1 + KTR_PARM1]
226888644Sjake	stx	%o0, [%g1 + KTR_PARM2]
226988644Sjake	rdpr	%pil, %g2
227088644Sjake	stx	%g2, [%g1 + KTR_PARM3]
227188644Sjake	stx	%l1, [%g1 + KTR_PARM4]
227288644Sjake	stx	%l2, [%g1 + KTR_PARM5]
227388644Sjake	stx	%i6, [%g1 + KTR_PARM6]
227488644Sjake9:
227588644Sjake#endif
227688644Sjake
	/* Block interrupts at or below this level and clear its softint bit. */
227791246Sjake	wrpr	%o0, 0, %pil
2278108379Sjake	wr	%o1, 0, %clear_softint
227991246Sjake
	/* Make the user's windows "other" windows; see tl0_trap. */
228088644Sjake	and	%l5, WSTATE_NORMAL_MASK, %l5
228188644Sjake	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
228288644Sjake	wrpr	%l5, WSTATE_KERNEL, %wstate
228388644Sjake	rdpr	%canrestore, %l6
228488644Sjake	wrpr	%l6, 0, %otherwin
228588644Sjake	wrpr	%g0, 0, %canrestore
228688644Sjake
	/* Switch to the kernel stack; the trapframe sits just below the pcb. */
228788644Sjake	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp
228888644Sjake
	/* Store the saved trap state. */
228984186Sjake	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
229084186Sjake	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
229184186Sjake	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2292105733Sjake	stx	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
2293105733Sjake	stx	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
2294105733Sjake	stx	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]
229581380Sjake
	/* Save %fsr and %gsr; FP must be enabled in %fprs to access them. */
229688644Sjake	wr	%g0, FPRS_FEF, %fprs
229788644Sjake	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
2298108379Sjake	rd	%gsr, %l6
2299105733Sjake	stx	%l6, [%sp + SPOFF + CCFSZ + TF_GSR]
230088644Sjake	wr	%g0, 0, %fprs
230184186Sjake
230291246Sjake	mov	%o0, %l3		! keep the level for dispatch/stats
230391246Sjake	mov	T_INTERRUPT, %o1
230489050Sjake
2305105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
2306105733Sjake	stx	%o1, [%sp + SPOFF + CCFSZ + TF_TYPE]
230788644Sjake
	/*
	 * Switch to normal globals, keeping PCB_REG/PCPU_REG in locals
	 * across the switch; see tl0_trap for the reasoning.
	 */
230889050Sjake	mov	PCB_REG, %l0
230989050Sjake	mov	PCPU_REG, %l1
231084186Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
231184186Sjake
	/* Save the user's globals. */
231284186Sjake	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
231384186Sjake	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
231484186Sjake	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
231584186Sjake	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
231684186Sjake	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
231784186Sjake	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
231884186Sjake	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]
231984186Sjake
232089050Sjake	mov	%l0, PCB_REG
232189050Sjake	mov	%l1, PCPU_REG
232288644Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
232384186Sjake
	/* Save the user's outs; they are our ins after the save on entry. */
232484186Sjake	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
232584186Sjake	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
232684186Sjake	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
232784186Sjake	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
232884186Sjake	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
232984186Sjake	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
233084186Sjake	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
233184186Sjake	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
233284186Sjake
	/* Dispatch: call intr_handlers[level] with the trapframe as arg. */
2333157825Smarius	SET(intr_handlers, %l1, %l0)
2334157825Smarius	sllx	%l3, IH_SHIFT, %l1
2335157825Smarius	ldx	[%l0 + %l1], %l1
2336157825Smarius	KASSERT(%l1, "tl0_intr: ih null")
2337157825Smarius	call	%l1
2338157825Smarius	 add	%sp, CCFSZ + SPOFF, %o0
2339157825Smarius
2340117658Sjmg	/* %l3 contains PIL */
	/* intrcnt[pil_countp[level]]++ (pil_countp holds u_short indexes
	 * into the 8-byte intrcnt slots). */
2341117658Sjmg	SET(intrcnt, %l1, %l2)
2342117658Sjmg	prefetcha [%l2] ASI_N, 1
2343117658Sjmg	SET(pil_countp, %l1, %l0)
2344117658Sjmg	sllx	%l3, 1, %l1
2345117658Sjmg	lduh	[%l0 + %l1], %l0
2346117658Sjmg	sllx	%l0, 3, %l0
2347117658Sjmg	add	%l0, %l2, %l0
2348145153Smarius	ldx	[%l0], %l1
2349145153Smarius	inc	%l1
2350145153Smarius	stx	%l1, [%l0]
2351117658Sjmg
	/* cnt.v_intr++ */
2352145153Smarius	lduw	[PCPU(CNT) + V_INTR], %l0
2353145153Smarius	inc	%l0
2354145153Smarius	stw	%l0, [PCPU(CNT) + V_INTR]
235584186Sjake
2356116589Sjake	ba,a	%xcc, tl0_ret
235784186Sjake	 nop
235884186SjakeEND(tl0_intr)
235984186Sjake
2360105733Sjake/*
2361105733Sjake * Initiate return to usermode.
2362105733Sjake *
2363105733Sjake * Called with a trapframe on the stack.  The window that was setup in
2364105733Sjake * tl0_trap may have been used by "fast" trap handlers that pretend to be
2365105733Sjake * leaf functions, so all ins and locals may have been clobbered since
2366105733Sjake * then.
2367105733Sjake *
2368105733Sjake * This code is rather long and complicated.
2369105733Sjake */
237082005SjakeENTRY(tl0_ret)
237193389Sjake	/*
237293389Sjake	 * Check for pending asts atomically with returning.  We must raise
2373182020Smarius	 * the PIL before checking, and if no asts are found the PIL must
237493389Sjake	 * remain raised until the retry is executed, or we risk missing asts
2375182020Smarius	 * caused by interrupts occurring after the test.  If the PIL is
2376182020Smarius	 * lowered, as it is when we call ast, the check must be re-executed.
237793389Sjake	 */
2378103784Sjake	wrpr	%g0, PIL_TICK, %pil
237984186Sjake	ldx	[PCPU(CURTHREAD)], %l0
2380111032Sjulian	lduw	[%l0 + TD_FLAGS], %l1
2381111032Sjulian	set	TDF_ASTPENDING | TDF_NEEDRESCHED, %l2
2382111032Sjulian	and	%l1, %l2, %l1
2383111032Sjulian	brz,a,pt %l1, 1f
238482906Sjake	 nop
2385105733Sjake
2386105733Sjake	/*
2387182020Smarius	 * We have an AST.  Re-enable interrupts and handle it, then restart
2388105733Sjake	 * the return sequence.
2389105733Sjake	 */
239093389Sjake	wrpr	%g0, 0, %pil
239182906Sjake	call	ast
239282906Sjake	 add	%sp, CCFSZ + SPOFF, %o0
2393103784Sjake	ba,a	%xcc, tl0_ret
239493389Sjake	 nop
239582906Sjake
239693389Sjake	/*
239793389Sjake	 * Check for windows that were spilled to the pcb and need to be
239893389Sjake	 * copied out.  This must be the last thing that is done before the
239993389Sjake	 * return to usermode.  If there are still user windows in the cpu
240093389Sjake	 * and we call a nested function after this, which causes them to be
240193389Sjake	 * spilled to the pcb, they will not be copied out and the stack will
240293389Sjake	 * be inconsistent.
240393389Sjake	 */
2404103784Sjake1:	ldx	[PCB_REG + PCB_NSAVED], %l1
2405103784Sjake	brz,a,pt %l1, 2f
2406103784Sjake	 nop
2407103784Sjake	wrpr	%g0, 0, %pil
240893389Sjake	mov	T_SPILL, %o0
2409105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2410103784Sjake	call	trap
2411103784Sjake	 add	%sp, SPOFF + CCFSZ, %o0
2412103784Sjake	ba,a	%xcc, tl0_ret
2413103784Sjake	 nop
241482906Sjake
2415105733Sjake	/*
2416108377Sjake	 * Restore the out and most global registers from the trapframe.
2417108377Sjake	 * The ins will become the outs when we restore below.
2418105733Sjake	 */
2419103784Sjake2:	ldx	[%sp + SPOFF + CCFSZ + TF_O0], %i0
242082906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O1], %i1
242182906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O2], %i2
242282906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O3], %i3
242382906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O4], %i4
242482906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O5], %i5
242582906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O6], %i6
242682906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O7], %i7
242781380Sjake
2428108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
2429108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
2430108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
2431108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
2432108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
2433108377Sjake
2434105733Sjake	/*
2435105733Sjake	 * Load everything we need to restore below before disabling
2436105733Sjake	 * interrupts.
2437105733Sjake	 */
2438105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_FPRS], %l0
2439105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_GSR], %l1
244085243Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
2441105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l3
2442105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l4
2443105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l5
2444105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_WSTATE], %l6
244582906Sjake
2446105733Sjake	/*
2447108377Sjake	 * Disable interrupts to restore the special globals.  They are not
2448108377Sjake	 * saved and restored for all kernel traps, so an interrupt at the
2449108377Sjake	 * wrong time would clobber them.
2450105733Sjake	 */
245189050Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
245289050Sjake
245389050Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6
245489050Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G7], %g7
245589050Sjake
2456105733Sjake	/*
2457105733Sjake	 * Switch to alternate globals.  This frees up some registers we
2458105733Sjake	 * can use after the restore changes our window.
2459105733Sjake	 */
246082906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
246182906Sjake
2462105733Sjake	/*
2463105733Sjake	 * Drop %pil to zero.  It must have been zero at the time of the
2464105733Sjake	 * trap, since we were in usermode, but it was raised above in
2465105733Sjake	 * order to check for asts atomically.  We have interrupts disabled
2466105733Sjake	 * so any interrupts will not be serviced until we complete the
2467105733Sjake	 * return to usermode.
2468105733Sjake	 */
246988644Sjake	wrpr	%g0, 0, %pil
2470105733Sjake
2471105733Sjake	/*
2472105733Sjake	 * Save %fprs in an alternate global so it can be restored after the
2473105733Sjake	 * restore instruction below.  If we restore it before the restore,
2474105733Sjake	 * and the restore traps we may run for a while with floating point
2475105733Sjake	 * enabled in the kernel, which we want to avoid.
2476105733Sjake	 */
2477105733Sjake	mov	%l0, %g1
2478105733Sjake
2479105733Sjake	/*
2480105733Sjake	 * Restore %fsr and %gsr.  These need floating point enabled in %fprs,
2481105733Sjake	 * so we set it temporarily and then clear it.
2482105733Sjake	 */
2483105733Sjake	wr	%g0, FPRS_FEF, %fprs
2484105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_FSR], %fsr
2485108379Sjake	wr	%l1, 0, %gsr
2486105733Sjake	wr	%g0, 0, %fprs
2487105733Sjake
2488105733Sjake	/*
2489105733Sjake	 * Restore program counters.  This could be done after the restore
2490105733Sjake	 * but we're out of alternate globals to store them in...
2491105733Sjake	 */
249288644Sjake	wrpr	%l2, 0, %tnpc
2493105733Sjake	wrpr	%l3, 0, %tpc
249482906Sjake
2495105733Sjake	/*
2496105733Sjake	 * Save %tstate in an alternate global and clear the %cwp field.  %cwp
2497105733Sjake	 * will be affected by the restore below and we need to make sure it
2498105733Sjake	 * points to the current window at that time, not the window that was
2499105733Sjake	 * active at the time of the trap.
2500105733Sjake	 */
2501105733Sjake	andn	%l4, TSTATE_CWP_MASK, %g2
250282906Sjake
2503105733Sjake	/*
2504105733Sjake	 * Restore %y.  Could also be below if we had more alternate globals.
2505105733Sjake	 */
2506105733Sjake	wr	%l5, 0, %y
2507105733Sjake
2508105733Sjake	/*
2509105733Sjake	 * Setup %wstate for return.  We need to restore the user window state
2510105733Sjake	 * which we saved in wstate.other when we trapped.  We also need to
2511105733Sjake	 * set the transition bit so the restore will be handled specially
2512105733Sjake	 * if it traps, use the xor feature of wrpr to do that.
2513105733Sjake	 */
2514105733Sjake	srlx	%l6, WSTATE_OTHER_SHIFT, %g3
251588644Sjake	wrpr	%g3, WSTATE_TRANSITION, %wstate
2516105733Sjake
2517105733Sjake	/*
2518105733Sjake	 * Setup window management registers for return.  If not all user
2519105733Sjake	 * windows were spilled in the kernel %otherwin will be non-zero,
2520105733Sjake	 * so we need to transfer it to %canrestore to correctly restore
2521105733Sjake	 * those windows.  Otherwise everything gets set to zero and the
2522105733Sjake	 * restore below will fill a window directly from the user stack.
2523105733Sjake	 */
252488644Sjake	rdpr	%otherwin, %o0
252588644Sjake	wrpr	%o0, 0, %canrestore
252682906Sjake	wrpr	%g0, 0, %otherwin
252788644Sjake	wrpr	%o0, 0, %cleanwin
252881380Sjake
252982005Sjake	/*
2530105733Sjake	 * Now do the restore.  If this instruction causes a fill trap which
2531105733Sjake	 * fails to fill a window from the user stack, we will resume at
2532105733Sjake	 * tl0_ret_fill_end and call back into the kernel.
253382005Sjake	 */
253482906Sjake	restore
253582906Sjaketl0_ret_fill:
253681380Sjake
2537105733Sjake	/*
2538105733Sjake	 * We made it.  We're back in the window that was active at the time
2539105733Sjake	 * of the trap, and ready to return to usermode.
2540105733Sjake	 */
2541105733Sjake
2542105733Sjake	/*
2543105733Sjake	 * Restore %fprs.  This was saved in an alternate global above.
2544105733Sjake	 */
2545105733Sjake	wr	%g1, 0, %fprs
2546105733Sjake
2547105733Sjake	/*
2548105733Sjake	 * Fixup %tstate so the saved %cwp points to the current window and
2549105733Sjake	 * restore it.
2550105733Sjake	 */
255188644Sjake	rdpr	%cwp, %g4
2552105733Sjake	wrpr	%g2, %g4, %tstate
2553105733Sjake
2554105733Sjake	/*
2555105733Sjake	 * Restore the user window state.  The transition bit was set above
2556105733Sjake	 * for special handling of the restore, this clears it.
2557105733Sjake	 */
255888644Sjake	wrpr	%g3, 0, %wstate
255985243Sjake
256084186Sjake#if KTR_COMPILE & KTR_TRAP
256188644Sjake	CATR(KTR_TRAP, "tl0_ret: td=%#lx pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
256282906Sjake	    , %g2, %g3, %g4, 7, 8, 9)
256383366Sjulian	ldx	[PCPU(CURTHREAD)], %g3
256482906Sjake	stx	%g3, [%g2 + KTR_PARM1]
256585243Sjake	rdpr	%pil, %g3
256685243Sjake	stx	%g3, [%g2 + KTR_PARM2]
256788644Sjake	rdpr	%tpc, %g3
256884186Sjake	stx	%g3, [%g2 + KTR_PARM3]
256988644Sjake	rdpr	%tnpc, %g3
257084186Sjake	stx	%g3, [%g2 + KTR_PARM4]
257184186Sjake	stx	%sp, [%g2 + KTR_PARM5]
257282906Sjake9:
257382906Sjake#endif
257481380Sjake
2575105733Sjake	/*
2576105733Sjake	 * Return to usermode.
2577105733Sjake	 */
257882906Sjake	retry
257982906Sjaketl0_ret_fill_end:
258082005Sjake
258184186Sjake#if KTR_COMPILE & KTR_TRAP
258288785Sjake	CATR(KTR_TRAP, "tl0_ret: fill magic ps=%#lx ws=%#lx sp=%#lx"
258382906Sjake	    , %l0, %l1, %l2, 7, 8, 9)
258488785Sjake	rdpr	%pstate, %l1
258588785Sjake	stx	%l1, [%l0 + KTR_PARM1]
258688785Sjake	stx	%l5, [%l0 + KTR_PARM2]
258788785Sjake	stx	%sp, [%l0 + KTR_PARM3]
258882906Sjake9:
258982906Sjake#endif
259082906Sjake
259182906Sjake	/*
2592105733Sjake	 * The restore above caused a fill trap and the fill handler was
2593105733Sjake	 * unable to fill a window from the user stack.  The special fill
2594105733Sjake	 * handler recognized this and punted, sending us here.  We need
2595105733Sjake	 * to carefully undo any state that was restored before the restore
2596105733Sjake	 * was executed and call trap again.  Trap will copyin a window
2597105733Sjake	 * from the user stack which will fault in the page we need so the
2598105733Sjake	 * restore above will succeed when we try again.  If this fails
2599105733Sjake	 * the process has trashed its stack, so we kill it.
260082906Sjake	 */
2601105733Sjake
2602105733Sjake	/*
2603105733Sjake	 * Restore the kernel window state.  This was saved in %l6 above, and
2604105733Sjake	 * since the restore failed we're back in the same window.
2605105733Sjake	 */
2606105733Sjake	wrpr	%l6, 0, %wstate
2607105733Sjake
2608105733Sjake	/*
2609105733Sjake	 * Restore the normal globals which have predefined values in the
2610105733Sjake	 * kernel.  We clobbered them above restoring the user's globals
2611105733Sjake	 * so this is very important.
2612105733Sjake	 * XXX PSTATE_ALT must already be set.
2613105733Sjake	 */
261488785Sjake	wrpr	%g0, PSTATE_ALT, %pstate
261589050Sjake	mov	PCB_REG, %o0
261689050Sjake	mov	PCPU_REG, %o1
261788785Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
261889050Sjake	mov	%o0, PCB_REG
261989050Sjake	mov	%o1, PCPU_REG
262088644Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
2621105733Sjake
2622105733Sjake	/*
2623105733Sjake	 * Simulate a fill trap and then start the whole return sequence over
2624105733Sjake	 * again.  This is special because it only copies in 1 window, not 2
2625105733Sjake	 * as we would for a normal failed fill.  This may be the first time
2626105733Sjake	 * the process has been run, so there may not be 2 windows worth of
2627105733Sjake	 * stack to copyin.
2628105733Sjake	 */
2629103784Sjake	mov	T_FILL_RET, %o0
2630105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2631103784Sjake	call	trap
2632103784Sjake	 add	%sp, SPOFF + CCFSZ, %o0
2633103784Sjake	ba,a	%xcc, tl0_ret
2634103784Sjake	 nop
263582005SjakeEND(tl0_ret)
263681380Sjake
263780709Sjake/*
263882906Sjake * Kernel trap entry point
263982906Sjake *
264091246Sjake * void tl1_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
2641181701Smarius *      u_int sfsr)
264282906Sjake *
264382906Sjake * This is easy because the stack is already setup and the windows don't need
264482906Sjake * to be split.  We build a trapframe and call trap(), the same as above, but
264582906Sjake * the outs don't need to be saved.
264680709Sjake */
264780709SjakeENTRY(tl1_trap)
	/*
	 * Read out the state of the trap we are handling into locals.  The
	 * stack is already valid for kernel traps, so no window juggling is
	 * needed (see the function header comment above).
	 */
264880709Sjake	rdpr	%tstate, %l0
264980709Sjake	rdpr	%tpc, %l1
265080709Sjake	rdpr	%tnpc, %l2
265191246Sjake	rdpr	%pil, %l3
265291316Sjake	rd	%y, %l4
265391316Sjake	rdpr	%wstate, %l5
265480709Sjake
265584186Sjake#if KTR_COMPILE & KTR_TRAP
265688644Sjake	CATR(KTR_TRAP, "tl1_trap: td=%p type=%#lx pil=%#lx pc=%#lx sp=%#lx"
265788644Sjake	    , %g1, %g2, %g3, 7, 8, 9)
265888644Sjake	ldx	[PCPU(CURTHREAD)], %g2
265988644Sjake	stx	%g2, [%g1 + KTR_PARM1]
266097265Sjake	stx	%o0, [%g1 + KTR_PARM2]
266191246Sjake	stx	%l3, [%g1 + KTR_PARM3]
266288644Sjake	stx	%l1, [%g1 + KTR_PARM4]
266388644Sjake	stx	%i6, [%g1 + KTR_PARM5]
266482906Sjake9:
266582906Sjake#endif
266682906Sjake
	/*
	 * The trap registers have been saved, so drop back to trap level 1
	 * for the duration of the C handler.
	 */
266780709Sjake	wrpr	%g0, 1, %tl
266888644Sjake
	/*
	 * Clear everything but the "other" bits of %wstate and select the
	 * kernel window state.
	 */
266991316Sjake	and	%l5, WSTATE_OTHER_MASK, %l5
267091316Sjake	wrpr	%l5, WSTATE_KERNEL, %wstate
267191246Sjake
	/*
	 * Save the trap arguments in the trap frame.  %o2 is deliberately
	 * not stored; it carries the handler address and is consumed by the
	 * jmpl below.
	 */
2672105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2673105733Sjake	stx	%o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
2674103919Sjake	stx	%o3, [%sp + SPOFF + CCFSZ + TF_TAR]
2675103919Sjake	stx	%o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
2676105733Sjake	stx	%o5, [%sp + SPOFF + CCFSZ + TF_SFSR]
2677103919Sjake
	/*
	 * Save the trap state read out above.
	 */
267888644Sjake	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
267988644Sjake	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
268088644Sjake	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2681105733Sjake	stx	%l3, [%sp + SPOFF + CCFSZ + TF_PIL]
2682105733Sjake	stx	%l4, [%sp + SPOFF + CCFSZ + TF_Y]
268388644Sjake
	/*
	 * Copy the pcb/pcpu pointers out of the current (alternate) globals,
	 * switch to the normal globals to save the previous %g6/%g7, then
	 * re-install the pointers and switch to the full kernel pstate.
	 * (PCB_REG/PCPU_REG are presumably aliases for %g6/%g7 — the dance
	 * only makes sense that way; confirm against machine/asmacros.h.)
	 */
2684103919Sjake	mov	PCB_REG, %l0
2685103919Sjake	mov	PCPU_REG, %l1
268691158Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
268791158Sjake
2688108377Sjake	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
2689108377Sjake	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]
269080709Sjake
2691103919Sjake	mov	%l0, PCB_REG
2692103919Sjake	mov	%l1, PCPU_REG
269391158Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
269491158Sjake
	/*
	 * The ins of this window are the outs of the window that was active
	 * when the trap occurred; save them as the frame's out registers.
	 */
2695103919Sjake	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
2696103919Sjake	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
2697103919Sjake	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
2698103919Sjake	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
2699103919Sjake	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
2700103919Sjake	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
2701103919Sjake	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
2702103919Sjake	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
2703103919Sjake
	/*
	 * Save the remaining globals.
	 */
2704108377Sjake	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
2705108377Sjake	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
2706108377Sjake	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
2707108377Sjake	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
2708108377Sjake	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
2709108377Sjake
	/*
	 * Call the handler whose address was passed in %o2, with the trap
	 * frame pointer as its argument (delay slot).  %o7 is faked so that
	 * the handler's "ret" lands on tl1_ret (tl1_ret - 8 + 8).
	 */
2710103921Sjake	set	tl1_ret - 8, %o7
2711103921Sjake	jmpl	%o2, %g0
271280709Sjake	 add	%sp, CCFSZ + SPOFF, %o0
2713103921SjakeEND(tl1_trap)
271480709Sjake
/*
 * Return path for kernel traps dispatched by tl1_trap.  Unwinds the trap
 * frame built there and resumes the trapped kernel code with retry.
 */
2715103921SjakeENTRY(tl1_ret)
	/*
	 * Reload the outs of the interrupted window (the ins of this one)
	 * from the trap frame.
	 */
2716103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O0], %i0
2717103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O1], %i1
2718103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O2], %i2
2719103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O3], %i3
2720103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O4], %i4
2721103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O5], %i5
2722103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O6], %i6
2723103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O7], %i7
2724103919Sjake
	/*
	 * Reload the globals saved by tl1_trap.
	 */
2725108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
2726108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
2727108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
2728108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
2729108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
2730108377Sjake
	/*
	 * Reload the saved trap state.
	 */
273188644Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l0
273288644Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l1
273388644Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
2734105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_PIL], %l3
2735105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l4
273688644Sjake
	/*
	 * Restore %g6 and %g7 from the frame only when the trapped PC lies
	 * within [VM_MIN_PROM_ADDRESS, VM_MAX_PROM_ADDRESS], i.e. we are
	 * returning into firmware code; otherwise skip to 1f and leave the
	 * kernel's normal %g6/%g7 alone.
	 */
2737108377Sjake	set	VM_MIN_PROM_ADDRESS, %l5
2738108377Sjake	cmp	%l1, %l5
2739108377Sjake	bl,a,pt	%xcc, 1f
2740108377Sjake	 nop
2741182774Smarius	set	VM_MAX_PROM_ADDRESS, %l5
2742182774Smarius	cmp	%l1, %l5
2743182774Smarius	bg,a,pt	%xcc, 1f
2744182774Smarius	 nop
274580709Sjake
2746108377Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
274780709Sjake
2748108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6
2749108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G7], %g7
2750108377Sjake
	/*
	 * Switch to the alternate globals so the values being staged in
	 * %g1-%g4 below survive the restore.
	 */
2751108377Sjake1:	wrpr	%g0, PSTATE_ALT, %pstate
2752108377Sjake
	/*
	 * Stage %tstate (with the stale CWP field stripped), %tpc and %tnpc
	 * in globals; the current %cwp is merged back in after the restore.
	 */
275388644Sjake	andn	%l0, TSTATE_CWP_MASK, %g1
275486519Sjake	mov	%l1, %g2
275586519Sjake	mov	%l2, %g3
275681380Sjake
	/* Restore the saved interrupt level and %y. */
275788644Sjake	wrpr	%l3, 0, %pil
275891316Sjake	wr	%l4, 0, %y
275986519Sjake
	/* Back to the window that was active at trap time. */
276086519Sjake	restore
276186519Sjake
	/*
	 * Raise the trap level again so the %tstate/%tpc/%tnpc writes and
	 * the retry below operate on the trap state being returned from.
	 */
276280709Sjake	wrpr	%g0, 2, %tl
276380709Sjake
276488644Sjake	rdpr	%cwp, %g4
276588644Sjake	wrpr	%g1, %g4, %tstate
276686519Sjake	wrpr	%g2, 0, %tpc
276786519Sjake	wrpr	%g3, 0, %tnpc
276886519Sjake
276984186Sjake#if KTR_COMPILE & KTR_TRAP
2770103921Sjake	CATR(KTR_TRAP, "tl1_ret: td=%#lx pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
277186519Sjake	    , %g2, %g3, %g4, 7, 8, 9)
277286519Sjake	ldx	[PCPU(CURTHREAD)], %g3
277386519Sjake	stx	%g3, [%g2 + KTR_PARM1]
277486519Sjake	rdpr	%pil, %g3
277586519Sjake	stx	%g3, [%g2 + KTR_PARM2]
277686519Sjake	rdpr	%tstate, %g3
277786519Sjake	stx	%g3, [%g2 + KTR_PARM3]
277886519Sjake	rdpr	%tpc, %g3
277986519Sjake	stx	%g3, [%g2 + KTR_PARM4]
278086519Sjake	stx	%sp, [%g2 + KTR_PARM5]
278182906Sjake9:
278282906Sjake#endif
278382906Sjake
	/* Resume the trapped instruction. */
278480709Sjake	retry
2785103921SjakeEND(tl1_ret)
278680709Sjake
278791246Sjake/*
278891246Sjake * void tl1_intr(u_int level, u_int mask)
278991246Sjake */
279084186SjakeENTRY(tl1_intr)
	/*
	 * Read out the state of the interrupted kernel code into locals,
	 * same as tl1_trap.
	 */
279184186Sjake	rdpr	%tstate, %l0
279284186Sjake	rdpr	%tpc, %l1
279384186Sjake	rdpr	%tnpc, %l2
279491246Sjake	rdpr	%pil, %l3
279591316Sjake	rd	%y, %l4
279691316Sjake	rdpr	%wstate, %l5
279784186Sjake
279884186Sjake#if KTR_COMPILE & KTR_INTR
279989050Sjake	CATR(KTR_INTR,
2800145153Smarius	    "tl1_intr: td=%p level=%#x pil=%#lx pc=%#lx sp=%#lx"
280188644Sjake	    , %g1, %g2, %g3, 7, 8, 9)
280288644Sjake	ldx	[PCPU(CURTHREAD)], %g2
280388644Sjake	stx	%g2, [%g1 + KTR_PARM1]
280491246Sjake	stx	%o0, [%g1 + KTR_PARM2]
280591246Sjake	stx	%l3, [%g1 + KTR_PARM3]
280691246Sjake	stx	%l1, [%g1 + KTR_PARM4]
280791246Sjake	stx	%i6, [%g1 + KTR_PARM5]
280884186Sjake9:
280984186Sjake#endif
281084186Sjake
	/*
	 * Raise %pil to the interrupt's level and clear the corresponding
	 * soft interrupt bit (mask passed in %o1).
	 */
281191246Sjake	wrpr	%o0, 0, %pil
2812108379Sjake	wr	%o1, 0, %clear_softint
281391246Sjake
	/* Trap registers saved; drop back to trap level 1. */
281484186Sjake	wrpr	%g0, 1, %tl
281588644Sjake
	/* Select the kernel window state, keeping only the "other" bits. */
281691316Sjake	and	%l5, WSTATE_OTHER_MASK, %l5
281791316Sjake	wrpr	%l5, WSTATE_KERNEL, %wstate
281891246Sjake
	/* Save the trap state in the trap frame. */
281988644Sjake	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
282088644Sjake	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
282188644Sjake	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2822105733Sjake	stx	%l3, [%sp + SPOFF + CCFSZ + TF_PIL]
2823105733Sjake	stx	%l4, [%sp + SPOFF + CCFSZ + TF_Y]
282488644Sjake
	/*
	 * Keep the interrupt level in %l7; it survives the handler call and
	 * is used to index the handler and counter tables below.
	 */
282591246Sjake	mov	%o0, %l7
282691246Sjake	mov	T_INTERRUPT | T_KERNEL, %o1
282789050Sjake
282891246Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
2829105733Sjake	stx	%o1, [%sp + SPOFF + CCFSZ + TF_TYPE]
283088644Sjake
	/* Only %sp and the return address need saving here. */
283188644Sjake	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
283288644Sjake	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
283388644Sjake
	/*
	 * Save the globals, preserving the pcb/pcpu pointers across the
	 * switch through the normal globals (cf. tl1_trap).
	 */
283491158Sjake	mov	PCB_REG, %l4
283591158Sjake	mov	PCPU_REG, %l5
283691158Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
283791158Sjake
283884186Sjake	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
283984186Sjake	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
284084186Sjake	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
284184186Sjake	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
284284186Sjake	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
284384186Sjake
284491158Sjake	mov	%l4, PCB_REG
284591158Sjake	mov	%l5, PCPU_REG
284691158Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
284791158Sjake
	/*
	 * Look up and call the handler for this level from intr_handlers
	 * (entries are 1 << IH_SHIFT bytes apart), passing the trap frame
	 * pointer in the delay slot.  A null entry is a bug.
	 */
2848157825Smarius	SET(intr_handlers, %l5, %l4)
2849157825Smarius	sllx	%l7, IH_SHIFT, %l5
2850157825Smarius	ldx	[%l4 + %l5], %l5
2851157825Smarius	KASSERT(%l5, "tl1_intr: ih null")
2852157825Smarius	call	%l5
2853157825Smarius	 add	%sp, CCFSZ + SPOFF, %o0
2854157825Smarius
2855145153Smarius	/* %l7 contains PIL */
	/*
	 * Bump the interrupt counter for this PIL: pil_countp[] (an array
	 * of 16-bit indices, hence the shift by 1) selects the 8-byte slot
	 * in intrcnt[] to increment.
	 */
2856117658Sjmg	SET(intrcnt, %l5, %l4)
2857117658Sjmg	prefetcha [%l4] ASI_N, 1
2858117658Sjmg	SET(pil_countp, %l5, %l6)
2859117658Sjmg	sllx	%l7, 1, %l5
2860117658Sjmg	lduh	[%l5 + %l6], %l5
2861117658Sjmg	sllx	%l5, 3, %l5
2862117658Sjmg	add	%l5, %l4, %l4
2863145153Smarius	ldx	[%l4], %l5
2864145153Smarius	inc	%l5
2865145153Smarius	stx	%l5, [%l4]
2866117658Sjmg
	/* Bump the per-CPU interrupt statistic. */
2867145153Smarius	lduw	[PCPU(CNT) + V_INTR], %l4
2868145153Smarius	inc	%l4
2869145153Smarius	stw	%l4, [PCPU(CNT) + V_INTR]
287088644Sjake
	/* %l4 was clobbered above; reload the saved %y. */
2871105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l4
287291316Sjake
	/* Reload the globals. */
287384186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
287484186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
287584186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
287684186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
287784186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
287884186Sjake
	/*
	 * Switch to the alternate globals and stage the return state, same
	 * as tl1_ret: strip the stale CWP from %tstate, restore %pil/%y,
	 * restore the window, then raise the trap level and retry.
	 */
287984186Sjake	wrpr	%g0, PSTATE_ALT, %pstate
288084186Sjake
288188644Sjake	andn	%l0, TSTATE_CWP_MASK, %g1
288286519Sjake	mov	%l1, %g2
288386519Sjake	mov	%l2, %g3
288488644Sjake	wrpr	%l3, 0, %pil
288591316Sjake	wr	%l4, 0, %y
288684186Sjake
288786519Sjake	restore
288886519Sjake
288984186Sjake	wrpr	%g0, 2, %tl
289084186Sjake
289188644Sjake	rdpr	%cwp, %g4
289288644Sjake	wrpr	%g1, %g4, %tstate
289386519Sjake	wrpr	%g2, 0, %tpc
289486519Sjake	wrpr	%g3, 0, %tnpc
289586519Sjake
289688644Sjake#if KTR_COMPILE & KTR_INTR
2897145153Smarius	CATR(KTR_INTR, "tl1_intr: td=%#x pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
289886519Sjake	    , %g2, %g3, %g4, 7, 8, 9)
289986519Sjake	ldx	[PCPU(CURTHREAD)], %g3
290086519Sjake	stx	%g3, [%g2 + KTR_PARM1]
290186519Sjake	rdpr	%pil, %g3
290286519Sjake	stx	%g3, [%g2 + KTR_PARM2]
290386519Sjake	rdpr	%tstate, %g3
290486519Sjake	stx	%g3, [%g2 + KTR_PARM3]
290586519Sjake	rdpr	%tpc, %g3
290686519Sjake	stx	%g3, [%g2 + KTR_PARM4]
290786519Sjake	stx	%sp, [%g2 + KTR_PARM5]
290884186Sjake9:
290984186Sjake#endif
291084186Sjake
	/* Resume the interrupted instruction. */
291184186Sjake	retry
291284186SjakeEND(tl1_intr)
291384186Sjake
/*
 * Marker symbol for the end of the trap handling text.  NOTE(review):
 * presumably paired with a tl_text_begin marker elsewhere so the trap code
 * can be located as a range — confirm against the users of this symbol.
 */
2914155839Smarius	.globl	tl_text_end
2915155839Smariustl_text_end:
2916155839Smarius	nop
2917155839Smarius
291882906Sjake/*
291982906Sjake * Freshly forked processes come here when switched to for the first time.
292082906Sjake * The arguments to fork_exit() have been setup in the locals, we must move
292182906Sjake * them to the outs.
292282906Sjake */
292380709SjakeENTRY(fork_trampoline)
292484186Sjake#if KTR_COMPILE & KTR_PROC
292584186Sjake	CATR(KTR_PROC, "fork_trampoline: td=%p (%s) cwp=%#lx"
292682906Sjake	    , %g1, %g2, %g3, 7, 8, 9)
292783366Sjulian	ldx	[PCPU(CURTHREAD)], %g2
292882906Sjake	stx	%g2, [%g1 + KTR_PARM1]
292984186Sjake	ldx	[%g2 + TD_PROC], %g2
293082906Sjake	add	%g2, P_COMM, %g2
293182906Sjake	stx	%g2, [%g1 + KTR_PARM2]
293282906Sjake	rdpr	%cwp, %g2
293382906Sjake	stx	%g2, [%g1 + KTR_PARM3]
293482906Sjake9:
293582906Sjake#endif
	/*
	 * Move the fork_exit() arguments from the locals (where they were
	 * staged when this thread was set up — see the function header
	 * comment above) into the outs, the third via the delay slot.
	 */
293680709Sjake	mov	%l0, %o0
293780709Sjake	mov	%l1, %o1
293880709Sjake	call	fork_exit
293988644Sjake	 mov	%l2, %o2
	/*
	 * Return to usermode through the normal trap return path.
	 */
2940116589Sjake	ba,a	%xcc, tl0_ret
294184186Sjake	 nop
294280709SjakeEND(fork_trampoline)
2943