exception.S revision 224187
180709Sjake/*-
281180Sjake * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
381180Sjake *
481180Sjake * Redistribution and use in source and binary forms, with or without
581180Sjake * modification, are permitted provided that the following conditions
681180Sjake * are met:
781180Sjake * 1. Redistributions of source code must retain the above copyright
881180Sjake *    notice, this list of conditions and the following disclaimer.
981180Sjake * 2. Redistributions in binary form must reproduce the above copyright
1081180Sjake *    notice, this list of conditions and the following disclaimer in the
1181180Sjake *    documentation and/or other materials provided with the distribution.
1281180Sjake * 3. Berkeley Software Design Inc's name may not be used to endorse or
1381180Sjake *    promote products derived from this software without specific prior
1481180Sjake *    written permission.
1581180Sjake *
1681180Sjake * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
1781180Sjake * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1881180Sjake * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1981180Sjake * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
2081180Sjake * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
2181180Sjake * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2281180Sjake * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2381180Sjake * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2481180Sjake * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2581180Sjake * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2681180Sjake * SUCH DAMAGE.
2781180Sjake *
28114085Sobrien *	BSDI $Id: locore.s,v 1.36.2.15 1999/08/23 22:34:41 cp Exp $
2981180Sjake */
3081180Sjake/*-
3180709Sjake * Copyright (c) 2001 Jake Burkholder.
3280709Sjake * All rights reserved.
3380709Sjake *
3480709Sjake * Redistribution and use in source and binary forms, with or without
3580709Sjake * modification, are permitted provided that the following conditions
3680709Sjake * are met:
3780709Sjake * 1. Redistributions of source code must retain the above copyright
3880709Sjake *    notice, this list of conditions and the following disclaimer.
3980709Sjake * 2. Redistributions in binary form must reproduce the above copyright
4080709Sjake *    notice, this list of conditions and the following disclaimer in the
4180709Sjake *    documentation and/or other materials provided with the distribution.
4280709Sjake *
4381337Sobrien * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
4480709Sjake * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
4580709Sjake * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
4681337Sobrien * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
4780709Sjake * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
4880709Sjake * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
4980709Sjake * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
5080709Sjake * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
5180709Sjake * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
5280709Sjake * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
5380709Sjake * SUCH DAMAGE.
5480709Sjake */
5580709Sjake
56114188Sjake#include <machine/asm.h>
57114188Sjake__FBSDID("$FreeBSD: head/sys/sparc64/sparc64/exception.S 224187 2011-07-18 15:19:40Z attilio $");
58114188Sjake
59106050Sjake#include "opt_compat.h"
6080709Sjake#include "opt_ddb.h"
6180709Sjake
6280709Sjake#include <machine/asi.h>
6380709Sjake#include <machine/asmacros.h>
64166105Smarius#include <machine/frame.h>
65166105Smarius#include <machine/fsr.h>
66166105Smarius#include <machine/intr_machdep.h>
6782906Sjake#include <machine/ktr.h>
68166105Smarius#include <machine/pcb.h>
6982906Sjake#include <machine/pstate.h>
7080709Sjake#include <machine/trap.h>
71166105Smarius#include <machine/tsb.h>
7282906Sjake#include <machine/tstate.h>
73166105Smarius#include <machine/utrap.h>
7482906Sjake#include <machine/wstate.h>
7580709Sjake
7680709Sjake#include "assym.s"
7780709Sjake
/*
 * Dummy TSB parameters.  NOTE(review): these appear to be placeholders
 * that are overridden elsewhere (r216803) -- confirm against the pmap
 * bootstrap code before relying on the zero values.
 */
78216803Smarius#define	TSB_ASI			0x0
79216803Smarius#define	TSB_KERNEL		0x0
80216803Smarius#define	TSB_KERNEL_MASK		0x0
81216803Smarius#define	TSB_KERNEL_PHYS		0x0
82216803Smarius#define	TSB_KERNEL_PHYS_END	0x0
83216803Smarius#define	TSB_QUAD_LDD		0x0
84101653Sjake
8588644Sjake	.register %g2,#ignore
8688644Sjake	.register %g3,#ignore
8788644Sjake	.register %g6,#ignore
8888644Sjake	.register %g7,#ignore
8988644Sjake
9082005Sjake/*
91216803Smarius * Atomically set a bit in a TTE.
 *
 * r1  - pointer to the TTE; advanced by TTE_DATA to address the data word
 * r2  - on exit, holds the updated TTE data
 * r3  - scratch for the compare-and-swap loop
 * bit - the TD_* bit(s) to set
 * a/asi - address-size modifier and ASI used by the LD/CAS wrappers
 *
 * Loops on CAS until the bit is set without losing a concurrent update.
9288644Sjake */
93216803Smarius#define	TTE_SET_BIT(r1, r2, r3, bit, a, asi) \
9488644Sjake	add	r1, TTE_DATA, r1 ; \
95216803Smarius	LD(x, a) [r1] asi, r2 ; \
9688644Sjake9:	or	r2, bit, r3 ; \
97216803Smarius	CAS(x, a) [r1] asi, r2, r3 ; \
9888644Sjake	cmp	r2, r3 ; \
9988644Sjake	bne,pn	%xcc, 9b ; \
10088644Sjake	 mov	r3, r2
10188644Sjake
102216803Smarius#define	TTE_SET_REF(r1, r2, r3, a, asi)	TTE_SET_BIT(r1, r2, r3, TD_REF, a, asi)
103216803Smarius#define	TTE_SET_W(r1, r2, r3, a, asi)	TTE_SET_BIT(r1, r2, r3, TD_W, a, asi)
10488644Sjake
10588644Sjake/*
10682906Sjake * Macros for spilling and filling live windows.
10782906Sjake *
10882906Sjake * NOTE: These macros use exactly 16 instructions, and it is assumed that the
10982906Sjake * handler will not use more than 24 instructions total, to leave room for
11082906Sjake * resume vectors which occupy the last 8 instructions.
11182005Sjake */
11280709Sjake
/*
 * Store all 16 local and in registers to 16 consecutive slots at "base",
 * using the given store instruction, slot size and ASI.
 */
11382906Sjake#define	SPILL(storer, base, size, asi) \
11482906Sjake	storer	%l0, [base + (0 * size)] asi ; \
11582906Sjake	storer	%l1, [base + (1 * size)] asi ; \
11682906Sjake	storer	%l2, [base + (2 * size)] asi ; \
11782906Sjake	storer	%l3, [base + (3 * size)] asi ; \
11882906Sjake	storer	%l4, [base + (4 * size)] asi ; \
11982906Sjake	storer	%l5, [base + (5 * size)] asi ; \
12082906Sjake	storer	%l6, [base + (6 * size)] asi ; \
12182906Sjake	storer	%l7, [base + (7 * size)] asi ; \
12282906Sjake	storer	%i0, [base + (8 * size)] asi ; \
12382906Sjake	storer	%i1, [base + (9 * size)] asi ; \
12482906Sjake	storer	%i2, [base + (10 * size)] asi ; \
12582906Sjake	storer	%i3, [base + (11 * size)] asi ; \
12682906Sjake	storer	%i4, [base + (12 * size)] asi ; \
12782906Sjake	storer	%i5, [base + (13 * size)] asi ; \
12882906Sjake	storer	%i6, [base + (14 * size)] asi ; \
12982906Sjake	storer	%i7, [base + (15 * size)] asi
13080709Sjake
/*
 * Load all 16 local and in registers from 16 consecutive slots at "base";
 * the inverse of SPILL.
 */
13182906Sjake#define	FILL(loader, base, size, asi) \
13282906Sjake	loader	[base + (0 * size)] asi, %l0 ; \
13382906Sjake	loader	[base + (1 * size)] asi, %l1 ; \
13482906Sjake	loader	[base + (2 * size)] asi, %l2 ; \
13582906Sjake	loader	[base + (3 * size)] asi, %l3 ; \
13682906Sjake	loader	[base + (4 * size)] asi, %l4 ; \
13782906Sjake	loader	[base + (5 * size)] asi, %l5 ; \
13882906Sjake	loader	[base + (6 * size)] asi, %l6 ; \
13982906Sjake	loader	[base + (7 * size)] asi, %l7 ; \
14082906Sjake	loader	[base + (8 * size)] asi, %i0 ; \
14182906Sjake	loader	[base + (9 * size)] asi, %i1 ; \
14282906Sjake	loader	[base + (10 * size)] asi, %i2 ; \
14382906Sjake	loader	[base + (11 * size)] asi, %i3 ; \
14482906Sjake	loader	[base + (12 * size)] asi, %i4 ; \
14582906Sjake	loader	[base + (13 * size)] asi, %i5 ; \
14682906Sjake	loader	[base + (14 * size)] asi, %i6 ; \
14782906Sjake	loader	[base + (15 * size)] asi, %i7
14882005Sjake
/*
 * No-op register move; workaround for CPU erratum #50.  NOTE(review):
 * the erratum details are not visible in this file -- see the CPU
 * errata documentation.
 */
14982906Sjake#define	ERRATUM50(reg)	mov reg, reg
15082906Sjake
15188781Sjake#define	KSTACK_SLOP	1024
15288781Sjake
15389048Sjake/*
154181701Smarius * Sanity check the kernel stack and bail out if it's wrong.
15589048Sjake * XXX: doesn't handle being on the panic stack.
15689048Sjake */
/*
 * Verifies that %sp (biased by SPOFF) is pointer-aligned and lies within
 * the current thread's kernel stack, leaving KSTACK_SLOP bytes of head
 * room.  %g1/%g2 are preserved via a 16-byte frame on the alternate
 * stack (ASP_REG); on failure the frame is reclaimed in the annulled
 * delay slot and control branches to tl1_kstack_fault.
 */
15788781Sjake#define	KSTACK_CHECK \
15888781Sjake	dec	16, ASP_REG ; \
15988781Sjake	stx	%g1, [ASP_REG + 0] ; \
16088781Sjake	stx	%g2, [ASP_REG + 8] ; \
16188781Sjake	add	%sp, SPOFF, %g1 ; \
16288781Sjake	andcc	%g1, (1 << PTR_SHIFT) - 1, %g0 ; \
16388781Sjake	bnz,a	%xcc, tl1_kstack_fault ; \
16488781Sjake	 inc	16, ASP_REG ; \
16588781Sjake	ldx	[PCPU(CURTHREAD)], %g2 ; \
16688781Sjake	ldx	[%g2 + TD_KSTACK], %g2 ; \
16788781Sjake	add	%g2, KSTACK_SLOP, %g2 ; \
16888781Sjake	subcc	%g1, %g2, %g1 ; \
16988781Sjake	ble,a	%xcc, tl1_kstack_fault ; \
17088781Sjake	 inc	16, ASP_REG ; \
17188781Sjake	set	KSTACK_PAGES * PAGE_SIZE, %g2 ; \
17288781Sjake	cmp	%g1, %g2 ; \
17388781Sjake	bgt,a	%xcc, tl1_kstack_fault ; \
17488781Sjake	 inc	16, ASP_REG ; \
17588781Sjake	ldx	[ASP_REG + 8], %g2 ; \
17688781Sjake	ldx	[ASP_REG + 0], %g1 ; \
17788781Sjake	inc	16, ASP_REG
17888781Sjake
/*
 * Marker symbol for the start of the trap handler text.  NOTE(review):
 * presumably paired with a tl_text_end marker later in the file --
 * confirm in the part of the file not shown here.
 */
179155839Smarius	.globl	tl_text_begin
180155839Smariustl_text_begin:
181155839Smarius	nop
182155839Smarius
/*
 * Handle a kernel stack fault detected by KSTACK_CHECK.  Pop trap levels
 * down to TL 2 (logging the trap state of each level when KTR_TRAP
 * tracing is compiled in), then reset the register window state, switch
 * to the alternate stack, and enter tl1_trap() with T_KSTACK_FAULT.
 */
18388781SjakeENTRY(tl1_kstack_fault)
18488781Sjake	rdpr	%tl, %g1
18597263Sjake1:	cmp	%g1, 2
18697263Sjake	be,a	2f
18788781Sjake	 nop

18888781Sjake
18988781Sjake#if KTR_COMPILE & KTR_TRAP
19088781Sjake	CATR(KTR_TRAP, "tl1_kstack_fault: tl=%#lx tpc=%#lx tnpc=%#lx"
19197263Sjake	    , %g2, %g3, %g4, 7, 8, 9)
19297263Sjake	rdpr	%tl, %g3
19397263Sjake	stx	%g3, [%g2 + KTR_PARM1]
19497263Sjake	rdpr	%tpc, %g3
	/*
	 * Log %tpc and %tnpc into their own parameter slots; previously
	 * all three values were stored into KTR_PARM1, clobbering each
	 * other (compare the KTR block below, which uses PARM1..PARM6).
	 */
19597263Sjake	stx	%g3, [%g2 + KTR_PARM2]
19697263Sjake	rdpr	%tnpc, %g3
19797263Sjake	stx	%g3, [%g2 + KTR_PARM3]
19888781Sjake9:
19988781Sjake#endif
20088781Sjake
	/* Drop one trap level and re-check. */
20197263Sjake	sub	%g1, 1, %g1
20297263Sjake	wrpr	%g1, 0, %tl
20397263Sjake	ba,a	%xcc, 1b
20497263Sjake	 nop
20597263Sjake
20688781Sjake2:
20788781Sjake#if KTR_COMPILE & KTR_TRAP
20888781Sjake	CATR(KTR_TRAP,
20988781Sjake	    "tl1_kstack_fault: sp=%#lx ks=%#lx cr=%#lx cs=%#lx ow=%#lx ws=%#lx"
21088781Sjake	    , %g1, %g2, %g3, 7, 8, 9)
21188781Sjake	add	%sp, SPOFF, %g2
21288781Sjake	stx	%g2, [%g1 + KTR_PARM1]
21388781Sjake	ldx	[PCPU(CURTHREAD)], %g2
21488781Sjake	ldx	[%g2 + TD_KSTACK], %g2
21588781Sjake	stx	%g2, [%g1 + KTR_PARM2]
21688781Sjake	rdpr	%canrestore, %g2
21788781Sjake	stx	%g2, [%g1 + KTR_PARM3]
21888781Sjake	rdpr	%cansave, %g2
21988781Sjake	stx	%g2, [%g1 + KTR_PARM4]
22088781Sjake	rdpr	%otherwin, %g2
22188781Sjake	stx	%g2, [%g1 + KTR_PARM5]
22288781Sjake	rdpr	%wstate, %g2
22388781Sjake	stx	%g2, [%g1 + KTR_PARM6]
22488781Sjake9:
22588781Sjake#endif
22688781Sjake
	/* Reset the window state to a known-good kernel configuration. */
22788781Sjake	wrpr	%g0, 0, %canrestore
22888781Sjake	wrpr	%g0, 6, %cansave
22988781Sjake	wrpr	%g0, 0, %otherwin
23088781Sjake	wrpr	%g0, WSTATE_KERNEL, %wstate
23188781Sjake
	/* Run on the alternate stack; the kernel stack is not trustworthy. */
23289048Sjake	sub	ASP_REG, SPOFF + CCFSZ, %sp
23388781Sjake	clr	%fp
23488781Sjake
235103921Sjake	set	trap, %o2
236116589Sjake	ba	%xcc, tl1_trap
23788781Sjake	 mov	T_KSTACK_FAULT | T_KERNEL, %o0
23888781SjakeEND(tl1_kstack_fault)
23988781Sjake
24082906Sjake/*
24182906Sjake * Magic to resume from a spill or fill trap.  If we get an alignment or an
242182020Smarius * MMU fault during a spill or a fill, this macro will detect the fault and
24388644Sjake * resume at a set instruction offset in the trap handler.
24482906Sjake *
24588644Sjake * To check if the previous trap was a spill/fill we convert the trapped pc
24688644Sjake * to a trap type and verify that it is in the range of spill/fill vectors.
24782906Sjake * The spill/fill vectors are types 0x80-0xff and 0x280-0x2ff, masking off the
24882906Sjake * tl bit allows us to detect both ranges with one test.
24982906Sjake *
25082906Sjake * This is:
25188644Sjake *	0x80 <= (((%tpc - %tba) >> 5) & ~0x200) < 0x100
25282906Sjake *
25382906Sjake * To calculate the new pc we take advantage of the xor feature of wrpr.
25482906Sjake * Forcing all the low bits of the trapped pc on we can produce any offset
25582906Sjake * into the spill/fill vector.  The size of a spill/fill trap vector is 0x80.
25682906Sjake *
25782906Sjake *	0x7f ^ 0x1f == 0x60
25882906Sjake *	0x1f == (0x80 - 0x60) - 1
25982906Sjake *
26086519Sjake * Which are the offset and xor value used to resume from alignment faults.
26182906Sjake */
26282906Sjake
26382906Sjake/*
26488644Sjake * Determine if we have trapped inside of a spill/fill vector, and if so resume
26588644Sjake * at a fixed instruction offset in the trap vector.  Must be called on
26688644Sjake * alternate globals.
 *
 * stxa_g0_sfsr - instruction sequence run before "done" (RSF_CLR_SFSR to
 *                clear the SFSR, or EMPTY for none)
 * xor          - value xor'ed into the trapped pc (with its low 7 bits
 *                forced on) to form the resume %tnpc; see RSF_XOR()
 *
 * %g1/%g2 are preserved on the alternate stack.  If the trap was not in
 * a spill/fill vector, falls through to the code following the macro.
26782906Sjake */
26888644Sjake#define	RESUME_SPILLFILL_MAGIC(stxa_g0_sfsr, xor) \
26988644Sjake	dec	16, ASP_REG ; \
27088644Sjake	stx	%g1, [ASP_REG + 0] ; \
27188644Sjake	stx	%g2, [ASP_REG + 8] ; \
27288644Sjake	rdpr	%tpc, %g1 ; \
27388644Sjake	ERRATUM50(%g1) ; \
27488644Sjake	rdpr	%tba, %g2 ; \
27588644Sjake	sub	%g1, %g2, %g2 ; \
27688644Sjake	srlx	%g2, 5, %g2 ; \
27788644Sjake	andn	%g2, 0x200, %g2 ; \
27888644Sjake	cmp	%g2, 0x80 ; \
27988644Sjake	blu,pt	%xcc, 9f ; \
28088644Sjake	 cmp	%g2, 0x100 ; \
28188644Sjake	bgeu,pt	%xcc, 9f ; \
28288644Sjake	 or	%g1, 0x7f, %g1 ; \
28388644Sjake	wrpr	%g1, xor, %tnpc ; \
28488644Sjake	stxa_g0_sfsr ; \
28588644Sjake	ldx	[ASP_REG + 8], %g2 ; \
28688644Sjake	ldx	[ASP_REG + 0], %g1 ; \
28788644Sjake	inc	16, ASP_REG ; \
28888644Sjake	done ; \
28988644Sjake9:	ldx	[ASP_REG + 8], %g2 ; \
29088644Sjake	ldx	[ASP_REG + 0], %g1 ; \
29188644Sjake	inc	16, ASP_REG
29282906Sjake
29388644Sjake/*
294182020Smarius * For certain faults we need to clear the SFSR MMU register before returning.
29588644Sjake */
29688644Sjake#define	RSF_CLR_SFSR \
29788644Sjake	wr	%g0, ASI_DMMU, %asi ; \
29888644Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
29988644Sjake
30082906Sjake#define	RSF_XOR(off)	((0x80 - off) - 1)
30182906Sjake
30282906Sjake/*
30382906Sjake * Instruction offsets in spill and fill trap handlers for handling certain
30482906Sjake * nested traps, and corresponding xor constants for wrpr.
30582906Sjake */
30686519Sjake#define	RSF_OFF_ALIGN	0x60
30786519Sjake#define	RSF_OFF_MMU	0x70
30882906Sjake
30988644Sjake#define	RESUME_SPILLFILL_ALIGN \
31088644Sjake	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_ALIGN))
31188644Sjake#define	RESUME_SPILLFILL_MMU \
31288644Sjake	RESUME_SPILLFILL_MAGIC(EMPTY, RSF_XOR(RSF_OFF_MMU))
31388644Sjake#define	RESUME_SPILLFILL_MMU_CLR_SFSR \
31488644Sjake	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_MMU))
31582906Sjake
31682906Sjake/*
31782906Sjake * Constant to add to %tnpc when taking a fill trap just before returning to
31888644Sjake * user mode.
31982906Sjake */
32082906Sjake#define	RSF_FILL_INC	tl0_ret_fill_end - tl0_ret_fill
32182906Sjake
32282906Sjake/*
32382906Sjake * Generate a T_SPILL or T_FILL trap if the window operation fails.
32482906Sjake */
32582906Sjake#define	RSF_TRAP(type) \
326116589Sjake	ba	%xcc, tl0_sftrap ; \
32782906Sjake	 mov	type, %g2 ; \
32882906Sjake	.align	16
32982906Sjake
33082906Sjake/*
33182906Sjake * Game over if the window operation fails.
33282906Sjake */
33382906Sjake#define	RSF_FATAL(type) \
334116589Sjake	ba	%xcc, rsf_fatal ; \
33588781Sjake	 mov	type, %g2 ; \
33682906Sjake	.align	16
33782906Sjake
33882906Sjake/*
33982906Sjake * Magic to resume from a failed fill a few instructions after the corresponding
34082906Sjake * restore.  This is used on return from the kernel to usermode.
34182906Sjake */
34282906Sjake#define	RSF_FILL_MAGIC \
34382906Sjake	rdpr	%tnpc, %g1 ; \
34482906Sjake	add	%g1, RSF_FILL_INC, %g1 ; \
34582906Sjake	wrpr	%g1, 0, %tnpc ; \
34682906Sjake	done ; \
34782906Sjake	.align	16
34882906Sjake
34982906Sjake/*
35082906Sjake * Spill to the pcb if a spill to the user stack in kernel mode fails.
35182906Sjake */
35282906Sjake#define	RSF_SPILL_TOPCB \
353116589Sjake	ba,a	%xcc, tl1_spill_topcb ; \
35482906Sjake	 nop ; \
35582906Sjake	.align	16
35682906Sjake
/*
 * Unrecoverable window spill/fill failure (reached via RSF_FATAL with the
 * trap type in %g2).  Log the trap via KTR if enabled, sanity-check the
 * kernel stack, then halt with a software-initiated reset (sir).
 */
35788781SjakeENTRY(rsf_fatal)
35888781Sjake#if KTR_COMPILE & KTR_TRAP
35988781Sjake	CATR(KTR_TRAP, "rsf_fatal: bad window trap tt=%#lx type=%#lx"
36088781Sjake	    , %g1, %g3, %g4, 7, 8, 9)
36188781Sjake	rdpr	%tt, %g3
36288781Sjake	stx	%g3, [%g1 + KTR_PARM1]
36388781Sjake	stx	%g2, [%g1 + KTR_PARM2]
36488781Sjake9:
36588781Sjake#endif
36688781Sjake
36788781Sjake	KSTACK_CHECK
36888781Sjake
36988781Sjake	sir
37088781SjakeEND(rsf_fatal)
37188781Sjake
/*
 * Interrupt name and counter tables.  intrnames holds IV_MAX strings of
 * up to MAXCOMLEN characters each; intrcnt holds IV_MAX 64-bit counters.
 * sintrnames/sintrcnt publish the byte sizes of the respective tables.
 */
372223718Smarius	.data
373223718Smarius	_ALIGN_DATA
374224187Sattilio	.globl	intrnames, sintrnames
375223718Smariusintrnames:
376223718Smarius	.space	IV_MAX * (MAXCOMLEN + 1)
377224187Sattiliosintrnames:
378224187Sattilio	.word	IV_MAX * (MAXCOMLEN + 1)
379224187Sattilio
380224187Sattilio	.globl	intrcnt, sintrcnt
381223718Smariusintrcnt:
382223718Smarius	.space	IV_MAX * 8
383224187Sattiliosintrcnt:
384224187Sattilio	.word	IV_MAX * 8
38580709Sjake
386223718Smarius	.text
38780709Sjake
38882906Sjake/*
38982906Sjake * Trap table and associated macros
39082906Sjake *
39182906Sjake * Due to its size a trap table is an inherently hard thing to represent in
39282906Sjake * code in a clean way.  There are approximately 1024 vectors, of 8 or 32
39382906Sjake * instructions each, many of which are identical.  The way that this is
394220939Smarius * laid out is the instructions (8 or 32) for the actual trap vector appear
39582906Sjake * as an AS macro.  In general this code branches to tl0_trap or tl1_trap,
39682906Sjake * but if not supporting code can be placed just after the definition of the
39782906Sjake * macro.  The macros are then instantiated in a different section (.trap),
39882906Sjake * which is setup to be placed by the linker at the beginning of .text, and the
39982906Sjake * code around the macros is moved to the end of trap table.  In this way the
40082906Sjake * code that must be sequential in memory can be split up, and located near
40182906Sjake * its supporting code so that it is easier to follow.
40282906Sjake */
40382906Sjake
40482906Sjake	/*
40582906Sjake	 * Clean window traps occur when %cleanwin is zero to ensure that data
40682906Sjake	 * is not leaked between address spaces in registers.
40782906Sjake	 */
40880709Sjake	.macro	clean_window
	/*
	 * Zero all out and local registers of this window, bump
	 * %cleanwin, and retry the trapped instruction.  %l7 is used
	 * to update %cleanwin and is therefore cleared last.
	 */
40980709Sjake	clr	%o0
41080709Sjake	clr	%o1
41180709Sjake	clr	%o2
41280709Sjake	clr	%o3
41380709Sjake	clr	%o4
41480709Sjake	clr	%o5
41580709Sjake	clr	%o6
41680709Sjake	clr	%o7
41780709Sjake	clr	%l0
41880709Sjake	clr	%l1
41980709Sjake	clr	%l2
42080709Sjake	clr	%l3
42180709Sjake	clr	%l4
42280709Sjake	clr	%l5
42380709Sjake	clr	%l6
42480709Sjake	rdpr	%cleanwin, %l7
42580709Sjake	inc	%l7
42680709Sjake	wrpr	%l7, 0, %cleanwin
42780709Sjake	clr	%l7
42880709Sjake	retry
42980709Sjake	.align	128
43080709Sjake	.endm
43180709Sjake
43281380Sjake	/*
43382906Sjake	 * Stack fixups for entry from user mode.  We are still running on the
43482906Sjake	 * user stack, and with its live registers, so we must save soon.  We
43582906Sjake	 * are on alternate globals so we do have some registers.  Set the
43688644Sjake	 * transitional window state, and do the save.  If this traps we
437181701Smarius	 * attempt to spill a window to the user stack.  If this fails, we
438181701Smarius	 * spill the window to the pcb and continue.  Spilling to the pcb
43988644Sjake	 * must not fail.
44082906Sjake	 *
44182906Sjake	 * NOTE: Must be called with alternate globals and clobbers %g1.
44281380Sjake	 */
44382906Sjake
	/*
	 * Switch from user to transitional window state and save, so we
	 * have a window to work in.  Clobbers %g1 (see comment above).
	 */
44488644Sjake	.macro	tl0_split
44582906Sjake	rdpr	%wstate, %g1
44682906Sjake	wrpr	%g1, WSTATE_TRANSITION, %wstate
44781380Sjake	save
44881380Sjake	.endm

	/*
	 * Common user-trap entry: split the windows and branch to
	 * tl0_utrap with the trap type in %o0 and trap() in %o2.
	 */
44981380Sjake
45082906Sjake	.macro	tl0_setup	type
45188644Sjake	tl0_split
452108374Sjake	clr	%o1
453103921Sjake	set	trap, %o2
454103897Sjake	ba	%xcc, tl0_utrap
45582906Sjake	 mov	\type, %o0
45681380Sjake	.endm
45781380Sjake
45881380Sjake	/*
45982906Sjake	 * Generic trap type.  Call trap() with the specified type.
46081380Sjake	 */
46180709Sjake	.macro	tl0_gen		type
46282906Sjake	tl0_setup \type
46380709Sjake	.align	32
46480709Sjake	.endm
46580709Sjake
46682906Sjake	/*
46782906Sjake	 * This is used to suck up the massive swaths of reserved trap types.
46882906Sjake	 * Generates count "reserved" trap vectors.
46982906Sjake	 */
47080709Sjake	.macro	tl0_reserved	count
47180709Sjake	.rept	\count
47280709Sjake	tl0_gen	T_RESERVED
47380709Sjake	.endr
47480709Sjake	.endm
47580709Sjake
	/*
	 * Kernel-mode (tl1) analog of tl0_split: enter the nested window
	 * state and open a new frame with room for a trap frame.
	 * Clobbers %g1.
	 */
476109810Sjake	.macro	tl1_split
477109810Sjake	rdpr	%wstate, %g1
478109810Sjake	wrpr	%g1, WSTATE_NESTED, %wstate
479109810Sjake	save	%sp, -(CCFSZ + TF_SIZEOF), %sp
480109810Sjake	.endm
481109810Sjake
	/*
	 * Common kernel-trap entry: branch to tl1_trap with the trap
	 * type (or'ed with T_KERNEL) in %o0 and trap() in %o2.
	 */
482109810Sjake	.macro	tl1_setup	type
483109810Sjake	tl1_split
484109810Sjake	clr	%o1
485109810Sjake	set	trap, %o2
486116589Sjake	ba	%xcc, tl1_trap
487109810Sjake	 mov	\type | T_KERNEL, %o0
488109810Sjake	.endm
489109810Sjake
	/* Generic kernel trap vector for the given type. */
490109810Sjake	.macro	tl1_gen		type
491109810Sjake	tl1_setup \type
492109810Sjake	.align	32
493109810Sjake	.endm
494109810Sjake
	/* Generate count reserved kernel trap vectors. */
495109810Sjake	.macro	tl1_reserved	count
496109810Sjake	.rept	\count
497109810Sjake	tl1_gen	T_RESERVED
498109810Sjake	.endr
499109810Sjake	.endm
500109810Sjake
	/*
	 * Instruction access exception from user mode: capture %tpc in
	 * %g3 and the IMMU SFSR in %g4, clear the SFSR, and hand off to
	 * tl0_sfsr_trap with the trap type in %g2.
	 */
50188644Sjake	.macro	tl0_insn_excptn
502101899Sjake	wrpr	%g0, PSTATE_ALT, %pstate
50388644Sjake	wr	%g0, ASI_IMMU, %asi
50488644Sjake	rdpr	%tpc, %g3
50588644Sjake	ldxa	[%g0 + AA_IMMU_SFSR] %asi, %g4
506182877Smarius	/*
507182877Smarius	 * XXX in theory, a store to AA_IMMU_SFSR must be immediately
508182877Smarius	 * followed by a DONE, FLUSH or RETRY for USIII.  In practice,
509182877Smarius	 * this triggers a RED state exception though.
510182877Smarius	 */
51188644Sjake	stxa	%g0, [%g0 + AA_IMMU_SFSR] %asi
51288644Sjake	membar	#Sync
513116589Sjake	ba	%xcc, tl0_sfsr_trap
51488644Sjake	 mov	T_INSTRUCTION_EXCEPTION, %g2
51588644Sjake	.align	32
51688644Sjake	.endm
51788644Sjake
	/*
	 * Data access exception from user mode: capture the DMMU SFAR in
	 * %g3 and SFSR in %g4, clear the SFSR, and hand off to
	 * tl0_sfsr_trap with the trap type in %g2.
	 */
51882906Sjake	.macro	tl0_data_excptn
519101899Sjake	wrpr	%g0, PSTATE_ALT, %pstate
52082906Sjake	wr	%g0, ASI_DMMU, %asi
52182906Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
52282906Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
52388644Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
52488644Sjake	membar	#Sync
525116589Sjake	ba	%xcc, tl0_sfsr_trap
52688644Sjake	 mov	T_DATA_EXCEPTION, %g2
52782906Sjake	.align	32
52882906Sjake	.endm
52982906Sjake
	/*
	 * Memory address not aligned, from user mode: like
	 * tl0_data_excptn but with T_MEM_ADDRESS_NOT_ALIGNED; the fault
	 * address (SFAR) ends up in %g3 and the SFSR in %g4.
	 */
53082005Sjake	.macro	tl0_align
53182906Sjake	wr	%g0, ASI_DMMU, %asi
53282906Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
53382906Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
53488644Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
53588644Sjake	membar	#Sync
536116589Sjake	ba	%xcc, tl0_sfsr_trap
53788644Sjake	 mov	T_MEM_ADDRESS_NOT_ALIGNED, %g2
53882005Sjake	.align	32
53982005Sjake	.endm
54082005Sjake
/*
 * Common tail for user-mode faults that captured SFSR state.
 * On entry: %g2 = trap type, %g3 = fault address (or %tpc), %g4 = SFSR.
 * Splits the windows and enters tl0_utrap with these as arguments.
 */
54182005SjakeENTRY(tl0_sfsr_trap)
54288644Sjake	tl0_split
543108374Sjake	clr	%o1
544103921Sjake	set	trap, %o2
54588644Sjake	mov	%g3, %o4
54688644Sjake	mov	%g4, %o5
547103897Sjake	ba	%xcc, tl0_utrap
54882906Sjake	 mov	%g2, %o0
54982005SjakeEND(tl0_sfsr_trap)
55082005Sjake
	/*
	 * User-mode interrupt vector: enter the common tl0_intr handler
	 * with the interrupt level in %o0 and its PIL mask in %o1.
	 */
55182906Sjake	.macro	tl0_intr level, mask
55288644Sjake	tl0_split
55391246Sjake	set	\mask, %o1
554116589Sjake	ba	%xcc, tl0_intr
55591246Sjake	 mov	\level, %o0
55681380Sjake	.align	32
55781380Sjake	.endm
55881380Sjake
/* Instantiate one interrupt vector for the given level at trap level 0/1. */
55981380Sjake#define	INTR(level, traplvl)						\
56082906Sjake	tl ## traplvl ## _intr	level, 1 << level
56181380Sjake
/*
 * Tick interrupt vector.  NOTE(review): the mask 0x10001 differs from the
 * 1 << level pattern used by INTR() -- confirm its meaning against the
 * tl0_intr/tl1_intr implementations before changing it.
 */
56281380Sjake#define	TICK(traplvl) \
563182743Smarius	tl ## traplvl ## _intr	PIL_TICK, 0x10001
56481380Sjake
/* Vectors for interrupt levels 1-15; level 14 is the tick interrupt. */
56581380Sjake#define	INTR_LEVEL(tl)							\
56681380Sjake	INTR(1, tl) ;							\
56781380Sjake	INTR(2, tl) ;							\
56881380Sjake	INTR(3, tl) ;							\
56981380Sjake	INTR(4, tl) ;							\
57081380Sjake	INTR(5, tl) ;							\
57181380Sjake	INTR(6, tl) ;							\
57281380Sjake	INTR(7, tl) ;							\
57381380Sjake	INTR(8, tl) ;							\
57481380Sjake	INTR(9, tl) ;							\
57581380Sjake	INTR(10, tl) ;							\
57681380Sjake	INTR(11, tl) ;							\
57781380Sjake	INTR(12, tl) ;							\
57881380Sjake	INTR(13, tl) ;							\
57981380Sjake	TICK(tl) ;							\
58081380Sjake	INTR(15, tl) ;
58181380Sjake
58280709Sjake	.macro	tl0_intr_level
58381380Sjake	INTR_LEVEL(0)
58480709Sjake	.endm
58580709Sjake
	/*
	 * Interrupt vector trap: if the interrupt receive register shows
	 * a pending vector (IRSR_BUSY), branch to the common intr_vector
	 * entry point (NOTE(review): defined elsewhere in this file);
	 * otherwise the trap was stray.
	 */
58697265Sjake	.macro	intr_vector
58797265Sjake	ldxa	[%g0] ASI_INTR_RECEIVE, %g1
58897265Sjake	andcc	%g1, IRSR_BUSY, %g0
589104075Sjake	bnz,a,pt %xcc, intr_vector
59097265Sjake	 nop
591223721Smarius	ba,a,pt	%xcc, intr_vector_stray
592223721Smarius	 nop
59381380Sjake	.align	32
59480709Sjake	.endm
59580709Sjake
596109860Sjake	.macro	tl0_immu_miss
	/*
	 * User instruction TLB miss: walk the user TSB for each supported
	 * page size and load the ITLB on a match.
	 *
	 * Register usage:
	 *	%g1 - IMMU tag access register contents
	 *	%g2 - current page size index (TS_MIN..TS_MAX)
	 *	%g3 - scratch / TTE tag target
	 *	%g4 - TTE bucket pointer
	 *	%g6, %g7 - TTE tag and data loaded from the TSB
	 */
59781380Sjake	/*
598181701Smarius	 * Load the context and the virtual page number from the tag access
599109860Sjake	 * register.  We ignore the context.
600109860Sjake	 */
601109860Sjake	wr	%g0, ASI_IMMU, %asi
602109860Sjake	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g1
603109860Sjake
604102040Sjake	/*
605102040Sjake	 * Initialize the page size walker.
606102040Sjake	 */
607102040Sjake	mov	TS_MIN, %g2
608102040Sjake
609102040Sjake	/*
610102040Sjake	 * Loop over all supported page sizes.
611102040Sjake	 */
612102040Sjake
613102040Sjake	/*
614102040Sjake	 * Compute the page shift for the page size we are currently looking
615102040Sjake	 * for.
616102040Sjake	 */
617102040Sjake1:	add	%g2, %g2, %g3
618102040Sjake	add	%g3, %g2, %g3
619102040Sjake	add	%g3, PAGE_SHIFT, %g3
620102040Sjake
62191224Sjake	/*
62291224Sjake	 * Extract the virtual page number from the contents of the tag
62391224Sjake	 * access register.
62481380Sjake	 */
625102040Sjake	srlx	%g1, %g3, %g3
62681380Sjake
62781380Sjake	/*
628181701Smarius	 * Compute the TTE bucket address.
62981380Sjake	 */
630102040Sjake	ldxa	[%g0 + AA_IMMU_TSB] %asi, %g5
631102040Sjake	and	%g3, TSB_BUCKET_MASK, %g4
632102040Sjake	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
633102040Sjake	add	%g4, %g5, %g4
63481380Sjake
63581380Sjake	/*
636181701Smarius	 * Compute the TTE tag target.
63781380Sjake	 */
638102040Sjake	sllx	%g3, TV_SIZE_BITS, %g3
639102040Sjake	or	%g3, %g2, %g3
64081380Sjake
64181380Sjake	/*
642181701Smarius	 * Loop over the TTEs in this bucket.
64381380Sjake	 */
64481380Sjake
64581380Sjake	/*
646181701Smarius	 * Load the TTE.  Note that this instruction may fault, clobbering
647102040Sjake	 * the contents of the tag access register, %g5, %g6, and %g7.  We
648102040Sjake	 * do not use %g5, and %g6 and %g7 are not used until this instruction
649102040Sjake	 * completes successfully.
65081380Sjake	 */
651102040Sjake2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
65281380Sjake
65381380Sjake	/*
654181701Smarius	 * Check that it's valid and executable and that the TTE tags match.
65581380Sjake	 */
656102040Sjake	brgez,pn %g7, 3f
657102040Sjake	 andcc	%g7, TD_EXEC, %g0
658102040Sjake	bz,pn	%xcc, 3f
659102040Sjake	 cmp	%g3, %g6
660102040Sjake	bne,pn	%xcc, 3f
66188644Sjake	 EMPTY
66281380Sjake
66381380Sjake	/*
664181701Smarius	 * We matched a TTE, load the TLB.
66581380Sjake	 */
66681380Sjake
66781380Sjake	/*
66881380Sjake	 * Set the reference bit, if it's currently clear.
66981380Sjake	 */
670102040Sjake	 andcc	%g7, TD_REF, %g0
67182906Sjake	bz,a,pn	%xcc, tl0_immu_miss_set_ref
67281380Sjake	 nop
67381380Sjake
67481380Sjake	/*
675181701Smarius	 * Load the TTE tag and data into the TLB and retry the instruction.
67681380Sjake	 */
677102040Sjake	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
678102040Sjake	stxa	%g7, [%g0] ASI_ITLB_DATA_IN_REG
67981380Sjake	retry
68081380Sjake
68181380Sjake	/*
682181701Smarius	 * Advance to the next TTE in this bucket, and check the low bits
683102040Sjake	 * of the bucket pointer to see if we've finished the bucket.
68481380Sjake	 */
685102040Sjake3:	add	%g4, 1 << TTE_SHIFT, %g4
686102040Sjake	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
687102040Sjake	bnz,pt	%xcc, 2b
688102040Sjake	 EMPTY
68991224Sjake
69091224Sjake	/*
691102040Sjake	 * See if we just checked the largest page size, and advance to the
692102040Sjake	 * next one if not.
69391224Sjake	 */
694102040Sjake	 cmp	%g2, TS_MAX
695102040Sjake	bne,pt	%xcc, 1b
696102040Sjake	 add	%g2, 1, %g2
69791224Sjake
69896207Sjake	/*
699181701Smarius	 * Not in user TSB, call C code.
700102040Sjake	 */
701102040Sjake	ba,a	%xcc, tl0_immu_miss_trap
70281380Sjake	.align	128
70380709Sjake	.endm
70480709Sjake
/*
 * Slow path of tl0_immu_miss: atomically set TD_REF in the matched TTE,
 * then load the ITLB and retry unless the TTE was invalidated in the
 * meantime (in which case we just retry and take the miss again).
 * On entry: %g1 = tag access, %g4 = TTE pointer.
 */
70582906SjakeENTRY(tl0_immu_miss_set_ref)
70681380Sjake	/*
70781380Sjake	 * Set the reference bit.
70881380Sjake	 */
709216803Smarius	TTE_SET_REF(%g4, %g2, %g3, a, ASI_N)
71081380Sjake
71181380Sjake	/*
712102040Sjake	 * May have become invalid during casxa, in which case start over.
71381380Sjake	 */
714102040Sjake	brgez,pn %g2, 1f
715102040Sjake	 nop
71681380Sjake
71781380Sjake	/*
718181701Smarius	 * Load the TTE tag and data into the TLB and retry the instruction.
71981380Sjake	 */
720102040Sjake	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
721102040Sjake	stxa	%g2, [%g0] ASI_ITLB_DATA_IN_REG
72291224Sjake1:	retry
72382906SjakeEND(tl0_immu_miss_set_ref)
72481380Sjake
/*
 * TSB walk failed for a user instruction miss: restore the tag access
 * register (a nested fault may have clobbered it), switch to alternate
 * globals and enter tl0_utrap with T_INSTRUCTION_MISS and the tag access
 * value in %o3.
 */
72582906SjakeENTRY(tl0_immu_miss_trap)
72681380Sjake	/*
72796207Sjake	 * Put back the contents of the tag access register, in case we
72896207Sjake	 * faulted.
72996207Sjake	 */
730182877Smarius	sethi	%hi(KERNBASE), %g2
731102040Sjake	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
732182877Smarius	flush	%g2
73396207Sjake
73496207Sjake	/*
73582906Sjake	 * Switch to alternate globals.
73682906Sjake	 */
73782906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
73882906Sjake
73982906Sjake	/*
74091224Sjake	 * Reload the tag access register.
74181380Sjake	 */
74291224Sjake	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2
74381380Sjake
74481380Sjake	/*
74591224Sjake	 * Save the tag access register, and call common trap code.
74681380Sjake	 */
74788644Sjake	tl0_split
748108374Sjake	clr	%o1
749103921Sjake	set	trap, %o2
75091224Sjake	mov	%g2, %o3
751114257Sjake	ba	%xcc, tl0_utrap
75288644Sjake	 mov	T_INSTRUCTION_MISS, %o0
75382906SjakeEND(tl0_immu_miss_trap)
75481380Sjake
755109860Sjake	.macro	tl0_dmmu_miss
	/*
	 * User data TLB miss: walk the user TSB for each supported page
	 * size and load the DTLB on a match.  Register usage matches
	 * tl0_immu_miss (%g1 = tag access, %g2 = page size index,
	 * %g3 = tag target, %g4 = bucket pointer, %g6/%g7 = TTE).
	 * The tl1_dmmu_miss_user label below is a secondary entry point
	 * (NOTE(review): presumably used by the tl1 data miss handler for
	 * user addresses -- that handler is not visible in this chunk).
	 */
75681180Sjake	/*
757181701Smarius	 * Load the context and the virtual page number from the tag access
758109860Sjake	 * register.  We ignore the context.
759109860Sjake	 */
760109860Sjake	wr	%g0, ASI_DMMU, %asi
761109860Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1
762109860Sjake
763102040Sjake	/*
764102040Sjake	 * Initialize the page size walker.
765102040Sjake	 */
766109860Sjaketl1_dmmu_miss_user:
767102040Sjake	mov	TS_MIN, %g2
768102040Sjake
769102040Sjake	/*
770102040Sjake	 * Loop over all supported page sizes.
771102040Sjake	 */
772102040Sjake
773102040Sjake	/*
774102040Sjake	 * Compute the page shift for the page size we are currently looking
775102040Sjake	 * for.
776102040Sjake	 */
777102040Sjake1:	add	%g2, %g2, %g3
778102040Sjake	add	%g3, %g2, %g3
779102040Sjake	add	%g3, PAGE_SHIFT, %g3
780102040Sjake
781102040Sjake	/*
78291224Sjake	 * Extract the virtual page number from the contents of the tag
78391224Sjake	 * access register.
78491224Sjake	 */
785102040Sjake	srlx	%g1, %g3, %g3
78691224Sjake
78791224Sjake	/*
788181701Smarius	 * Compute the TTE bucket address.
78981180Sjake	 */
790102040Sjake	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g5
791102040Sjake	and	%g3, TSB_BUCKET_MASK, %g4
792102040Sjake	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
793102040Sjake	add	%g4, %g5, %g4
79481180Sjake
79581180Sjake	/*
796181701Smarius	 * Compute the TTE tag target.
79781180Sjake	 */
798102040Sjake	sllx	%g3, TV_SIZE_BITS, %g3
799102040Sjake	or	%g3, %g2, %g3
80081180Sjake
80181180Sjake	/*
802181701Smarius	 * Loop over the TTEs in this bucket.
80381180Sjake	 */
80481180Sjake
80581180Sjake	/*
806181701Smarius	 * Load the TTE.  Note that this instruction may fault, clobbering
807102040Sjake	 * the contents of the tag access register, %g5, %g6, and %g7.  We
808102040Sjake	 * do not use %g5, and %g6 and %g7 are not used until this instruction
809102040Sjake	 * completes successfully.
81081180Sjake	 */
811102040Sjake2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
81281180Sjake
81381180Sjake	/*
814181701Smarius	 * Check that it's valid and that the virtual page numbers match.
81581180Sjake	 */
816102040Sjake	brgez,pn %g7, 3f
817102040Sjake	 cmp	%g3, %g6
818102040Sjake	bne,pn	%xcc, 3f
81988644Sjake	 EMPTY
82081180Sjake
82181180Sjake	/*
822181701Smarius	 * We matched a TTE, load the TLB.
82381180Sjake	 */
82481180Sjake
82581180Sjake	/*
82681180Sjake	 * Set the reference bit, if it's currently clear.
82781180Sjake	 */
828102040Sjake	 andcc	%g7, TD_REF, %g0
829109860Sjake	bz,a,pn	%xcc, tl0_dmmu_miss_set_ref
83081180Sjake	 nop
83181180Sjake
83281180Sjake	/*
833181701Smarius	 * Load the TTE tag and data into the TLB and retry the instruction.
83481180Sjake	 */
835102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
836102040Sjake	stxa	%g7, [%g0] ASI_DTLB_DATA_IN_REG
83781180Sjake	retry
83881180Sjake
83981180Sjake	/*
840181701Smarius	 * Advance to the next TTE in this bucket, and check the low bits
841102040Sjake	 * of the bucket pointer to see if we've finished the bucket.
84281180Sjake	 */
843102040Sjake3:	add	%g4, 1 << TTE_SHIFT, %g4
844102040Sjake	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
845102040Sjake	bnz,pt	%xcc, 2b
846102040Sjake	 EMPTY
847102040Sjake
848102040Sjake	/*
849102040Sjake	 * See if we just checked the largest page size, and advance to the
850102040Sjake	 * next one if not.
851102040Sjake	 */
852102040Sjake	 cmp	%g2, TS_MAX
853102040Sjake	bne,pt	%xcc, 1b
854102040Sjake	 add	%g2, 1, %g2
855109860Sjake
856109860Sjake	/*
857181701Smarius	 * Not in user TSB, call C code.
858109860Sjake	 */
859109860Sjake	ba,a	%xcc, tl0_dmmu_miss_trap
860109860Sjake	.align	128
86181180Sjake	.endm
86281180Sjake
/*
 * Out-of-line continuation of the TL0 DMMU miss handler, entered when the
 * matched TTE has its referenced bit clear.
 * In: %g1 = tag access register contents, %g4 = TTE address.
 * Clobbers %g2 and %g3 (scratch for TTE_SET_REF).
 */
863109860SjakeENTRY(tl0_dmmu_miss_set_ref)
86481180Sjake	/*
86581180Sjake	 * Set the reference bit.
86681180Sjake	 */
867216803Smarius	TTE_SET_REF(%g4, %g2, %g3, a, ASI_N)
86881180Sjake
86981180Sjake	/*
870102040Sjake	 * May have become invalid during casxa, in which case start over.
87181180Sjake	 */
872102040Sjake	brgez,pn %g2, 1f
873102040Sjake	 nop
87481180Sjake
87581180Sjake	/*
876181701Smarius	 * Load the TTE tag and data into the TLB and retry the instruction.
87781180Sjake	 */
878102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
879102040Sjake	stxa	%g2, [%g0] ASI_DTLB_DATA_IN_REG
88091224Sjake1:	retry
881109860SjakeEND(tl0_dmmu_miss_set_ref)
88281180Sjake
/*
 * Slow path for the TL0 DMMU miss handler: the translation was not found
 * in the user TSB, so hand off to the C trap code.  Reads %tl to tell a
 * true user fault (tl == 1, dispatched via tl0_utrap) from a fault taken
 * during a window spill/fill on behalf of the user (tl > 1, resumed via
 * RESUME_SPILLFILL_MMU and dispatched via tl1_trap).
 * In: %g1 = tag access register contents (may have been clobbered by the
 * faulting TSB load, hence the store-back below).
 */
88381180SjakeENTRY(tl0_dmmu_miss_trap)
88482005Sjake	/*
88596207Sjake	 * Put back the contents of the tag access register, in case we
88696207Sjake	 * faulted.
88796207Sjake	 */
888102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
88996207Sjake	membar	#Sync
89096207Sjake
89196207Sjake	/*
89282906Sjake	 * Switch to alternate globals.
89382005Sjake	 */
89482906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
89582005Sjake
89682005Sjake	/*
897109860Sjake	 * Check if we actually came from the kernel.
898109860Sjake	 */
899109860Sjake	rdpr	%tl, %g1
900109860Sjake	cmp	%g1, 1
901109860Sjake	bgt,a,pn %xcc, 1f
902109860Sjake	 nop
903109860Sjake
904109860Sjake	/*
90591224Sjake	 * Reload the tag access register.
90682005Sjake	 */
90791224Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
90881180Sjake
90981180Sjake	/*
91091224Sjake	 * Save the tag access register and call common trap code.
91181180Sjake	 */
91288644Sjake	tl0_split
913108374Sjake	clr	%o1
914103921Sjake	set	trap, %o2
91591224Sjake	mov	%g2, %o3
916114257Sjake	ba	%xcc, tl0_utrap
91788644Sjake	 mov	T_DATA_MISS, %o0
918109860Sjake
919109860Sjake	/*
920109860Sjake	 * Handle faults during window spill/fill.
921109860Sjake	 */
922109860Sjake1:	RESUME_SPILLFILL_MMU
923109860Sjake
924109860Sjake	/*
925109860Sjake	 * Reload the tag access register.
926109860Sjake	 */
927109860Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
928109860Sjake
929109860Sjake	tl1_split
930109860Sjake	clr	%o1
931109860Sjake	set	trap, %o2
932109860Sjake	mov	%g2, %o3
933116589Sjake	ba	%xcc, tl1_trap
934109860Sjake	 mov	T_DATA_MISS | T_KERNEL, %o0
93582906SjakeEND(tl0_dmmu_miss_trap)
93681180Sjake
/*
 * Trap-table entry for TL0 DMMU protection faults.  The real work is done
 * out of line in tl0_dmmu_prot_1; this stub is padded to the 128-byte
 * trap-table slot size.
 */
937109860Sjake	.macro	tl0_dmmu_prot
938109860Sjake	ba,a	%xcc, tl0_dmmu_prot_1
939109860Sjake	 nop
940109860Sjake	.align	128
941109860Sjake	.endm
942109860Sjake
/*
 * TL0 DMMU protection fault handler: walk the user TSB looking for a
 * valid, software-writable (TD_SW) TTE for the faulting page, set its
 * hardware write bit and reload the TLB.  Falls through to
 * tl0_dmmu_prot_trap if no match is found at any supported page size.
 * The tl1_dmmu_prot_user label is a secondary entry point used by the
 * TL1 handler when the fault is on a user address.
 */
943109860SjakeENTRY(tl0_dmmu_prot_1)
94488644Sjake	/*
945181701Smarius	 * Load the context and the virtual page number from the tag access
946109860Sjake	 * register.  We ignore the context.
947109860Sjake	 */
948109860Sjake	wr	%g0, ASI_DMMU, %asi
949109860Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1
950109860Sjake
951109860Sjake	/*
952102040Sjake	 * Initialize the page size walker.
953102040Sjake	 */
954109860Sjaketl1_dmmu_prot_user:
955102040Sjake	mov	TS_MIN, %g2
956102040Sjake
957102040Sjake	/*
958102040Sjake	 * Loop over all supported page sizes.
959102040Sjake	 */
960102040Sjake
961102040Sjake	/*
962102040Sjake	 * Compute the page shift for the page size we are currently looking
963102040Sjake	 * for.
964102040Sjake	 */
965102040Sjake1:	add	%g2, %g2, %g3
966102040Sjake	add	%g3, %g2, %g3
967102040Sjake	add	%g3, PAGE_SHIFT, %g3
968102040Sjake
969102040Sjake	/*
97091224Sjake	 * Extract the virtual page number from the contents of the tag
97191224Sjake	 * access register.
97291224Sjake	 */
973102040Sjake	srlx	%g1, %g3, %g3
97491224Sjake
97591224Sjake	/*
976181701Smarius	 * Compute the TTE bucket address.
97788644Sjake	 */
978102040Sjake	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g5
979102040Sjake	and	%g3, TSB_BUCKET_MASK, %g4
980102040Sjake	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
981102040Sjake	add	%g4, %g5, %g4
98288644Sjake
98388644Sjake	/*
984181701Smarius	 * Compute the TTE tag target.
98588644Sjake	 */
986102040Sjake	sllx	%g3, TV_SIZE_BITS, %g3
987102040Sjake	or	%g3, %g2, %g3
98888644Sjake
98988644Sjake	/*
990181701Smarius	 * Loop over the TTEs in this bucket.
99188644Sjake	 */
99288644Sjake
99388644Sjake	/*
994181701Smarius	 * Load the TTE.  Note that this instruction may fault, clobbering
995102040Sjake	 * the contents of the tag access register, %g5, %g6, and %g7.  We
996102040Sjake	 * do not use %g5, and %g6 and %g7 are not used until this instruction
997102040Sjake	 * completes successfully.
99888644Sjake	 */
999102040Sjake2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
100088644Sjake
100188644Sjake	/*
1002181701Smarius	 * Check that it's valid and writable and that the virtual page
100391224Sjake	 * numbers match.
100488644Sjake	 */
1005102040Sjake	brgez,pn %g7, 4f
1006102040Sjake	 andcc	%g7, TD_SW, %g0
1007102040Sjake	bz,pn	%xcc, 4f
1008102040Sjake	 cmp	%g3, %g6
1009102040Sjake	bne,pn	%xcc, 4f
101088644Sjake	 nop
101188644Sjake
101291224Sjake	/*
101391224Sjake	 * Set the hardware write bit.
101491224Sjake	 */
1015216803Smarius	TTE_SET_W(%g4, %g2, %g3, a, ASI_N)
101688644Sjake
101788644Sjake	/*
1018181701Smarius	 * Delete the old TLB entry and clear the SFSR.
101988644Sjake	 */
1020102040Sjake	srlx	%g1, PAGE_SHIFT, %g3
1021102040Sjake	sllx	%g3, PAGE_SHIFT, %g3
1022102040Sjake	stxa	%g0, [%g3] ASI_DMMU_DEMAP
1023102040Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
1024102040Sjake	membar	#Sync
102588644Sjake
102681180Sjake	/*
1027102040Sjake	 * May have become invalid during casxa, in which case start over.
102888644Sjake	 */
1029102040Sjake	brgez,pn %g2, 3f
1030102040Sjake	 or	%g2, TD_W, %g2
103188644Sjake
103288644Sjake	/*
1033181701Smarius	 * Load the TTE data into the TLB and retry the instruction.
103496207Sjake	 */
1035102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
1036102040Sjake	stxa	%g2, [%g0] ASI_DTLB_DATA_IN_REG
1037102040Sjake3:	retry
103896207Sjake
103996207Sjake	/*
1040102040Sjake	 * Check the low bits to see if we've finished the bucket.
104188644Sjake	 */
1042102040Sjake4:	add	%g4, 1 << TTE_SHIFT, %g4
1043102040Sjake	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
1044102040Sjake	bnz,pt	%xcc, 2b
1045102040Sjake	 EMPTY
104688644Sjake
104788644Sjake	/*
1048102040Sjake	 * See if we just checked the largest page size, and advance to the
1049102040Sjake	 * next one if not.
105088644Sjake	 */
1051102040Sjake	 cmp	%g2, TS_MAX
1052102040Sjake	bne,pt	%xcc, 1b
1053102040Sjake	 add	%g2, 1, %g2
1054102040Sjake
105588644Sjake	/*
1056181701Smarius	 * Not in user TSB, call C code.
105791224Sjake	 */
1058116589Sjake	ba,a	%xcc, tl0_dmmu_prot_trap
1059102040Sjake	 nop
1060102040SjakeEND(tl0_dmmu_prot_1)
106191224Sjake
/*
 * Slow path for TL0 DMMU protection faults: the TSB lookup failed, so
 * gather the MMU fault state (TAR, SFAR, SFSR) and dispatch to the C
 * trap code.  Like tl0_dmmu_miss_trap, %tl distinguishes a plain user
 * fault (tl0_utrap) from a fault taken during window spill/fill
 * (RESUME_SPILLFILL_MMU_CLR_SFSR, then tl1_trap).
 * In: %g1 = tag access register contents.
 */
106288644SjakeENTRY(tl0_dmmu_prot_trap)
106388644Sjake	/*
106496207Sjake	 * Put back the contents of the tag access register, in case we
106596207Sjake	 * faulted.
106696207Sjake	 */
1067102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
106896207Sjake	membar	#Sync
106996207Sjake
107096207Sjake	/*
107182906Sjake	 * Switch to alternate globals.
107281180Sjake	 */
107382906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
107481180Sjake
107581180Sjake	/*
1076109860Sjake	 * Check if we actually came from the kernel.
1077109860Sjake	 */
1078109860Sjake	rdpr	%tl, %g1
1079109860Sjake	cmp	%g1, 1
1080109860Sjake	bgt,a,pn %xcc, 1f
1081109860Sjake	 nop
1082109860Sjake
1083109860Sjake	/*
1084181701Smarius	 * Load the SFAR, SFSR and TAR.
108582005Sjake	 */
108688644Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
108788644Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
108888644Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
108985243Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
109082005Sjake	membar	#Sync
109182005Sjake
109282005Sjake	/*
1093182020Smarius	 * Save the MMU registers and call common trap code.
109482005Sjake	 */
109588644Sjake	tl0_split
1096108374Sjake	clr	%o1
1097103921Sjake	set	trap, %o2
109888644Sjake	mov	%g2, %o3
109988644Sjake	mov	%g3, %o4
110088644Sjake	mov	%g4, %o5
1101103897Sjake	ba	%xcc, tl0_utrap
110288644Sjake	 mov	T_DATA_PROTECTION, %o0
1103109860Sjake
1104109860Sjake	/*
1105109860Sjake	 * Handle faults during window spill/fill.
1106109860Sjake	 */
1107109860Sjake1:	RESUME_SPILLFILL_MMU_CLR_SFSR
1108109860Sjake
1109109860Sjake	/*
1110181701Smarius	 * Load the SFAR, SFSR and TAR.  Clear the SFSR.
1111109860Sjake	 */
1112109860Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
1113109860Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
1114109860Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
1115109860Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
1116109860Sjake	membar	#Sync
1117109860Sjake
1118109860Sjake	tl1_split
1119109860Sjake	clr	%o1
1120109860Sjake	set	trap, %o2
1121109860Sjake	mov	%g2, %o3
1122109860Sjake	mov	%g3, %o4
1123109860Sjake	mov	%g4, %o5
1124116589Sjake	ba	%xcc, tl1_trap
1125109860Sjake	 mov	T_DATA_PROTECTION | T_KERNEL, %o0
112688644SjakeEND(tl0_dmmu_prot_trap)
112781180Sjake
/*
 * TL0 window spill, 64-bit user stack: store the register window to the
 * user stack via ASI_AIUP.  The RSF_TRAP entries handle faults taken by
 * the SPILL stores.
 */
112880709Sjake	.macro	tl0_spill_0_n
112991246Sjake	wr	%g0, ASI_AIUP, %asi
113091246Sjake	SPILL(stxa, %sp + SPOFF, 8, %asi)
113180709Sjake	saved
113280709Sjake	retry
113382906Sjake	.align	32
113482906Sjake	RSF_TRAP(T_SPILL)
113582906Sjake	RSF_TRAP(T_SPILL)
113680709Sjake	.endm
113780709Sjake
/*
 * TL0 window spill, 32-bit user stack: 4-byte stores, no stack bias.
 */
113882906Sjake	.macro	tl0_spill_1_n
113991246Sjake	wr	%g0, ASI_AIUP, %asi
114082906Sjake	SPILL(stwa, %sp, 4, %asi)
114182906Sjake	saved
114282906Sjake	retry
114382906Sjake	.align	32
114482906Sjake	RSF_TRAP(T_SPILL)
114582906Sjake	RSF_TRAP(T_SPILL)
114682906Sjake	.endm
114782005Sjake
/*
 * TL0 window fill, 64-bit user stack: load the register window back from
 * the user stack via ASI_AIUP.
 */
114891246Sjake	.macro	tl0_fill_0_n
114982906Sjake	wr	%g0, ASI_AIUP, %asi
115091246Sjake	FILL(ldxa, %sp + SPOFF, 8, %asi)
115182906Sjake	restored
115282906Sjake	retry
115382906Sjake	.align	32
115482906Sjake	RSF_TRAP(T_FILL)
115582906Sjake	RSF_TRAP(T_FILL)
115680709Sjake	.endm
115780709Sjake
/*
 * TL0 window fill, 32-bit user stack: 4-byte zero-extending loads.
 */
115882906Sjake	.macro	tl0_fill_1_n
115991246Sjake	wr	%g0, ASI_AIUP, %asi
116082906Sjake	FILL(lduwa, %sp, 4, %asi)
116182906Sjake	restored
116282906Sjake	retry
116382906Sjake	.align	32
116482906Sjake	RSF_TRAP(T_FILL)
116582906Sjake	RSF_TRAP(T_FILL)
116682906Sjake	.endm
116782906Sjake
/*
 * Deliver a software trap for a failed spill/fill: restore the CWP saved
 * in %tstate, then build a trap frame and call the common trap code with
 * the trap type passed in %g2.
 */
116882906SjakeENTRY(tl0_sftrap)
116982906Sjake	rdpr	%tstate, %g1
117082906Sjake	and	%g1, TSTATE_CWP_MASK, %g1
117182906Sjake	wrpr	%g1, 0, %cwp
117288644Sjake	tl0_split
1173108374Sjake	clr	%o1
1174103921Sjake	set	trap, %o2
1175116589Sjake	ba	%xcc, tl0_trap
117682906Sjake	 mov	%g2, %o0
117782906SjakeEND(tl0_sftrap)
117882906Sjake
/*
 * Filler for unused TL0 spill trap-table slots; `sir` forces a software-
 * initiated reset if one is ever reached.
 */
117982906Sjake	.macro	tl0_spill_bad	count
118082906Sjake	.rept	\count
118188644Sjake	sir
118288644Sjake	.align	128
118382906Sjake	.endr
118482906Sjake	.endm
118582906Sjake
/*
 * Filler for unused TL0 fill trap-table slots; `sir` forces a software-
 * initiated reset if one is ever reached.
 */
118680709Sjake	.macro	tl0_fill_bad	count
118780709Sjake	.rept	\count
118888644Sjake	sir
118988644Sjake	.align	128
119080709Sjake	.endr
119180709Sjake	.endm
119280709Sjake
/*
 * TL0 system call entry: build a trap frame and dispatch to the common
 * trap code with the syscall() handler and T_SYSCALL.
 */
119384186Sjake	.macro	tl0_syscall
119488644Sjake	tl0_split
1195108374Sjake	clr	%o1
1196103921Sjake	set	syscall, %o2
1197103921Sjake	ba	%xcc, tl0_trap
119884186Sjake	 mov	T_SYSCALL, %o0
119988784Sjake	.align	32
120084186Sjake	.endm
120184186Sjake
/*
 * Trap-table stub that branches to the out-of-line user FP restore
 * handler below.
 */
1202112920Sjake	.macro	tl0_fp_restore
1203112920Sjake	ba,a	%xcc, tl0_fp_restore
1204112920Sjake	 nop
1205112920Sjake	.align	32
1206112920Sjake	.endm
1207112920Sjake
/*
 * Restore the user floating point state from the PCB: clear PCB_FEF so
 * the restore is not repeated, enable the FPU via FPRS_FEF and block-load
 * all four 64-byte groups of FP registers from PCB_UFP.
 */
1208112920SjakeENTRY(tl0_fp_restore)
1209112924Sjake	ldx	[PCB_REG + PCB_FLAGS], %g1
1210112924Sjake	andn	%g1, PCB_FEF, %g1
1211112924Sjake	stx	%g1, [PCB_REG + PCB_FLAGS]
1212112924Sjake
1213112920Sjake	wr	%g0, FPRS_FEF, %fprs
1214112920Sjake	wr	%g0, ASI_BLK_S, %asi
1215112920Sjake	ldda	[PCB_REG + PCB_UFP + (0 * 64)] %asi, %f0
1216112920Sjake	ldda	[PCB_REG + PCB_UFP + (1 * 64)] %asi, %f16
1217112920Sjake	ldda	[PCB_REG + PCB_UFP + (2 * 64)] %asi, %f32
1218112920Sjake	ldda	[PCB_REG + PCB_UFP + (3 * 64)] %asi, %f48
1219112920Sjake	membar	#Sync
1220112920Sjake	done
1221112920SjakeEND(tl0_fp_restore)
1222112920Sjake
/*
 * TL1 instruction access exception: capture %tpc and the IMMU SFSR,
 * clear the SFSR, and branch to the out-of-line trap dispatcher with the
 * trap type in %g2.
 */
122380709Sjake	.macro	tl1_insn_excptn
1224101899Sjake	wrpr	%g0, PSTATE_ALT, %pstate
122588644Sjake	wr	%g0, ASI_IMMU, %asi
122688644Sjake	rdpr	%tpc, %g3
122788644Sjake	ldxa	[%g0 + AA_IMMU_SFSR] %asi, %g4
1228182877Smarius	/*
1229182877Smarius	 * XXX in theory, a store to AA_IMMU_SFSR must be immediately
1230182877Smarius	 * followed by a DONE, FLUSH or RETRY for USIII.  In practice,
1231182877Smarius	 * this triggers a RED state exception though.
1232182877Smarius	 */
123388644Sjake	stxa	%g0, [%g0 + AA_IMMU_SFSR] %asi
123488644Sjake	membar	#Sync
1235116589Sjake	ba	%xcc, tl1_insn_exceptn_trap
123688644Sjake	 mov	T_INSTRUCTION_EXCEPTION | T_KERNEL, %g2
123780709Sjake	.align	32
123880709Sjake	.endm
123980709Sjake
/*
 * Dispatch a TL1 instruction access exception to the C trap code.
 * In: %g2 = trap type, %g3 = %tpc at the fault, %g4 = IMMU SFSR.
 */
124088644SjakeENTRY(tl1_insn_exceptn_trap)
124191246Sjake	tl1_split
1242103921Sjake	clr	%o1
1243103921Sjake	set	trap, %o2
124488644Sjake	mov	%g3, %o4
124588644Sjake	mov	%g4, %o5
1246116589Sjake	ba	%xcc, tl1_trap
124788644Sjake	 mov	%g2, %o0
124888644SjakeEND(tl1_insn_exceptn_trap)
124988644Sjake
/*
 * Trap-table stub for TL1 FP-disabled traps; the handler is out of line.
 */
1250113024Sjake	.macro	tl1_fp_disabled
1251113024Sjake	ba,a	%xcc, tl1_fp_disabled_1
1252113024Sjake	 nop
1253113024Sjake	.align	32
1254113024Sjake	.endm
1255113024Sjake
/*
 * TL1 FP-disabled handler: if the trapping %tpc lies inside the
 * fpu_fault_begin..fpu_fault_begin+fpu_fault_size region, the kernel was
 * deliberately using the FPU — enable it, block-load the kernel FP state
 * from PCB_KFP and retry.  Otherwise report T_FP_DISABLED to the C trap
 * code.
 */
1256113024SjakeENTRY(tl1_fp_disabled_1)
1257113024Sjake	rdpr	%tpc, %g1
1258113024Sjake	set	fpu_fault_begin, %g2
1259113024Sjake	sub	%g1, %g2, %g1
1260113024Sjake	cmp	%g1, fpu_fault_size
1261113024Sjake	bgeu,a,pn %xcc, 1f
1262113024Sjake	 nop
1263113024Sjake
1264113024Sjake	wr	%g0, FPRS_FEF, %fprs
1265113024Sjake	wr	%g0, ASI_BLK_S, %asi
1266113024Sjake	ldda	[PCB_REG + PCB_KFP + (0 * 64)] %asi, %f0
1267113024Sjake	ldda	[PCB_REG + PCB_KFP + (1 * 64)] %asi, %f16
1268113024Sjake	ldda	[PCB_REG + PCB_KFP + (2 * 64)] %asi, %f32
1269113024Sjake	ldda	[PCB_REG + PCB_KFP + (3 * 64)] %asi, %f48
1270113024Sjake	membar	#Sync
1271113024Sjake	retry
1272113024Sjake
1273113024Sjake1:	tl1_split
1274113024Sjake	clr	%o1
1275113024Sjake	set	trap, %o2
1276113024Sjake	ba	%xcc, tl1_trap
1277113024Sjake	 mov	T_FP_DISABLED | T_KERNEL, %o0
1278113024SjakeEND(tl1_fp_disabled_1)
1279113024Sjake
/*
 * Trap-table stub for TL1 data access exceptions.
 */
128082005Sjake	.macro	tl1_data_excptn
1281101899Sjake	wrpr	%g0, PSTATE_ALT, %pstate
1282116589Sjake	ba,a	%xcc, tl1_data_excptn_trap
128382906Sjake	 nop
128482005Sjake	.align	32
128582005Sjake	.endm
128682005Sjake
/*
 * TL1 data access exception: resume in-kernel spill/fill faults if
 * applicable, otherwise fall into tl1_sfsr_trap with the trap type in %g2.
 */
128788644SjakeENTRY(tl1_data_excptn_trap)
128888644Sjake	RESUME_SPILLFILL_MMU_CLR_SFSR
1289116589Sjake	ba	%xcc, tl1_sfsr_trap
129088644Sjake	 mov	T_DATA_EXCEPTION | T_KERNEL, %g2
129188644SjakeEND(tl1_data_excptn_trap)
129282906Sjake
/*
 * Trap-table stub for TL1 memory alignment traps.
 */
129380709Sjake	.macro	tl1_align
1294222840Smarius	wrpr	%g0, PSTATE_ALT, %pstate
1295116589Sjake	ba,a	%xcc, tl1_align_trap
129688644Sjake	 nop
129780709Sjake	.align	32
129880709Sjake	.endm
129980709Sjake
/*
 * TL1 alignment trap: resume alignment faults taken during spill/fill if
 * applicable, otherwise fall into tl1_sfsr_trap with the trap type in %g2.
 */
130082906SjakeENTRY(tl1_align_trap)
130188644Sjake	RESUME_SPILLFILL_ALIGN
1302116589Sjake	ba	%xcc, tl1_sfsr_trap
130388644Sjake	 mov	T_MEM_ADDRESS_NOT_ALIGNED | T_KERNEL, %g2
1304222840SmariusEND(tl1_align_trap)
130582906Sjake
130680709SjakeENTRY(tl1_sfsr_trap)
130788644Sjake	wr	%g0, ASI_DMMU, %asi
130888644Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
130988644Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
131080709Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
131180709Sjake	membar	#Sync
131282005Sjake
131391246Sjake	tl1_split
1314103921Sjake	clr	%o1
1315103921Sjake	set	trap, %o2
131688644Sjake	mov	%g3, %o4
131788644Sjake	mov	%g4, %o5
1318116589Sjake	ba	%xcc, tl1_trap
131988644Sjake	 mov	%g2, %o0
132088644SjakeEND(tl1_sfsr_trap)
132180709Sjake
/*
 * TL1 interrupt entry: build a trap frame and dispatch to the common
 * interrupt code with the level in %o0 and its mask in %o1.
 */
132284186Sjake	.macro	tl1_intr level, mask
132391246Sjake	tl1_split
132491246Sjake	set	\mask, %o1
1325116589Sjake	ba	%xcc, tl1_intr
132691246Sjake	 mov	\level, %o0
132781380Sjake	.align	32
132881380Sjake	.endm
132981380Sjake
/*
 * Expand the per-level TL1 interrupt trap-table entries.
 */
133080709Sjake	.macro	tl1_intr_level
133181380Sjake	INTR_LEVEL(1)
133280709Sjake	.endm
133380709Sjake
/*
 * TL1 instruction MMU miss: look the faulting virtual page up in the
 * kernel TSB and load the ITLB on a hit.  The TSB address and mask
 * `sethi/or` sequences below carry global labels so they can be patched
 * with the real values at startup.  Misses go to tl1_immu_miss_trap.
 */
133480709Sjake	.macro	tl1_immu_miss
133591224Sjake	/*
133691224Sjake	 * Load the context and the virtual page number from the tag access
133791224Sjake	 * register.  We ignore the context.
133891224Sjake	 */
133991224Sjake	wr	%g0, ASI_IMMU, %asi
1340102040Sjake	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g5
134185585Sjake
134291224Sjake	/*
1343181701Smarius	 * Compute the address of the TTE.  The TSB mask and address of the
1344181701Smarius	 * TSB are patched at startup.
134591224Sjake	 */
1346217514Smarius	.globl	tl1_immu_miss_patch_tsb_1
1347217514Smariustl1_immu_miss_patch_tsb_1:
1348217514Smarius	sethi	%uhi(TSB_KERNEL), %g6
1349217514Smarius	or	%g6, %ulo(TSB_KERNEL), %g6
1350217514Smarius	sllx	%g6, 32, %g6
1351217514Smarius	sethi	%hi(TSB_KERNEL), %g7
1352217514Smarius	or	%g7, %g6, %g7
1353216803Smarius	.globl	tl1_immu_miss_patch_tsb_mask_1
1354216803Smariustl1_immu_miss_patch_tsb_mask_1:
1355102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1356102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
135785585Sjake
1358102040Sjake	srlx	%g5, TAR_VPN_SHIFT, %g5
1359102040Sjake	and	%g5, %g6, %g6
1360102040Sjake	sllx	%g6, TTE_SHIFT, %g6
1361102040Sjake	add	%g6, %g7, %g6
136285585Sjake
136385585Sjake	/*
1364181701Smarius	 * Load the TTE.
136591224Sjake	 */
1366216803Smarius	.globl	tl1_immu_miss_patch_quad_ldd_1
1367216803Smariustl1_immu_miss_patch_quad_ldd_1:
1368216803Smarius	ldda	[%g6] TSB_QUAD_LDD, %g6 /*, %g7 */
136991224Sjake
137091224Sjake	/*
1371181701Smarius	 * Check that it's valid and executable and that the virtual page
137291224Sjake	 * numbers match.
137391224Sjake	 */
1374102040Sjake	brgez,pn %g7, tl1_immu_miss_trap
1375102040Sjake	 andcc	%g7, TD_EXEC, %g0
137691224Sjake	bz,pn	%xcc, tl1_immu_miss_trap
1377102040Sjake	 srlx	%g6, TV_SIZE_BITS, %g6
1378102040Sjake	cmp	%g5, %g6
137991224Sjake	bne,pn	%xcc, tl1_immu_miss_trap
138085585Sjake	 EMPTY
138185585Sjake
138285585Sjake	/*
1383181701Smarius	 * Set the reference bit if it's currently clear.
138485585Sjake	 */
1385102040Sjake	 andcc	%g7, TD_REF, %g0
1386102040Sjake	bz,a,pn	%xcc, tl1_immu_miss_set_ref
138791224Sjake	 nop
138885585Sjake
138991224Sjake	/*
1390181701Smarius	 * Load the TTE data into the TLB and retry the instruction.
139191224Sjake	 */
1392102040Sjake	stxa	%g7, [%g0] ASI_ITLB_DATA_IN_REG
1393102040Sjake	retry
1394102040Sjake	.align	128
1395102040Sjake	.endm
139688644Sjake
/*
 * Out-of-line continuation of the TL1 IMMU miss handler: recompute the
 * TTE address (the quad load clobbered it), atomically set the referenced
 * bit and reload the ITLB.  In: %g5 = virtual page number.  The patched
 * sethi/or sequences mirror those in the tl1_immu_miss macro.
 */
1397102040SjakeENTRY(tl1_immu_miss_set_ref)
139885585Sjake	/*
1399181701Smarius	 * Recompute the TTE address, which we clobbered loading the TTE.
1400181701Smarius	 * The TSB mask and address of the TSB are patched at startup.
1401102040Sjake	 */
1402217514Smarius	.globl	tl1_immu_miss_patch_tsb_2
1403217514Smariustl1_immu_miss_patch_tsb_2:
1404217514Smarius	sethi	%uhi(TSB_KERNEL), %g6
1405217514Smarius	or	%g6, %ulo(TSB_KERNEL), %g6
1406217514Smarius	sllx	%g6, 32, %g6
1407217514Smarius	sethi	%hi(TSB_KERNEL), %g7
1408217514Smarius	or	%g7, %g6, %g7
1409216803Smarius	.globl	tl1_immu_miss_patch_tsb_mask_2
1410216803Smariustl1_immu_miss_patch_tsb_mask_2:
1411102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1412102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
1413102040Sjake
1414102040Sjake	and	%g5, %g6, %g5
1415102040Sjake	sllx	%g5, TTE_SHIFT, %g5
1416102040Sjake	add	%g5, %g7, %g5
1417102040Sjake
1418102040Sjake	/*
1419102040Sjake	 * Set the reference bit.
1420102040Sjake	 */
1421216803Smarius	.globl	tl1_immu_miss_patch_asi_1
1422216803Smariustl1_immu_miss_patch_asi_1:
1423216803Smarius	wr	%g0, TSB_ASI, %asi
1424216803Smarius	TTE_SET_REF(%g5, %g6, %g7, a, %asi)
1425102040Sjake
1426102040Sjake	/*
1427102040Sjake	 * May have become invalid during casxa, in which case start over.
1428102040Sjake	 */
1429102040Sjake	brgez,pn %g6, 1f
1430102040Sjake	 nop
1431102040Sjake
1432102040Sjake	/*
1433181701Smarius	 * Load the TTE data into the TLB and retry the instruction.
143485585Sjake	 */
1435102040Sjake	stxa	%g6, [%g0] ASI_ITLB_DATA_IN_REG
1436102040Sjake1:	retry
1437102040SjakeEND(tl1_immu_miss_set_ref)
143885585Sjake
/*
 * TL1 IMMU miss slow path: not in the kernel TSB, so reload the tag
 * access register and hand off to the C trap code.
 */
143991224SjakeENTRY(tl1_immu_miss_trap)
144085585Sjake	/*
144185585Sjake	 * Switch to alternate globals.
144285585Sjake	 */
144391224Sjake	wrpr	%g0, PSTATE_ALT, %pstate
144485585Sjake
144591224Sjake	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2
144685585Sjake
144791246Sjake	tl1_split
1448103921Sjake	clr	%o1
1449103921Sjake	set	trap, %o2
145091224Sjake	mov	%g2, %o3
1451116589Sjake	ba	%xcc, tl1_trap
145288644Sjake	 mov	T_INSTRUCTION_MISS | T_KERNEL, %o0
145391224SjakeEND(tl1_immu_miss_trap)
145491224Sjake
/*
 * TL1 data MMU miss: route user-context faults to tl1_dmmu_miss_user,
 * direct-mapped physical addresses (negative VAs) to tl1_dmmu_miss_direct,
 * and otherwise look the page up in the kernel TSB, loading the DTLB on a
 * hit.  The TSB address/mask sequences carry global labels for startup
 * patching.  TSB misses go to tl1_dmmu_miss_trap.
 */
145591224Sjake	.macro	tl1_dmmu_miss
145691224Sjake	/*
145791224Sjake	 * Load the context and the virtual page number from the tag access
145891224Sjake	 * register.
145991224Sjake	 */
146091224Sjake	wr	%g0, ASI_DMMU, %asi
1461102040Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g5
146280709Sjake
146391224Sjake	/*
146491224Sjake	 * Extract the context from the contents of the tag access register.
1465181701Smarius	 * If it's non-zero this is a fault on a user address.  Note that the
1466108195Sjake	 * faulting address is passed in %g1.
146791224Sjake	 */
1468102040Sjake	sllx	%g5, 64 - TAR_VPN_SHIFT, %g6
1469102040Sjake	brnz,a,pn %g6, tl1_dmmu_miss_user
1470102040Sjake	 mov	%g5, %g1
147180709Sjake
147291224Sjake	/*
1473100771Sjake	 * Check for the direct mapped physical region.  These addresses have
1474100771Sjake	 * the high bit set so they are negative.
1475100771Sjake	 */
1476102040Sjake	brlz,pn %g5, tl1_dmmu_miss_direct
1477100771Sjake	 EMPTY
1478100771Sjake
1479100771Sjake	/*
1480181701Smarius	 * Compute the address of the TTE.  The TSB mask and address of the
1481181701Smarius	 * TSB are patched at startup.
148291224Sjake	 */
1483217514Smarius	.globl	tl1_dmmu_miss_patch_tsb_1
1484217514Smariustl1_dmmu_miss_patch_tsb_1:
1485217514Smarius	sethi	%uhi(TSB_KERNEL), %g6
1486217514Smarius	or	%g6, %ulo(TSB_KERNEL), %g6
1487217514Smarius	sllx	%g6, 32, %g6
1488217514Smarius	sethi	%hi(TSB_KERNEL), %g7
1489217514Smarius	or	%g7, %g6, %g7
1490216803Smarius	.globl	tl1_dmmu_miss_patch_tsb_mask_1
1491216803Smariustl1_dmmu_miss_patch_tsb_mask_1:
1492102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1493102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
149484186Sjake
1495102040Sjake	srlx	%g5, TAR_VPN_SHIFT, %g5
1496102040Sjake	and	%g5, %g6, %g6
1497102040Sjake	sllx	%g6, TTE_SHIFT, %g6
1498102040Sjake	add	%g6, %g7, %g6
149991224Sjake
150091224Sjake	/*
1501181701Smarius	 * Load the TTE.
150291224Sjake	 */
1503216803Smarius	.globl	tl1_dmmu_miss_patch_quad_ldd_1
1504216803Smariustl1_dmmu_miss_patch_quad_ldd_1:
1505216803Smarius	ldda	[%g6] TSB_QUAD_LDD, %g6 /*, %g7 */
150691224Sjake
150791224Sjake	/*
1508181701Smarius	 * Check that it's valid and that the virtual page numbers match.
150991224Sjake	 */
1510102040Sjake	brgez,pn %g7, tl1_dmmu_miss_trap
1511102040Sjake	 srlx	%g6, TV_SIZE_BITS, %g6
1512102040Sjake	cmp	%g5, %g6
151391224Sjake	bne,pn %xcc, tl1_dmmu_miss_trap
151480709Sjake	 EMPTY
151580709Sjake
151680709Sjake	/*
1517181701Smarius	 * Set the reference bit if it's currently clear.
151880709Sjake	 */
1519102040Sjake	 andcc	%g7, TD_REF, %g0
1520102040Sjake	bz,a,pt	%xcc, tl1_dmmu_miss_set_ref
152191224Sjake	 nop
152280709Sjake
152391224Sjake	/*
1524181701Smarius	 * Load the TTE data into the TLB and retry the instruction.
152591224Sjake	 */
1526102040Sjake	stxa	%g7, [%g0] ASI_DTLB_DATA_IN_REG
1527102040Sjake	retry
1528102040Sjake	.align	128
1529102040Sjake	.endm
153088644Sjake
/*
 * Out-of-line continuation of the TL1 DMMU miss handler: recompute the
 * TTE address (the quad load clobbered it), atomically set the referenced
 * bit and reload the DTLB.  In: %g5 = virtual page number.
 *
 * Fix: the two .globl directives below were swapped relative to the
 * labels they declare; pair each .globl with its own label, matching the
 * tl1_immu_miss_set_ref and tl1_dmmu_prot patch sequences.
 */
1531102040SjakeENTRY(tl1_dmmu_miss_set_ref)
153280709Sjake	/*
1533181701Smarius	 * Recompute the TTE address, which we clobbered loading the TTE.
1534181701Smarius	 * The TSB mask and address of the TSB are patched at startup.
1535102040Sjake	 */
1536216803Smarius	.globl	tl1_dmmu_miss_patch_tsb_2
1537217514Smariustl1_dmmu_miss_patch_tsb_2:
1538217514Smarius	sethi	%uhi(TSB_KERNEL), %g6
1539217514Smarius	or	%g6, %ulo(TSB_KERNEL), %g6
1540217514Smarius	sllx	%g6, 32, %g6
1541217514Smarius	sethi	%hi(TSB_KERNEL), %g7
1542217514Smarius	or	%g7, %g6, %g7
1543217514Smarius	.globl	tl1_dmmu_miss_patch_tsb_mask_2
1544216803Smariustl1_dmmu_miss_patch_tsb_mask_2:
1545102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1546102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
1547102040Sjake
1548102040Sjake	and	%g5, %g6, %g5
1549102040Sjake	sllx	%g5, TTE_SHIFT, %g5
1550102040Sjake	add	%g5, %g7, %g5
1551102040Sjake
1552102040Sjake	/*
1553102040Sjake	 * Set the reference bit.
1554102040Sjake	 */
1555216803Smarius	.globl	tl1_dmmu_miss_patch_asi_1
1556216803Smariustl1_dmmu_miss_patch_asi_1:
1557216803Smarius	wr	%g0, TSB_ASI, %asi
1558216803Smarius	TTE_SET_REF(%g5, %g6, %g7, a, %asi)
1559102040Sjake
1560102040Sjake	/*
1561102040Sjake	 * May have become invalid during casxa, in which case start over.
1562102040Sjake	 */
1563102040Sjake	brgez,pn %g6, 1f
1564102040Sjake	 nop
1565102040Sjake
1566102040Sjake	/*
1567181701Smarius	 * Load the TTE data into the TLB and retry the instruction.
156880709Sjake	 */
1569102040Sjake	stxa	%g6, [%g0] ASI_DTLB_DATA_IN_REG
1570102040Sjake1:	retry
1571102040SjakeEND(tl1_dmmu_miss_set_ref)
157280709Sjake
/*
 * TL1 DMMU miss slow path: not in the kernel TSB.  Reload the tag access
 * register, sanity-check the kernel stack (KSTACK_CHECK) and hand off to
 * the C trap code.
 */
157391224SjakeENTRY(tl1_dmmu_miss_trap)
157480709Sjake	/*
157582906Sjake	 * Switch to alternate globals.
157680709Sjake	 */
157791224Sjake	wrpr	%g0, PSTATE_ALT, %pstate
157880709Sjake
1579108195Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
1580108195Sjake
158188781Sjake	KSTACK_CHECK
158288781Sjake
158391246Sjake	tl1_split
1584103921Sjake	clr	%o1
1585103921Sjake	set	trap, %o2
158691224Sjake	mov	%g2, %o3
1587116589Sjake	ba	%xcc, tl1_trap
158888644Sjake	 mov	T_DATA_MISS | T_KERNEL, %o0
158988781SjakeEND(tl1_dmmu_miss_trap)
159080709Sjake
/*
 * Handle a TL1 DMMU miss on the direct-mapped physical region: derive the
 * physical address and TTE bits from the virtual address itself and load
 * the DTLB, no TSB lookup needed.  In: %g5 = tag access contents (VA).
 *
 * Fix: in the tl1_dmmu_miss_direct_patch_tsb_phys_1 sequence the fourth
 * instruction targeted %g3, clobbering the shifted upper half just built
 * there and leaving stale setx scratch in %g7 for the following or/cmp.
 * It must target %g7, exactly as the parallel ..._end_1 sequence does.
 */
1591100771SjakeENTRY(tl1_dmmu_miss_direct)
1592100771Sjake	/*
1593100771Sjake	 * Mask off the high bits of the virtual address to get the physical
1594181701Smarius	 * address, and or in the TTE bits.  The virtual address bits that
1595181701Smarius	 * correspond to the TTE valid and page size bits are left set, so
1596181701Smarius	 * they don't have to be included in the TTE bits below.  We know they
1597108245Sjake	 * are set because the virtual address is in the upper va hole.
1598216803Smarius	 * NB: if we are taking advantage of the ASI_ATOMIC_QUAD_LDD_PHYS
1599216803Smarius	 * and we get a miss on the directly accessed kernel TSB we must not
1600216803Smarius	 * set TD_CV in order to access it uniformly bypassing the D$.
1601100771Sjake	 */
1602216803Smarius	setx	TLB_DIRECT_ADDRESS_MASK, %g7, %g4
1603216803Smarius	and	%g5, %g4, %g4
1604108245Sjake	setx	TLB_DIRECT_TO_TTE_MASK, %g7, %g6
1605108245Sjake	and	%g5, %g6, %g5
1606216803Smarius	.globl	tl1_dmmu_miss_direct_patch_tsb_phys_1
1607216803Smariustl1_dmmu_miss_direct_patch_tsb_phys_1:
1608217514Smarius	sethi	%uhi(TSB_KERNEL_PHYS), %g3
1609217514Smarius	or	%g3, %ulo(TSB_KERNEL_PHYS), %g3
1610217514Smarius	sllx	%g3, 32, %g3
1611217514Smarius	sethi	%hi(TSB_KERNEL_PHYS), %g7
1612217514Smarius	or	%g7, %g3, %g7
1613216803Smarius	cmp	%g4, %g7
1614216803Smarius	bl,pt	%xcc, 1f
1615216803Smarius	 or	%g5, TD_CP | TD_W, %g5
1616216803Smarius	.globl	tl1_dmmu_miss_direct_patch_tsb_phys_end_1
1617216803Smariustl1_dmmu_miss_direct_patch_tsb_phys_end_1:
1618217514Smarius	sethi	%uhi(TSB_KERNEL_PHYS_END), %g3
1619217514Smarius	or	%g3, %ulo(TSB_KERNEL_PHYS_END), %g3
1620217514Smarius	sllx	%g3, 32, %g3
1621216803Smarius	sethi	%hi(TSB_KERNEL_PHYS_END), %g7
1622217514Smarius	or	%g7, %g3, %g7
1623216803Smarius	cmp	%g4, %g7
1624216803Smarius	bg,a,pt	%xcc, 1f
1625216803Smarius	 nop
1626216803Smarius	ba,pt	%xcc, 2f
1627216803Smarius	 nop
1628216803Smarius1:	or	%g5, TD_CV, %g5
1629100771Sjake
1630100771Sjake	/*
1631181701Smarius	 * Load the TTE data into the TLB and retry the instruction.
1632100771Sjake	 */
1633216803Smarius2:	stxa	%g5, [%g0] ASI_DTLB_DATA_IN_REG
1634100771Sjake	retry
1635100771SjakeEND(tl1_dmmu_miss_direct)
1636100771Sjake
/*
 * Trap-table entry for TL1 DMMU protection faults; work is done out of
 * line in tl1_dmmu_prot_1, padded to the 128-byte slot size.
 */
163782906Sjake	.macro	tl1_dmmu_prot
1638102040Sjake	ba,a	%xcc, tl1_dmmu_prot_1
1639102040Sjake	 nop
1640102040Sjake	.align	128
1641102040Sjake	.endm
1642102040Sjake
/*
 * TL1 DMMU protection fault handler: route user-context faults to
 * tl1_dmmu_prot_user (in tl0_dmmu_prot_1), otherwise look the page up in
 * the kernel TSB; on a valid, software-writable match, demap the stale
 * TLB entry, set the hardware write bit and reload the DTLB.  The TSB
 * address/mask sequences carry global labels for startup patching.
 * Failures go to tl1_dmmu_prot_trap.
 */
1643102040SjakeENTRY(tl1_dmmu_prot_1)
164491224Sjake	/*
164591224Sjake	 * Load the context and the virtual page number from the tag access
164691224Sjake	 * register.
164791224Sjake	 */
164891224Sjake	wr	%g0, ASI_DMMU, %asi
1649102040Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g5
165088644Sjake
165191224Sjake	/*
165291224Sjake	 * Extract the context from the contents of the tag access register.
1653181701Smarius	 * If it's non-zero this is a fault on a user address.  Note that the
1654108195Sjake	 * faulting address is passed in %g1.
165591224Sjake	 */
1656102040Sjake	sllx	%g5, 64 - TAR_VPN_SHIFT, %g6
1657102040Sjake	brnz,a,pn %g6, tl1_dmmu_prot_user
1658102040Sjake	 mov	%g5, %g1
165988644Sjake
166091224Sjake	/*
1661181701Smarius	 * Compute the address of the TTE.  The TSB mask and address of the
1662181701Smarius	 * TSB are patched at startup.
166391224Sjake	 */
1664217514Smarius	.globl	tl1_dmmu_prot_patch_tsb_1
1665217514Smariustl1_dmmu_prot_patch_tsb_1:
1666217514Smarius	sethi	%uhi(TSB_KERNEL), %g6
1667217514Smarius	or	%g6, %ulo(TSB_KERNEL), %g6
1668217514Smarius	sllx	%g6, 32, %g6
1669217514Smarius	sethi	%hi(TSB_KERNEL), %g7
1670217514Smarius	or	%g7, %g6, %g7
1671216803Smarius	.globl	tl1_dmmu_prot_patch_tsb_mask_1
1672216803Smariustl1_dmmu_prot_patch_tsb_mask_1:
1673102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1674102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
167588644Sjake
1676102040Sjake	srlx	%g5, TAR_VPN_SHIFT, %g5
1677102040Sjake	and	%g5, %g6, %g6
1678102040Sjake	sllx	%g6, TTE_SHIFT, %g6
1679102040Sjake	add	%g6, %g7, %g6
168091224Sjake
168191224Sjake	/*
1682181701Smarius	 * Load the TTE.
168391224Sjake	 */
1684216803Smarius	.globl	tl1_dmmu_prot_patch_quad_ldd_1
1685216803Smariustl1_dmmu_prot_patch_quad_ldd_1:
1686216803Smarius	ldda	[%g6] TSB_QUAD_LDD, %g6 /*, %g7 */
168791224Sjake
168891224Sjake	/*
1689181701Smarius	 * Check that it's valid and writeable and that the virtual page
169091224Sjake	 * numbers match.
169191224Sjake	 */
1692102040Sjake	brgez,pn %g7, tl1_dmmu_prot_trap
1693102040Sjake	 andcc	%g7, TD_SW, %g0
169491224Sjake	bz,pn	%xcc, tl1_dmmu_prot_trap
1695102040Sjake	 srlx	%g6, TV_SIZE_BITS, %g6
1696102040Sjake	cmp	%g5, %g6
169791224Sjake	bne,pn	%xcc, tl1_dmmu_prot_trap
169888644Sjake	 EMPTY
169988644Sjake
170088644Sjake	/*
1701181701Smarius	 * Delete the old TLB entry and clear the SFSR.
170288644Sjake	 */
1703102040Sjake	 sllx	%g5, TAR_VPN_SHIFT, %g6
170491224Sjake	or	%g6, TLB_DEMAP_NUCLEUS, %g6
170591224Sjake	stxa	%g0, [%g6] ASI_DMMU_DEMAP
170681180Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
170791224Sjake	membar	#Sync
170881180Sjake
1709102040Sjake	/*
1710181701Smarius	 * Recompute the TTE address, which we clobbered loading the TTE.
1711181701Smarius	 * The TSB mask and address of the TSB are patched at startup.
1712102040Sjake	 */
1713217514Smarius	.globl	tl1_dmmu_prot_patch_tsb_2
1714217514Smariustl1_dmmu_prot_patch_tsb_2:
1715217514Smarius	sethi	%uhi(TSB_KERNEL), %g6
1716217514Smarius	or	%g6, %ulo(TSB_KERNEL), %g6
1717217514Smarius	sllx	%g6, 32, %g6
1718217514Smarius	sethi	%hi(TSB_KERNEL), %g7
1719217514Smarius	or	%g7, %g6, %g7
1720216803Smarius	.globl	tl1_dmmu_prot_patch_tsb_mask_2
1721216803Smariustl1_dmmu_prot_patch_tsb_mask_2:
1722102040Sjake	sethi	%hi(TSB_KERNEL_MASK), %g6
1723102040Sjake	or	%g6, %lo(TSB_KERNEL_MASK), %g6
1724102040Sjake	and	%g5, %g6, %g5
1725102040Sjake	sllx	%g5, TTE_SHIFT, %g5
1726102040Sjake	add	%g5, %g7, %g5
1727102040Sjake
172881180Sjake	/*
172991224Sjake	 * Set the hardware write bit.
173091224Sjake	 */
1731216803Smarius	.globl	tl1_dmmu_prot_patch_asi_1
1732216803Smariustl1_dmmu_prot_patch_asi_1:
1733216803Smarius	wr	%g0, TSB_ASI, %asi
1734216803Smarius	TTE_SET_W(%g5, %g6, %g7, a, %asi)
173591224Sjake
173691224Sjake	/*
1737102040Sjake	 * May have become invalid during casxa, in which case start over.
1738102040Sjake	 */
1739102040Sjake	brgez,pn %g6, 1f
1740102040Sjake	 or	%g6, TD_W, %g6
1741102040Sjake
1742102040Sjake	/*
1743181701Smarius	 * Load the TTE data into the TLB and retry the instruction.
174488644Sjake	 */
1745102040Sjake	stxa	%g6, [%g0] ASI_DTLB_DATA_IN_REG
1746102040Sjake1:	retry
1747102040SjakeEND(tl1_dmmu_prot_1)
174888644Sjake
/*
 * Slow path for a kernel (TL>0) data protection fault that the fast
 * TSB-lookup handler could not satisfy (invalid TTE, TD_SW clear, or
 * virtual page number mismatch): capture the MMU fault state and hand
 * off to the generic TL1 trap handler.
 */
174988644SjakeENTRY(tl1_dmmu_prot_trap)
175081180Sjake	/*
175191224Sjake	 * Switch to alternate globals.
175291224Sjake	 */
175391224Sjake	wrpr	%g0, PSTATE_ALT, %pstate
175491224Sjake
175591224Sjake	/*
1756181701Smarius	 * Load the SFAR, SFSR and TAR.  Clear the SFSR.
175781180Sjake	 */
175888644Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
175988644Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
176088644Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
176181180Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
176281180Sjake	membar	#Sync
176381180Sjake
176491246Sjake	tl1_split
1765103921Sjake	clr	%o1
1766103921Sjake	set	trap, %o2			! C handler for tl1_trap to call
176788644Sjake	mov	%g2, %o3			! tag access (TAR)
176888644Sjake	mov	%g3, %o4			! fault address (SFAR)
176988644Sjake	mov	%g4, %o5			! fault status (SFSR)
1770116589Sjake	ba	%xcc, tl1_trap
177188644Sjake	 mov	T_DATA_PROTECTION | T_KERNEL, %o0	! trap type, in the delay slot
177288644SjakeEND(tl1_dmmu_prot_trap)
177381180Sjake
/*
 * Spill a 64-bit register window to the kernel stack (%sp + SPOFF,
 * 8-byte slots, no alternate ASI).  A fault during the spill is
 * fatal (RSF_FATAL).
 */
177480709Sjake	.macro	tl1_spill_0_n
177582906Sjake	SPILL(stx, %sp + SPOFF, 8, EMPTY)
177680709Sjake	saved
177780709Sjake	retry
177882906Sjake	.align	32
177982906Sjake	RSF_FATAL(T_SPILL)
178082906Sjake	RSF_FATAL(T_SPILL)
178180709Sjake	.endm
178280709Sjake
/*
 * Spill a 64-bit user window from TL>0 via the "as if user primary"
 * ASI.  If the store faults, the reset handler redirects to
 * tl1_spill_topcb (RSF_SPILL_TOPCB) so the window is saved in the
 * PCB instead.
 */
178391246Sjake	.macro	tl1_spill_2_n
178491246Sjake	wr	%g0, ASI_AIUP, %asi
178591246Sjake	SPILL(stxa, %sp + SPOFF, 8, %asi)
178682906Sjake	saved
178782906Sjake	retry
178882906Sjake	.align	32
178982906Sjake	RSF_SPILL_TOPCB
179082906Sjake	RSF_SPILL_TOPCB
179181380Sjake	.endm
179281380Sjake
/*
 * As tl1_spill_2_n, but for a 32-bit user window: 4-byte stores to an
 * unbiased %sp.  Faults fall back to spilling into the PCB.
 */
179391246Sjake	.macro	tl1_spill_3_n
179491246Sjake	wr	%g0, ASI_AIUP, %asi
179592200Sjake	SPILL(stwa, %sp, 4, %asi)
179682906Sjake	saved
179782906Sjake	retry
179882906Sjake	.align	32
179982906Sjake	RSF_SPILL_TOPCB
180082906Sjake	RSF_SPILL_TOPCB
180182906Sjake	.endm
180282906Sjake
/*
 * Spill a window whose stack width is decided at runtime: bit 0 of
 * %sp (the stack bias) set means a 64-bit frame, handled by branching
 * to tl1_spill_0_n; otherwise %sp is zero-extended to 32 bits and the
 * window is stored with 4-byte slots.  Faults here are fatal.
 */
1803205409Smarius	.macro	tl1_spill_7_n
1804205409Smarius	btst	1, %sp
1805205409Smarius	bnz,a,pn %xcc, tl1_spill_0_n
1806205409Smarius	 nop
1807205409Smarius	srl	%sp, 0, %sp			! zero-extend the 32-bit stack pointer
1808205409Smarius	SPILL(stw, %sp, 4, EMPTY)
1809205409Smarius	saved
1810205409Smarius	retry
1811205409Smarius	.align	32
1812205409Smarius	RSF_FATAL(T_SPILL)
1813205409Smarius	RSF_FATAL(T_SPILL)
1814205409Smarius	.endm
1815205409Smarius
/*
 * Spill a 64-bit user ("other") window from TL>0 via the user-primary
 * ASI; on fault, fall back to saving the window in the PCB.
 */
181691246Sjake	.macro	tl1_spill_0_o
181782906Sjake	wr	%g0, ASI_AIUP, %asi
181882906Sjake	SPILL(stxa, %sp + SPOFF, 8, %asi)
181982906Sjake	saved
182082906Sjake	retry
182182906Sjake	.align	32
182282906Sjake	RSF_SPILL_TOPCB
182382906Sjake	RSF_SPILL_TOPCB
182482906Sjake	.endm
182582906Sjake
/*
 * As tl1_spill_0_o, but for a 32-bit user window (4-byte stores,
 * unbiased %sp).  Faults fall back to spilling into the PCB.
 */
182682906Sjake	.macro	tl1_spill_1_o
182791246Sjake	wr	%g0, ASI_AIUP, %asi
182882906Sjake	SPILL(stwa, %sp, 4, %asi)
182982005Sjake	saved
183082005Sjake	retry
183182906Sjake	.align	32
183282906Sjake	RSF_SPILL_TOPCB
183382906Sjake	RSF_SPILL_TOPCB
183482906Sjake	.endm
183582005Sjake
/*
 * Unconditionally save the window into the PCB (no attempt to touch
 * the user stack at all).
 */
183682906Sjake	.macro	tl1_spill_2_o
183782906Sjake	RSF_SPILL_TOPCB
183891246Sjake	.align	128
183980709Sjake	.endm
184080709Sjake
/*
 * Fill a 64-bit register window from the kernel stack (%sp + SPOFF,
 * 8-byte slots).  A fault during the fill is fatal (RSF_FATAL).
 */
184180709Sjake	.macro	tl1_fill_0_n
184282906Sjake	FILL(ldx, %sp + SPOFF, 8, EMPTY)
184380709Sjake	restored
184480709Sjake	retry
184582906Sjake	.align	32
184682906Sjake	RSF_FATAL(T_FILL)
184782906Sjake	RSF_FATAL(T_FILL)
184880709Sjake	.endm
184980709Sjake
/*
 * Fill a 64-bit user window from TL>0 via the user-primary ASI.
 * On fault, RSF_FILL_MAGIC is used instead of a fatal reset.
 */
185091246Sjake	.macro	tl1_fill_2_n
185182906Sjake	wr	%g0, ASI_AIUP, %asi
185282906Sjake	FILL(ldxa, %sp + SPOFF, 8, %asi)
185382906Sjake	restored
185482906Sjake	retry
185582906Sjake	.align 32
185682906Sjake	RSF_FILL_MAGIC
185791246Sjake	RSF_FILL_MAGIC
185882906Sjake	.endm
185982906Sjake
/*
 * As tl1_fill_2_n, but for a 32-bit user window (4-byte zero-extending
 * loads from an unbiased %sp).
 */
186091246Sjake	.macro	tl1_fill_3_n
186182906Sjake	wr	%g0, ASI_AIUP, %asi
186282906Sjake	FILL(lduwa, %sp, 4, %asi)
186382906Sjake	restored
186482906Sjake	retry
186582906Sjake	.align 32
186682906Sjake	RSF_FILL_MAGIC
186791246Sjake	RSF_FILL_MAGIC
186882906Sjake	.endm
186982906Sjake
/*
 * Counterpart of tl1_spill_7_n: bit 0 of %sp selects the 64-bit fill
 * path (tl1_fill_0_n); otherwise %sp is zero-extended and the window
 * is refilled with 4-byte loads.  Faults here are fatal.
 */
1870205409Smarius	.macro	tl1_fill_7_n
1871205409Smarius	btst	1, %sp
1872205409Smarius	bnz,a,pt %xcc, tl1_fill_0_n
1873205409Smarius	 nop
1874205409Smarius	srl	%sp, 0, %sp			! zero-extend the 32-bit stack pointer
1875205409Smarius	FILL(lduw, %sp, 4, EMPTY)
1876205409Smarius	restored
1877205409Smarius	retry
1878205409Smarius	.align	32
1879205409Smarius	RSF_FATAL(T_FILL)
1880205409Smarius	RSF_FATAL(T_FILL)
1881205409Smarius	.endm
1882205409Smarius
188382005Sjake/*
188482906Sjake * This is used to spill windows that are still occupied with user
188582906Sjake * data on kernel entry to the pcb.
 *
 * The window is appended to the pcb_rw[] array at index pcb_nsaved,
 * the corresponding user %sp is recorded in pcb_rwsp[], and
 * pcb_nsaved is incremented.  %g1-%g3 are preserved on the alternate
 * stack (ASP_REG) while this runs on alternate globals.
188682005Sjake */
188782906SjakeENTRY(tl1_spill_topcb)
188882906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
188982906Sjake
189082005Sjake	/* Free some globals for our use. */
189188644Sjake	dec	24, ASP_REG
189288644Sjake	stx	%g1, [ASP_REG + 0]
189388644Sjake	stx	%g2, [ASP_REG + 8]
189488644Sjake	stx	%g3, [ASP_REG + 16]
189582906Sjake
189688644Sjake	ldx	[PCB_REG + PCB_NSAVED], %g1	! %g1 = windows already saved
189782906Sjake
	/* Record the user %sp for this slot in pcb_rwsp[nsaved]. */
189888644Sjake	sllx	%g1, PTR_SHIFT, %g2
189988644Sjake	add	%g2, PCB_REG, %g2
190088644Sjake	stx	%sp, [%g2 + PCB_RWSP]
190182906Sjake
	/* Copy the window registers into pcb_rw[nsaved]. */
190288644Sjake	sllx	%g1, RW_SHIFT, %g2
190388644Sjake	add	%g2, PCB_REG, %g2
190488644Sjake	SPILL(stx, %g2 + PCB_RW, 8, EMPTY)
190582906Sjake
190688644Sjake	inc	%g1
190788644Sjake	stx	%g1, [PCB_REG + PCB_NSAVED]
190882906Sjake
190985243Sjake#if KTR_COMPILE & KTR_TRAP
191088785Sjake	CATR(KTR_TRAP, "tl1_spill_topcb: pc=%#lx npc=%#lx sp=%#lx nsaved=%d"
191182906Sjake	   , %g1, %g2, %g3, 7, 8, 9)
191282906Sjake	rdpr	%tpc, %g2
191382906Sjake	stx	%g2, [%g1 + KTR_PARM1]
191488785Sjake	rdpr	%tnpc, %g2
191588785Sjake	stx	%g2, [%g1 + KTR_PARM2]
191688785Sjake	stx	%sp, [%g1 + KTR_PARM3]
191788644Sjake	ldx	[PCB_REG + PCB_NSAVED], %g2
191888785Sjake	stx	%g2, [%g1 + KTR_PARM4]
191982906Sjake9:
192082906Sjake#endif
192182906Sjake
192282906Sjake	saved
192382906Sjake
	/* Restore the scratch globals and pop the alternate stack. */
192488644Sjake	ldx	[ASP_REG + 16], %g3
192588644Sjake	ldx	[ASP_REG + 8], %g2
192688644Sjake	ldx	[ASP_REG + 0], %g1
192788644Sjake	inc	24, ASP_REG
192882005Sjake	retry
192982906SjakeEND(tl1_spill_topcb)
193082005Sjake
/*
 * Fill \count unused spill-trap slots (128 bytes each) with `sir'
 * (software-initiated reset), so taking one is immediately fatal.
 */
193182906Sjake	.macro	tl1_spill_bad	count
193282906Sjake	.rept	\count
193388644Sjake	sir
193488644Sjake	.align	128
193582906Sjake	.endr
193682906Sjake	.endm
193782906Sjake
/*
 * Fill \count unused fill-trap slots (128 bytes each) with `sir'
 * (software-initiated reset), so taking one is immediately fatal.
 */
193880709Sjake	.macro	tl1_fill_bad	count
193980709Sjake	.rept	\count
194088644Sjake	sir
194188644Sjake	.align	128
194280709Sjake	.endr
194380709Sjake	.endm
194480709Sjake
/*
 * Emit \count generic kernel software-trap entries
 * (tl1_gen with T_SOFT | T_KERNEL).
 */
194580709Sjake	.macro	tl1_soft	count
194682906Sjake	.rept	\count
194782906Sjake	tl1_gen	T_SOFT | T_KERNEL
194882906Sjake	.endr
194980709Sjake	.endm
195080709Sjake
195180709Sjake	.sect	.trap
1952155839Smarius	.globl	tl_trap_begin
1953155839Smariustl_trap_begin:
1954155839Smarius	nop
1955155839Smarius
/*
 * TL0 (user mode) trap table, aligned to 0x8000 as required for %tba.
 * Each macro below expands to one or more fixed-size table slots; the
 * trailing `!' comments give the hardware trap vector numbers.
 */
195580709Sjake	.align	0x8000
195680709Sjake	.globl	tl0_base
195780709Sjake
195880709Sjaketl0_base:
196088779Sjake	tl0_reserved	8				! 0x0-0x7
196180709Sjaketl0_insn_excptn:
196288779Sjake	tl0_insn_excptn					! 0x8
196388779Sjake	tl0_reserved	1				! 0x9
196480709Sjaketl0_insn_error:
196588779Sjake	tl0_gen		T_INSTRUCTION_ERROR		! 0xa
196688779Sjake	tl0_reserved	5				! 0xb-0xf
196780709Sjaketl0_insn_illegal:
196888779Sjake	tl0_gen		T_ILLEGAL_INSTRUCTION		! 0x10
196980709Sjaketl0_priv_opcode:
197088779Sjake	tl0_gen		T_PRIVILEGED_OPCODE		! 0x11
197188779Sjake	tl0_reserved	14				! 0x12-0x1f
197280709Sjaketl0_fp_disabled:
197388779Sjake	tl0_gen		T_FP_DISABLED			! 0x20
197480709Sjaketl0_fp_ieee:
197588779Sjake	tl0_gen		T_FP_EXCEPTION_IEEE_754		! 0x21
197680709Sjaketl0_fp_other:
197788779Sjake	tl0_gen		T_FP_EXCEPTION_OTHER		! 0x22
197880709Sjaketl0_tag_ovflw:
1979154419Skris	tl0_gen		T_TAG_OVERFLOW			! 0x23
198080709Sjaketl0_clean_window:
198188779Sjake	clean_window					! 0x24
198280709Sjaketl0_divide:
198388779Sjake	tl0_gen		T_DIVISION_BY_ZERO		! 0x28
198488779Sjake	tl0_reserved	7				! 0x29-0x2f
198580709Sjaketl0_data_excptn:
198688779Sjake	tl0_data_excptn					! 0x30
198788779Sjake	tl0_reserved	1				! 0x31
198880709Sjaketl0_data_error:
198988779Sjake	tl0_gen		T_DATA_ERROR			! 0x32
199088779Sjake	tl0_reserved	1				! 0x33
199180709Sjaketl0_align:
199288779Sjake	tl0_align					! 0x34
199380709Sjaketl0_align_lddf:
199488779Sjake	tl0_gen		T_RESERVED			! 0x35
199580709Sjaketl0_align_stdf:
199688779Sjake	tl0_gen		T_RESERVED			! 0x36
199780709Sjaketl0_priv_action:
199888779Sjake	tl0_gen		T_PRIVILEGED_ACTION		! 0x37
199988779Sjake	tl0_reserved	9				! 0x38-0x40
200080709Sjaketl0_intr_level:
200188779Sjake	tl0_intr_level					! 0x41-0x4f
200288779Sjake	tl0_reserved	16				! 0x50-0x5f
200380709Sjaketl0_intr_vector:
200497265Sjake	intr_vector					! 0x60
200580709Sjaketl0_watch_phys:
200688779Sjake	tl0_gen		T_PA_WATCHPOINT			! 0x61
200780709Sjaketl0_watch_virt:
200888779Sjake	tl0_gen		T_VA_WATCHPOINT			! 0x62
200980709Sjaketl0_ecc:
201088779Sjake	tl0_gen		T_CORRECTED_ECC_ERROR		! 0x63
201180709Sjaketl0_immu_miss:
201288779Sjake	tl0_immu_miss					! 0x64
201380709Sjaketl0_dmmu_miss:
201488779Sjake	tl0_dmmu_miss					! 0x68
201580709Sjaketl0_dmmu_prot:
201688779Sjake	tl0_dmmu_prot					! 0x6c
201788779Sjake	tl0_reserved	16				! 0x70-0x7f
201880709Sjaketl0_spill_0_n:
201988779Sjake	tl0_spill_0_n					! 0x80
202082906Sjaketl0_spill_1_n:
202188779Sjake	tl0_spill_1_n					! 0x84
202291246Sjake	tl0_spill_bad	14				! 0x88-0xbf
202380709Sjaketl0_fill_0_n:
202488779Sjake	tl0_fill_0_n					! 0xc0
202582906Sjaketl0_fill_1_n:
202688779Sjake	tl0_fill_1_n					! 0xc4
202791246Sjake	tl0_fill_bad	14				! 0xc8-0xff
202888644Sjaketl0_soft:
2029106050Sjake	tl0_gen		T_SYSCALL			! 0x100
203088779Sjake	tl0_gen		T_BREAKPOINT			! 0x101
203188779Sjake	tl0_gen		T_DIVISION_BY_ZERO		! 0x102
203288779Sjake	tl0_reserved	1				! 0x103
203388779Sjake	tl0_gen		T_CLEAN_WINDOW			! 0x104
203488779Sjake	tl0_gen		T_RANGE_CHECK			! 0x105
203588779Sjake	tl0_gen		T_FIX_ALIGNMENT			! 0x106
203688779Sjake	tl0_gen		T_INTEGER_OVERFLOW		! 0x107
2037106050Sjake	tl0_gen		T_SYSCALL			! 0x108
2038106050Sjake	tl0_gen		T_SYSCALL			! 0x109
203988779Sjake	tl0_fp_restore					! 0x10a
204088779Sjake	tl0_reserved	5				! 0x10b-0x10f
204188779Sjake	tl0_gen		T_TRAP_INSTRUCTION_16		! 0x110
204288779Sjake	tl0_gen		T_TRAP_INSTRUCTION_17		! 0x111
204388779Sjake	tl0_gen		T_TRAP_INSTRUCTION_18		! 0x112
204488779Sjake	tl0_gen		T_TRAP_INSTRUCTION_19		! 0x113
204588779Sjake	tl0_gen		T_TRAP_INSTRUCTION_20		! 0x114
204688779Sjake	tl0_gen		T_TRAP_INSTRUCTION_21		! 0x115
204788779Sjake	tl0_gen		T_TRAP_INSTRUCTION_22		! 0x116
204888779Sjake	tl0_gen		T_TRAP_INSTRUCTION_23		! 0x117
204988779Sjake	tl0_gen		T_TRAP_INSTRUCTION_24		! 0x118
205088779Sjake	tl0_gen		T_TRAP_INSTRUCTION_25		! 0x119
205188779Sjake	tl0_gen		T_TRAP_INSTRUCTION_26		! 0x11a
205288779Sjake	tl0_gen		T_TRAP_INSTRUCTION_27		! 0x11b
205388779Sjake	tl0_gen		T_TRAP_INSTRUCTION_28		! 0x11c
205488779Sjake	tl0_gen		T_TRAP_INSTRUCTION_29		! 0x11d
205588779Sjake	tl0_gen		T_TRAP_INSTRUCTION_30		! 0x11e
205688779Sjake	tl0_gen		T_TRAP_INSTRUCTION_31		! 0x11f
2057106050Sjake	tl0_reserved	32				! 0x120-0x13f
2058106050Sjake	tl0_gen		T_SYSCALL			! 0x140
2059106050Sjake	tl0_syscall					! 0x141
2060106050Sjake	tl0_gen		T_SYSCALL			! 0x142
2061106050Sjake	tl0_gen		T_SYSCALL			! 0x143
2062106050Sjake	tl0_reserved	188				! 0x144-0x1ff
206380709Sjake
/*
 * TL1 trap table, for traps taken while already at trap level > 0
 * (vectors 0x200-0x3ff, i.e. the TL>0 half of the table).
 */
206480709Sjaketl1_base:
206588779Sjake	tl1_reserved	8				! 0x200-0x207
206680709Sjaketl1_insn_excptn:
206788779Sjake	tl1_insn_excptn					! 0x208
206888779Sjake	tl1_reserved	1				! 0x209
206980709Sjaketl1_insn_error:
207088779Sjake	tl1_gen		T_INSTRUCTION_ERROR		! 0x20a
207188779Sjake	tl1_reserved	5				! 0x20b-0x20f
207280709Sjaketl1_insn_illegal:
207388779Sjake	tl1_gen		T_ILLEGAL_INSTRUCTION		! 0x210
207480709Sjaketl1_priv_opcode:
207588779Sjake	tl1_gen		T_PRIVILEGED_OPCODE		! 0x211
207688779Sjake	tl1_reserved	14				! 0x212-0x21f
207780709Sjaketl1_fp_disabled:
2078113024Sjake	tl1_fp_disabled					! 0x220
207980709Sjaketl1_fp_ieee:
208088779Sjake	tl1_gen		T_FP_EXCEPTION_IEEE_754		! 0x221
208180709Sjaketl1_fp_other:
208288779Sjake	tl1_gen		T_FP_EXCEPTION_OTHER		! 0x222
208380709Sjaketl1_tag_ovflw:
2084154419Skris	tl1_gen		T_TAG_OVERFLOW			! 0x223
208580709Sjaketl1_clean_window:
208688779Sjake	clean_window					! 0x224
208780709Sjaketl1_divide:
208888779Sjake	tl1_gen		T_DIVISION_BY_ZERO		! 0x228
208988779Sjake	tl1_reserved	7				! 0x229-0x22f
209080709Sjaketl1_data_excptn:
209188779Sjake	tl1_data_excptn					! 0x230
209288779Sjake	tl1_reserved	1				! 0x231
209380709Sjaketl1_data_error:
209488779Sjake	tl1_gen		T_DATA_ERROR			! 0x232
209588779Sjake	tl1_reserved	1				! 0x233
209680709Sjaketl1_align:
209788779Sjake	tl1_align					! 0x234
209880709Sjaketl1_align_lddf:
209988779Sjake	tl1_gen		T_RESERVED			! 0x235
210080709Sjaketl1_align_stdf:
210188779Sjake	tl1_gen		T_RESERVED			! 0x236
210280709Sjaketl1_priv_action:
210388779Sjake	tl1_gen		T_PRIVILEGED_ACTION		! 0x237
210488779Sjake	tl1_reserved	9				! 0x238-0x240
210580709Sjaketl1_intr_level:
210688779Sjake	tl1_intr_level					! 0x241-0x24f
210788779Sjake	tl1_reserved	16				! 0x250-0x25f
210880709Sjaketl1_intr_vector:
210997265Sjake	intr_vector					! 0x260
211080709Sjaketl1_watch_phys:
211188779Sjake	tl1_gen		T_PA_WATCHPOINT			! 0x261
211280709Sjaketl1_watch_virt:
211388779Sjake	tl1_gen		T_VA_WATCHPOINT			! 0x262
211480709Sjaketl1_ecc:
211588779Sjake	tl1_gen		T_CORRECTED_ECC_ERROR		! 0x263
211680709Sjaketl1_immu_miss:
211788779Sjake	tl1_immu_miss					! 0x264
211880709Sjaketl1_dmmu_miss:
211988779Sjake	tl1_dmmu_miss					! 0x268
212080709Sjaketl1_dmmu_prot:
212188779Sjake	tl1_dmmu_prot					! 0x26c
212288779Sjake	tl1_reserved	16				! 0x270-0x27f
212380709Sjaketl1_spill_0_n:
212488779Sjake	tl1_spill_0_n					! 0x280
212591246Sjake	tl1_spill_bad	1				! 0x284
212691246Sjaketl1_spill_2_n:
212791246Sjake	tl1_spill_2_n					! 0x288
212891246Sjaketl1_spill_3_n:
2129205409Smarius	tl1_spill_3_n					! 0x28c
2130205409Smarius	tl1_spill_bad	3				! 0x290-0x29b
2131205409Smariustl1_spill_7_n:
2132205409Smarius	tl1_spill_7_n					! 0x29c
213381380Sjaketl1_spill_0_o:
213488779Sjake	tl1_spill_0_o					! 0x2a0
213582906Sjaketl1_spill_1_o:
213688779Sjake	tl1_spill_1_o					! 0x2a4
213782906Sjaketl1_spill_2_o:
213888779Sjake	tl1_spill_2_o					! 0x2a8
213991246Sjake	tl1_spill_bad	5				! 0x2ac-0x2bf
214080709Sjaketl1_fill_0_n:
214188779Sjake	tl1_fill_0_n					! 0x2c0
214291246Sjake	tl1_fill_bad	1				! 0x2c4
214391246Sjaketl1_fill_2_n:
2144205409Smarius	tl1_fill_2_n					! 0x2c8
214591246Sjaketl1_fill_3_n:
2146205409Smarius	tl1_fill_3_n					! 0x2cc
2147205409Smarius	tl1_fill_bad	3				! 0x2d0-0x2db
2148205409Smariustl1_fill_7_n:
2149205409Smarius	tl1_fill_7_n					! 0x2dc
2150205409Smarius	tl1_fill_bad	8				! 0x2e0-0x2ff
215188779Sjake	tl1_reserved	1				! 0x300
215280709Sjaketl1_breakpoint:
215388779Sjake	tl1_gen		T_BREAKPOINT			! 0x301
215488779Sjake	tl1_gen		T_RSTRWP_PHYS			! 0x302
215588779Sjake	tl1_gen		T_RSTRWP_VIRT			! 0x303
215688779Sjake	tl1_reserved	252				! 0x304-0x3ff
215780709Sjake
2158155839Smarius	.globl	tl_trap_end
2159155839Smariustl_trap_end:
2160155839Smarius	nop
2161155839Smarius
216281380Sjake/*
2163181701Smarius * User trap entry point
216482906Sjake *
2165103897Sjake * void tl0_utrap(u_long type, u_long o1, u_long o2, u_long tar, u_long sfar,
2166181701Smarius *     u_long sfsr)
2167103897Sjake *
2168103897Sjake * This handles redirecting a trap back to usermode as a user trap.  The user
2169103897Sjake * program must have first registered a trap handler with the kernel using
2170103897Sjake * sysarch(SPARC_UTRAP_INSTALL).  The trap handler is passed enough state
2171103897Sjake * for it to return to the trapping code directly, it will not return through
2172103897Sjake * the kernel.  The trap type is passed in %o0, all out registers must be
2173103897Sjake * passed through to tl0_trap or to usermode untouched.  Note that the
2174103897Sjake * parameters passed in out registers may be used by the user trap handler.
2175103897Sjake * Do not change the registers they are passed in or you will break the ABI.
2176103897Sjake *
2177103897Sjake * If the trap type allows user traps, setup state to execute the user trap
2178103897Sjake * handler and bounce back to usermode, otherwise branch to tl0_trap.
2179103897Sjake */
2180103897SjakeENTRY(tl0_utrap)
2181103897Sjake	/*
2182103897Sjake	 * Check if the trap type allows user traps.
2183103897Sjake	 */
2184103897Sjake	cmp	%o0, UT_MAX
2185103897Sjake	bge,a,pt %xcc, tl0_trap
2186103897Sjake	 nop
2187103897Sjake
2188103897Sjake	/*
2189103897Sjake	 * Load the user trap handler from the utrap table.
2190103897Sjake	 */
2191103897Sjake	ldx	[PCPU(CURTHREAD)], %l0
2192103897Sjake	ldx	[%l0 + TD_PROC], %l0
2193103897Sjake	ldx	[%l0 + P_MD + MD_UTRAP], %l0
2194103897Sjake	brz,pt	%l0, tl0_trap			! no utrap table installed
2195103897Sjake	 sllx	%o0, PTR_SHIFT, %l1
2196103897Sjake	ldx	[%l0 + %l1], %l0		! %l0 = handler for this trap type
2197103897Sjake	brz,a,pt %l0, tl0_trap			! no handler for this type
2198103897Sjake	 nop
2199103897Sjake
2200103897Sjake	/*
2201103897Sjake	 * If the save we did on entry to the kernel had to spill a window
2202103897Sjake	 * to the pcb, pretend we took a spill trap instead.  Any windows
2203103897Sjake	 * that are in the pcb must be copied out or the fill handler will
2204103897Sjake	 * not be able to find them, since the user trap handler returns
2205103897Sjake	 * directly to the trapping code.  Note that we only support precise
2206103897Sjake	 * user traps, which implies that the condition that caused the trap
2207103897Sjake	 * in the first place is still valid, so it will occur again when we
2208103897Sjake	 * re-execute the trapping instruction.
2209181701Smarius	 */
2210103897Sjake	ldx	[PCB_REG + PCB_NSAVED], %l1
2211103897Sjake	brnz,a,pn %l1, tl0_trap
2212103897Sjake	 mov	T_SPILL, %o0
2213103897Sjake
2214103897Sjake	/*
2215103897Sjake	 * Pass %fsr in %l4, %tstate in %l5, %tpc in %l6 and %tnpc in %l7.
2216103897Sjake	 * The ABI specifies only %l6 and %l7, but we need to pass %fsr or
2217103897Sjake	 * it may be clobbered by an interrupt before the user trap code
2218103897Sjake	 * can read it, and we must pass %tstate in order to restore %ccr
2219103897Sjake	 * and %asi.  The %fsr must be stored to memory, so we use the
2220103897Sjake	 * temporary stack for that.
2221103897Sjake	 */
2222103897Sjake	rd	%fprs, %l1
2223103897Sjake	or	%l1, FPRS_FEF, %l2
2224103897Sjake	wr	%l2, 0, %fprs			! enable FP to read %fsr
2225103897Sjake	dec	8, ASP_REG
2226103897Sjake	stx	%fsr, [ASP_REG]
2227103897Sjake	ldx	[ASP_REG], %l4
2228103897Sjake	inc	8, ASP_REG
2229103897Sjake	wr	%l1, 0, %fprs			! restore original %fprs
2230103897Sjake
2231103897Sjake	rdpr	%tstate, %l5
2232103897Sjake	rdpr	%tpc, %l6
2233103897Sjake	rdpr	%tnpc, %l7
2234103897Sjake
2235103897Sjake	/*
2236103897Sjake	 * Setup %tnpc to return to.
2237103897Sjake	 */
2238103897Sjake	wrpr	%l0, 0, %tnpc
2239103897Sjake
2240103897Sjake	/*
2241103897Sjake	 * Setup %wstate for return, clear WSTATE_TRANSITION.
2242103897Sjake	 */
2243103897Sjake	rdpr	%wstate, %l1
2244103897Sjake	and	%l1, WSTATE_NORMAL_MASK, %l1
2245103897Sjake	wrpr	%l1, 0, %wstate
2246103897Sjake
2247103897Sjake	/*
2248103897Sjake	 * Setup %tstate for return, change the saved cwp to point to the
2249103897Sjake	 * current window instead of the window at the time of the trap.
2250103897Sjake	 */
2251103897Sjake	andn	%l5, TSTATE_CWP_MASK, %l1
2252103897Sjake	rdpr	%cwp, %l2
2253103897Sjake	wrpr	%l1, %l2, %tstate
2254103897Sjake
2255103897Sjake	/*
2256103897Sjake	 * Setup %sp.  Userland processes will crash if this is not setup.
2257103897Sjake	 */
2258103897Sjake	sub	%fp, CCFSZ, %sp
2259103897Sjake
2260103897Sjake	/*
2261103897Sjake	 * Execute the user trap handler.
2262103897Sjake	 */
2263103897Sjake	done
2264103897SjakeEND(tl0_utrap)
2265103897Sjake
2266103897Sjake/*
2267181701Smarius * (Real) User trap entry point
2268103897Sjake *
2269181701Smarius * void tl0_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
2270181701Smarius *     u_int sfsr)
227182906Sjake *
227282906Sjake * The following setup has been performed:
227382906Sjake *	- the windows have been split and the active user window has been saved
227482906Sjake *	  (maybe just to the pcb)
227582906Sjake *	- we are on alternate globals and interrupts are disabled
227682906Sjake *
227789050Sjake * We switch to the kernel stack, build a trapframe, switch to normal
227888644Sjake * globals, enable interrupts and call trap.
227982906Sjake *
228082906Sjake * NOTE: We must be very careful setting up the per-cpu pointer.  We know that
228182906Sjake * it has been pre-set in alternate globals, so we read it from there and setup
228282906Sjake * the normal %g7 *before* enabling interrupts.  This avoids any possibility
228387702Sjhb * of cpu migration and using the wrong pcpup.
228481380Sjake */
228582005SjakeENTRY(tl0_trap)
228682906Sjake	/*
228782906Sjake	 * Force kernel store order.
228882906Sjake	 */
228982906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
229080709Sjake
	/* Capture the trap state registers before anything can clobber them. */
229181380Sjake	rdpr	%tstate, %l0
229288644Sjake	rdpr	%tpc, %l1
229388644Sjake	rdpr	%tnpc, %l2
229488644Sjake	rd	%y, %l3
229588644Sjake	rd	%fprs, %l4
229688644Sjake	rdpr	%wstate, %l5
229788644Sjake
229888644Sjake#if KTR_COMPILE & KTR_TRAP
229988644Sjake	CATR(KTR_TRAP,
230088644Sjake	    "tl0_trap: td=%p type=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
230188644Sjake	    , %g1, %g2, %g3, 7, 8, 9)
230288644Sjake	ldx	[PCPU(CURTHREAD)], %g2
230388644Sjake	stx	%g2, [%g1 + KTR_PARM1]
230488644Sjake	stx	%o0, [%g1 + KTR_PARM2]
230588644Sjake	rdpr	%pil, %g2
230688644Sjake	stx	%g2, [%g1 + KTR_PARM3]
230788644Sjake	stx	%l1, [%g1 + KTR_PARM4]
230888644Sjake	stx	%l2, [%g1 + KTR_PARM5]
230988644Sjake	stx	%i6, [%g1 + KTR_PARM6]
231088644Sjake9:
231188644Sjake#endif
231288644Sjake
	/* Move the user windows to the other set and switch to the kernel stack. */
2313103897Sjake1:	and	%l5, WSTATE_NORMAL_MASK, %l5
2314103897Sjake	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
231588644Sjake	wrpr	%l5, WSTATE_KERNEL, %wstate
231688644Sjake	rdpr	%canrestore, %l6
231788644Sjake	wrpr	%l6, 0, %otherwin
231888644Sjake	wrpr	%g0, 0, %canrestore
231988644Sjake
232088644Sjake	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp
232188644Sjake
	/* Store the trap arguments (%o0-%o5) into the trapframe. */
2322105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2323105733Sjake	stx	%o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
232488644Sjake	stx	%o3, [%sp + SPOFF + CCFSZ + TF_TAR]
232588644Sjake	stx	%o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
2326105733Sjake	stx	%o5, [%sp + SPOFF + CCFSZ + TF_SFSR]
232788644Sjake
232881380Sjake	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
232981380Sjake	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
233081380Sjake	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2331105733Sjake	stx	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
2332105733Sjake	stx	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
2333105733Sjake	stx	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]
233481380Sjake
	/* %fsr and %gsr need FP enabled in %fprs to be read. */
233588644Sjake	wr	%g0, FPRS_FEF, %fprs
233688644Sjake	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
2337108379Sjake	rd	%gsr, %l6
2338105733Sjake	stx	%l6, [%sp + SPOFF + CCFSZ + TF_GSR]
233988644Sjake	wr	%g0, 0, %fprs
234082906Sjake
	/* Carry PCB/PCPU pointers across the switch to normal globals. */
234189050Sjake	mov	PCB_REG, %l0
234289050Sjake	mov	PCPU_REG, %l1
234382906Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
234482005Sjake
234582005Sjake	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
234682005Sjake	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]
234782005Sjake
234889050Sjake	mov	%l0, PCB_REG
234989050Sjake	mov	%l1, PCPU_REG
235088644Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
235184186Sjake
	/* The user's outs are now visible as our ins; save them. */
235284186Sjake	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
235384186Sjake	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
235484186Sjake	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
235584186Sjake	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
235684186Sjake	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
235784186Sjake	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
235884186Sjake	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
235984186Sjake	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
236084186Sjake
2361108377Sjake	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
2362108377Sjake	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
2363108377Sjake	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
2364108377Sjake	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
2365108377Sjake	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
2366108377Sjake
	/*
	 * Call the handler in %o2 with the trapframe as its argument;
	 * %o7 is set so that the handler returns into tl0_ret.
	 */
2367103921Sjake	set	tl0_ret - 8, %o7
2368103921Sjake	jmpl	%o2, %g0
236984186Sjake	 add	%sp, CCFSZ + SPOFF, %o0
237084186SjakeEND(tl0_trap)
237184186Sjake
237288644Sjake/*
237391246Sjake * void tl0_intr(u_int level, u_int mask)
 *
 * User-mode interrupt entry point: raise %pil to the interrupt level,
 * clear the pending soft interrupt bit, build a trapframe as in
 * tl0_trap, invoke the level's handler from intr_handlers[], bump the
 * interrupt statistics, and return via tl0_ret.
237491246Sjake */
237584186SjakeENTRY(tl0_intr)
237684186Sjake	/*
237784186Sjake	 * Force kernel store order.
237884186Sjake	 */
237984186Sjake	wrpr	%g0, PSTATE_ALT, %pstate
238084186Sjake
	/* Capture the trap state registers before anything can clobber them. */
238184186Sjake	rdpr	%tstate, %l0
238288644Sjake	rdpr	%tpc, %l1
238388644Sjake	rdpr	%tnpc, %l2
238488644Sjake	rd	%y, %l3
238588644Sjake	rd	%fprs, %l4
238688644Sjake	rdpr	%wstate, %l5
238788644Sjake
238888644Sjake#if KTR_COMPILE & KTR_INTR
238988644Sjake	CATR(KTR_INTR,
239091246Sjake	    "tl0_intr: td=%p level=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
239188644Sjake	    , %g1, %g2, %g3, 7, 8, 9)
239288644Sjake	ldx	[PCPU(CURTHREAD)], %g2
239388644Sjake	stx	%g2, [%g1 + KTR_PARM1]
239488644Sjake	stx	%o0, [%g1 + KTR_PARM2]
239588644Sjake	rdpr	%pil, %g2
239688644Sjake	stx	%g2, [%g1 + KTR_PARM3]
239788644Sjake	stx	%l1, [%g1 + KTR_PARM4]
239888644Sjake	stx	%l2, [%g1 + KTR_PARM5]
239988644Sjake	stx	%i6, [%g1 + KTR_PARM6]
240088644Sjake9:
240188644Sjake#endif
240288644Sjake
	/* Block interrupts at or below this level and ack the soft interrupt. */
240391246Sjake	wrpr	%o0, 0, %pil
2404108379Sjake	wr	%o1, 0, %clear_softint
240591246Sjake
	/* Move the user windows to the other set and switch to the kernel stack. */
240688644Sjake	and	%l5, WSTATE_NORMAL_MASK, %l5
240788644Sjake	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
240888644Sjake	wrpr	%l5, WSTATE_KERNEL, %wstate
240988644Sjake	rdpr	%canrestore, %l6
241088644Sjake	wrpr	%l6, 0, %otherwin
241188644Sjake	wrpr	%g0, 0, %canrestore
241288644Sjake
241388644Sjake	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp
241488644Sjake
241584186Sjake	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
241684186Sjake	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
241784186Sjake	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2418105733Sjake	stx	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
2419105733Sjake	stx	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
2420105733Sjake	stx	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]
242181380Sjake
	/* %fsr and %gsr need FP enabled in %fprs to be read. */
242288644Sjake	wr	%g0, FPRS_FEF, %fprs
242388644Sjake	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
2424108379Sjake	rd	%gsr, %l6
2425105733Sjake	stx	%l6, [%sp + SPOFF + CCFSZ + TF_GSR]
242688644Sjake	wr	%g0, 0, %fprs
242784186Sjake
242891246Sjake	mov	%o0, %l3			! remember the level for later
242991246Sjake	mov	T_INTERRUPT, %o1
243089050Sjake
2431105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
2432105733Sjake	stx	%o1, [%sp + SPOFF + CCFSZ + TF_TYPE]
243388644Sjake
	/* Carry PCB/PCPU pointers across the switch to normal globals. */
243489050Sjake	mov	PCB_REG, %l0
243589050Sjake	mov	PCPU_REG, %l1
243684186Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
243784186Sjake
243884186Sjake	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
243984186Sjake	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
244084186Sjake	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
244184186Sjake	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
244284186Sjake	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
244384186Sjake	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
244484186Sjake	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]
244584186Sjake
244689050Sjake	mov	%l0, PCB_REG
244789050Sjake	mov	%l1, PCPU_REG
244888644Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
244984186Sjake
	/* The user's outs are now visible as our ins; save them. */
245084186Sjake	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
245184186Sjake	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
245284186Sjake	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
245384186Sjake	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
245484186Sjake	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
245584186Sjake	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
245684186Sjake	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
245784186Sjake	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
245884186Sjake
	/* Look up and call the handler for this level, trapframe as arg. */
2459157825Smarius	SET(intr_handlers, %l1, %l0)
2460157825Smarius	sllx	%l3, IH_SHIFT, %l1
2461157825Smarius	ldx	[%l0 + %l1], %l1
2462157825Smarius	KASSERT(%l1, "tl0_intr: ih null")
2463157825Smarius	call	%l1
2464157825Smarius	 add	%sp, CCFSZ + SPOFF, %o0
2465157825Smarius
2466117658Sjmg	/* %l3 contains PIL */
2467117658Sjmg	SET(intrcnt, %l1, %l2)
2468117658Sjmg	prefetcha [%l2] ASI_N, 1
2469117658Sjmg	SET(pil_countp, %l1, %l0)
2470117658Sjmg	sllx	%l3, 1, %l1
2471117658Sjmg	lduh	[%l0 + %l1], %l0		! index of this PIL's counter
2472117658Sjmg	sllx	%l0, 3, %l0
2473117658Sjmg	add	%l0, %l2, %l0
2474145153Smarius	ldx	[%l0], %l1
2475145153Smarius	inc	%l1
2476145153Smarius	stx	%l1, [%l0]			! intrcnt[pil_countp[%l3]]++
2477117658Sjmg
2478145153Smarius	lduw	[PCPU(CNT) + V_INTR], %l0
2479145153Smarius	inc	%l0
2480145153Smarius	stw	%l0, [PCPU(CNT) + V_INTR]	! cnt.v_intr++
248184186Sjake
2482116589Sjake	ba,a	%xcc, tl0_ret
248384186Sjake	 nop
248484186SjakeEND(tl0_intr)
248584186Sjake
2486105733Sjake/*
2487105733Sjake * Initiate return to usermode.
2488105733Sjake *
2489105733Sjake * Called with a trapframe on the stack.  The window that was setup in
2490105733Sjake * tl0_trap may have been used by "fast" trap handlers that pretend to be
2491105733Sjake * leaf functions, so all ins and locals may have been clobbered since
2492105733Sjake * then.
2493105733Sjake *
2494105733Sjake * This code is rather long and complicated.
2495105733Sjake */
249682005SjakeENTRY(tl0_ret)
249793389Sjake	/*
249893389Sjake	 * Check for pending asts atomically with returning.  We must raise
2499182020Smarius	 * the PIL before checking, and if no asts are found the PIL must
250093389Sjake	 * remain raised until the retry is executed, or we risk missing asts
2501220939Smarius	 * caused by interrupts occurring after the test.  If the PIL is
2502182020Smarius	 * lowered, as it is when we call ast, the check must be re-executed.
250393389Sjake	 */
2504103784Sjake	wrpr	%g0, PIL_TICK, %pil
250584186Sjake	ldx	[PCPU(CURTHREAD)], %l0
2506111032Sjulian	lduw	[%l0 + TD_FLAGS], %l1
2507111032Sjulian	set	TDF_ASTPENDING | TDF_NEEDRESCHED, %l2
2508111032Sjulian	and	%l1, %l2, %l1
2509111032Sjulian	brz,a,pt %l1, 1f
251082906Sjake	 nop
2511105733Sjake
2512105733Sjake	/*
2513182020Smarius	 * We have an AST.  Re-enable interrupts and handle it, then restart
2514105733Sjake	 * the return sequence.
2515105733Sjake	 */
251693389Sjake	wrpr	%g0, 0, %pil
251782906Sjake	call	ast
251882906Sjake	 add	%sp, CCFSZ + SPOFF, %o0
2519103784Sjake	ba,a	%xcc, tl0_ret
252093389Sjake	 nop
252182906Sjake
252293389Sjake	/*
252393389Sjake	 * Check for windows that were spilled to the pcb and need to be
252493389Sjake	 * copied out.  This must be the last thing that is done before the
252593389Sjake	 * return to usermode.  If there are still user windows in the cpu
252693389Sjake	 * and we call a nested function after this, which causes them to be
252793389Sjake	 * spilled to the pcb, they will not be copied out and the stack will
252893389Sjake	 * be inconsistent.
252993389Sjake	 */
2530103784Sjake1:	ldx	[PCB_REG + PCB_NSAVED], %l1
2531103784Sjake	brz,a,pt %l1, 2f
2532103784Sjake	 nop
2533103784Sjake	wrpr	%g0, 0, %pil
253493389Sjake	mov	T_SPILL, %o0
2535105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2536103784Sjake	call	trap
2537103784Sjake	 add	%sp, SPOFF + CCFSZ, %o0
2538103784Sjake	ba,a	%xcc, tl0_ret
2539103784Sjake	 nop
254082906Sjake
2541105733Sjake	/*
2542108377Sjake	 * Restore the out and most global registers from the trapframe.
2543108377Sjake	 * The ins will become the outs when we restore below.
2544105733Sjake	 */
2545103784Sjake2:	ldx	[%sp + SPOFF + CCFSZ + TF_O0], %i0
254682906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O1], %i1
254782906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O2], %i2
254882906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O3], %i3
254982906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O4], %i4
255082906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O5], %i5
255182906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O6], %i6
255282906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O7], %i7
255381380Sjake
2554108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
2555108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
2556108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
2557108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
2558108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
2559108377Sjake
2560105733Sjake	/*
2561105733Sjake	 * Load everything we need to restore below before disabling
2562105733Sjake	 * interrupts.
2563105733Sjake	 */
2564105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_FPRS], %l0
2565105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_GSR], %l1
256685243Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
2567105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l3
2568105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l4
2569105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l5
2570105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_WSTATE], %l6
257182906Sjake
2572105733Sjake	/*
2573108377Sjake	 * Disable interrupts to restore the special globals.  They are not
2574108377Sjake	 * saved and restored for all kernel traps, so an interrupt at the
2575108377Sjake	 * wrong time would clobber them.
2576105733Sjake	 */
257789050Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
257889050Sjake
257989050Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6
258089050Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G7], %g7
258189050Sjake
2582105733Sjake	/*
2583105733Sjake	 * Switch to alternate globals.  This frees up some registers we
2584105733Sjake	 * can use after the restore changes our window.
2585105733Sjake	 */
258682906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
258782906Sjake
2588105733Sjake	/*
2589105733Sjake	 * Drop %pil to zero.  It must have been zero at the time of the
2590105733Sjake	 * trap, since we were in usermode, but it was raised above in
2591105733Sjake	 * order to check for asts atomically.  We have interrupts disabled
2592105733Sjake	 * so any interrupts will not be serviced until we complete the
2593105733Sjake	 * return to usermode.
2594105733Sjake	 */
259588644Sjake	wrpr	%g0, 0, %pil
2596105733Sjake
2597105733Sjake	/*
2598105733Sjake	 * Save %fprs in an alternate global so it can be restored after the
2599105733Sjake	 * restore instruction below.  If we restore it before the restore,
2600105733Sjake	 * and the restore traps we may run for a while with floating point
2601105733Sjake	 * enabled in the kernel, which we want to avoid.
2602105733Sjake	 */
2603105733Sjake	mov	%l0, %g1
2604105733Sjake
2605105733Sjake	/*
2606105733Sjake	 * Restore %fsr and %gsr.  These need floating point enabled in %fprs,
2607105733Sjake	 * so we set it temporarily and then clear it.
2608105733Sjake	 */
2609105733Sjake	wr	%g0, FPRS_FEF, %fprs
2610105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_FSR], %fsr
2611108379Sjake	wr	%l1, 0, %gsr
2612105733Sjake	wr	%g0, 0, %fprs
2613105733Sjake
2614105733Sjake	/*
2615105733Sjake	 * Restore program counters.  This could be done after the restore
2616105733Sjake	 * but we're out of alternate globals to store them in...
2617105733Sjake	 */
261888644Sjake	wrpr	%l2, 0, %tnpc
2619105733Sjake	wrpr	%l3, 0, %tpc
262082906Sjake
2621105733Sjake	/*
2622105733Sjake	 * Save %tstate in an alternate global and clear the %cwp field.  %cwp
2623105733Sjake	 * will be affected by the restore below and we need to make sure it
2624105733Sjake	 * points to the current window at that time, not the window that was
2625105733Sjake	 * active at the time of the trap.
2626105733Sjake	 */
2627105733Sjake	andn	%l4, TSTATE_CWP_MASK, %g2
262882906Sjake
2629105733Sjake	/*
2630222828Smarius	 * Save %y in an alternate global.
2631105733Sjake	 */
2632222828Smarius	mov	%l5, %g4
2633105733Sjake
2634105733Sjake	/*
2635105733Sjake	 * Setup %wstate for return.  We need to restore the user window state
2636105733Sjake	 * which we saved in wstate.other when we trapped.  We also need to
2637105733Sjake	 * set the transition bit so the restore will be handled specially
2638105733Sjake	 * if it traps, use the xor feature of wrpr to do that.
2639105733Sjake	 */
2640105733Sjake	srlx	%l6, WSTATE_OTHER_SHIFT, %g3
264188644Sjake	wrpr	%g3, WSTATE_TRANSITION, %wstate
2642105733Sjake
2643105733Sjake	/*
2644105733Sjake	 * Setup window management registers for return.  If not all user
2645105733Sjake	 * windows were spilled in the kernel %otherwin will be non-zero,
2646105733Sjake	 * so we need to transfer it to %canrestore to correctly restore
2647105733Sjake	 * those windows.  Otherwise everything gets set to zero and the
2648105733Sjake	 * restore below will fill a window directly from the user stack.
2649105733Sjake	 */
265088644Sjake	rdpr	%otherwin, %o0
265188644Sjake	wrpr	%o0, 0, %canrestore
265282906Sjake	wrpr	%g0, 0, %otherwin
265388644Sjake	wrpr	%o0, 0, %cleanwin
265481380Sjake
265582005Sjake	/*
2656105733Sjake	 * Now do the restore.  If this instruction causes a fill trap which
2657105733Sjake	 * fails to fill a window from the user stack, we will resume at
2658105733Sjake	 * tl0_ret_fill_end and call back into the kernel.
265982005Sjake	 */
266082906Sjake	restore
266182906Sjaketl0_ret_fill:
266281380Sjake
2663105733Sjake	/*
2664105733Sjake	 * We made it.  We're back in the window that was active at the time
2665105733Sjake	 * of the trap, and ready to return to usermode.
2666105733Sjake	 */
2667105733Sjake
2668105733Sjake	/*
2669105733Sjake	 * Restore %frps.  This was saved in an alternate global above.
2670105733Sjake	 * Restore %fprs.  This was saved in an alternate global above.
2671105733Sjake	wr	%g1, 0, %fprs
2672105733Sjake
2673105733Sjake	/*
2674105733Sjake	 * Fixup %tstate so the saved %cwp points to the current window and
2675105733Sjake	 * restore it.
2676105733Sjake	 */
2677222828Smarius	rdpr	%cwp, %g1
2678222828Smarius	wrpr	%g2, %g1, %tstate
2679105733Sjake
2680105733Sjake	/*
2681105733Sjake	 * Restore the user window state.  The transition bit was set above
2682105733Sjake	 * for special handling of the restore, this clears it.
2683105733Sjake	 */
268488644Sjake	wrpr	%g3, 0, %wstate
268585243Sjake
268684186Sjake#if KTR_COMPILE & KTR_TRAP
268788644Sjake	CATR(KTR_TRAP, "tl0_ret: td=%#lx pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
2688222828Smarius	    , %g1, %g2, %g3, 7, 8, 9)
2689222828Smarius	ldx	[PCPU(CURTHREAD)], %g2
2690222828Smarius	stx	%g2, [%g1 + KTR_PARM1]
2691222828Smarius	rdpr	%pil, %g2
2692222828Smarius	stx	%g2, [%g1 + KTR_PARM2]
2693222828Smarius	rdpr	%tpc, %g2
2694222828Smarius	stx	%g2, [%g1 + KTR_PARM3]
2695222828Smarius	rdpr	%tnpc, %g2
2696222828Smarius	stx	%g2, [%g1 + KTR_PARM4]
2697222828Smarius	stx	%sp, [%g1 + KTR_PARM5]
269882906Sjake9:
269982906Sjake#endif
270081380Sjake
2701105733Sjake	/*
2702222828Smarius	 * Restore %y.  Note that the CATR above clobbered it.
2703222828Smarius	 */
2704222828Smarius	wr	%g4, 0, %y
2705222828Smarius
2706222828Smarius	/*
2707105733Sjake	 * Return to usermode.
2708105733Sjake	 */
270982906Sjake	retry
271082906Sjaketl0_ret_fill_end:
271182005Sjake
271284186Sjake#if KTR_COMPILE & KTR_TRAP
271388785Sjake	CATR(KTR_TRAP, "tl0_ret: fill magic ps=%#lx ws=%#lx sp=%#lx"
271482906Sjake	    , %l0, %l1, %l2, 7, 8, 9)
271588785Sjake	rdpr	%pstate, %l1
271688785Sjake	stx	%l1, [%l0 + KTR_PARM1]
2717222840Smarius	stx	%l6, [%l0 + KTR_PARM2]
271888785Sjake	stx	%sp, [%l0 + KTR_PARM3]
271982906Sjake9:
2720222828Smarius
2721222828Smarius	/*
2722222828Smarius	 * Restore %y clobbered by the CATR.  This was saved in %l5 above.
2723222828Smarius	 */
2724222828Smarius	wr	%l5, 0, %y
272582906Sjake#endif
272682906Sjake
272782906Sjake	/*
2728105733Sjake	 * The restore above caused a fill trap and the fill handler was
2729105733Sjake	 * unable to fill a window from the user stack.  The special fill
2730105733Sjake	 * handler recognized this and punted, sending us here.  We need
2731105733Sjake	 * to carefully undo any state that was restored before the restore
2732105733Sjake	 * was executed and call trap again.  Trap will copyin a window
2733105733Sjake	 * from the user stack which will fault in the page we need so the
2734105733Sjake	 * restore above will succeed when we try again.  If this fails
2735105733Sjake	 * the process has trashed its stack, so we kill it.
273682906Sjake	 */
2737105733Sjake
2738105733Sjake	/*
2739105733Sjake	 * Restore the kernel window state.  This was saved in %l6 above, and
2740105733Sjake	 * since the restore failed we're back in the same window.
2741105733Sjake	 */
2742105733Sjake	wrpr	%l6, 0, %wstate
2743105733Sjake
2744105733Sjake	/*
2745105733Sjake	 * Restore the normal globals which have predefined values in the
2746105733Sjake	 * kernel.  We clobbered them above restoring the user's globals
2747105733Sjake	 * so this is very important.
2748105733Sjake	 * XXX PSTATE_ALT must already be set.
2749105733Sjake	 */
275088785Sjake	wrpr	%g0, PSTATE_ALT, %pstate
275189050Sjake	mov	PCB_REG, %o0
275289050Sjake	mov	PCPU_REG, %o1
275388785Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
275489050Sjake	mov	%o0, PCB_REG
275589050Sjake	mov	%o1, PCPU_REG
275688644Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
2757105733Sjake
2758105733Sjake	/*
2759105733Sjake	 * Simulate a fill trap and then start the whole return sequence over
2760105733Sjake	 * again.  This is special because it only copies in 1 window, not 2
2761105733Sjake	 * as we would for a normal failed fill.  This may be the first time
2762105733Sjake	 * the process has been run, so there may not be 2 windows worth of
2763105733Sjake	 * stack to copyin.
2764105733Sjake	 */
2765103784Sjake	mov	T_FILL_RET, %o0
2766105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2767103784Sjake	call	trap
2768103784Sjake	 add	%sp, SPOFF + CCFSZ, %o0
2769103784Sjake	ba,a	%xcc, tl0_ret
2770103784Sjake	 nop
277182005SjakeEND(tl0_ret)
277281380Sjake
277380709Sjake/*
277482906Sjake * Kernel trap entry point
277582906Sjake *
277691246Sjake * void tl1_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
2777205409Smarius *     u_int sfsr)
277882906Sjake *
277982906Sjake * This is easy because the stack is already setup and the windows don't need
278082906Sjake * to be split.  We build a trapframe and call trap(), the same as above, but
278182906Sjake * the outs don't need to be saved.
278280709Sjake */
278380709SjakeENTRY(tl1_trap)
	/*
	 * Snapshot the trap registers and %y into locals.  This window was
	 * created for us by the trap table entry code (note the trapped
	 * frame's outs are visible here as our ins), so the locals are free.
	 */
278480709Sjake	rdpr	%tstate, %l0
278580709Sjake	rdpr	%tpc, %l1
278680709Sjake	rdpr	%tnpc, %l2
278791246Sjake	rdpr	%pil, %l3
278891316Sjake	rd	%y, %l4
278991316Sjake	rdpr	%wstate, %l5
279080709Sjake
279184186Sjake#if KTR_COMPILE & KTR_TRAP
279288644Sjake	CATR(KTR_TRAP, "tl1_trap: td=%p type=%#lx pil=%#lx pc=%#lx sp=%#lx"
279388644Sjake	    , %g1, %g2, %g3, 7, 8, 9)
279488644Sjake	ldx	[PCPU(CURTHREAD)], %g2
279588644Sjake	stx	%g2, [%g1 + KTR_PARM1]
279697265Sjake	stx	%o0, [%g1 + KTR_PARM2]
279791246Sjake	stx	%l3, [%g1 + KTR_PARM3]
279888644Sjake	stx	%l1, [%g1 + KTR_PARM4]
279988644Sjake	stx	%i6, [%g1 + KTR_PARM5]
280082906Sjake9:
280182906Sjake#endif
280282906Sjake
	/*
	 * Drop back to trap level 1.  The trap state captured above would
	 * otherwise be clobbered by any nested trap.
	 */
280380709Sjake	wrpr	%g0, 1, %tl
280488644Sjake
	/*
	 * Keep only the "other" field of the trapped %wstate and switch the
	 * normal field to the kernel window state.
	 */
280591316Sjake	and	%l5, WSTATE_OTHER_MASK, %l5
280691316Sjake	wrpr	%l5, WSTATE_KERNEL, %wstate
280791246Sjake
	/*
	 * Store the trap type and arguments (tar/sfar/sfsr, see the entry
	 * comment above) into the trapframe.
	 */
2808105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2809105733Sjake	stx	%o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
2810103919Sjake	stx	%o3, [%sp + SPOFF + CCFSZ + TF_TAR]
2811103919Sjake	stx	%o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
2812105733Sjake	stx	%o5, [%sp + SPOFF + CCFSZ + TF_SFSR]
2813103919Sjake
	/*
	 * Store the saved trap state.
	 */
281488644Sjake	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
281588644Sjake	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
281688644Sjake	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2817105733Sjake	stx	%l3, [%sp + SPOFF + CCFSZ + TF_PIL]
2818105733Sjake	stx	%l4, [%sp + SPOFF + CCFSZ + TF_Y]
281988644Sjake
	/*
	 * Stage PCB_REG/PCPU_REG in locals across the switch to the normal
	 * global set, save the special globals %g6/%g7 there, then switch
	 * back and re-establish PCB_REG/PCPU_REG.
	 */
2820103919Sjake	mov	PCB_REG, %l0
2821103919Sjake	mov	PCPU_REG, %l1
282291158Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
282391158Sjake
2824108377Sjake	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
2825108377Sjake	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]
282680709Sjake
2827103919Sjake	mov	%l0, PCB_REG
2828103919Sjake	mov	%l1, PCPU_REG
282991158Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
283091158Sjake
	/*
	 * Save the trapped frame's outs (our ins after the window save) and
	 * the remaining globals into the trapframe.
	 */
2831103919Sjake	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
2832103919Sjake	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
2833103919Sjake	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
2834103919Sjake	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
2835103919Sjake	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
2836103919Sjake	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
2837103919Sjake	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
2838103919Sjake	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
2839103919Sjake
2840108377Sjake	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
2841108377Sjake	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
2842108377Sjake	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
2843108377Sjake	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
2844108377Sjake	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
2845108377Sjake
	/*
	 * Tail-call the handler in %o2 with the trapframe as its argument.
	 * %o7 is manufactured as tl1_ret - 8 so the handler's normal return
	 * (%o7 + 8) lands on tl1_ret.
	 */
2846103921Sjake	set	tl1_ret - 8, %o7
2847103921Sjake	jmpl	%o2, %g0
284880709Sjake	 add	%sp, CCFSZ + SPOFF, %o0
2849103921SjakeEND(tl1_trap)
285080709Sjake
2851103921SjakeENTRY(tl1_ret)
	/*
	 * Return path for nested (kernel) traps that came through tl1_trap.
	 * Restore the trapframe and retry the trapped instruction.
	 */

	/*
	 * Restore the outs and most of the globals from the trapframe.  The
	 * ins will become the outs again when we restore the window below.
	 */
2852103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O0], %i0
2853103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O1], %i1
2854103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O2], %i2
2855103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O3], %i3
2856103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O4], %i4
2857103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O5], %i5
2858103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O6], %i6
2859103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O7], %i7
2860103919Sjake
2861108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
2862108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
2863108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
2864108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
2865108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
2866108377Sjake
	/*
	 * Load the saved trap state into locals before the window changes.
	 */
286788644Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l0
286888644Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l1
286988644Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
2870105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_PIL], %l3
2871105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l4
287288644Sjake
	/*
	 * Only restore the special globals %g6/%g7 (saved under
	 * PSTATE_NORMAL in tl1_trap) if the trapped PC lies within the PROM
	 * address range; otherwise skip to 1f and leave the kernel's values
	 * in place.
	 */
2873108377Sjake	set	VM_MIN_PROM_ADDRESS, %l5
2874108377Sjake	cmp	%l1, %l5
2875108377Sjake	bl,a,pt	%xcc, 1f
2876108377Sjake	 nop
2877182774Smarius	set	VM_MAX_PROM_ADDRESS, %l5
2878182774Smarius	cmp	%l1, %l5
2879182774Smarius	bg,a,pt	%xcc, 1f
2880182774Smarius	 nop
288180709Sjake
2882108377Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
288380709Sjake
2884108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6
2885108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G7], %g7
2886108377Sjake
	/*
	 * Switch to the alternate globals so we have registers that survive
	 * the restore below.
	 */
2887108377Sjake1:	wrpr	%g0, PSTATE_ALT, %pstate
2888108377Sjake
	/*
	 * Stage %tstate (with %cwp cleared; the correct %cwp is merged back
	 * in after the restore), %tpc, %tnpc and %y in alternate globals.
	 */
288988644Sjake	andn	%l0, TSTATE_CWP_MASK, %g1
289086519Sjake	mov	%l1, %g2
289186519Sjake	mov	%l2, %g3
2892222828Smarius	mov	%l4, %g4
289381380Sjake
	/*
	 * Restore the %pil that was in effect at the time of the trap.
	 */
289488644Sjake	wrpr	%l3, 0, %pil
289586519Sjake
289686519Sjake	restore
289786519Sjake
	/*
	 * Go back to trap level 2 so the %t* register writes and the retry
	 * below operate on the correct (per-trap-level) trap registers.
	 */
289880709Sjake	wrpr	%g0, 2, %tl
289980709Sjake
290086519Sjake	wrpr	%g2, 0, %tpc
290186519Sjake	wrpr	%g3, 0, %tnpc
	/*
	 * Merge the now-current %cwp into the saved %tstate.
	 */
2902222828Smarius	rdpr	%cwp, %g2
2903222828Smarius	wrpr	%g1, %g2, %tstate
290486519Sjake
290584186Sjake#if KTR_COMPILE & KTR_TRAP
2906103921Sjake	CATR(KTR_TRAP, "tl1_ret: td=%#lx pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
2907222828Smarius	    , %g1, %g2, %g3, 7, 8, 9)
2908222828Smarius	ldx	[PCPU(CURTHREAD)], %g2
2909222828Smarius	stx	%g2, [%g1 + KTR_PARM1]
2910222828Smarius	rdpr	%pil, %g2
2911222828Smarius	stx	%g2, [%g1 + KTR_PARM2]
2912222828Smarius	rdpr	%tstate, %g2
2913222828Smarius	stx	%g2, [%g1 + KTR_PARM3]
2914222828Smarius	rdpr	%tpc, %g2
2915222828Smarius	stx	%g2, [%g1 + KTR_PARM4]
2916222828Smarius	stx	%sp, [%g1 + KTR_PARM5]
291782906Sjake9:
291882906Sjake#endif
291982906Sjake
	/*
	 * Restore %y (staged in %g4 above) after the KTR code, which may
	 * clobber it.
	 */
2920222828Smarius	wr	%g4, 0, %y
2921222828Smarius
292280709Sjake	retry
2923103921SjakeEND(tl1_ret)
292480709Sjake
292591246Sjake/*
292691246Sjake * void tl1_intr(u_int level, u_int mask)
292791246Sjake */
292884186SjakeENTRY(tl1_intr)
	/*
	 * Snapshot the trap registers and %y into locals before %tl is
	 * lowered, as in tl1_trap.
	 */
292984186Sjake	rdpr	%tstate, %l0
293084186Sjake	rdpr	%tpc, %l1
293184186Sjake	rdpr	%tnpc, %l2
293291246Sjake	rdpr	%pil, %l3
293391316Sjake	rd	%y, %l4
293491316Sjake	rdpr	%wstate, %l5
293584186Sjake
293684186Sjake#if KTR_COMPILE & KTR_INTR
293789050Sjake	CATR(KTR_INTR,
2938145153Smarius	    "tl1_intr: td=%p level=%#x pil=%#lx pc=%#lx sp=%#lx"
293988644Sjake	    , %g1, %g2, %g3, 7, 8, 9)
294088644Sjake	ldx	[PCPU(CURTHREAD)], %g2
294188644Sjake	stx	%g2, [%g1 + KTR_PARM1]
294291246Sjake	stx	%o0, [%g1 + KTR_PARM2]
294391246Sjake	stx	%l3, [%g1 + KTR_PARM3]
294491246Sjake	stx	%l1, [%g1 + KTR_PARM4]
294591246Sjake	stx	%i6, [%g1 + KTR_PARM5]
294684186Sjake9:
294784186Sjake#endif
294884186Sjake
	/*
	 * Raise %pil to the interrupt's level and acknowledge the soft
	 * interrupt bit(s) given by the mask argument.
	 */
294991246Sjake	wrpr	%o0, 0, %pil
2950108379Sjake	wr	%o1, 0, %clear_softint
295191246Sjake
	/*
	 * Drop back to trap level 1 and set up the kernel window state,
	 * preserving the trapped wstate in the "other" field.
	 */
295284186Sjake	wrpr	%g0, 1, %tl
295388644Sjake
295491316Sjake	and	%l5, WSTATE_OTHER_MASK, %l5
295591316Sjake	wrpr	%l5, WSTATE_KERNEL, %wstate
295691246Sjake
	/*
	 * Store the saved trap state into the trapframe.
	 */
295788644Sjake	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
295888644Sjake	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
295988644Sjake	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2960105733Sjake	stx	%l3, [%sp + SPOFF + CCFSZ + TF_PIL]
2961105733Sjake	stx	%l4, [%sp + SPOFF + CCFSZ + TF_Y]
296288644Sjake
	/*
	 * Keep the level in %l7 (it survives the handler call below, used
	 * for the interrupt accounting) and record level/type in the
	 * trapframe.
	 */
296391246Sjake	mov	%o0, %l7
296491246Sjake	mov	T_INTERRUPT | T_KERNEL, %o1
296589050Sjake
2966105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
2967105733Sjake	stx	%o1, [%sp + SPOFF + CCFSZ + TF_TYPE]
296888644Sjake
296988644Sjake	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
297088644Sjake	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
297188644Sjake
	/*
	 * Stage PCB_REG/PCPU_REG across the switch to the normal globals,
	 * save %g1-%g5 there, then switch back (same dance as tl1_trap).
	 */
297291158Sjake	mov	PCB_REG, %l4
297391158Sjake	mov	PCPU_REG, %l5
297491158Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
297591158Sjake
297684186Sjake	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
297784186Sjake	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
297884186Sjake	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
297984186Sjake	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
298084186Sjake	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
298184186Sjake
298291158Sjake	mov	%l4, PCB_REG
298391158Sjake	mov	%l5, PCPU_REG
298491158Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
298591158Sjake
	/*
	 * Look up the handler for this level in intr_handlers (indexed by
	 * level << IH_SHIFT) and call it with the trapframe as argument.
	 * A null handler is a bug; KASSERT panics on it.
	 */
2986157825Smarius	SET(intr_handlers, %l5, %l4)
2987157825Smarius	sllx	%l7, IH_SHIFT, %l5
2988157825Smarius	ldx	[%l4 + %l5], %l5
2989157825Smarius	KASSERT(%l5, "tl1_intr: ih null")
2990157825Smarius	call	%l5
2991157825Smarius	 add	%sp, CCFSZ + SPOFF, %o0
2992157825Smarius
2993145153Smarius	/* %l7 contains PIL */
	/*
	 * Bump the 64-bit counter for this PIL: pil_countp (indexed by
	 * PIL * 2, a 16-bit entry) yields an index which, scaled by 8,
	 * selects the slot in intrcnt to increment.
	 */
2994117658Sjmg	SET(intrcnt, %l5, %l4)
2995117658Sjmg	prefetcha [%l4] ASI_N, 1
2996117658Sjmg	SET(pil_countp, %l5, %l6)
2997117658Sjmg	sllx	%l7, 1, %l5
2998117658Sjmg	lduh	[%l5 + %l6], %l5
2999117658Sjmg	sllx	%l5, 3, %l5
3000117658Sjmg	add	%l5, %l4, %l4
3001145153Smarius	ldx	[%l4], %l5
3002145153Smarius	inc	%l5
3003145153Smarius	stx	%l5, [%l4]
3004117658Sjmg
	/*
	 * Bump the per-CPU interrupt statistic (32-bit counter).
	 */
3005145153Smarius	lduw	[PCPU(CNT) + V_INTR], %l4
3006145153Smarius	inc	%l4
3007145153Smarius	stw	%l4, [PCPU(CNT) + V_INTR]
300888644Sjake
	/*
	 * Reload %y from the trapframe (%l4 was reused above) and restore
	 * the globals.
	 */
3009105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l4
301091316Sjake
301184186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
301284186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
301384186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
301484186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
301584186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
301684186Sjake
	/*
	 * Switch to the alternate globals and stage the return state there
	 * so it survives the restore: %tstate with %cwp cleared, %tpc,
	 * %tnpc and %y.  Also drop %pil back to its saved value.
	 */
301784186Sjake	wrpr	%g0, PSTATE_ALT, %pstate
301884186Sjake
301988644Sjake	andn	%l0, TSTATE_CWP_MASK, %g1
302086519Sjake	mov	%l1, %g2
302186519Sjake	mov	%l2, %g3
3022222828Smarius	mov	%l4, %g4
302388644Sjake	wrpr	%l3, 0, %pil
302484186Sjake
302586519Sjake	restore
302686519Sjake
	/*
	 * Back to trap level 2 so the %t* writes and retry use the right
	 * per-trap-level registers; merge the current %cwp into %tstate.
	 */
302784186Sjake	wrpr	%g0, 2, %tl
302884186Sjake
302986519Sjake	wrpr	%g2, 0, %tpc
303086519Sjake	wrpr	%g3, 0, %tnpc
3031222828Smarius	rdpr	%cwp, %g2
3032222828Smarius	wrpr	%g1, %g2, %tstate
303386519Sjake
303488644Sjake#if KTR_COMPILE & KTR_INTR
3035145153Smarius	CATR(KTR_INTR, "tl1_intr: td=%#x pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
3036222828Smarius	    , %g1, %g2, %g3, 7, 8, 9)
3037222828Smarius	ldx	[PCPU(CURTHREAD)], %g2
3038222828Smarius	stx	%g2, [%g1 + KTR_PARM1]
3039222828Smarius	rdpr	%pil, %g2
3040222828Smarius	stx	%g2, [%g1 + KTR_PARM2]
3041222828Smarius	rdpr	%tstate, %g2
3042222828Smarius	stx	%g2, [%g1 + KTR_PARM3]
3043222828Smarius	rdpr	%tpc, %g2
3044222828Smarius	stx	%g2, [%g1 + KTR_PARM4]
3045222828Smarius	stx	%sp, [%g1 + KTR_PARM5]
304684186Sjake9:
304784186Sjake#endif
304884186Sjake
	/*
	 * Restore %y (staged in %g4) after the KTR code, which may clobber
	 * it, then resume the interrupted instruction.
	 */
3049222828Smarius	wr	%g4, 0, %y
3050222828Smarius
305184186Sjake	retry
305284186SjakeEND(tl1_intr)
305384186Sjake
3054155839Smarius	.globl	tl_text_end
	/*
	 * Marker symbol: end of the trap handler text (the matching
	 * tl_text_begin is presumably defined earlier in this file —
	 * outside this view).  The nop keeps the label inside .text.
	 */
3055155839Smariustl_text_end:
3056155839Smarius	nop
3057155839Smarius
305882906Sjake/*
305982906Sjake * Freshly forked processes come here when switched to for the first time.
306082906Sjake * The arguments to fork_exit() have been setup in the locals, we must move
306182906Sjake * them to the outs.
306282906Sjake */
306380709SjakeENTRY(fork_trampoline)
307684186Sjake#if KTR_COMPILE & KTR_PROC
307784186Sjake	CATR(KTR_PROC, "fork_trampoline: td=%p (%s) cwp=%#lx"
307882906Sjake	    , %g1, %g2, %g3, 7, 8, 9)
307983366Sjulian	ldx	[PCPU(CURTHREAD)], %g2
308082906Sjake	stx	%g2, [%g1 + KTR_PARM1]
308184186Sjake	ldx	[%g2 + TD_PROC], %g2
308282906Sjake	add	%g2, P_COMM, %g2
308382906Sjake	stx	%g2, [%g1 + KTR_PARM2]
308482906Sjake	rdpr	%cwp, %g2
308582906Sjake	stx	%g2, [%g1 + KTR_PARM3]
308682906Sjake9:
308782906Sjake#endif
	/*
	 * Move the fork_exit() arguments from the locals (where they were
	 * staged for us — presumably by cpu_fork()/cpu_set_fork_handler();
	 * confirm against the context-switch code) into the outs, call
	 * fork_exit(), then take the normal user trap-return path.
	 */
307680709Sjake	mov	%l0, %o0
307780709Sjake	mov	%l1, %o1
307880709Sjake	call	fork_exit
307988644Sjake	 mov	%l2, %o2
3080116589Sjake	ba,a	%xcc, tl0_ret
308184186Sjake	 nop
308280709SjakeEND(fork_trampoline)
3083