180709Sjake/*-
281180Sjake * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
381180Sjake *
481180Sjake * Redistribution and use in source and binary forms, with or without
581180Sjake * modification, are permitted provided that the following conditions
681180Sjake * are met:
781180Sjake * 1. Redistributions of source code must retain the above copyright
881180Sjake *    notice, this list of conditions and the following disclaimer.
981180Sjake * 2. Redistributions in binary form must reproduce the above copyright
1081180Sjake *    notice, this list of conditions and the following disclaimer in the
1181180Sjake *    documentation and/or other materials provided with the distribution.
1281180Sjake * 3. Berkeley Software Design Inc's name may not be used to endorse or
1381180Sjake *    promote products derived from this software without specific prior
1481180Sjake *    written permission.
1581180Sjake *
1681180Sjake * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
1781180Sjake * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1881180Sjake * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1981180Sjake * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
2081180Sjake * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
2181180Sjake * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2281180Sjake * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2381180Sjake * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2481180Sjake * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2581180Sjake * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2681180Sjake * SUCH DAMAGE.
2781180Sjake *
28114085Sobrien *	BSDI $Id: locore.s,v 1.36.2.15 1999/08/23 22:34:41 cp Exp $
2981180Sjake */
3081180Sjake/*-
3180709Sjake * Copyright (c) 2001 Jake Burkholder.
3280709Sjake * All rights reserved.
3380709Sjake *
3480709Sjake * Redistribution and use in source and binary forms, with or without
3580709Sjake * modification, are permitted provided that the following conditions
3680709Sjake * are met:
3780709Sjake * 1. Redistributions of source code must retain the above copyright
3880709Sjake *    notice, this list of conditions and the following disclaimer.
3980709Sjake * 2. Redistributions in binary form must reproduce the above copyright
4080709Sjake *    notice, this list of conditions and the following disclaimer in the
4180709Sjake *    documentation and/or other materials provided with the distribution.
4280709Sjake *
4381337Sobrien * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
4480709Sjake * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
4580709Sjake * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
4681337Sobrien * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
4780709Sjake * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
4880709Sjake * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
4980709Sjake * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
5080709Sjake * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
5180709Sjake * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
5280709Sjake * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
5380709Sjake * SUCH DAMAGE.
5480709Sjake */
5580709Sjake
56114188Sjake#include <machine/asm.h>
57114188Sjake__FBSDID("$FreeBSD$");
58114188Sjake
59106050Sjake#include "opt_compat.h"
6080709Sjake#include "opt_ddb.h"
61285627Szbb#include "opt_kstack_pages.h"
6280709Sjake
6380709Sjake#include <machine/asi.h>
6480709Sjake#include <machine/asmacros.h>
65166105Smarius#include <machine/frame.h>
66166105Smarius#include <machine/fsr.h>
67166105Smarius#include <machine/intr_machdep.h>
6882906Sjake#include <machine/ktr.h>
69166105Smarius#include <machine/pcb.h>
7082906Sjake#include <machine/pstate.h>
7180709Sjake#include <machine/trap.h>
72166105Smarius#include <machine/tsb.h>
7382906Sjake#include <machine/tstate.h>
74166105Smarius#include <machine/utrap.h>
7582906Sjake#include <machine/wstate.h>
7680709Sjake
7780709Sjake#include "assym.s"
7880709Sjake
79216803Smarius#define	TSB_ASI			0x0
80216803Smarius#define	TSB_KERNEL		0x0
81216803Smarius#define	TSB_KERNEL_MASK		0x0
82216803Smarius#define	TSB_KERNEL_PHYS		0x0
83216803Smarius#define	TSB_KERNEL_PHYS_END	0x0
84216803Smarius#define	TSB_QUAD_LDD		0x0
85101653Sjake
8688644Sjake	.register %g2,#ignore
8788644Sjake	.register %g3,#ignore
8888644Sjake	.register %g6,#ignore
8988644Sjake	.register %g7,#ignore
9088644Sjake
9182005Sjake/*
92216803Smarius * Atomically set a bit in a TTE.
9388644Sjake */
94216803Smarius#define	TTE_SET_BIT(r1, r2, r3, bit, a, asi) \
9588644Sjake	add	r1, TTE_DATA, r1 ; \
96216803Smarius	LD(x, a) [r1] asi, r2 ; \
9788644Sjake9:	or	r2, bit, r3 ; \
98216803Smarius	CAS(x, a) [r1] asi, r2, r3 ; \
9988644Sjake	cmp	r2, r3 ; \
10088644Sjake	bne,pn	%xcc, 9b ; \
10188644Sjake	 mov	r3, r2
10288644Sjake
103216803Smarius#define	TTE_SET_REF(r1, r2, r3, a, asi)	TTE_SET_BIT(r1, r2, r3, TD_REF, a, asi)
104216803Smarius#define	TTE_SET_W(r1, r2, r3, a, asi)	TTE_SET_BIT(r1, r2, r3, TD_W, a, asi)
10588644Sjake
10688644Sjake/*
10782906Sjake * Macros for spilling and filling live windows.
10882906Sjake *
10982906Sjake * NOTE: These macros use exactly 16 instructions, and it is assumed that the
11082906Sjake * handler will not use more than 24 instructions total, to leave room for
11182906Sjake * resume vectors which occupy the last 8 instructions.
11282005Sjake */
11380709Sjake
11482906Sjake#define	SPILL(storer, base, size, asi) \
11582906Sjake	storer	%l0, [base + (0 * size)] asi ; \
11682906Sjake	storer	%l1, [base + (1 * size)] asi ; \
11782906Sjake	storer	%l2, [base + (2 * size)] asi ; \
11882906Sjake	storer	%l3, [base + (3 * size)] asi ; \
11982906Sjake	storer	%l4, [base + (4 * size)] asi ; \
12082906Sjake	storer	%l5, [base + (5 * size)] asi ; \
12182906Sjake	storer	%l6, [base + (6 * size)] asi ; \
12282906Sjake	storer	%l7, [base + (7 * size)] asi ; \
12382906Sjake	storer	%i0, [base + (8 * size)] asi ; \
12482906Sjake	storer	%i1, [base + (9 * size)] asi ; \
12582906Sjake	storer	%i2, [base + (10 * size)] asi ; \
12682906Sjake	storer	%i3, [base + (11 * size)] asi ; \
12782906Sjake	storer	%i4, [base + (12 * size)] asi ; \
12882906Sjake	storer	%i5, [base + (13 * size)] asi ; \
12982906Sjake	storer	%i6, [base + (14 * size)] asi ; \
13082906Sjake	storer	%i7, [base + (15 * size)] asi
13180709Sjake
13282906Sjake#define	FILL(loader, base, size, asi) \
13382906Sjake	loader	[base + (0 * size)] asi, %l0 ; \
13482906Sjake	loader	[base + (1 * size)] asi, %l1 ; \
13582906Sjake	loader	[base + (2 * size)] asi, %l2 ; \
13682906Sjake	loader	[base + (3 * size)] asi, %l3 ; \
13782906Sjake	loader	[base + (4 * size)] asi, %l4 ; \
13882906Sjake	loader	[base + (5 * size)] asi, %l5 ; \
13982906Sjake	loader	[base + (6 * size)] asi, %l6 ; \
14082906Sjake	loader	[base + (7 * size)] asi, %l7 ; \
14182906Sjake	loader	[base + (8 * size)] asi, %i0 ; \
14282906Sjake	loader	[base + (9 * size)] asi, %i1 ; \
14382906Sjake	loader	[base + (10 * size)] asi, %i2 ; \
14482906Sjake	loader	[base + (11 * size)] asi, %i3 ; \
14582906Sjake	loader	[base + (12 * size)] asi, %i4 ; \
14682906Sjake	loader	[base + (13 * size)] asi, %i5 ; \
14782906Sjake	loader	[base + (14 * size)] asi, %i6 ; \
14882906Sjake	loader	[base + (15 * size)] asi, %i7
14982005Sjake
15082906Sjake#define	ERRATUM50(reg)	mov reg, reg
15182906Sjake
15288781Sjake#define	KSTACK_SLOP	1024
15388781Sjake
15489048Sjake/*
155181701Smarius * Sanity check the kernel stack and bail out if it's wrong.
15689048Sjake * XXX: doesn't handle being on the panic stack.
15789048Sjake */
15888781Sjake#define	KSTACK_CHECK \
15988781Sjake	dec	16, ASP_REG ; \
16088781Sjake	stx	%g1, [ASP_REG + 0] ; \
16188781Sjake	stx	%g2, [ASP_REG + 8] ; \
16288781Sjake	add	%sp, SPOFF, %g1 ; \
16388781Sjake	andcc	%g1, (1 << PTR_SHIFT) - 1, %g0 ; \
16488781Sjake	bnz,a	%xcc, tl1_kstack_fault ; \
16588781Sjake	 inc	16, ASP_REG ; \
16688781Sjake	ldx	[PCPU(CURTHREAD)], %g2 ; \
16788781Sjake	ldx	[%g2 + TD_KSTACK], %g2 ; \
16888781Sjake	add	%g2, KSTACK_SLOP, %g2 ; \
16988781Sjake	subcc	%g1, %g2, %g1 ; \
17088781Sjake	ble,a	%xcc, tl1_kstack_fault ; \
17188781Sjake	 inc	16, ASP_REG ; \
17288781Sjake	set	KSTACK_PAGES * PAGE_SIZE, %g2 ; \
17388781Sjake	cmp	%g1, %g2 ; \
17488781Sjake	bgt,a	%xcc, tl1_kstack_fault ; \
17588781Sjake	 inc	16, ASP_REG ; \
17688781Sjake	ldx	[ASP_REG + 8], %g2 ; \
17788781Sjake	ldx	[ASP_REG + 0], %g1 ; \
17888781Sjake	inc	16, ASP_REG
17988781Sjake
180155839Smarius	.globl	tl_text_begin
181155839Smariustl_text_begin:
182155839Smarius	nop
183155839Smarius
18488781SjakeENTRY(tl1_kstack_fault)
18588781Sjake	rdpr	%tl, %g1
18697263Sjake1:	cmp	%g1, 2
18797263Sjake	be,a	2f
18888781Sjake	 nop
18988781Sjake
19088781Sjake#if KTR_COMPILE & KTR_TRAP
19188781Sjake	CATR(KTR_TRAP, "tl1_kstack_fault: tl=%#lx tpc=%#lx tnpc=%#lx"
19297263Sjake	    , %g2, %g3, %g4, 7, 8, 9)
19397263Sjake	rdpr	%tl, %g3
19497263Sjake	stx	%g3, [%g2 + KTR_PARM1]
19597263Sjake	rdpr	%tpc, %g3
19697263Sjake	stx	%g3, [%g2 + KTR_PARM1]
19797263Sjake	rdpr	%tnpc, %g3
19897263Sjake	stx	%g3, [%g2 + KTR_PARM1]
19988781Sjake9:
20088781Sjake#endif
20188781Sjake
20297263Sjake	sub	%g1, 1, %g1
20397263Sjake	wrpr	%g1, 0, %tl
20497263Sjake	ba,a	%xcc, 1b
20597263Sjake	 nop
20697263Sjake
20788781Sjake2:
20888781Sjake#if KTR_COMPILE & KTR_TRAP
20988781Sjake	CATR(KTR_TRAP,
21088781Sjake	    "tl1_kstack_fault: sp=%#lx ks=%#lx cr=%#lx cs=%#lx ow=%#lx ws=%#lx"
21188781Sjake	    , %g1, %g2, %g3, 7, 8, 9)
21288781Sjake	add	%sp, SPOFF, %g2
21388781Sjake	stx	%g2, [%g1 + KTR_PARM1]
21488781Sjake	ldx	[PCPU(CURTHREAD)], %g2
21588781Sjake	ldx	[%g2 + TD_KSTACK], %g2
21688781Sjake	stx	%g2, [%g1 + KTR_PARM2]
21788781Sjake	rdpr	%canrestore, %g2
21888781Sjake	stx	%g2, [%g1 + KTR_PARM3]
21988781Sjake	rdpr	%cansave, %g2
22088781Sjake	stx	%g2, [%g1 + KTR_PARM4]
22188781Sjake	rdpr	%otherwin, %g2
22288781Sjake	stx	%g2, [%g1 + KTR_PARM5]
22388781Sjake	rdpr	%wstate, %g2
22488781Sjake	stx	%g2, [%g1 + KTR_PARM6]
22588781Sjake9:
22688781Sjake#endif
22788781Sjake
22888781Sjake	wrpr	%g0, 0, %canrestore
22988781Sjake	wrpr	%g0, 6, %cansave
23088781Sjake	wrpr	%g0, 0, %otherwin
23188781Sjake	wrpr	%g0, WSTATE_KERNEL, %wstate
23288781Sjake
23389048Sjake	sub	ASP_REG, SPOFF + CCFSZ, %sp
23488781Sjake	clr	%fp
23588781Sjake
236103921Sjake	set	trap, %o2
237116589Sjake	ba	%xcc, tl1_trap
23888781Sjake	 mov	T_KSTACK_FAULT | T_KERNEL, %o0
23988781SjakeEND(tl1_kstack_fault)
24088781Sjake
24182906Sjake/*
24282906Sjake * Magic to resume from a spill or fill trap.  If we get an alignment or an
243182020Smarius * MMU fault during a spill or a fill, this macro will detect the fault and
24488644Sjake * resume at a set instruction offset in the trap handler.
24582906Sjake *
24688644Sjake * To check if the previous trap was a spill/fill we convert the trapped pc
24788644Sjake * to a trap type and verify that it is in the range of spill/fill vectors.
24882906Sjake * The spill/fill vectors are types 0x80-0xff and 0x280-0x2ff, masking off the
24982906Sjake * tl bit allows us to detect both ranges with one test.
25082906Sjake *
25182906Sjake * This is:
25288644Sjake *	0x80 <= (((%tpc - %tba) >> 5) & ~0x200) < 0x100
25382906Sjake *
25482906Sjake * To calculate the new pc we take advantage of the xor feature of wrpr.
25582906Sjake * Forcing all the low bits of the trapped pc on we can produce any offset
25682906Sjake * into the spill/fill vector.  The size of a spill/fill trap vector is 0x80.
25782906Sjake *
25882906Sjake *	0x7f ^ 0x1f == 0x60
25982906Sjake *	0x1f == (0x80 - 0x60) - 1
26082906Sjake *
26186519Sjake * Which are the offset and xor value used to resume from alignment faults.
26282906Sjake */
26382906Sjake
26482906Sjake/*
26588644Sjake * Determine if we have trapped inside of a spill/fill vector, and if so resume
26688644Sjake * at a fixed instruction offset in the trap vector.  Must be called on
26788644Sjake * alternate globals.
26882906Sjake */
26988644Sjake#define	RESUME_SPILLFILL_MAGIC(stxa_g0_sfsr, xor) \
27088644Sjake	dec	16, ASP_REG ; \
27188644Sjake	stx	%g1, [ASP_REG + 0] ; \
27288644Sjake	stx	%g2, [ASP_REG + 8] ; \
27388644Sjake	rdpr	%tpc, %g1 ; \
27488644Sjake	ERRATUM50(%g1) ; \
27588644Sjake	rdpr	%tba, %g2 ; \
27688644Sjake	sub	%g1, %g2, %g2 ; \
27788644Sjake	srlx	%g2, 5, %g2 ; \
27888644Sjake	andn	%g2, 0x200, %g2 ; \
27988644Sjake	cmp	%g2, 0x80 ; \
28088644Sjake	blu,pt	%xcc, 9f ; \
28188644Sjake	 cmp	%g2, 0x100 ; \
28288644Sjake	bgeu,pt	%xcc, 9f ; \
28388644Sjake	 or	%g1, 0x7f, %g1 ; \
28488644Sjake	wrpr	%g1, xor, %tnpc ; \
28588644Sjake	stxa_g0_sfsr ; \
28688644Sjake	ldx	[ASP_REG + 8], %g2 ; \
28788644Sjake	ldx	[ASP_REG + 0], %g1 ; \
28888644Sjake	inc	16, ASP_REG ; \
28988644Sjake	done ; \
29088644Sjake9:	ldx	[ASP_REG + 8], %g2 ; \
29188644Sjake	ldx	[ASP_REG + 0], %g1 ; \
29288644Sjake	inc	16, ASP_REG
29382906Sjake
29488644Sjake/*
295182020Smarius * For certain faults we need to clear the SFSR MMU register before returning.
29688644Sjake */
29788644Sjake#define	RSF_CLR_SFSR \
29888644Sjake	wr	%g0, ASI_DMMU, %asi ; \
29988644Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
30088644Sjake
30182906Sjake#define	RSF_XOR(off)	((0x80 - off) - 1)
30282906Sjake
30382906Sjake/*
30482906Sjake * Instruction offsets in spill and fill trap handlers for handling certain
30582906Sjake * nested traps, and corresponding xor constants for wrpr.
30682906Sjake */
30786519Sjake#define	RSF_OFF_ALIGN	0x60
30886519Sjake#define	RSF_OFF_MMU	0x70
30982906Sjake
31088644Sjake#define	RESUME_SPILLFILL_ALIGN \
31188644Sjake	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_ALIGN))
31288644Sjake#define	RESUME_SPILLFILL_MMU \
31388644Sjake	RESUME_SPILLFILL_MAGIC(EMPTY, RSF_XOR(RSF_OFF_MMU))
31488644Sjake#define	RESUME_SPILLFILL_MMU_CLR_SFSR \
31588644Sjake	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_MMU))
31682906Sjake
31782906Sjake/*
31882906Sjake * Constant to add to %tnpc when taking a fill trap just before returning to
31988644Sjake * user mode.
32082906Sjake */
32182906Sjake#define	RSF_FILL_INC	tl0_ret_fill_end - tl0_ret_fill
32282906Sjake
32382906Sjake/*
32482906Sjake * Generate a T_SPILL or T_FILL trap if the window operation fails.
32582906Sjake */
32682906Sjake#define	RSF_TRAP(type) \
327116589Sjake	ba	%xcc, tl0_sftrap ; \
32882906Sjake	 mov	type, %g2 ; \
32982906Sjake	.align	16
33082906Sjake
33182906Sjake/*
33282906Sjake * Game over if the window operation fails.
33382906Sjake */
33482906Sjake#define	RSF_FATAL(type) \
335116589Sjake	ba	%xcc, rsf_fatal ; \
33688781Sjake	 mov	type, %g2 ; \
33782906Sjake	.align	16
33882906Sjake
33982906Sjake/*
34082906Sjake * Magic to resume from a failed fill a few instructions after the corrsponding
34182906Sjake * restore.  This is used on return from the kernel to usermode.
34282906Sjake */
34382906Sjake#define	RSF_FILL_MAGIC \
34482906Sjake	rdpr	%tnpc, %g1 ; \
34582906Sjake	add	%g1, RSF_FILL_INC, %g1 ; \
34682906Sjake	wrpr	%g1, 0, %tnpc ; \
34782906Sjake	done ; \
34882906Sjake	.align	16
34982906Sjake
35082906Sjake/*
35182906Sjake * Spill to the pcb if a spill to the user stack in kernel mode fails.
35282906Sjake */
35382906Sjake#define	RSF_SPILL_TOPCB \
354116589Sjake	ba,a	%xcc, tl1_spill_topcb ; \
35582906Sjake	 nop ; \
35682906Sjake	.align	16
35782906Sjake
35888781SjakeENTRY(rsf_fatal)
35988781Sjake#if KTR_COMPILE & KTR_TRAP
36088781Sjake	CATR(KTR_TRAP, "rsf_fatal: bad window trap tt=%#lx type=%#lx"
36188781Sjake	    , %g1, %g3, %g4, 7, 8, 9)
36288781Sjake	rdpr	%tt, %g3
36388781Sjake	stx	%g3, [%g1 + KTR_PARM1]
36488781Sjake	stx	%g2, [%g1 + KTR_PARM2]
36588781Sjake9:
36688781Sjake#endif
36788781Sjake
36888781Sjake	KSTACK_CHECK
36988781Sjake
37088781Sjake	sir
37188781SjakeEND(rsf_fatal)
37288781Sjake
373223718Smarius	.data
374223718Smarius	_ALIGN_DATA
375224187Sattilio	.globl	intrnames, sintrnames
376223718Smariusintrnames:
377225899Smarius	.space	(IV_MAX + PIL_MAX) * (MAXCOMLEN + 1)
378224187Sattiliosintrnames:
379225899Smarius	.quad	(IV_MAX + PIL_MAX) * (MAXCOMLEN + 1)
380224187Sattilio
381224187Sattilio	.globl	intrcnt, sintrcnt
382223718Smariusintrcnt:
383225899Smarius	.space	(IV_MAX + PIL_MAX) * 8
384224187Sattiliosintrcnt:
385225899Smarius	.quad	(IV_MAX + PIL_MAX) * 8
38680709Sjake
387223718Smarius	.text
38880709Sjake
38982906Sjake/*
39082906Sjake * Trap table and associated macros
39182906Sjake *
39282906Sjake * Due to its size a trap table is an inherently hard thing to represent in
39382906Sjake * code in a clean way.  There are approximately 1024 vectors, of 8 or 32
39482906Sjake * instructions each, many of which are identical.  The way that this is
395220939Smarius * laid out is the instructions (8 or 32) for the actual trap vector appear
39682906Sjake * as an AS macro.  In general this code branches to tl0_trap or tl1_trap,
39782906Sjake * but if not supporting code can be placed just after the definition of the
39882906Sjake * macro.  The macros are then instantiated in a different section (.trap),
39982906Sjake * which is setup to be placed by the linker at the beginning of .text, and the
40082906Sjake * code around the macros is moved to the end of trap table.  In this way the
40182906Sjake * code that must be sequential in memory can be split up, and located near
40282906Sjake * its supporting code so that it is easier to follow.
40382906Sjake */
40482906Sjake
40582906Sjake	/*
40682906Sjake	 * Clean window traps occur when %cleanwin is zero to ensure that data
40782906Sjake	 * is not leaked between address spaces in registers.
40882906Sjake	 */
40980709Sjake	.macro	clean_window
41080709Sjake	clr	%o0
41180709Sjake	clr	%o1
41280709Sjake	clr	%o2
41380709Sjake	clr	%o3
41480709Sjake	clr	%o4
41580709Sjake	clr	%o5
41680709Sjake	clr	%o6
41780709Sjake	clr	%o7
41880709Sjake	clr	%l0
41980709Sjake	clr	%l1
42080709Sjake	clr	%l2
42180709Sjake	clr	%l3
42280709Sjake	clr	%l4
42380709Sjake	clr	%l5
42480709Sjake	clr	%l6
42580709Sjake	rdpr	%cleanwin, %l7
42680709Sjake	inc	%l7
42780709Sjake	wrpr	%l7, 0, %cleanwin
42880709Sjake	clr	%l7
42980709Sjake	retry
43080709Sjake	.align	128
43180709Sjake	.endm
43280709Sjake
43381380Sjake	/*
43482906Sjake	 * Stack fixups for entry from user mode.  We are still running on the
43582906Sjake	 * user stack, and with its live registers, so we must save soon.  We
43682906Sjake	 * are on alternate globals so we do have some registers.  Set the
43788644Sjake	 * transitional window state, and do the save.  If this traps we
438181701Smarius	 * attempt to spill a window to the user stack.  If this fails, we
439181701Smarius	 * spill the window to the pcb and continue.  Spilling to the pcb
44088644Sjake	 * must not fail.
44182906Sjake	 *
44282906Sjake	 * NOTE: Must be called with alternate globals and clobbers %g1.
44381380Sjake	 */
44482906Sjake
44588644Sjake	.macro	tl0_split
44682906Sjake	rdpr	%wstate, %g1
44782906Sjake	wrpr	%g1, WSTATE_TRANSITION, %wstate
44881380Sjake	save
44981380Sjake	.endm
45081380Sjake
45182906Sjake	.macro	tl0_setup	type
45288644Sjake	tl0_split
453108374Sjake	clr	%o1
454103921Sjake	set	trap, %o2
455103897Sjake	ba	%xcc, tl0_utrap
45682906Sjake	 mov	\type, %o0
45781380Sjake	.endm
45881380Sjake
45981380Sjake	/*
46082906Sjake	 * Generic trap type.  Call trap() with the specified type.
46181380Sjake	 */
46280709Sjake	.macro	tl0_gen		type
46382906Sjake	tl0_setup \type
46480709Sjake	.align	32
46580709Sjake	.endm
46680709Sjake
46782906Sjake	/*
46882906Sjake	 * This is used to suck up the massive swaths of reserved trap types.
46982906Sjake	 * Generates count "reserved" trap vectors.
47082906Sjake	 */
47180709Sjake	.macro	tl0_reserved	count
47280709Sjake	.rept	\count
47380709Sjake	tl0_gen	T_RESERVED
47480709Sjake	.endr
47580709Sjake	.endm
47680709Sjake
477109810Sjake	.macro	tl1_split
478109810Sjake	rdpr	%wstate, %g1
479109810Sjake	wrpr	%g1, WSTATE_NESTED, %wstate
480109810Sjake	save	%sp, -(CCFSZ + TF_SIZEOF), %sp
481109810Sjake	.endm
482109810Sjake
483109810Sjake	.macro	tl1_setup	type
484109810Sjake	tl1_split
485109810Sjake	clr	%o1
486109810Sjake	set	trap, %o2
487116589Sjake	ba	%xcc, tl1_trap
488109810Sjake	 mov	\type | T_KERNEL, %o0
489109810Sjake	.endm
490109810Sjake
491109810Sjake	.macro	tl1_gen		type
492109810Sjake	tl1_setup \type
493109810Sjake	.align	32
494109810Sjake	.endm
495109810Sjake
496109810Sjake	.macro	tl1_reserved	count
497109810Sjake	.rept	\count
498109810Sjake	tl1_gen	T_RESERVED
499109810Sjake	.endr
500109810Sjake	.endm
501109810Sjake
50288644Sjake	.macro	tl0_insn_excptn
503101899Sjake	wrpr	%g0, PSTATE_ALT, %pstate
50488644Sjake	wr	%g0, ASI_IMMU, %asi
50588644Sjake	rdpr	%tpc, %g3
50688644Sjake	ldxa	[%g0 + AA_IMMU_SFSR] %asi, %g4
507182877Smarius	/*
508182877Smarius	 * XXX in theory, a store to AA_IMMU_SFSR must be immediately
509182877Smarius	 * followed by a DONE, FLUSH or RETRY for USIII.  In practice,
510182877Smarius	 * this triggers a RED state exception though.
511182877Smarius	 */
51288644Sjake	stxa	%g0, [%g0 + AA_IMMU_SFSR] %asi
51388644Sjake	membar	#Sync
514116589Sjake	ba	%xcc, tl0_sfsr_trap
51588644Sjake	 mov	T_INSTRUCTION_EXCEPTION, %g2
51688644Sjake	.align	32
51788644Sjake	.endm
51888644Sjake
51982906Sjake	.macro	tl0_data_excptn
520101899Sjake	wrpr	%g0, PSTATE_ALT, %pstate
52182906Sjake	wr	%g0, ASI_DMMU, %asi
52282906Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
52382906Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
52488644Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
52588644Sjake	membar	#Sync
526116589Sjake	ba	%xcc, tl0_sfsr_trap
52788644Sjake	 mov	T_DATA_EXCEPTION, %g2
52882906Sjake	.align	32
52982906Sjake	.endm
53082906Sjake
53182005Sjake	.macro	tl0_align
53282906Sjake	wr	%g0, ASI_DMMU, %asi
53382906Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
53482906Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
53588644Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
53688644Sjake	membar	#Sync
537116589Sjake	ba	%xcc, tl0_sfsr_trap
53888644Sjake	 mov	T_MEM_ADDRESS_NOT_ALIGNED, %g2
53982005Sjake	.align	32
54082005Sjake	.endm
54182005Sjake
54282005SjakeENTRY(tl0_sfsr_trap)
54388644Sjake	tl0_split
544108374Sjake	clr	%o1
545103921Sjake	set	trap, %o2
54688644Sjake	mov	%g3, %o4
54788644Sjake	mov	%g4, %o5
548103897Sjake	ba	%xcc, tl0_utrap
54982906Sjake	 mov	%g2, %o0
55082005SjakeEND(tl0_sfsr_trap)
55182005Sjake
55282906Sjake	.macro	tl0_intr level, mask
55388644Sjake	tl0_split
55491246Sjake	set	\mask, %o1
555116589Sjake	ba	%xcc, tl0_intr
55691246Sjake	 mov	\level, %o0
55781380Sjake	.align	32
55881380Sjake	.endm
55981380Sjake
56081380Sjake#define	INTR(level, traplvl)						\
56182906Sjake	tl ## traplvl ## _intr	level, 1 << level
56281380Sjake
56381380Sjake#define	TICK(traplvl) \
564182743Smarius	tl ## traplvl ## _intr	PIL_TICK, 0x10001
56581380Sjake
56681380Sjake#define	INTR_LEVEL(tl)							\
56781380Sjake	INTR(1, tl) ;							\
56881380Sjake	INTR(2, tl) ;							\
56981380Sjake	INTR(3, tl) ;							\
57081380Sjake	INTR(4, tl) ;							\
57181380Sjake	INTR(5, tl) ;							\
57281380Sjake	INTR(6, tl) ;							\
57381380Sjake	INTR(7, tl) ;							\
57481380Sjake	INTR(8, tl) ;							\
57581380Sjake	INTR(9, tl) ;							\
57681380Sjake	INTR(10, tl) ;							\
57781380Sjake	INTR(11, tl) ;							\
57881380Sjake	INTR(12, tl) ;							\
57981380Sjake	INTR(13, tl) ;							\
58081380Sjake	TICK(tl) ;							\
58181380Sjake	INTR(15, tl) ;
58281380Sjake
58380709Sjake	.macro	tl0_intr_level
58481380Sjake	INTR_LEVEL(0)
58580709Sjake	.endm
58680709Sjake
58797265Sjake	.macro	intr_vector
58897265Sjake	ldxa	[%g0] ASI_INTR_RECEIVE, %g1
58997265Sjake	andcc	%g1, IRSR_BUSY, %g0
590104075Sjake	bnz,a,pt %xcc, intr_vector
59197265Sjake	 nop
592223721Smarius	ba,a,pt	%xcc, intr_vector_stray
593223721Smarius	 nop
59481380Sjake	.align	32
59580709Sjake	.endm
59680709Sjake
597109860Sjake	.macro	tl0_immu_miss
59881380Sjake	/*
599181701Smarius	 * Load the context and the virtual page number from the tag access
600109860Sjake	 * register.  We ignore the context.
601109860Sjake	 */
602109860Sjake	wr	%g0, ASI_IMMU, %asi
603109860Sjake	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g1
604109860Sjake
605109860Sjake	/*
606102040Sjake	 * Initialize the page size walker.
607102040Sjake	 */
608102040Sjake	mov	TS_MIN, %g2
609102040Sjake
610102040Sjake	/*
611102040Sjake	 * Loop over all supported page sizes.
612102040Sjake	 */
613102040Sjake
614102040Sjake	/*
615102040Sjake	 * Compute the page shift for the page size we are currently looking
616102040Sjake	 * for.
617102040Sjake	 */
618102040Sjake1:	add	%g2, %g2, %g3
619102040Sjake	add	%g3, %g2, %g3
620102040Sjake	add	%g3, PAGE_SHIFT, %g3
621102040Sjake
622102040Sjake	/*
62391224Sjake	 * Extract the virtual page number from the contents of the tag
62491224Sjake	 * access register.
62581380Sjake	 */
626102040Sjake	srlx	%g1, %g3, %g3
62781380Sjake
62881380Sjake	/*
629181701Smarius	 * Compute the TTE bucket address.
63081380Sjake	 */
631102040Sjake	ldxa	[%g0 + AA_IMMU_TSB] %asi, %g5
632102040Sjake	and	%g3, TSB_BUCKET_MASK, %g4
633102040Sjake	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
634102040Sjake	add	%g4, %g5, %g4
63581380Sjake
63681380Sjake	/*
637181701Smarius	 * Compute the TTE tag target.
63881380Sjake	 */
639102040Sjake	sllx	%g3, TV_SIZE_BITS, %g3
640102040Sjake	or	%g3, %g2, %g3
64181380Sjake
64281380Sjake	/*
643181701Smarius	 * Loop over the TTEs in this bucket.
64481380Sjake	 */
64581380Sjake
64681380Sjake	/*
647181701Smarius	 * Load the TTE.  Note that this instruction may fault, clobbering
648102040Sjake	 * the contents of the tag access register, %g5, %g6, and %g7.  We
649102040Sjake	 * do not use %g5, and %g6 and %g7 are not used until this instruction
650102040Sjake	 * completes successfully.
65181380Sjake	 */
652102040Sjake2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
65381380Sjake
65481380Sjake	/*
655181701Smarius	 * Check that it's valid and executable and that the TTE tags match.
65681380Sjake	 */
657102040Sjake	brgez,pn %g7, 3f
658102040Sjake	 andcc	%g7, TD_EXEC, %g0
659102040Sjake	bz,pn	%xcc, 3f
660102040Sjake	 cmp	%g3, %g6
661102040Sjake	bne,pn	%xcc, 3f
66288644Sjake	 EMPTY
66381380Sjake
66481380Sjake	/*
665181701Smarius	 * We matched a TTE, load the TLB.
66681380Sjake	 */
66781380Sjake
66881380Sjake	/*
66981380Sjake	 * Set the reference bit, if it's currently clear.
67081380Sjake	 */
671102040Sjake	 andcc	%g7, TD_REF, %g0
67282906Sjake	bz,a,pn	%xcc, tl0_immu_miss_set_ref
67381380Sjake	 nop
67481380Sjake
67581380Sjake	/*
676181701Smarius	 * Load the TTE tag and data into the TLB and retry the instruction.
67781380Sjake	 */
678102040Sjake	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
679102040Sjake	stxa	%g7, [%g0] ASI_ITLB_DATA_IN_REG
68081380Sjake	retry
68181380Sjake
68281380Sjake	/*
683181701Smarius	 * Advance to the next TTE in this bucket, and check the low bits
684102040Sjake	 * of the bucket pointer to see if we've finished the bucket.
68581380Sjake	 */
686102040Sjake3:	add	%g4, 1 << TTE_SHIFT, %g4
687102040Sjake	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
688102040Sjake	bnz,pt	%xcc, 2b
689102040Sjake	 EMPTY
69091224Sjake
69191224Sjake	/*
692102040Sjake	 * See if we just checked the largest page size, and advance to the
693102040Sjake	 * next one if not.
69491224Sjake	 */
695102040Sjake	 cmp	%g2, TS_MAX
696102040Sjake	bne,pt	%xcc, 1b
697102040Sjake	 add	%g2, 1, %g2
69891224Sjake
69996207Sjake	/*
700181701Smarius	 * Not in user TSB, call C code.
701102040Sjake	 */
702102040Sjake	ba,a	%xcc, tl0_immu_miss_trap
70381380Sjake	.align	128
70480709Sjake	.endm
70580709Sjake
70682906SjakeENTRY(tl0_immu_miss_set_ref)
70781380Sjake	/*
70881380Sjake	 * Set the reference bit.
70981380Sjake	 */
710216803Smarius	TTE_SET_REF(%g4, %g2, %g3, a, ASI_N)
71181380Sjake
71281380Sjake	/*
713102040Sjake	 * May have become invalid during casxa, in which case start over.
71481380Sjake	 */
715102040Sjake	brgez,pn %g2, 1f
716102040Sjake	 nop
71781380Sjake
71881380Sjake	/*
719181701Smarius	 * Load the TTE tag and data into the TLB and retry the instruction.
72081380Sjake	 */
721102040Sjake	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
722102040Sjake	stxa	%g2, [%g0] ASI_ITLB_DATA_IN_REG
72391224Sjake1:	retry
72482906SjakeEND(tl0_immu_miss_set_ref)
72581380Sjake
72682906SjakeENTRY(tl0_immu_miss_trap)
72781380Sjake	/*
72896207Sjake	 * Put back the contents of the tag access register, in case we
72996207Sjake	 * faulted.
73096207Sjake	 */
731182877Smarius	sethi	%hi(KERNBASE), %g2
732102040Sjake	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
733182877Smarius	flush	%g2
73496207Sjake
73596207Sjake	/*
73682906Sjake	 * Switch to alternate globals.
73782906Sjake	 */
73882906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
73982906Sjake
74082906Sjake	/*
74191224Sjake	 * Reload the tag access register.
74281380Sjake	 */
74391224Sjake	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2
74481380Sjake
74581380Sjake	/*
74691224Sjake	 * Save the tag access register, and call common trap code.
74781380Sjake	 */
74888644Sjake	tl0_split
749108374Sjake	clr	%o1
750103921Sjake	set	trap, %o2
75191224Sjake	mov	%g2, %o3
752114257Sjake	ba	%xcc, tl0_utrap
75388644Sjake	 mov	T_INSTRUCTION_MISS, %o0
75482906SjakeEND(tl0_immu_miss_trap)
75581380Sjake
756109860Sjake	.macro	tl0_dmmu_miss
75781180Sjake	/*
758181701Smarius	 * Load the context and the virtual page number from the tag access
759109860Sjake	 * register.  We ignore the context.
760109860Sjake	 */
761109860Sjake	wr	%g0, ASI_DMMU, %asi
762109860Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1
763109860Sjake
764109860Sjake	/*
765102040Sjake	 * Initialize the page size walker.
766102040Sjake	 */
767109860Sjaketl1_dmmu_miss_user:
768102040Sjake	mov	TS_MIN, %g2
769102040Sjake
770102040Sjake	/*
771102040Sjake	 * Loop over all supported page sizes.
772102040Sjake	 */
773102040Sjake
774102040Sjake	/*
775102040Sjake	 * Compute the page shift for the page size we are currently looking
776102040Sjake	 * for.
777102040Sjake	 */
778102040Sjake1:	add	%g2, %g2, %g3
779102040Sjake	add	%g3, %g2, %g3
780102040Sjake	add	%g3, PAGE_SHIFT, %g3
781102040Sjake
782102040Sjake	/*
78391224Sjake	 * Extract the virtual page number from the contents of the tag
78491224Sjake	 * access register.
78591224Sjake	 */
786102040Sjake	srlx	%g1, %g3, %g3
78791224Sjake
78891224Sjake	/*
789181701Smarius	 * Compute the TTE bucket address.
79081180Sjake	 */
791102040Sjake	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g5
792102040Sjake	and	%g3, TSB_BUCKET_MASK, %g4
793102040Sjake	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
794102040Sjake	add	%g4, %g5, %g4
79581180Sjake
79681180Sjake	/*
797181701Smarius	 * Compute the TTE tag target.
79881180Sjake	 */
799102040Sjake	sllx	%g3, TV_SIZE_BITS, %g3
800102040Sjake	or	%g3, %g2, %g3
80181180Sjake
80281180Sjake	/*
803181701Smarius	 * Loop over the TTEs in this bucket.
80481180Sjake	 */
80581180Sjake
80681180Sjake	/*
807181701Smarius	 * Load the TTE.  Note that this instruction may fault, clobbering
808102040Sjake	 * the contents of the tag access register, %g5, %g6, and %g7.  We
809102040Sjake	 * do not use %g5, and %g6 and %g7 are not used until this instruction
810102040Sjake	 * completes successfully.
81181180Sjake	 */
812102040Sjake2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
81381180Sjake
81481180Sjake	/*
815181701Smarius	 * Check that it's valid and that the virtual page numbers match.
81681180Sjake	 */
817102040Sjake	brgez,pn %g7, 3f
818102040Sjake	 cmp	%g3, %g6
819102040Sjake	bne,pn	%xcc, 3f
82088644Sjake	 EMPTY
82181180Sjake
82281180Sjake	/*
823181701Smarius	 * We matched a TTE, load the TLB.
82481180Sjake	 */
82581180Sjake
82681180Sjake	/*
82781180Sjake	 * Set the reference bit, if it's currently clear.
82881180Sjake	 */
829102040Sjake	 andcc	%g7, TD_REF, %g0
830109860Sjake	bz,a,pn	%xcc, tl0_dmmu_miss_set_ref
83181180Sjake	 nop
83281180Sjake
83381180Sjake	/*
834181701Smarius	 * Load the TTE tag and data into the TLB and retry the instruction.
83581180Sjake	 */
836102040Sjake	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
837102040Sjake	stxa	%g7, [%g0] ASI_DTLB_DATA_IN_REG
83881180Sjake	retry
83981180Sjake
84081180Sjake	/*
841181701Smarius	 * Advance to the next TTE in this bucket, and check the low bits
842102040Sjake	 * of the bucket pointer to see if we've finished the bucket.
84381180Sjake	 */
844102040Sjake3:	add	%g4, 1 << TTE_SHIFT, %g4
845102040Sjake	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
846102040Sjake	bnz,pt	%xcc, 2b
847102040Sjake	 EMPTY
848102040Sjake
849102040Sjake	/*
850102040Sjake	 * See if we just checked the largest page size, and advance to the
851102040Sjake	 * next one if not.
852102040Sjake	 */
853102040Sjake	 cmp	%g2, TS_MAX
854102040Sjake	bne,pt	%xcc, 1b
855102040Sjake	 add	%g2, 1, %g2
856109860Sjake
857109860Sjake	/*
858181701Smarius	 * Not in user TSB, call C code.
859109860Sjake	 */
860109860Sjake	ba,a	%xcc, tl0_dmmu_miss_trap
861109860Sjake	.align	128
86281180Sjake	.endm
86381180Sjake
ENTRY(tl0_dmmu_miss_set_ref)
	/*
	 * Out-of-line continuation of the user data MMU miss handler, taken
	 * when the matched TTE had its reference bit clear.  On entry %g1
	 * holds the tag access register contents and %g4 points at the
	 * matched TTE in the user TSB; %asi is still ASI_DMMU.
	 */

	/*
	 * Set the reference bit.  TTE_SET_REF leaves the updated TTE data
	 * in %g2 (with %g3 as scratch -- confirm against the macro
	 * definition).
	 */
	TTE_SET_REF(%g4, %g2, %g3, a, ASI_N)

	/*
	 * May have become invalid during casxa, in which case start over.
	 * (A valid TTE has the sign bit set, so "branch if >= 0" means
	 * "branch if no longer valid".)
	 */
	brgez,pn %g2, 1f
	 nop

	/*
	 * Load the TTE tag and data into the TLB and retry the instruction.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	stxa	%g2, [%g0] ASI_DTLB_DATA_IN_REG
1:	retry
END(tl0_dmmu_miss_set_ref)
88381180Sjake
ENTRY(tl0_dmmu_miss_trap)
	/*
	 * The faulting page was not found in the user TSB; hand the fault
	 * to C code.  On entry %g1 holds the tag access register contents.
	 * We may arrive here at TL > 1 if the TSB load itself faulted
	 * during a window spill or fill.
	 */

	/*
	 * Put back the contents of the tag access register, in case we
	 * faulted.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	membar	#Sync

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Check if we actually came from the kernel.
	 */
	rdpr	%tl, %g1
	cmp	%g1, 1
	bgt,a,pn %xcc, 1f
	 nop

	/*
	 * Reload the tag access register.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2

	/*
	 * Save the tag access register and call common trap code.
	 */
	tl0_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	ba	%xcc, tl0_utrap
	 mov	T_DATA_MISS, %o0

	/*
	 * Handle faults during window spill/fill.
	 */
1:	RESUME_SPILLFILL_MMU

	/*
	 * Reload the tag access register.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	ba	%xcc, tl1_trap
	 mov	T_DATA_MISS | T_KERNEL, %o0
END(tl0_dmmu_miss_trap)
93781180Sjake
	.macro	tl0_dmmu_prot
	/*
	 * Trap table slot for user data protection faults.  The real work
	 * does not fit in the 128-byte vector, so just branch out of line.
	 */
	ba,a	%xcc, tl0_dmmu_prot_1
	 nop
	.align	128
	.endm
943109860Sjake
ENTRY(tl0_dmmu_prot_1)
	/*
	 * User data protection fault handler: walk the supported page sizes,
	 * look the faulting page up in the user TSB and, if a valid writable
	 * (TD_SW) mapping is found, set its hardware write bit and reload
	 * the TLB.  Otherwise fall through to C code.
	 */

	/*
	 * Load the context and the virtual page number from the tag access
	 * register.  We ignore the context.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1

	/*
	 * Initialize the page size walker.
	 * (tl1_dmmu_prot_user is the entry point used by the kernel-level
	 * protection fault handler for faults on user addresses.)
	 */
tl1_dmmu_prot_user:
	mov	TS_MIN, %g2

	/*
	 * Loop over all supported page sizes.
	 */

	/*
	 * Compute the page shift for the page size we are currently looking
	 * for: %g3 = 3 * %g2 + PAGE_SHIFT.
	 */
1:	add	%g2, %g2, %g3
	add	%g3, %g2, %g3
	add	%g3, PAGE_SHIFT, %g3

	/*
	 * Extract the virtual page number from the contents of the tag
	 * access register.
	 */
	srlx	%g1, %g3, %g3

	/*
	 * Compute the TTE bucket address.
	 */
	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g5
	and	%g3, TSB_BUCKET_MASK, %g4
	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
	add	%g4, %g5, %g4

	/*
	 * Compute the TTE tag target.
	 */
	sllx	%g3, TV_SIZE_BITS, %g3
	or	%g3, %g2, %g3

	/*
	 * Loop over the TTEs in this bucket.
	 */

	/*
	 * Load the TTE.  Note that this instruction may fault, clobbering
	 * the contents of the tag access register, %g5, %g6, and %g7.  We
	 * do not use %g5, and %g6 and %g7 are not used until this instruction
	 * completes successfully.
	 */
2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and writable and that the virtual page
	 * numbers match.
	 */
	brgez,pn %g7, 4f
	 andcc	%g7, TD_SW, %g0
	bz,pn	%xcc, 4f
	 cmp	%g3, %g6
	bne,pn	%xcc, 4f
	 nop

	/*
	 * Set the hardware write bit.
	 */
	TTE_SET_W(%g4, %g2, %g3, a, ASI_N)

	/*
	 * Delete the old TLB entry and clear the SFSR.
	 */
	srlx	%g1, PAGE_SHIFT, %g3
	sllx	%g3, PAGE_SHIFT, %g3
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g2, 3f
	 or	%g2, TD_W, %g2

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	stxa	%g2, [%g0] ASI_DTLB_DATA_IN_REG
3:	retry

	/*
	 * Check the low bits to see if we've finished the bucket.
	 */
4:	add	%g4, 1 << TTE_SHIFT, %g4
	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
	bnz,pt	%xcc, 2b
	 EMPTY

	/*
	 * See if we just checked the largest page size, and advance to the
	 * next one if not.
	 */
	 cmp	%g2, TS_MAX
	bne,pt	%xcc, 1b
	 add	%g2, 1, %g2

	/*
	 * Not in user TSB, call C code.
	 */
	ba,a	%xcc, tl0_dmmu_prot_trap
	 nop
END(tl0_dmmu_prot_1)
106291224Sjake
ENTRY(tl0_dmmu_prot_trap)
	/*
	 * The writable mapping was not found in the user TSB; hand the
	 * protection fault to C code.  On entry %g1 holds the tag access
	 * register contents.  We may arrive here at TL > 1 if the TSB load
	 * faulted during a window spill or fill.
	 */

	/*
	 * Put back the contents of the tag access register, in case we
	 * faulted.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	membar	#Sync

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Check if we actually came from the kernel.
	 */
	rdpr	%tl, %g1
	cmp	%g1, 1
	bgt,a,pn %xcc, 1f
	 nop

	/*
	 * Load the SFAR, SFSR and TAR.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	/*
	 * Save the MMU registers and call common trap code.
	 */
	tl0_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl0_utrap
	 mov	T_DATA_PROTECTION, %o0

	/*
	 * Handle faults during window spill/fill.
	 */
1:	RESUME_SPILLFILL_MMU_CLR_SFSR

	/*
	 * Load the SFAR, SFSR and TAR.  Clear the SFSR.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl1_trap
	 mov	T_DATA_PROTECTION | T_KERNEL, %o0
END(tl0_dmmu_prot_trap)
112881180Sjake
	.macro	tl0_spill_0_n
	/*
	 * Spill a 64-bit register window to the user stack, accessed with
	 * the "as if user, primary" ASI.  Faults are routed to the RSF_TRAP
	 * alternate vectors which raise T_SPILL.
	 */
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_TRAP(T_SPILL)
	RSF_TRAP(T_SPILL)
	.endm
113880709Sjake
	.macro	tl0_spill_1_n
	/*
	 * Spill a register window to the user stack as 32-bit words
	 * (presumably for 32-bit processes -- confirm against the trap
	 * table).  Faults raise T_SPILL via the RSF_TRAP vectors.
	 */
	wr	%g0, ASI_AIUP, %asi
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_TRAP(T_SPILL)
	RSF_TRAP(T_SPILL)
	.endm
114882005Sjake
	.macro	tl0_fill_0_n
	/*
	 * Fill a 64-bit register window from the user stack, accessed with
	 * the "as if user, primary" ASI.  Faults raise T_FILL via the
	 * RSF_TRAP vectors.
	 */
	wr	%g0, ASI_AIUP, %asi
	FILL(ldxa, %sp + SPOFF, 8, %asi)
	restored
	retry
	.align	32
	RSF_TRAP(T_FILL)
	RSF_TRAP(T_FILL)
	.endm
115880709Sjake
	.macro	tl0_fill_1_n
	/*
	 * Fill a register window from the user stack as 32-bit words (the
	 * counterpart of tl0_spill_1_n).  Faults raise T_FILL via the
	 * RSF_TRAP vectors.
	 */
	wr	%g0, ASI_AIUP, %asi
	FILL(lduwa, %sp, 4, %asi)
	restored
	retry
	.align	32
	RSF_TRAP(T_FILL)
	RSF_TRAP(T_FILL)
	.endm
116882906Sjake
ENTRY(tl0_sftrap)
	/*
	 * Deliver a spill/fill trap to C code.  Restore the window pointer
	 * from the saved trap state, then call trap() with the trap type
	 * that was stashed in %g2.
	 */
	rdpr	%tstate, %g1
	and	%g1, TSTATE_CWP_MASK, %g1
	wrpr	%g1, 0, %cwp
	tl0_split
	clr	%o1
	set	trap, %o2
	ba	%xcc, tl0_trap
	 mov	%g2, %o0
END(tl0_sftrap)
117982906Sjake
	.macro	tl0_spill_bad	count
	/*
	 * Fill \count unused 128-byte spill vectors with "sir" (software-
	 * initiated reset): reaching one of these slots is fatal.
	 */
	.rept	\count
	sir
	.align	128
	.endr
	.endm
118682906Sjake
	.macro	tl0_fill_bad	count
	/*
	 * Fill \count unused 128-byte fill vectors with "sir" (software-
	 * initiated reset): reaching one of these slots is fatal.
	 */
	.rept	\count
	sir
	.align	128
	.endr
	.endm
119380709Sjake
	.macro	tl0_syscall
	/*
	 * System call trap vector: hand off to the common trap path with
	 * syscall() as the handler and T_SYSCALL as the trap type.
	 */
	tl0_split
	clr	%o1
	set	syscall, %o2
	ba	%xcc, tl0_trap
	 mov	T_SYSCALL, %o0
	.align	32
	.endm
120284186Sjake
	.macro	tl0_fp_restore
	/*
	 * Trap table slot that branches to the out-of-line user FPU
	 * restore code (ENTRY(tl0_fp_restore) below).
	 */
	ba,a	%xcc, tl0_fp_restore
	 nop
	.align	32
	.endm
1208112920Sjake
ENTRY(tl0_fp_restore)
	/*
	 * Restore the user floating point state saved in the pcb, clearing
	 * the PCB_FEF flag, then enable the FPU and return with "done".
	 */
	ldx	[PCB_REG + PCB_FLAGS], %g1
	andn	%g1, PCB_FEF, %g1
	stx	%g1, [PCB_REG + PCB_FLAGS]

	wr	%g0, FPRS_FEF, %fprs
	wr	%g0, ASI_BLK_S, %asi
	ldda	[PCB_REG + PCB_UFP + (0 * 64)] %asi, %f0
	ldda	[PCB_REG + PCB_UFP + (1 * 64)] %asi, %f16
	ldda	[PCB_REG + PCB_UFP + (2 * 64)] %asi, %f32
	ldda	[PCB_REG + PCB_UFP + (3 * 64)] %asi, %f48
	membar	#Sync
	done
END(tl0_fp_restore)
1223112920Sjake
	.macro	tl1_insn_excptn
	/*
	 * Instruction access exception at TL 1: switch to alternate globals,
	 * capture %tpc and the I-MMU SFSR, clear the SFSR, and branch to the
	 * out-of-line trap code with the trap type in %g2.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate
	wr	%g0, ASI_IMMU, %asi
	rdpr	%tpc, %g3
	ldxa	[%g0 + AA_IMMU_SFSR] %asi, %g4
	/*
	 * XXX in theory, a store to AA_IMMU_SFSR must be immediately
	 * followed by a DONE, FLUSH or RETRY for USIII.  In practice,
	 * this triggers a RED state exception though.
	 */
	stxa	%g0, [%g0 + AA_IMMU_SFSR] %asi
	membar	#Sync
	ba	%xcc, tl1_insn_exceptn_trap
	 mov	T_INSTRUCTION_EXCEPTION | T_KERNEL, %g2
	.align	32
	.endm
124080709Sjake
ENTRY(tl1_insn_exceptn_trap)
	/*
	 * Deliver a kernel instruction access exception to trap().  On
	 * entry %g2 holds the trap type, %g3 the faulting %tpc and %g4
	 * the I-MMU SFSR (set up by the tl1_insn_excptn vector).
	 */
	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl1_trap
	 mov	%g2, %o0
END(tl1_insn_exceptn_trap)
125088644Sjake
	.macro	tl1_fp_disabled
	/*
	 * FPU disabled at TL 1: branch to the out-of-line handler, which
	 * decides between restoring kernel FP state and trapping.
	 */
	ba,a	%xcc, tl1_fp_disabled_1
	 nop
	.align	32
	.endm
1256113024Sjake
ENTRY(tl1_fp_disabled_1)
	/*
	 * If the trap PC lies inside the [fpu_fault_begin,
	 * fpu_fault_begin + fpu_fault_size) region, the kernel was using
	 * the FPU deliberately: re-enable it, reload the kernel FP state
	 * from the pcb and retry.  Otherwise raise T_FP_DISABLED.
	 */
	rdpr	%tpc, %g1
	set	fpu_fault_begin, %g2
	sub	%g1, %g2, %g1
	cmp	%g1, fpu_fault_size
	bgeu,a,pn %xcc, 1f
	 nop

	wr	%g0, FPRS_FEF, %fprs
	wr	%g0, ASI_BLK_S, %asi
	ldda	[PCB_REG + PCB_KFP + (0 * 64)] %asi, %f0
	ldda	[PCB_REG + PCB_KFP + (1 * 64)] %asi, %f16
	ldda	[PCB_REG + PCB_KFP + (2 * 64)] %asi, %f32
	ldda	[PCB_REG + PCB_KFP + (3 * 64)] %asi, %f48
	membar	#Sync
	retry

1:	tl1_split
	clr	%o1
	set	trap, %o2
	ba	%xcc, tl1_trap
	 mov	T_FP_DISABLED | T_KERNEL, %o0
END(tl1_fp_disabled_1)
1280113024Sjake
	.macro	tl1_data_excptn
	/*
	 * Data access exception at TL 1: switch to alternate globals and
	 * branch to the out-of-line handler.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate
	ba,a	%xcc, tl1_data_excptn_trap
	 nop
	.align	32
	.endm
128782005Sjake
ENTRY(tl1_data_excptn_trap)
	/*
	 * Resume window spill/fill faults if applicable, then fall into the
	 * common SFSR trap path with the trap type in %g2.
	 */
	RESUME_SPILLFILL_MMU_CLR_SFSR
	ba	%xcc, tl1_sfsr_trap
	 mov	T_DATA_EXCEPTION | T_KERNEL, %g2
END(tl1_data_excptn_trap)
129382906Sjake
	.macro	tl1_align
	/*
	 * Memory address not aligned at TL 1: switch to alternate globals
	 * and branch to the out-of-line handler.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate
	ba,a	%xcc, tl1_align_trap
	 nop
	.align	32
	.endm
130080709Sjake
ENTRY(tl1_align_trap)
	/*
	 * Resume window spill/fill alignment faults if applicable, then
	 * fall into the common SFSR trap path with the trap type in %g2.
	 */
	RESUME_SPILLFILL_ALIGN
	ba	%xcc, tl1_sfsr_trap
	 mov	T_MEM_ADDRESS_NOT_ALIGNED | T_KERNEL, %g2
END(tl1_align_trap)
130682906Sjake
ENTRY(tl1_sfsr_trap)
	/*
	 * Common tail for kernel data faults: read the D-MMU SFAR and SFSR,
	 * clear the SFSR, and call trap() with the trap type from %g2 and
	 * the fault address/status in %o4/%o5.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl1_trap
	 mov	%g2, %o0
END(tl1_sfsr_trap)
132280709Sjake
	.macro	tl1_intr level, mask
	/*
	 * Interrupt at TL 1: call the common tl1_intr code with the
	 * interrupt level in %o0 and the corresponding mask in %o1.
	 */
	tl1_split
	set	\mask, %o1
	ba	%xcc, tl1_intr
	 mov	\level, %o0
	.align	32
	.endm
133081380Sjake
	.macro	tl1_intr_level
	/* Expand the per-level interrupt vectors for TL 1 (see INTR_LEVEL). */
	INTR_LEVEL(1)
	.endm
133480709Sjake
	.macro	tl1_immu_miss
	/*
	 * Kernel instruction MMU miss handler: look the faulting page up in
	 * the kernel TSB and load the TLB on a hit; otherwise branch to
	 * tl1_immu_miss_trap.  The TSB address, mask and access ASI are
	 * run-time patched at the labelled patch points.
	 */

	/*
	 * Load the context and the virtual page number from the tag access
	 * register.  We ignore the context.
	 */
	wr	%g0, ASI_IMMU, %asi
	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g5

	/*
	 * Compute the address of the TTE.  The TSB mask and address of the
	 * TSB are patched at startup.
	 */
	.globl	tl1_immu_miss_patch_tsb_1
tl1_immu_miss_patch_tsb_1:
	sethi	%uhi(TSB_KERNEL), %g6
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_immu_miss_patch_tsb_mask_1
tl1_immu_miss_patch_tsb_mask_1:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6

	srlx	%g5, TAR_VPN_SHIFT, %g5
	and	%g5, %g6, %g6
	sllx	%g6, TTE_SHIFT, %g6
	add	%g6, %g7, %g6

	/*
	 * Load the TTE.
	 */
	.globl	tl1_immu_miss_patch_quad_ldd_1
tl1_immu_miss_patch_quad_ldd_1:
	ldda	[%g6] TSB_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and executable and that the virtual page
	 * numbers match.
	 */
	brgez,pn %g7, tl1_immu_miss_trap
	 andcc	%g7, TD_EXEC, %g0
	bz,pn	%xcc, tl1_immu_miss_trap
	 srlx	%g6, TV_SIZE_BITS, %g6
	cmp	%g5, %g6
	bne,pn	%xcc, tl1_immu_miss_trap
	 EMPTY

	/*
	 * Set the reference bit if it's currently clear.
	 */
	 andcc	%g7, TD_REF, %g0
	bz,a,pn	%xcc, tl1_immu_miss_set_ref
	 nop

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g7, [%g0] ASI_ITLB_DATA_IN_REG
	retry
	.align	128
	.endm
139788644Sjake
ENTRY(tl1_immu_miss_set_ref)
	/*
	 * Out-of-line continuation of the kernel instruction MMU miss
	 * handler, taken when the matched TTE had its reference bit clear.
	 * On entry %g5 holds the virtual page number (already shifted by
	 * TAR_VPN_SHIFT).
	 */

	/*
	 * Recompute the TTE address, which we clobbered loading the TTE.
	 * The TSB mask and address of the TSB are patched at startup.
	 */
	.globl	tl1_immu_miss_patch_tsb_2
tl1_immu_miss_patch_tsb_2:
	sethi	%uhi(TSB_KERNEL), %g6
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_immu_miss_patch_tsb_mask_2
tl1_immu_miss_patch_tsb_mask_2:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6

	and	%g5, %g6, %g5
	sllx	%g5, TTE_SHIFT, %g5
	add	%g5, %g7, %g5

	/*
	 * Set the reference bit.
	 */
	.globl	tl1_immu_miss_patch_asi_1
tl1_immu_miss_patch_asi_1:
	wr	%g0, TSB_ASI, %asi
	TTE_SET_REF(%g5, %g6, %g7, a, %asi)

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g6, 1f
	 nop

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g6, [%g0] ASI_ITLB_DATA_IN_REG
1:	retry
END(tl1_immu_miss_set_ref)
143985585Sjake
ENTRY(tl1_immu_miss_trap)
	/*
	 * The page was not found in the kernel TSB; hand the instruction
	 * miss to C code with the tag access register contents in %o3.
	 */

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	ba	%xcc, tl1_trap
	 mov	T_INSTRUCTION_MISS | T_KERNEL, %o0
END(tl1_immu_miss_trap)
145591224Sjake
	.macro	tl1_dmmu_miss
	/*
	 * Kernel data MMU miss handler: user addresses are redirected to
	 * the TL 0 path, direct-mapped physical addresses are handled by
	 * tl1_dmmu_miss_direct, and everything else is looked up in the
	 * kernel TSB.  The TSB address, mask and access ASI are run-time
	 * patched at the labelled patch points.
	 */

	/*
	 * Load the context and the virtual page number from the tag access
	 * register.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g5

	/*
	 * Extract the context from the contents of the tag access register.
	 * If it's non-zero this is a fault on a user address.  Note that the
	 * faulting address is passed in %g1.
	 */
	sllx	%g5, 64 - TAR_VPN_SHIFT, %g6
	brnz,a,pn %g6, tl1_dmmu_miss_user
	 mov	%g5, %g1

	/*
	 * Check for the direct mapped physical region.  These addresses have
	 * the high bit set so they are negative.
	 */
	brlz,pn %g5, tl1_dmmu_miss_direct
	 EMPTY

	/*
	 * Compute the address of the TTE.  The TSB mask and address of the
	 * TSB are patched at startup.
	 */
	.globl	tl1_dmmu_miss_patch_tsb_1
tl1_dmmu_miss_patch_tsb_1:
	sethi	%uhi(TSB_KERNEL), %g6
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_dmmu_miss_patch_tsb_mask_1
tl1_dmmu_miss_patch_tsb_mask_1:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6

	srlx	%g5, TAR_VPN_SHIFT, %g5
	and	%g5, %g6, %g6
	sllx	%g6, TTE_SHIFT, %g6
	add	%g6, %g7, %g6

	/*
	 * Load the TTE.
	 */
	.globl	tl1_dmmu_miss_patch_quad_ldd_1
tl1_dmmu_miss_patch_quad_ldd_1:
	ldda	[%g6] TSB_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and that the virtual page numbers match.
	 */
	brgez,pn %g7, tl1_dmmu_miss_trap
	 srlx	%g6, TV_SIZE_BITS, %g6
	cmp	%g5, %g6
	bne,pn %xcc, tl1_dmmu_miss_trap
	 EMPTY

	/*
	 * Set the reference bit if it's currently clear.
	 */
	 andcc	%g7, TD_REF, %g0
	bz,a,pt	%xcc, tl1_dmmu_miss_set_ref
	 nop

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g7, [%g0] ASI_DTLB_DATA_IN_REG
	retry
	.align	128
	.endm
153188644Sjake
ENTRY(tl1_dmmu_miss_set_ref)
	/*
	 * Out-of-line continuation of the kernel data MMU miss handler,
	 * taken when the matched TTE had its reference bit clear.  On entry
	 * %g5 holds the virtual page number (already shifted by
	 * TAR_VPN_SHIFT).
	 */

	/*
	 * Recompute the TTE address, which we clobbered loading the TTE.
	 * The TSB mask and address of the TSB are patched at startup.
	 * Each .globl must directly precede its own patch label, matching
	 * every other patch site in this file (the two directives were
	 * previously cross-paired with the wrong labels).
	 */
	.globl	tl1_dmmu_miss_patch_tsb_2
tl1_dmmu_miss_patch_tsb_2:
	sethi	%uhi(TSB_KERNEL), %g6
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_dmmu_miss_patch_tsb_mask_2
tl1_dmmu_miss_patch_tsb_mask_2:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6

	and	%g5, %g6, %g5
	sllx	%g5, TTE_SHIFT, %g5
	add	%g5, %g7, %g5

	/*
	 * Set the reference bit.
	 */
	.globl	tl1_dmmu_miss_patch_asi_1
tl1_dmmu_miss_patch_asi_1:
	wr	%g0, TSB_ASI, %asi
	TTE_SET_REF(%g5, %g6, %g7, a, %asi)

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g6, 1f
	 nop

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g6, [%g0] ASI_DTLB_DATA_IN_REG
1:	retry
END(tl1_dmmu_miss_set_ref)
157380709Sjake
ENTRY(tl1_dmmu_miss_trap)
	/*
	 * The page was not found in the kernel TSB; check for kernel stack
	 * overflow and hand the miss to C code with the tag access register
	 * contents in %o3.
	 */

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2

	KSTACK_CHECK

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	ba	%xcc, tl1_trap
	 mov	T_DATA_MISS | T_KERNEL, %o0
END(tl1_dmmu_miss_trap)
159180709Sjake
ENTRY(tl1_dmmu_miss_direct)
	/*
	 * Handle a kernel data MMU miss on the direct-mapped physical
	 * region: synthesize a TTE from the virtual address instead of
	 * consulting the TSB.  On entry %g5 holds the faulting virtual
	 * address (negative, i.e. in the upper va hole).
	 */

	/*
	 * Mask off the high bits of the virtual address to get the physical
	 * address, and or in the TTE bits.  The virtual address bits that
	 * correspond to the TTE valid and page size bits are left set, so
	 * they don't have to be included in the TTE bits below.  We know they
	 * are set because the virtual address is in the upper va hole.
	 * NB: if we are taking advantage of the ASI_ATOMIC_QUAD_LDD_PHYS
	 * and we get a miss on the directly accessed kernel TSB we must not
	 * set TD_CV in order to access it uniformly bypassing the D$.
	 */
	setx	TLB_DIRECT_ADDRESS_MASK, %g7, %g4
	and	%g5, %g4, %g4
	setx	TLB_DIRECT_TO_TTE_MASK, %g7, %g6
	and	%g5, %g6, %g5
	/*
	 * Build the 64-bit TSB_KERNEL_PHYS bound in %g7: upper 32 bits are
	 * assembled in %g3, the low sethi goes into %g7 and is then OR'd
	 * with %g3.  (The low sethi previously targeted %g3, which
	 * clobbered the shifted upper half and left %g7 uninitialized for
	 * the or/cmp below; the TSB_KERNEL_PHYS_END sequence further down
	 * shows the intended register usage.)
	 */
	.globl	tl1_dmmu_miss_direct_patch_tsb_phys_1
tl1_dmmu_miss_direct_patch_tsb_phys_1:
	sethi	%uhi(TSB_KERNEL_PHYS), %g3
	or	%g3, %ulo(TSB_KERNEL_PHYS), %g3
	sllx	%g3, 32, %g3
	sethi	%hi(TSB_KERNEL_PHYS), %g7
	or	%g7, %g3, %g7
	cmp	%g4, %g7
	bl,pt	%xcc, 1f
	 or	%g5, TD_CP | TD_W, %g5
	.globl	tl1_dmmu_miss_direct_patch_tsb_phys_end_1
tl1_dmmu_miss_direct_patch_tsb_phys_end_1:
	sethi	%uhi(TSB_KERNEL_PHYS_END), %g3
	or	%g3, %ulo(TSB_KERNEL_PHYS_END), %g3
	sllx	%g3, 32, %g3
	sethi	%hi(TSB_KERNEL_PHYS_END), %g7
	or	%g7, %g3, %g7
	cmp	%g4, %g7
	bg,a,pt	%xcc, 1f
	 nop
	ba,pt	%xcc, 2f
	 nop
1:	or	%g5, TD_CV, %g5

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
2:	stxa	%g5, [%g0] ASI_DTLB_DATA_IN_REG
	retry
END(tl1_dmmu_miss_direct)
1637100771Sjake
	.macro	tl1_dmmu_prot
	/*
	 * Trap table slot for kernel data protection faults; the handler
	 * does not fit in the 128-byte vector, so branch out of line.
	 */
	ba,a	%xcc, tl1_dmmu_prot_1
	 nop
	.align	128
	.endm
1643102040Sjake
ENTRY(tl1_dmmu_prot_1)
	/*
	 * Kernel data protection fault handler: user addresses are
	 * redirected to the TL 0 path; kernel addresses are looked up in
	 * the kernel TSB and, if a valid writable (TD_SW) mapping exists,
	 * its hardware write bit is set and the TLB reloaded.  The TSB
	 * address, mask and access ASI are run-time patched.
	 */

	/*
	 * Load the context and the virtual page number from the tag access
	 * register.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g5

	/*
	 * Extract the context from the contents of the tag access register.
	 * If it's non-zero this is a fault on a user address.  Note that the
	 * faulting address is passed in %g1.
	 */
	sllx	%g5, 64 - TAR_VPN_SHIFT, %g6
	brnz,a,pn %g6, tl1_dmmu_prot_user
	 mov	%g5, %g1

	/*
	 * Compute the address of the TTE.  The TSB mask and address of the
	 * TSB are patched at startup.
	 */
	.globl	tl1_dmmu_prot_patch_tsb_1
tl1_dmmu_prot_patch_tsb_1:
	sethi	%uhi(TSB_KERNEL), %g6
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_dmmu_prot_patch_tsb_mask_1
tl1_dmmu_prot_patch_tsb_mask_1:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6

	srlx	%g5, TAR_VPN_SHIFT, %g5
	and	%g5, %g6, %g6
	sllx	%g6, TTE_SHIFT, %g6
	add	%g6, %g7, %g6

	/*
	 * Load the TTE.
	 */
	.globl	tl1_dmmu_prot_patch_quad_ldd_1
tl1_dmmu_prot_patch_quad_ldd_1:
	ldda	[%g6] TSB_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and writeable and that the virtual page
	 * numbers match.
	 */
	brgez,pn %g7, tl1_dmmu_prot_trap
	 andcc	%g7, TD_SW, %g0
	bz,pn	%xcc, tl1_dmmu_prot_trap
	 srlx	%g6, TV_SIZE_BITS, %g6
	cmp	%g5, %g6
	bne,pn	%xcc, tl1_dmmu_prot_trap
	 EMPTY

	/*
	 * Delete the old TLB entry and clear the SFSR.
	 */
	 sllx	%g5, TAR_VPN_SHIFT, %g6
	or	%g6, TLB_DEMAP_NUCLEUS, %g6
	stxa	%g0, [%g6] ASI_DMMU_DEMAP
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	/*
	 * Recompute the TTE address, which we clobbered loading the TTE.
	 * The TSB mask and address of the TSB are patched at startup.
	 */
	.globl	tl1_dmmu_prot_patch_tsb_2
tl1_dmmu_prot_patch_tsb_2:
	sethi	%uhi(TSB_KERNEL), %g6
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_dmmu_prot_patch_tsb_mask_2
tl1_dmmu_prot_patch_tsb_mask_2:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6
	and	%g5, %g6, %g5
	sllx	%g5, TTE_SHIFT, %g5
	add	%g5, %g7, %g5

	/*
	 * Set the hardware write bit.
	 */
	.globl	tl1_dmmu_prot_patch_asi_1
tl1_dmmu_prot_patch_asi_1:
	wr	%g0, TSB_ASI, %asi
	TTE_SET_W(%g5, %g6, %g7, a, %asi)

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g6, 1f
	 or	%g6, TD_W, %g6

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g6, [%g0] ASI_DTLB_DATA_IN_REG
1:	retry
END(tl1_dmmu_prot_1)
174988644Sjake
	/*
	 * Slow path for a kernel (TL1) data protection fault: gather the
	 * MMU fault state and hand off to the common kernel trap handler.
	 */
175088644SjakeENTRY(tl1_dmmu_prot_trap)
175181180Sjake	/*
175291224Sjake	 * Switch to alternate globals.
175391224Sjake	 */
175491224Sjake	wrpr	%g0, PSTATE_ALT, %pstate
175591224Sjake
175691224Sjake	/*
1757181701Smarius	 * Load the SFAR, SFSR and TAR.  Clear the SFSR.
175881180Sjake	 */
175988644Sjake	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
176088644Sjake	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
176188644Sjake	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
176281180Sjake	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
176381180Sjake	membar	#Sync
176481180Sjake
	/*
	 * Split the windows and build the argument registers for tl1_trap:
	 * %o0 = trap type, %o1 = 0 (level), %o2 = handler (trap),
	 * %o3 = TAR, %o4 = SFAR, %o5 = SFSR.
	 */
176591246Sjake	tl1_split
1766103921Sjake	clr	%o1
1767103921Sjake	set	trap, %o2
176888644Sjake	mov	%g2, %o3
176988644Sjake	mov	%g3, %o4
177088644Sjake	mov	%g4, %o5
1771116589Sjake	ba	%xcc, tl1_trap
177288644Sjake	 mov	T_DATA_PROTECTION | T_KERNEL, %o0	! delay slot
177388644SjakeEND(tl1_dmmu_prot_trap)
177481180Sjake
	/*
	 * TL1 spill of a 64-bit kernel window: store the window to the
	 * kernel stack with 64-bit stores at the biased stack pointer.
	 * The two RSF_FATAL entries occupy the fault-handler slots of the
	 * vector; a fault while spilling a kernel window is fatal.
	 */
177580709Sjake	.macro	tl1_spill_0_n
177682906Sjake	SPILL(stx, %sp + SPOFF, 8, EMPTY)
177780709Sjake	saved
177880709Sjake	retry
177982906Sjake	.align	32
178082906Sjake	RSF_FATAL(T_SPILL)
178182906Sjake	RSF_FATAL(T_SPILL)
178280709Sjake	.endm
178380709Sjake
	/*
	 * TL1 spill of a 64-bit user window: the stores go through
	 * ASI_AIUP (as if user primary) so they hit the user's address
	 * space.  If the spill faults, the RSF_SPILL_TOPCB slots redirect
	 * the window into the pcb instead.
	 */
178491246Sjake	.macro	tl1_spill_2_n
178591246Sjake	wr	%g0, ASI_AIUP, %asi
178691246Sjake	SPILL(stxa, %sp + SPOFF, 8, %asi)
178782906Sjake	saved
178882906Sjake	retry
178982906Sjake	.align	32
179082906Sjake	RSF_SPILL_TOPCB
179182906Sjake	RSF_SPILL_TOPCB
179281380Sjake	.endm
179381380Sjake
	/*
	 * TL1 spill of a 32-bit user window: 32-bit stores via ASI_AIUP,
	 * no stack bias (SPOFF) and 4-byte slots.  Faults fall back to
	 * saving the window in the pcb (RSF_SPILL_TOPCB).
	 */
179491246Sjake	.macro	tl1_spill_3_n
179591246Sjake	wr	%g0, ASI_AIUP, %asi
179692200Sjake	SPILL(stwa, %sp, 4, %asi)
179782906Sjake	saved
179882906Sjake	retry
179982906Sjake	.align	32
180082906Sjake	RSF_SPILL_TOPCB
180182906Sjake	RSF_SPILL_TOPCB
180282906Sjake	.endm
180382906Sjake
	/*
	 * TL1 spill for mixed-mode kernel windows: if the low bit of %sp
	 * is set the frame uses the 64-bit stack bias, so branch to the
	 * 64-bit handler; otherwise truncate %sp to 32 bits and spill a
	 * 32-bit frame.  A fault here is fatal (kernel stack).
	 */
1804205409Smarius	.macro	tl1_spill_7_n
1805205409Smarius	btst	1, %sp
1806205409Smarius	bnz,a,pn %xcc, tl1_spill_0_n
1807205409Smarius	 nop
1808205409Smarius	srl	%sp, 0, %sp			! zero-extend %sp to 32 bits
1809205409Smarius	SPILL(stw, %sp, 4, EMPTY)
1810205409Smarius	saved
1811205409Smarius	retry
1812205409Smarius	.align	32
1813205409Smarius	RSF_FATAL(T_SPILL)
1814205409Smarius	RSF_FATAL(T_SPILL)
1815205409Smarius	.endm
1816205409Smarius
	/*
	 * TL1 spill of a 64-bit user window from the "other" window set
	 * (otherwin): stores through ASI_AIUP; on fault the window is
	 * saved to the pcb via RSF_SPILL_TOPCB.
	 */
181791246Sjake	.macro	tl1_spill_0_o
181882906Sjake	wr	%g0, ASI_AIUP, %asi
181982906Sjake	SPILL(stxa, %sp + SPOFF, 8, %asi)
182082906Sjake	saved
182182906Sjake	retry
182282906Sjake	.align	32
182382906Sjake	RSF_SPILL_TOPCB
182482906Sjake	RSF_SPILL_TOPCB
182582906Sjake	.endm
182682906Sjake
	/*
	 * TL1 spill of a 32-bit user window from the "other" window set:
	 * 32-bit stores via ASI_AIUP, unbiased %sp, pcb fallback on fault.
	 */
182782906Sjake	.macro	tl1_spill_1_o
182891246Sjake	wr	%g0, ASI_AIUP, %asi
182982906Sjake	SPILL(stwa, %sp, 4, %asi)
183082005Sjake	saved
183182005Sjake	retry
183282906Sjake	.align	32
183382906Sjake	RSF_SPILL_TOPCB
183482906Sjake	RSF_SPILL_TOPCB
183582906Sjake	.endm
183682005Sjake
	/*
	 * TL1 "other" spill that goes straight to the pcb: no attempt is
	 * made to write the user stack, the window is saved via
	 * RSF_SPILL_TOPCB unconditionally.
	 */
183782906Sjake	.macro	tl1_spill_2_o
183882906Sjake	RSF_SPILL_TOPCB
183991246Sjake	.align	128
184080709Sjake	.endm
184180709Sjake
	/*
	 * TL1 fill of a 64-bit kernel window from the kernel stack.
	 * A fault while filling a kernel window is fatal.
	 */
184280709Sjake	.macro	tl1_fill_0_n
184382906Sjake	FILL(ldx, %sp + SPOFF, 8, EMPTY)
184480709Sjake	restored
184580709Sjake	retry
184682906Sjake	.align	32
184782906Sjake	RSF_FATAL(T_FILL)
184882906Sjake	RSF_FATAL(T_FILL)
184980709Sjake	.endm
185080709Sjake
	/*
	 * TL1 fill of a 64-bit user window: loads through ASI_AIUP from
	 * the user stack.  On fault, RSF_FILL_MAGIC takes over.
	 */
185191246Sjake	.macro	tl1_fill_2_n
185282906Sjake	wr	%g0, ASI_AIUP, %asi
185382906Sjake	FILL(ldxa, %sp + SPOFF, 8, %asi)
185482906Sjake	restored
185582906Sjake	retry
185682906Sjake	.align 32
185782906Sjake	RSF_FILL_MAGIC
185891246Sjake	RSF_FILL_MAGIC
185982906Sjake	.endm
186082906Sjake
	/*
	 * TL1 fill of a 32-bit user window: 32-bit zero-extending loads
	 * via ASI_AIUP, unbiased %sp.  On fault, RSF_FILL_MAGIC takes over.
	 */
186191246Sjake	.macro	tl1_fill_3_n
186282906Sjake	wr	%g0, ASI_AIUP, %asi
186382906Sjake	FILL(lduwa, %sp, 4, %asi)
186482906Sjake	restored
186582906Sjake	retry
186682906Sjake	.align 32
186782906Sjake	RSF_FILL_MAGIC
186891246Sjake	RSF_FILL_MAGIC
186982906Sjake	.endm
187082906Sjake
	/*
	 * TL1 fill for mixed-mode kernel windows: mirror of tl1_spill_7_n.
	 * A set low bit in %sp means a 64-bit biased frame, handled by
	 * tl1_fill_0_n; otherwise truncate %sp and fill a 32-bit frame.
	 * Faults are fatal.
	 */
1871205409Smarius	.macro	tl1_fill_7_n
1872205409Smarius	btst	1, %sp
1873205409Smarius	bnz,a,pt %xcc, tl1_fill_0_n
1874205409Smarius	 nop
1875205409Smarius	srl	%sp, 0, %sp			! zero-extend %sp to 32 bits
1876205409Smarius	FILL(lduw, %sp, 4, EMPTY)
1877205409Smarius	restored
1878205409Smarius	retry
1879205409Smarius	.align	32
1880205409Smarius	RSF_FATAL(T_FILL)
1881205409Smarius	RSF_FATAL(T_FILL)
1882205409Smarius	.endm
1883205409Smarius
188482005Sjake/*
188582906Sjake * This is used to spill windows that are still occupied with user
188682906Sjake * data on kernel entry to the pcb.
188782005Sjake */
188882906SjakeENTRY(tl1_spill_topcb)
188982906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
189082906Sjake
189182005Sjake	/* Free some globals for our use. */
189288644Sjake	dec	24, ASP_REG
189388644Sjake	stx	%g1, [ASP_REG + 0]
189488644Sjake	stx	%g2, [ASP_REG + 8]
189588644Sjake	stx	%g3, [ASP_REG + 16]
189682906Sjake
	/* %g1 = number of windows already saved in the pcb. */
189788644Sjake	ldx	[PCB_REG + PCB_NSAVED], %g1
189882906Sjake
	/* Record the user %sp this window belongs to: pcb_rwsp[nsaved] = %sp. */
189988644Sjake	sllx	%g1, PTR_SHIFT, %g2
190088644Sjake	add	%g2, PCB_REG, %g2
190188644Sjake	stx	%sp, [%g2 + PCB_RWSP]
190282906Sjake
	/* Spill the window registers into the pcb save area pcb_rw[nsaved]. */
190388644Sjake	sllx	%g1, RW_SHIFT, %g2
190488644Sjake	add	%g2, PCB_REG, %g2
190588644Sjake	SPILL(stx, %g2 + PCB_RW, 8, EMPTY)
190682906Sjake
	/* Account for the newly saved window. */
190788644Sjake	inc	%g1
190888644Sjake	stx	%g1, [PCB_REG + PCB_NSAVED]
190982906Sjake
191085243Sjake#if KTR_COMPILE & KTR_TRAP
191188785Sjake	CATR(KTR_TRAP, "tl1_spill_topcb: pc=%#lx npc=%#lx sp=%#lx nsaved=%d"
191282906Sjake	   , %g1, %g2, %g3, 7, 8, 9)
191382906Sjake	rdpr	%tpc, %g2
191482906Sjake	stx	%g2, [%g1 + KTR_PARM1]
191588785Sjake	rdpr	%tnpc, %g2
191688785Sjake	stx	%g2, [%g1 + KTR_PARM2]
191788785Sjake	stx	%sp, [%g1 + KTR_PARM3]
191888644Sjake	ldx	[PCB_REG + PCB_NSAVED], %g2
191988785Sjake	stx	%g2, [%g1 + KTR_PARM4]
192082906Sjake9:
192182906Sjake#endif
192282906Sjake
192382906Sjake	saved
192482906Sjake
	/* Restore the globals we borrowed and return the scratch space. */
192588644Sjake	ldx	[ASP_REG + 16], %g3
192688644Sjake	ldx	[ASP_REG + 8], %g2
192788644Sjake	ldx	[ASP_REG + 0], %g1
192888644Sjake	inc	24, ASP_REG
192982005Sjake	retry
193082906SjakeEND(tl1_spill_topcb)
193182005Sjake
	/*
	 * Fill \count unused TL1 spill vector slots (128 bytes each) with
	 * a software-initiated reset; reaching one indicates a serious bug.
	 */
193282906Sjake	.macro	tl1_spill_bad	count
193382906Sjake	.rept	\count
193488644Sjake	sir
193588644Sjake	.align	128
193682906Sjake	.endr
193782906Sjake	.endm
193882906Sjake
	/*
	 * Fill \count unused TL1 fill vector slots (128 bytes each) with
	 * a software-initiated reset; reaching one indicates a serious bug.
	 */
193980709Sjake	.macro	tl1_fill_bad	count
194080709Sjake	.rept	\count
194188644Sjake	sir
194288644Sjake	.align	128
194380709Sjake	.endr
194480709Sjake	.endm
194580709Sjake
	/*
	 * Emit \count generic TL1 software trap vectors, each delivering
	 * T_SOFT | T_KERNEL through the common tl1_gen path.
	 */
194680709Sjake	.macro	tl1_soft	count
194782906Sjake	.rept	\count
194882906Sjake	tl1_gen	T_SOFT | T_KERNEL
194982906Sjake	.endr
195080709Sjake	.endm
195180709Sjake
/*
 * The trap table itself.  It is aligned to 32KB as required by the
 * architecture; tl0_base holds the TL0 (from-usermode) vectors for trap
 * types 0x0-0x1ff and tl1_base the TL1 (from-kernel) vectors at table
 * offsets 0x200-0x3ff.  The trailing "! 0x.." comments give the trap
 * type(s) each entry covers.  tl_trap_begin/tl_trap_end bracket the
 * whole table.
 */
195280709Sjake	.sect	.trap
1953155839Smarius	.globl	tl_trap_begin
1954155839Smariustl_trap_begin:
1955155839Smarius	nop
1956155839Smarius
195780709Sjake	.align	0x8000
195880709Sjake	.globl	tl0_base
195980709Sjake
196080709Sjaketl0_base:
196188779Sjake	tl0_reserved	8				! 0x0-0x7
196280709Sjaketl0_insn_excptn:
196388779Sjake	tl0_insn_excptn					! 0x8
196488779Sjake	tl0_reserved	1				! 0x9
196580709Sjaketl0_insn_error:
196688779Sjake	tl0_gen		T_INSTRUCTION_ERROR		! 0xa
196788779Sjake	tl0_reserved	5				! 0xb-0xf
196880709Sjaketl0_insn_illegal:
196988779Sjake	tl0_gen		T_ILLEGAL_INSTRUCTION		! 0x10
197080709Sjaketl0_priv_opcode:
197188779Sjake	tl0_gen		T_PRIVILEGED_OPCODE		! 0x11
197288779Sjake	tl0_reserved	14				! 0x12-0x1f
197380709Sjaketl0_fp_disabled:
197488779Sjake	tl0_gen		T_FP_DISABLED			! 0x20
197580709Sjaketl0_fp_ieee:
197688779Sjake	tl0_gen		T_FP_EXCEPTION_IEEE_754		! 0x21
197780709Sjaketl0_fp_other:
197888779Sjake	tl0_gen		T_FP_EXCEPTION_OTHER		! 0x22
197980709Sjaketl0_tag_ovflw:
1980154419Skris	tl0_gen		T_TAG_OVERFLOW			! 0x23
198180709Sjaketl0_clean_window:
198288779Sjake	clean_window					! 0x24
198380709Sjaketl0_divide:
198488779Sjake	tl0_gen		T_DIVISION_BY_ZERO		! 0x28
198588779Sjake	tl0_reserved	7				! 0x29-0x2f
198680709Sjaketl0_data_excptn:
198788779Sjake	tl0_data_excptn					! 0x30
198888779Sjake	tl0_reserved	1				! 0x31
198980709Sjaketl0_data_error:
199088779Sjake	tl0_gen		T_DATA_ERROR			! 0x32
199188779Sjake	tl0_reserved	1				! 0x33
199280709Sjaketl0_align:
199388779Sjake	tl0_align					! 0x34
199480709Sjaketl0_align_lddf:
199588779Sjake	tl0_gen		T_RESERVED			! 0x35
199680709Sjaketl0_align_stdf:
199788779Sjake	tl0_gen		T_RESERVED			! 0x36
199880709Sjaketl0_priv_action:
199988779Sjake	tl0_gen		T_PRIVILEGED_ACTION		! 0x37
200088779Sjake	tl0_reserved	9				! 0x38-0x40
200180709Sjaketl0_intr_level:
200288779Sjake	tl0_intr_level					! 0x41-0x4f
200388779Sjake	tl0_reserved	16				! 0x50-0x5f
200480709Sjaketl0_intr_vector:
200597265Sjake	intr_vector					! 0x60
200680709Sjaketl0_watch_phys:
200788779Sjake	tl0_gen		T_PA_WATCHPOINT			! 0x61
200880709Sjaketl0_watch_virt:
200988779Sjake	tl0_gen		T_VA_WATCHPOINT			! 0x62
201080709Sjaketl0_ecc:
201188779Sjake	tl0_gen		T_CORRECTED_ECC_ERROR		! 0x63
201280709Sjaketl0_immu_miss:
201388779Sjake	tl0_immu_miss					! 0x64
201480709Sjaketl0_dmmu_miss:
201588779Sjake	tl0_dmmu_miss					! 0x68
201680709Sjaketl0_dmmu_prot:
201788779Sjake	tl0_dmmu_prot					! 0x6c
201888779Sjake	tl0_reserved	16				! 0x70-0x7f
201980709Sjaketl0_spill_0_n:
202088779Sjake	tl0_spill_0_n					! 0x80
202182906Sjaketl0_spill_1_n:
202288779Sjake	tl0_spill_1_n					! 0x84
202391246Sjake	tl0_spill_bad	14				! 0x88-0xbf
202480709Sjaketl0_fill_0_n:
202588779Sjake	tl0_fill_0_n					! 0xc0
202682906Sjaketl0_fill_1_n:
202788779Sjake	tl0_fill_1_n					! 0xc4
202891246Sjake	tl0_fill_bad	14				! 0xc8-0xff
202988644Sjaketl0_soft:
	/* Software traps (tl0): syscall vectors and trap instructions. */
2030106050Sjake	tl0_gen		T_SYSCALL			! 0x100
203188779Sjake	tl0_gen		T_BREAKPOINT			! 0x101
203288779Sjake	tl0_gen		T_DIVISION_BY_ZERO		! 0x102
203388779Sjake	tl0_reserved	1				! 0x103
203488779Sjake	tl0_gen		T_CLEAN_WINDOW			! 0x104
203588779Sjake	tl0_gen		T_RANGE_CHECK			! 0x105
203688779Sjake	tl0_gen		T_FIX_ALIGNMENT			! 0x106
203788779Sjake	tl0_gen		T_INTEGER_OVERFLOW		! 0x107
2038106050Sjake	tl0_gen		T_SYSCALL			! 0x108
2039106050Sjake	tl0_gen		T_SYSCALL			! 0x109
204088779Sjake	tl0_fp_restore					! 0x10a
204188779Sjake	tl0_reserved	5				! 0x10b-0x10f
204288779Sjake	tl0_gen		T_TRAP_INSTRUCTION_16		! 0x110
204388779Sjake	tl0_gen		T_TRAP_INSTRUCTION_17		! 0x111
204488779Sjake	tl0_gen		T_TRAP_INSTRUCTION_18		! 0x112
204588779Sjake	tl0_gen		T_TRAP_INSTRUCTION_19		! 0x113
204688779Sjake	tl0_gen		T_TRAP_INSTRUCTION_20		! 0x114
204788779Sjake	tl0_gen		T_TRAP_INSTRUCTION_21		! 0x115
204888779Sjake	tl0_gen		T_TRAP_INSTRUCTION_22		! 0x116
204988779Sjake	tl0_gen		T_TRAP_INSTRUCTION_23		! 0x117
205088779Sjake	tl0_gen		T_TRAP_INSTRUCTION_24		! 0x118
205188779Sjake	tl0_gen		T_TRAP_INSTRUCTION_25		! 0x119
205288779Sjake	tl0_gen		T_TRAP_INSTRUCTION_26		! 0x11a
205388779Sjake	tl0_gen		T_TRAP_INSTRUCTION_27		! 0x11b
205488779Sjake	tl0_gen		T_TRAP_INSTRUCTION_28		! 0x11c
205588779Sjake	tl0_gen		T_TRAP_INSTRUCTION_29		! 0x11d
205688779Sjake	tl0_gen		T_TRAP_INSTRUCTION_30		! 0x11e
205788779Sjake	tl0_gen		T_TRAP_INSTRUCTION_31		! 0x11f
2058106050Sjake	tl0_reserved	32				! 0x120-0x13f
2059106050Sjake	tl0_gen		T_SYSCALL			! 0x140
2060106050Sjake	tl0_syscall					! 0x141
2061106050Sjake	tl0_gen		T_SYSCALL			! 0x142
2062106050Sjake	tl0_gen		T_SYSCALL			! 0x143
2063106050Sjake	tl0_reserved	188				! 0x144-0x1ff
206480709Sjake
	/* TL1 (from-kernel) half of the table: table offsets 0x200-0x3ff. */
206580709Sjaketl1_base:
206688779Sjake	tl1_reserved	8				! 0x200-0x207
206780709Sjaketl1_insn_excptn:
206888779Sjake	tl1_insn_excptn					! 0x208
206988779Sjake	tl1_reserved	1				! 0x209
207080709Sjaketl1_insn_error:
207188779Sjake	tl1_gen		T_INSTRUCTION_ERROR		! 0x20a
207288779Sjake	tl1_reserved	5				! 0x20b-0x20f
207380709Sjaketl1_insn_illegal:
207488779Sjake	tl1_gen		T_ILLEGAL_INSTRUCTION		! 0x210
207580709Sjaketl1_priv_opcode:
207688779Sjake	tl1_gen		T_PRIVILEGED_OPCODE		! 0x211
207788779Sjake	tl1_reserved	14				! 0x212-0x21f
207880709Sjaketl1_fp_disabled:
2079113024Sjake	tl1_fp_disabled					! 0x220
208080709Sjaketl1_fp_ieee:
208188779Sjake	tl1_gen		T_FP_EXCEPTION_IEEE_754		! 0x221
208280709Sjaketl1_fp_other:
208388779Sjake	tl1_gen		T_FP_EXCEPTION_OTHER		! 0x222
208480709Sjaketl1_tag_ovflw:
2085154419Skris	tl1_gen		T_TAG_OVERFLOW			! 0x223
208680709Sjaketl1_clean_window:
208788779Sjake	clean_window					! 0x224
208880709Sjaketl1_divide:
208988779Sjake	tl1_gen		T_DIVISION_BY_ZERO		! 0x228
209088779Sjake	tl1_reserved	7				! 0x229-0x22f
209180709Sjaketl1_data_excptn:
209288779Sjake	tl1_data_excptn					! 0x230
209388779Sjake	tl1_reserved	1				! 0x231
209480709Sjaketl1_data_error:
209588779Sjake	tl1_gen		T_DATA_ERROR			! 0x232
209688779Sjake	tl1_reserved	1				! 0x233
209780709Sjaketl1_align:
209888779Sjake	tl1_align					! 0x234
209980709Sjaketl1_align_lddf:
210088779Sjake	tl1_gen		T_RESERVED			! 0x235
210180709Sjaketl1_align_stdf:
210288779Sjake	tl1_gen		T_RESERVED			! 0x236
210380709Sjaketl1_priv_action:
210488779Sjake	tl1_gen		T_PRIVILEGED_ACTION		! 0x237
210588779Sjake	tl1_reserved	9				! 0x238-0x240
210680709Sjaketl1_intr_level:
210788779Sjake	tl1_intr_level					! 0x241-0x24f
210888779Sjake	tl1_reserved	16				! 0x250-0x25f
210980709Sjaketl1_intr_vector:
211097265Sjake	intr_vector					! 0x260
211180709Sjaketl1_watch_phys:
211288779Sjake	tl1_gen		T_PA_WATCHPOINT			! 0x261
211380709Sjaketl1_watch_virt:
211488779Sjake	tl1_gen		T_VA_WATCHPOINT			! 0x262
211580709Sjaketl1_ecc:
211688779Sjake	tl1_gen		T_CORRECTED_ECC_ERROR		! 0x263
211780709Sjaketl1_immu_miss:
211888779Sjake	tl1_immu_miss					! 0x264
211980709Sjaketl1_dmmu_miss:
212088779Sjake	tl1_dmmu_miss					! 0x268
212180709Sjaketl1_dmmu_prot:
212288779Sjake	tl1_dmmu_prot					! 0x26c
212388779Sjake	tl1_reserved	16				! 0x270-0x27f
212480709Sjaketl1_spill_0_n:
212588779Sjake	tl1_spill_0_n					! 0x280
212691246Sjake	tl1_spill_bad	1				! 0x284
212791246Sjaketl1_spill_2_n:
212891246Sjake	tl1_spill_2_n					! 0x288
212991246Sjaketl1_spill_3_n:
2130205409Smarius	tl1_spill_3_n					! 0x28c
2131205409Smarius	tl1_spill_bad	3				! 0x290-0x29b
2132205409Smariustl1_spill_7_n:
2133205409Smarius	tl1_spill_7_n					! 0x29c
213481380Sjaketl1_spill_0_o:
213588779Sjake	tl1_spill_0_o					! 0x2a0
213682906Sjaketl1_spill_1_o:
213788779Sjake	tl1_spill_1_o					! 0x2a4
213882906Sjaketl1_spill_2_o:
213988779Sjake	tl1_spill_2_o					! 0x2a8
214091246Sjake	tl1_spill_bad	5				! 0x2ac-0x2bf
214180709Sjaketl1_fill_0_n:
214288779Sjake	tl1_fill_0_n					! 0x2c0
214391246Sjake	tl1_fill_bad	1				! 0x2c4
214491246Sjaketl1_fill_2_n:
2145205409Smarius	tl1_fill_2_n					! 0x2c8
214691246Sjaketl1_fill_3_n:
2147205409Smarius	tl1_fill_3_n					! 0x2cc
2148205409Smarius	tl1_fill_bad	3				! 0x2d0-0x2db
2149205409Smariustl1_fill_7_n:
2150205409Smarius	tl1_fill_7_n					! 0x2dc
2151205409Smarius	tl1_fill_bad	8				! 0x2e0-0x2ff
215288779Sjake	tl1_reserved	1				! 0x300
215380709Sjaketl1_breakpoint:
215488779Sjake	tl1_gen		T_BREAKPOINT			! 0x301
215588779Sjake	tl1_gen		T_RSTRWP_PHYS			! 0x302
215688779Sjake	tl1_gen		T_RSTRWP_VIRT			! 0x303
215788779Sjake	tl1_reserved	252				! 0x304-0x3ff
215880709Sjake
2159155839Smarius	.globl	tl_trap_end
2160155839Smariustl_trap_end:
2161155839Smarius	nop
2162155839Smarius
216381380Sjake/*
2164181701Smarius * User trap entry point
216582906Sjake *
2166103897Sjake * void tl0_utrap(u_long type, u_long o1, u_long o2, u_long tar, u_long sfar,
2167181701Smarius *     u_long sfsr)
2168103897Sjake *
2169103897Sjake * This handles redirecting a trap back to usermode as a user trap.  The user
2170103897Sjake * program must have first registered a trap handler with the kernel using
2171103897Sjake * sysarch(SPARC_UTRAP_INSTALL).  The trap handler is passed enough state
2172103897Sjake * for it to return to the trapping code directly, it will not return through
2173103897Sjake * the kernel.  The trap type is passed in %o0, all out registers must be
2174103897Sjake * passed through to tl0_trap or to usermode untouched.  Note that the
2175103897Sjake * parameters passed in out registers may be used by the user trap handler.
2176103897Sjake * Do not change the registers they are passed in or you will break the ABI.
2177103897Sjake *
2178103897Sjake * If the trap type allows user traps, setup state to execute the user trap
2179103897Sjake * handler and bounce back to usermode, otherwise branch to tl0_trap.
2180103897Sjake */
2181103897SjakeENTRY(tl0_utrap)
2182103897Sjake	/*
2183103897Sjake	 * Check if the trap type allows user traps.
2184103897Sjake	 */
2185103897Sjake	cmp	%o0, UT_MAX
2186103897Sjake	bge,a,pt %xcc, tl0_trap
2187103897Sjake	 nop
2188103897Sjake
2189103897Sjake	/*
2190103897Sjake	 * Load the user trap handler from the utrap table.
2191103897Sjake	 */
	/* %l0 = curthread->td_proc->p_md.md_utrap[type]; NULL at any step
	 * means no handler is installed, so take the normal kernel path. */
2192103897Sjake	ldx	[PCPU(CURTHREAD)], %l0
2193103897Sjake	ldx	[%l0 + TD_PROC], %l0
2194103897Sjake	ldx	[%l0 + P_MD + MD_UTRAP], %l0
2195103897Sjake	brz,pt	%l0, tl0_trap
2196103897Sjake	 sllx	%o0, PTR_SHIFT, %l1
2197103897Sjake	ldx	[%l0 + %l1], %l0
2198103897Sjake	brz,a,pt %l0, tl0_trap
2199103897Sjake	 nop
2200103897Sjake
2201103897Sjake	/*
2202103897Sjake	 * If the save we did on entry to the kernel had to spill a window
2203103897Sjake	 * to the pcb, pretend we took a spill trap instead.  Any windows
2204103897Sjake	 * that are in the pcb must be copied out or the fill handler will
2205103897Sjake	 * not be able to find them, since the user trap handler returns
2206103897Sjake	 * directly to the trapping code.  Note that we only support precise
2207103897Sjake	 * user traps, which implies that the condition that caused the trap
2208103897Sjake	 * in the first place is still valid, so it will occur again when we
2209103897Sjake	 * re-execute the trapping instruction.
2210181701Smarius	 */
2211103897Sjake	ldx	[PCB_REG + PCB_NSAVED], %l1
2212103897Sjake	brnz,a,pn %l1, tl0_trap
2213103897Sjake	 mov	T_SPILL, %o0
2214103897Sjake
2215103897Sjake	/*
2216103897Sjake	 * Pass %fsr in %l4, %tstate in %l5, %tpc in %l6 and %tnpc in %l7.
2217103897Sjake	 * The ABI specifies only %l6 and %l7, but we need to pass %fsr or
2218103897Sjake	 * it may be clobbered by an interrupt before the user trap code
2219103897Sjake	 * can read it, and we must pass %tstate in order to restore %ccr
2220103897Sjake	 * and %asi.  The %fsr must be stored to memory, so we use the
2221103897Sjake	 * temporary stack for that.
2222103897Sjake	 */
	/* %fsr can only be stored, not moved to an integer register, so
	 * bounce it through the per-cpu alternate stack (ASP_REG).  FP must
	 * be enabled (FPRS_FEF) for the store; restore the old %fprs after. */
2223103897Sjake	rd	%fprs, %l1
2224103897Sjake	or	%l1, FPRS_FEF, %l2
2225103897Sjake	wr	%l2, 0, %fprs
2226103897Sjake	dec	8, ASP_REG
2227103897Sjake	stx	%fsr, [ASP_REG]
2228103897Sjake	ldx	[ASP_REG], %l4
2229103897Sjake	inc	8, ASP_REG
2230103897Sjake	wr	%l1, 0, %fprs
2231103897Sjake
2232103897Sjake	rdpr	%tstate, %l5
2233103897Sjake	rdpr	%tpc, %l6
2234103897Sjake	rdpr	%tnpc, %l7
2235103897Sjake
2236103897Sjake	/*
2237103897Sjake	 * Setup %tnpc to return to.
2238103897Sjake	 */
2239103897Sjake	wrpr	%l0, 0, %tnpc
2240103897Sjake
2241103897Sjake	/*
2242103897Sjake	 * Setup %wstate for return, clear WSTATE_TRANSITION.
2243103897Sjake	 */
2244103897Sjake	rdpr	%wstate, %l1
2245103897Sjake	and	%l1, WSTATE_NORMAL_MASK, %l1
2246103897Sjake	wrpr	%l1, 0, %wstate
2247103897Sjake
2248103897Sjake	/*
2249103897Sjake	 * Setup %tstate for return, change the saved cwp to point to the
2250103897Sjake	 * current window instead of the window at the time of the trap.
2251103897Sjake	 */
2252103897Sjake	andn	%l5, TSTATE_CWP_MASK, %l1
2253103897Sjake	rdpr	%cwp, %l2
2254103897Sjake	wrpr	%l1, %l2, %tstate
2255103897Sjake
2256103897Sjake	/*
2257103897Sjake	 * Setup %sp.  Userland processes will crash if this is not setup.
2258103897Sjake	 */
2259103897Sjake	sub	%fp, CCFSZ, %sp
2260103897Sjake
2261103897Sjake	/*
2262103897Sjake	 * Execute the user trap handler.
2263103897Sjake	 */
2264103897Sjake	done
2265103897SjakeEND(tl0_utrap)
2266103897Sjake
2267103897Sjake/*
2268181701Smarius * (Real) User trap entry point
2269103897Sjake *
2270181701Smarius * void tl0_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
2271181701Smarius *     u_int sfsr)
227282906Sjake *
227382906Sjake * The following setup has been performed:
227482906Sjake *	- the windows have been split and the active user window has been saved
227582906Sjake *	  (maybe just to the pcb)
227682906Sjake *	- we are on alternate globals and interrupts are disabled
227782906Sjake *
227889050Sjake * We switch to the kernel stack, build a trapframe, switch to normal
227988644Sjake * globals, enable interrupts and call trap.
228082906Sjake *
228182906Sjake * NOTE: We must be very careful setting up the per-cpu pointer.  We know that
228282906Sjake * it has been pre-set in alternate globals, so we read it from there and setup
228382906Sjake * the normal %g7 *before* enabling interrupts.  This avoids any possibility
228487702Sjhb * of cpu migration and using the wrong pcpup.
228581380Sjake */
228682005SjakeENTRY(tl0_trap)
228782906Sjake	/*
228882906Sjake	 * Force kernel store order.
228982906Sjake	 */
229082906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
229180709Sjake
	/*
	 * Snapshot the trap state into locals while still at TL>0:
	 * %l0 = %tstate, %l1 = %tpc, %l2 = %tnpc, %l3 = %y, %l4 = %fprs,
	 * %l5 = %wstate.  These are stored into the trapframe below.
	 */
229281380Sjake	rdpr	%tstate, %l0
229388644Sjake	rdpr	%tpc, %l1
229488644Sjake	rdpr	%tnpc, %l2
229588644Sjake	rd	%y, %l3
229688644Sjake	rd	%fprs, %l4
229788644Sjake	rdpr	%wstate, %l5
229888644Sjake
229988644Sjake#if KTR_COMPILE & KTR_TRAP
230088644Sjake	CATR(KTR_TRAP,
230188644Sjake	    "tl0_trap: td=%p type=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
230288644Sjake	    , %g1, %g2, %g3, 7, 8, 9)
230388644Sjake	ldx	[PCPU(CURTHREAD)], %g2
230488644Sjake	stx	%g2, [%g1 + KTR_PARM1]
230588644Sjake	stx	%o0, [%g1 + KTR_PARM2]
230688644Sjake	rdpr	%pil, %g2
230788644Sjake	stx	%g2, [%g1 + KTR_PARM3]
230888644Sjake	stx	%l1, [%g1 + KTR_PARM4]
230988644Sjake	stx	%l2, [%g1 + KTR_PARM5]
231088644Sjake	stx	%i6, [%g1 + KTR_PARM6]
231188644Sjake9:
231288644Sjake#endif
231388644Sjake
	/*
	 * Set up %wstate for kernel mode and move the user's restorable
	 * windows into the "other" window set (%otherwin = %canrestore,
	 * %canrestore = 0) so that any further spills take the user paths.
	 */
2314103897Sjake1:	and	%l5, WSTATE_NORMAL_MASK, %l5
2315103897Sjake	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
231688644Sjake	wrpr	%l5, WSTATE_KERNEL, %wstate
231788644Sjake	rdpr	%canrestore, %l6
231888644Sjake	wrpr	%l6, 0, %otherwin
231988644Sjake	wrpr	%g0, 0, %canrestore
232088644Sjake
	/* Carve the trapframe out of the kernel stack below the pcb. */
232188644Sjake	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp
232288644Sjake
	/* Trap arguments: %o0 = type, %o1 = level, %o3 = TAR, %o4 = SFAR,
	 * %o5 = SFSR. */
2323105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2324105733Sjake	stx	%o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
232588644Sjake	stx	%o3, [%sp + SPOFF + CCFSZ + TF_TAR]
232688644Sjake	stx	%o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
2327105733Sjake	stx	%o5, [%sp + SPOFF + CCFSZ + TF_SFSR]
232888644Sjake
232981380Sjake	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
233081380Sjake	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
233181380Sjake	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2332105733Sjake	stx	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
2333105733Sjake	stx	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
2334105733Sjake	stx	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]
233581380Sjake
	/* %fsr/%gsr require FP enabled to access; enable around the saves. */
233688644Sjake	wr	%g0, FPRS_FEF, %fprs
233788644Sjake	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
2338108379Sjake	rd	%gsr, %l6
2339105733Sjake	stx	%l6, [%sp + SPOFF + CCFSZ + TF_GSR]
234088644Sjake	wr	%g0, 0, %fprs
234182906Sjake
	/*
	 * Save the normal %g6/%g7 and re-establish PCB_REG/PCPU_REG in
	 * them.  PCPU_REG is carried over from alternate globals *before*
	 * interrupts could migrate us to another cpu.
	 */
234289050Sjake	mov	PCB_REG, %l0
234389050Sjake	mov	PCPU_REG, %l1
234482906Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
234582005Sjake
234682005Sjake	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
234782005Sjake	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]
234882005Sjake
234989050Sjake	mov	%l0, PCB_REG
235089050Sjake	mov	%l1, PCPU_REG
235188644Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
235284186Sjake
	/* The user's outs are our ins after the window switch. */
235384186Sjake	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
235484186Sjake	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
235584186Sjake	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
235684186Sjake	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
235784186Sjake	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
235884186Sjake	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
235984186Sjake	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
236084186Sjake	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
236184186Sjake
2362108377Sjake	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
2363108377Sjake	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
2364108377Sjake	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
2365108377Sjake	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
2366108377Sjake	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
2367108377Sjake
	/*
	 * Tail-call the handler in %o2 with the trapframe as its argument,
	 * faking %o7 so the handler returns straight to tl0_ret.
	 */
2368103921Sjake	set	tl0_ret - 8, %o7
2369103921Sjake	jmpl	%o2, %g0
237084186Sjake	 add	%sp, CCFSZ + SPOFF, %o0
237184186SjakeEND(tl0_trap)
237284186Sjake
237388644Sjake/*
237491246Sjake * void tl0_intr(u_int level, u_int mask)
237591246Sjake */
237684186SjakeENTRY(tl0_intr)
237784186Sjake	/*
237884186Sjake	 * Force kernel store order.
237984186Sjake	 */
238084186Sjake	wrpr	%g0, PSTATE_ALT, %pstate
238184186Sjake
	/* Snapshot the trap state; same layout as in tl0_trap. */
238284186Sjake	rdpr	%tstate, %l0
238388644Sjake	rdpr	%tpc, %l1
238488644Sjake	rdpr	%tnpc, %l2
238588644Sjake	rd	%y, %l3
238688644Sjake	rd	%fprs, %l4
238788644Sjake	rdpr	%wstate, %l5
238888644Sjake
238988644Sjake#if KTR_COMPILE & KTR_INTR
239088644Sjake	CATR(KTR_INTR,
239191246Sjake	    "tl0_intr: td=%p level=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
239288644Sjake	    , %g1, %g2, %g3, 7, 8, 9)
239388644Sjake	ldx	[PCPU(CURTHREAD)], %g2
239488644Sjake	stx	%g2, [%g1 + KTR_PARM1]
239588644Sjake	stx	%o0, [%g1 + KTR_PARM2]
239688644Sjake	rdpr	%pil, %g2
239788644Sjake	stx	%g2, [%g1 + KTR_PARM3]
239888644Sjake	stx	%l1, [%g1 + KTR_PARM4]
239988644Sjake	stx	%l2, [%g1 + KTR_PARM5]
240088644Sjake	stx	%i6, [%g1 + KTR_PARM6]
240188644Sjake9:
240288644Sjake#endif
240388644Sjake
	/* Raise PIL to the interrupt level and ack the soft interrupt
	 * bits passed in %o1. */
240491246Sjake	wrpr	%o0, 0, %pil
2405108379Sjake	wr	%o1, 0, %clear_softint
240691246Sjake
	/* Window bookkeeping, as in tl0_trap: user windows become "other". */
240788644Sjake	and	%l5, WSTATE_NORMAL_MASK, %l5
240888644Sjake	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
240988644Sjake	wrpr	%l5, WSTATE_KERNEL, %wstate
241088644Sjake	rdpr	%canrestore, %l6
241188644Sjake	wrpr	%l6, 0, %otherwin
241288644Sjake	wrpr	%g0, 0, %canrestore
241388644Sjake
241488644Sjake	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp
241588644Sjake
241684186Sjake	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
241784186Sjake	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
241884186Sjake	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2419105733Sjake	stx	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
2420105733Sjake	stx	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
2421105733Sjake	stx	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]
242281380Sjake
	/* %fsr/%gsr need FP enabled to access; enable around the saves. */
242388644Sjake	wr	%g0, FPRS_FEF, %fprs
242488644Sjake	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
2425108379Sjake	rd	%gsr, %l6
2426105733Sjake	stx	%l6, [%sp + SPOFF + CCFSZ + TF_GSR]
242788644Sjake	wr	%g0, 0, %fprs
242884186Sjake
	/* Keep the interrupt level in %l3 for the dispatch/accounting below. */
242991246Sjake	mov	%o0, %l3
243091246Sjake	mov	T_INTERRUPT, %o1
243189050Sjake
2432105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
2433105733Sjake	stx	%o1, [%sp + SPOFF + CCFSZ + TF_TYPE]
243488644Sjake
	/* Carry PCB_REG/PCPU_REG into the normal globals (see tl0_trap). */
243589050Sjake	mov	PCB_REG, %l0
243689050Sjake	mov	PCPU_REG, %l1
243784186Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
243884186Sjake
243984186Sjake	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
244084186Sjake	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
244184186Sjake	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
244284186Sjake	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
244384186Sjake	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
244484186Sjake	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
244584186Sjake	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]
244684186Sjake
244789050Sjake	mov	%l0, PCB_REG
244889050Sjake	mov	%l1, PCPU_REG
244988644Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
245084186Sjake
245184186Sjake	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
245284186Sjake	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
245384186Sjake	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
245484186Sjake	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
245584186Sjake	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
245684186Sjake	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
245784186Sjake	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
245884186Sjake	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
245984186Sjake
	/* Dispatch: call intr_handlers[level] with the trapframe. */
2460157825Smarius	SET(intr_handlers, %l1, %l0)
2461157825Smarius	sllx	%l3, IH_SHIFT, %l1
2462157825Smarius	ldx	[%l0 + %l1], %l1
2463157825Smarius	KASSERT(%l1, "tl0_intr: ih null")
2464157825Smarius	call	%l1
2465157825Smarius	 add	%sp, CCFSZ + SPOFF, %o0
2466157825Smarius
2467117658Sjmg	/* %l3 contains PIL */
	/* Bump intrcnt[pil_countp[PIL]] (64-bit counter)... */
2468117658Sjmg	SET(intrcnt, %l1, %l2)
2469117658Sjmg	prefetcha [%l2] ASI_N, 1
2470117658Sjmg	SET(pil_countp, %l1, %l0)
2471117658Sjmg	sllx	%l3, 1, %l1
2472117658Sjmg	lduh	[%l0 + %l1], %l0
2473117658Sjmg	sllx	%l0, 3, %l0
2474117658Sjmg	add	%l0, %l2, %l0
2475145153Smarius	ldx	[%l0], %l1
2476145153Smarius	inc	%l1
2477145153Smarius	stx	%l1, [%l0]
2478117658Sjmg
	/* ...and the per-cpu cnt.v_intr statistic (32-bit). */
2479145153Smarius	lduw	[PCPU(CNT) + V_INTR], %l0
2480145153Smarius	inc	%l0
2481145153Smarius	stw	%l0, [PCPU(CNT) + V_INTR]
248284186Sjake
	/* Return to usermode through the common exit path. */
2483116589Sjake	ba,a	%xcc, tl0_ret
248484186Sjake	 nop
248584186SjakeEND(tl0_intr)
248684186Sjake
2487105733Sjake/*
2488105733Sjake * Initiate return to usermode.
2489105733Sjake *
2490105733Sjake * Called with a trapframe on the stack.  The window that was setup in
2491105733Sjake * tl0_trap may have been used by "fast" trap handlers that pretend to be
2492105733Sjake * leaf functions, so all ins and locals may have been clobbered since
2493105733Sjake * then.
2494105733Sjake *
2495105733Sjake * This code is rather long and complicated.
2496105733Sjake */
249782005SjakeENTRY(tl0_ret)
249893389Sjake	/*
249993389Sjake	 * Check for pending asts atomically with returning.  We must raise
2500182020Smarius	 * the PIL before checking, and if no asts are found the PIL must
250193389Sjake	 * remain raised until the retry is executed, or we risk missing asts
2502220939Smarius	 * caused by interrupts occurring after the test.  If the PIL is
2503182020Smarius	 * lowered, as it is when we call ast, the check must be re-executed.
250493389Sjake	 */
2505103784Sjake	wrpr	%g0, PIL_TICK, %pil
250684186Sjake	ldx	[PCPU(CURTHREAD)], %l0
2507111032Sjulian	lduw	[%l0 + TD_FLAGS], %l1
2508111032Sjulian	set	TDF_ASTPENDING | TDF_NEEDRESCHED, %l2
2509111032Sjulian	and	%l1, %l2, %l1
2510111032Sjulian	brz,a,pt %l1, 1f
251182906Sjake	 nop
2512105733Sjake
2513105733Sjake	/*
2514182020Smarius	 * We have an AST.  Re-enable interrupts and handle it, then restart
2515105733Sjake	 * the return sequence.
2516105733Sjake	 */
251793389Sjake	wrpr	%g0, 0, %pil
251882906Sjake	call	ast
251982906Sjake	 add	%sp, CCFSZ + SPOFF, %o0
2520103784Sjake	ba,a	%xcc, tl0_ret
252193389Sjake	 nop
252282906Sjake
252393389Sjake	/*
252493389Sjake	 * Check for windows that were spilled to the pcb and need to be
252593389Sjake	 * copied out.  This must be the last thing that is done before the
252693389Sjake	 * return to usermode.  If there are still user windows in the cpu
252793389Sjake	 * and we call a nested function after this, which causes them to be
252893389Sjake	 * spilled to the pcb, they will not be copied out and the stack will
252993389Sjake	 * be inconsistent.
253093389Sjake	 */
2531103784Sjake1:	ldx	[PCB_REG + PCB_NSAVED], %l1
2532103784Sjake	brz,a,pt %l1, 2f
2533103784Sjake	 nop
2534103784Sjake	wrpr	%g0, 0, %pil
253593389Sjake	mov	T_SPILL, %o0
2536105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2537103784Sjake	call	trap
2538103784Sjake	 add	%sp, SPOFF + CCFSZ, %o0
2539103784Sjake	ba,a	%xcc, tl0_ret
2540103784Sjake	 nop
254182906Sjake
2542105733Sjake	/*
2543108377Sjake	 * Restore the out and most global registers from the trapframe.
2544108377Sjake	 * The ins will become the outs when we restore below.
2545105733Sjake	 */
2546103784Sjake2:	ldx	[%sp + SPOFF + CCFSZ + TF_O0], %i0
254782906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O1], %i1
254882906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O2], %i2
254982906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O3], %i3
255082906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O4], %i4
255182906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O5], %i5
255282906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O6], %i6
255382906Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O7], %i7
255481380Sjake
2555108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
2556108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
2557108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
2558108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
2559108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
2560108377Sjake
2561105733Sjake	/*
2562105733Sjake	 * Load everything we need to restore below before disabling
2563105733Sjake	 * interrupts.
2564105733Sjake	 */
2565105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_FPRS], %l0
2566105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_GSR], %l1
256785243Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
2568105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l3
2569105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l4
2570105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l5
2571105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_WSTATE], %l6
257282906Sjake
2573105733Sjake	/*
2574108377Sjake	 * Disable interrupts to restore the special globals.  They are not
2575108377Sjake	 * saved and restored for all kernel traps, so an interrupt at the
2576108377Sjake	 * wrong time would clobber them.
2577105733Sjake	 */
257889050Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
257989050Sjake
258089050Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6
258189050Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G7], %g7
258289050Sjake
2583105733Sjake	/*
2584105733Sjake	 * Switch to alternate globals.  This frees up some registers we
2585105733Sjake	 * can use after the restore changes our window.
2586105733Sjake	 */
258782906Sjake	wrpr	%g0, PSTATE_ALT, %pstate
258882906Sjake
2589105733Sjake	/*
2590105733Sjake	 * Drop %pil to zero.  It must have been zero at the time of the
2591105733Sjake	 * trap, since we were in usermode, but it was raised above in
2592105733Sjake	 * order to check for asts atomically.  We have interrupts disabled
2593105733Sjake	 * so any interrupts will not be serviced until we complete the
2594105733Sjake	 * return to usermode.
2595105733Sjake	 */
259688644Sjake	wrpr	%g0, 0, %pil
2597105733Sjake
2598105733Sjake	/*
2599105733Sjake	 * Save %fprs in an alternate global so it can be restored after the
2600105733Sjake	 * restore instruction below.  If we restore it before the restore,
2601105733Sjake	 * and the restore traps we may run for a while with floating point
2602105733Sjake	 * enabled in the kernel, which we want to avoid.
2603105733Sjake	 */
2604105733Sjake	mov	%l0, %g1
2605105733Sjake
2606105733Sjake	/*
2607105733Sjake	 * Restore %fsr and %gsr.  These need floating point enabled in %fprs,
2608105733Sjake	 * so we set it temporarily and then clear it.
2609105733Sjake	 */
2610105733Sjake	wr	%g0, FPRS_FEF, %fprs
2611105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_FSR], %fsr
2612108379Sjake	wr	%l1, 0, %gsr
2613105733Sjake	wr	%g0, 0, %fprs
2614105733Sjake
2615105733Sjake	/*
2616105733Sjake	 * Restore program counters.  This could be done after the restore
2617105733Sjake	 * but we're out of alternate globals to store them in...
2618105733Sjake	 */
261988644Sjake	wrpr	%l2, 0, %tnpc
2620105733Sjake	wrpr	%l3, 0, %tpc
262182906Sjake
2622105733Sjake	/*
2623105733Sjake	 * Save %tstate in an alternate global and clear the %cwp field.  %cwp
2624105733Sjake	 * will be affected by the restore below and we need to make sure it
2625105733Sjake	 * points to the current window at that time, not the window that was
2626105733Sjake	 * active at the time of the trap.
2627105733Sjake	 */
2628105733Sjake	andn	%l4, TSTATE_CWP_MASK, %g2
262982906Sjake
2630105733Sjake	/*
2631292943Smarius	 * Restore %y.  Could also be below if we had more alternate globals.
2632105733Sjake	 */
2633292943Smarius	wr	%l5, 0, %y
2634105733Sjake
2635105733Sjake	/*
2636105733Sjake	 * Setup %wstate for return.  We need to restore the user window state
2637105733Sjake	 * which we saved in wstate.other when we trapped.  We also need to
2638105733Sjake	 * set the transition bit so the restore will be handled specially
2639105733Sjake	 * if it traps, use the xor feature of wrpr to do that.
2640105733Sjake	 */
2641105733Sjake	srlx	%l6, WSTATE_OTHER_SHIFT, %g3
264288644Sjake	wrpr	%g3, WSTATE_TRANSITION, %wstate
2643105733Sjake
2644105733Sjake	/*
2645105733Sjake	 * Setup window management registers for return.  If not all user
2646105733Sjake	 * windows were spilled in the kernel %otherwin will be non-zero,
2647105733Sjake	 * so we need to transfer it to %canrestore to correctly restore
2648105733Sjake	 * those windows.  Otherwise everything gets set to zero and the
2649105733Sjake	 * restore below will fill a window directly from the user stack.
2650105733Sjake	 */
265188644Sjake	rdpr	%otherwin, %o0
265288644Sjake	wrpr	%o0, 0, %canrestore
265382906Sjake	wrpr	%g0, 0, %otherwin
265488644Sjake	wrpr	%o0, 0, %cleanwin
265581380Sjake
265682005Sjake	/*
2657105733Sjake	 * Now do the restore.  If this instruction causes a fill trap which
2658105733Sjake	 * fails to fill a window from the user stack, we will resume at
2659105733Sjake	 * tl0_ret_fill_end and call back into the kernel.
266082005Sjake	 */
266182906Sjake	restore
266282906Sjaketl0_ret_fill:
266381380Sjake
2664105733Sjake	/*
2665105733Sjake	 * We made it.  We're back in the window that was active at the time
2666105733Sjake	 * of the trap, and ready to return to usermode.
2667105733Sjake	 */
2668105733Sjake
2669105733Sjake	/*
2670105733Sjake	 * Restore %fprs.  This was saved in an alternate global above.
2671105733Sjake	 */
2672105733Sjake	wr	%g1, 0, %fprs
2673105733Sjake
2674105733Sjake	/*
2675105733Sjake	 * Fixup %tstate so the saved %cwp points to the current window and
2676105733Sjake	 * restore it.
2677105733Sjake	 */
2678292943Smarius	rdpr	%cwp, %g4
2679292943Smarius	wrpr	%g2, %g4, %tstate
2680105733Sjake
2681105733Sjake	/*
2682105733Sjake	 * Restore the user window state.  The transition bit was set above
2683105733Sjake	 * for special handling of the restore, this clears it.
2684105733Sjake	 */
268588644Sjake	wrpr	%g3, 0, %wstate
268685243Sjake
268784186Sjake#if KTR_COMPILE & KTR_TRAP
268888644Sjake	CATR(KTR_TRAP, "tl0_ret: td=%#lx pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
2689292943Smarius	    , %g2, %g3, %g4, 7, 8, 9)
2690292943Smarius	ldx	[PCPU(CURTHREAD)], %g3
2691292943Smarius	stx	%g3, [%g2 + KTR_PARM1]
2692292943Smarius	rdpr	%pil, %g3
2693292943Smarius	stx	%g3, [%g2 + KTR_PARM2]
2694292943Smarius	rdpr	%tpc, %g3
2695292943Smarius	stx	%g3, [%g2 + KTR_PARM3]
2696292943Smarius	rdpr	%tnpc, %g3
2697292943Smarius	stx	%g3, [%g2 + KTR_PARM4]
2698292943Smarius	stx	%sp, [%g2 + KTR_PARM5]
269982906Sjake9:
270082906Sjake#endif
270181380Sjake
2702105733Sjake	/*
2703105733Sjake	 * Return to usermode.
2704105733Sjake	 */
270582906Sjake	retry
270682906Sjaketl0_ret_fill_end:
270782005Sjake
270884186Sjake#if KTR_COMPILE & KTR_TRAP
270988785Sjake	CATR(KTR_TRAP, "tl0_ret: fill magic ps=%#lx ws=%#lx sp=%#lx"
271082906Sjake	    , %l0, %l1, %l2, 7, 8, 9)
271188785Sjake	rdpr	%pstate, %l1
271288785Sjake	stx	%l1, [%l0 + KTR_PARM1]
2713222840Smarius	stx	%l6, [%l0 + KTR_PARM2]
271488785Sjake	stx	%sp, [%l0 + KTR_PARM3]
271582906Sjake9:
271682906Sjake#endif
271782906Sjake
271882906Sjake	/*
2719105733Sjake	 * The restore above caused a fill trap and the fill handler was
2720105733Sjake	 * unable to fill a window from the user stack.  The special fill
2721105733Sjake	 * handler recognized this and punted, sending us here.  We need
2722105733Sjake	 * to carefully undo any state that was restored before the restore
2723105733Sjake	 * was executed and call trap again.  Trap will copyin a window
2724105733Sjake	 * from the user stack which will fault in the page we need so the
2725105733Sjake	 * restore above will succeed when we try again.  If this fails
2726105733Sjake	 * the process has trashed its stack, so we kill it.
272782906Sjake	 */
2728105733Sjake
2729105733Sjake	/*
2730105733Sjake	 * Restore the kernel window state.  This was saved in %l6 above, and
2731105733Sjake	 * since the restore failed we're back in the same window.
2732105733Sjake	 */
2733105733Sjake	wrpr	%l6, 0, %wstate
2734105733Sjake
2735105733Sjake	/*
2736105733Sjake	 * Restore the normal globals which have predefined values in the
2737105733Sjake	 * kernel.  We clobbered them above restoring the user's globals
2738105733Sjake	 * so this is very important.
2739105733Sjake	 * XXX PSTATE_ALT must already be set.
2740105733Sjake	 */
274188785Sjake	wrpr	%g0, PSTATE_ALT, %pstate
274289050Sjake	mov	PCB_REG, %o0
274389050Sjake	mov	PCPU_REG, %o1
274488785Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
274589050Sjake	mov	%o0, PCB_REG
274689050Sjake	mov	%o1, PCPU_REG
274788644Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
2748105733Sjake
2749105733Sjake	/*
2750105733Sjake	 * Simulate a fill trap and then start the whole return sequence over
2751105733Sjake	 * again.  This is special because it only copies in 1 window, not 2
2752105733Sjake	 * as we would for a normal failed fill.  This may be the first time
2753105733Sjake	 * the process has been run, so there may not be 2 windows worth of
2754105733Sjake	 * stack to copyin.
2755105733Sjake	 */
2756103784Sjake	mov	T_FILL_RET, %o0
2757105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2758103784Sjake	call	trap
2759103784Sjake	 add	%sp, SPOFF + CCFSZ, %o0
2760103784Sjake	ba,a	%xcc, tl0_ret
2761103784Sjake	 nop
276282005SjakeEND(tl0_ret)
276381380Sjake
276480709Sjake/*
276582906Sjake * Kernel trap entry point
276682906Sjake *
276791246Sjake * void tl1_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
2768205409Smarius *     u_int sfsr)
276982906Sjake *
277082906Sjake * This is easy because the stack is already setup and the windows don't need
277182906Sjake * to be split.  We build a trapframe and call trap(), the same as above, but
277282906Sjake * the outs don't need to be saved.
277380709Sjake */
277480709SjakeENTRY(tl1_trap)
	/*
	 * Read the saved trap state first.  %tstate, %tpc and %tnpc are
	 * per-trap-level registers; they must be captured before %tl is
	 * lowered below, after which a nested trap would overwrite them.
	 */
277580709Sjake	rdpr	%tstate, %l0
277680709Sjake	rdpr	%tpc, %l1
277780709Sjake	rdpr	%tnpc, %l2
277891246Sjake	rdpr	%pil, %l3
277991316Sjake	rd	%y, %l4
278091316Sjake	rdpr	%wstate, %l5
278180709Sjake
278284186Sjake#if KTR_COMPILE & KTR_TRAP
278388644Sjake	CATR(KTR_TRAP, "tl1_trap: td=%p type=%#lx pil=%#lx pc=%#lx sp=%#lx"
278488644Sjake	    , %g1, %g2, %g3, 7, 8, 9)
278588644Sjake	ldx	[PCPU(CURTHREAD)], %g2
278688644Sjake	stx	%g2, [%g1 + KTR_PARM1]
278797265Sjake	stx	%o0, [%g1 + KTR_PARM2]
278891246Sjake	stx	%l3, [%g1 + KTR_PARM3]
278988644Sjake	stx	%l1, [%g1 + KTR_PARM4]
279088644Sjake	stx	%i6, [%g1 + KTR_PARM5]
279182906Sjake9:
279282906Sjake#endif
279382906Sjake
	/*
	 * Drop back to trap level 1 now that the trap state registers have
	 * been saved.
	 */
279480709Sjake	wrpr	%g0, 1, %tl
279588644Sjake
	/*
	 * Switch to a kernel window state, keeping only the saved "other"
	 * bits of the previous %wstate.
	 */
279691316Sjake	and	%l5, WSTATE_OTHER_MASK, %l5
279791316Sjake	wrpr	%l5, WSTATE_KERNEL, %wstate
279891246Sjake
	/*
	 * Record the trap arguments in the trapframe.  Note that %o2 is
	 * not stored; it holds the handler address and is consumed by the
	 * jmpl at the end instead.
	 */
2799105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2800105733Sjake	stx	%o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
2801103919Sjake	stx	%o3, [%sp + SPOFF + CCFSZ + TF_TAR]
2802103919Sjake	stx	%o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
2803105733Sjake	stx	%o5, [%sp + SPOFF + CCFSZ + TF_SFSR]
2804103919Sjake
	/*
	 * Save the trap state captured above into the trapframe.
	 */
280588644Sjake	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
280688644Sjake	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
280788644Sjake	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2808105733Sjake	stx	%l3, [%sp + SPOFF + CCFSZ + TF_PIL]
2809105733Sjake	stx	%l4, [%sp + SPOFF + CCFSZ + TF_Y]
281088644Sjake
	/*
	 * Carry PCB_REG and PCPU_REG across the switch to the normal
	 * globals so %g6 and %g7 can be saved into the frame, then switch
	 * back and re-establish them.
	 */
2811103919Sjake	mov	PCB_REG, %l0
2812103919Sjake	mov	PCPU_REG, %l1
281391158Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
281491158Sjake
2815108377Sjake	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
2816108377Sjake	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]
281780709Sjake
2818103919Sjake	mov	%l0, PCB_REG
2819103919Sjake	mov	%l1, PCPU_REG
282091158Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
282191158Sjake
	/*
	 * Save the ins (the trapped frame's outs) and the remaining
	 * globals into the trapframe.
	 */
2822103919Sjake	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
2823103919Sjake	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
2824103919Sjake	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
2825103919Sjake	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
2826103919Sjake	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
2827103919Sjake	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
2828103919Sjake	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
2829103919Sjake	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
2830103919Sjake
2831108377Sjake	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
2832108377Sjake	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
2833108377Sjake	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
2834108377Sjake	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
2835108377Sjake	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
2836108377Sjake
	/*
	 * Call the handler whose address was passed in %o2, with a pointer
	 * to the trapframe as its argument.  %o7 is pre-loaded with
	 * tl1_ret - 8 so that when the handler returns, control transfers
	 * to tl1_ret below rather than back here.
	 */
2837103921Sjake	set	tl1_ret - 8, %o7
2838103921Sjake	jmpl	%o2, %g0
283980709Sjake	 add	%sp, CCFSZ + SPOFF, %o0
2840103921SjakeEND(tl1_trap)
284180709Sjake
/*
 * Return path for kernel traps dispatched by tl1_trap.  Restores the
 * trapframe state and resumes the trapped context with retry.
 */
2842103921SjakeENTRY(tl1_ret)
	/*
	 * Reload the outs (which become the ins of the trapped frame after
	 * the restore below) and the normal globals from the trapframe.
	 */
2843103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O0], %i0
2844103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O1], %i1
2845103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O2], %i2
2846103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O3], %i3
2847103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O4], %i4
2848103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O5], %i5
2849103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O6], %i6
2850103919Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_O7], %i7
2851103919Sjake
2852108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
2853108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
2854108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
2855108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
2856108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
2857108377Sjake
	/*
	 * Load the saved trap state into locals for the writes after the
	 * restore below.
	 */
285888644Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l0
285988644Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l1
286088644Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
2861105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_PIL], %l3
2862105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l4
286388644Sjake
	/*
	 * %g6 and %g7 are only reloaded from the trapframe when the saved
	 * %tpc lies within [VM_MIN_PROM_ADDRESS, VM_MAX_PROM_ADDRESS],
	 * i.e. when returning into the PROM; otherwise the restore of the
	 * special globals is skipped.
	 */
2864108377Sjake	set	VM_MIN_PROM_ADDRESS, %l5
2865108377Sjake	cmp	%l1, %l5
2866108377Sjake	bl,a,pt	%xcc, 1f
2867108377Sjake	 nop
2868182774Smarius	set	VM_MAX_PROM_ADDRESS, %l5
2869182774Smarius	cmp	%l1, %l5
2870182774Smarius	bg,a,pt	%xcc, 1f
2871182774Smarius	 nop
287280709Sjake
	/*
	 * Switch to the normal globals to restore %g6 and %g7.
	 */
2873108377Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
287480709Sjake
2875108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6
2876108377Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G7], %g7
2877108377Sjake
2878108377Sjake1:	wrpr	%g0, PSTATE_ALT, %pstate
2879108377Sjake
	/*
	 * Stage the trap state in alternate globals so it survives the
	 * window change caused by the restore.  The %cwp field of %tstate
	 * is cleared here and refilled with the post-restore %cwp below.
	 */
288088644Sjake	andn	%l0, TSTATE_CWP_MASK, %g1
288186519Sjake	mov	%l1, %g2
288286519Sjake	mov	%l2, %g3
288381380Sjake
288488644Sjake	wrpr	%l3, 0, %pil
2885292943Smarius	wr	%l4, 0, %y
288686519Sjake
	/*
	 * Return to the window that was active at the time of the trap.
	 */
288786519Sjake	restore
288886519Sjake
	/*
	 * Move back up to trap level 2 and rewrite the trap state
	 * registers, then retry to resume the trapped instruction.
	 */
288980709Sjake	wrpr	%g0, 2, %tl
289080709Sjake
2891292943Smarius	rdpr	%cwp, %g4
2892292943Smarius	wrpr	%g1, %g4, %tstate
289386519Sjake	wrpr	%g2, 0, %tpc
289486519Sjake	wrpr	%g3, 0, %tnpc
289586519Sjake
289684186Sjake#if KTR_COMPILE & KTR_TRAP
2897103921Sjake	CATR(KTR_TRAP, "tl1_ret: td=%#lx pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
2898292943Smarius	    , %g2, %g3, %g4, 7, 8, 9)
2899292943Smarius	ldx	[PCPU(CURTHREAD)], %g3
2900292943Smarius	stx	%g3, [%g2 + KTR_PARM1]
2901292943Smarius	rdpr	%pil, %g3
2902292943Smarius	stx	%g3, [%g2 + KTR_PARM2]
2903292943Smarius	rdpr	%tstate, %g3
2904292943Smarius	stx	%g3, [%g2 + KTR_PARM3]
2905292943Smarius	rdpr	%tpc, %g3
2906292943Smarius	stx	%g3, [%g2 + KTR_PARM4]
2907292943Smarius	stx	%sp, [%g2 + KTR_PARM5]
290882906Sjake9:
290982906Sjake#endif
291082906Sjake
291180709Sjake	retry
2912103921SjakeEND(tl1_ret)
291380709Sjake
291491246Sjake/*
291591246Sjake * void tl1_intr(u_int level, u_int mask)
291691246Sjake */
291784186SjakeENTRY(tl1_intr)
	/*
	 * Kernel interrupt entry.  As in tl1_trap, capture the per-trap-
	 * level state registers before lowering %tl below.
	 */
291884186Sjake	rdpr	%tstate, %l0
291984186Sjake	rdpr	%tpc, %l1
292084186Sjake	rdpr	%tnpc, %l2
292191246Sjake	rdpr	%pil, %l3
292291316Sjake	rd	%y, %l4
292391316Sjake	rdpr	%wstate, %l5
292484186Sjake
292584186Sjake#if KTR_COMPILE & KTR_INTR
292689050Sjake	CATR(KTR_INTR,
2927145153Smarius	    "tl1_intr: td=%p level=%#x pil=%#lx pc=%#lx sp=%#lx"
292888644Sjake	    , %g1, %g2, %g3, 7, 8, 9)
292988644Sjake	ldx	[PCPU(CURTHREAD)], %g2
293088644Sjake	stx	%g2, [%g1 + KTR_PARM1]
293191246Sjake	stx	%o0, [%g1 + KTR_PARM2]
293291246Sjake	stx	%l3, [%g1 + KTR_PARM3]
293391246Sjake	stx	%l1, [%g1 + KTR_PARM4]
293491246Sjake	stx	%i6, [%g1 + KTR_PARM5]
293584186Sjake9:
293684186Sjake#endif
293784186Sjake
	/*
	 * Raise %pil to the interrupt level (%o0) and acknowledge the
	 * interrupt by clearing the soft interrupt bit(s) in %o1.
	 */
293891246Sjake	wrpr	%o0, 0, %pil
2939108379Sjake	wr	%o1, 0, %clear_softint
294091246Sjake
	/*
	 * Drop back to trap level 1 now that the trap state is saved.
	 */
294184186Sjake	wrpr	%g0, 1, %tl
294288644Sjake
	/*
	 * Switch to a kernel window state, keeping only the saved "other"
	 * bits of the previous %wstate.
	 */
294391316Sjake	and	%l5, WSTATE_OTHER_MASK, %l5
294491316Sjake	wrpr	%l5, WSTATE_KERNEL, %wstate
294591246Sjake
	/*
	 * Save the trap state captured above into the trapframe.
	 */
294688644Sjake	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
294788644Sjake	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
294888644Sjake	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2949105733Sjake	stx	%l3, [%sp + SPOFF + CCFSZ + TF_PIL]
2950105733Sjake	stx	%l4, [%sp + SPOFF + CCFSZ + TF_Y]
295188644Sjake
	/*
	 * Keep the interrupt level in %l7 for the handler lookup and the
	 * statistics updates after the call, then fill in the frame's
	 * level and type.
	 */
295291246Sjake	mov	%o0, %l7
295391246Sjake	mov	T_INTERRUPT | T_KERNEL, %o1
295489050Sjake
2955105733Sjake	stx	%o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
2956105733Sjake	stx	%o1, [%sp + SPOFF + CCFSZ + TF_TYPE]
295788644Sjake
295888644Sjake	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
295988644Sjake	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
296088644Sjake
	/*
	 * Save the volatile globals, switching register sets via %pstate
	 * and carrying PCB_REG/PCPU_REG across in locals as in tl1_trap.
	 */
296191158Sjake	mov	PCB_REG, %l4
296291158Sjake	mov	PCPU_REG, %l5
296391158Sjake	wrpr	%g0, PSTATE_NORMAL, %pstate
296491158Sjake
296584186Sjake	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
296684186Sjake	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
296784186Sjake	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
296884186Sjake	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
296984186Sjake	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
297084186Sjake
297191158Sjake	mov	%l4, PCB_REG
297291158Sjake	mov	%l5, PCPU_REG
297391158Sjake	wrpr	%g0, PSTATE_KERNEL, %pstate
297491158Sjake
	/*
	 * Look up this level's handler in the intr_handlers table and
	 * call it with a pointer to the trapframe.  The entry must not
	 * be null.
	 */
2975157825Smarius	SET(intr_handlers, %l5, %l4)
2976157825Smarius	sllx	%l7, IH_SHIFT, %l5
2977157825Smarius	ldx	[%l4 + %l5], %l5
2978157825Smarius	KASSERT(%l5, "tl1_intr: ih null")
2979157825Smarius	call	%l5
2980157825Smarius	 add	%sp, CCFSZ + SPOFF, %o0
2981157825Smarius
2982145153Smarius	/* %l7 contains PIL */
	/*
	 * Bump the intrcnt slot for this PIL (pil_countp maps the PIL to
	 * the slot index) and the per-cpu interrupt counter.
	 */
2983117658Sjmg	SET(intrcnt, %l5, %l4)
2984117658Sjmg	prefetcha [%l4] ASI_N, 1
2985117658Sjmg	SET(pil_countp, %l5, %l6)
2986117658Sjmg	sllx	%l7, 1, %l5
2987117658Sjmg	lduh	[%l5 + %l6], %l5
2988117658Sjmg	sllx	%l5, 3, %l5
2989117658Sjmg	add	%l5, %l4, %l4
2990145153Smarius	ldx	[%l4], %l5
2991145153Smarius	inc	%l5
2992145153Smarius	stx	%l5, [%l4]
2993117658Sjmg
2994145153Smarius	lduw	[PCPU(CNT) + V_INTR], %l4
2995145153Smarius	inc	%l4
2996145153Smarius	stw	%l4, [PCPU(CNT) + V_INTR]
299788644Sjake
	/*
	 * Restore %y and the globals saved above, then return the same
	 * way as tl1_ret: stage the trap state in alternate globals,
	 * restore the window, rewrite the trap state registers at TL 2
	 * and retry.
	 */
2998105733Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l4
299991316Sjake
300084186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
300184186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
300284186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
300384186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
300484186Sjake	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
300584186Sjake
300684186Sjake	wrpr	%g0, PSTATE_ALT, %pstate
300784186Sjake
	/*
	 * The %cwp field of the saved %tstate is cleared here and refilled
	 * with the post-restore %cwp below.
	 */
300888644Sjake	andn	%l0, TSTATE_CWP_MASK, %g1
300986519Sjake	mov	%l1, %g2
301086519Sjake	mov	%l2, %g3
301188644Sjake	wrpr	%l3, 0, %pil
3012292943Smarius	wr	%l4, 0, %y
301384186Sjake
301486519Sjake	restore
301586519Sjake
301684186Sjake	wrpr	%g0, 2, %tl
301784186Sjake
3018292943Smarius	rdpr	%cwp, %g4
3019292943Smarius	wrpr	%g1, %g4, %tstate
302086519Sjake	wrpr	%g2, 0, %tpc
302186519Sjake	wrpr	%g3, 0, %tnpc
302286519Sjake
302388644Sjake#if KTR_COMPILE & KTR_INTR
3024145153Smarius	CATR(KTR_INTR, "tl1_intr: td=%#x pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
3025292943Smarius	    , %g2, %g3, %g4, 7, 8, 9)
3026292943Smarius	ldx	[PCPU(CURTHREAD)], %g3
3027292943Smarius	stx	%g3, [%g2 + KTR_PARM1]
3028292943Smarius	rdpr	%pil, %g3
3029292943Smarius	stx	%g3, [%g2 + KTR_PARM2]
3030292943Smarius	rdpr	%tstate, %g3
3031292943Smarius	stx	%g3, [%g2 + KTR_PARM3]
3032292943Smarius	rdpr	%tpc, %g3
3033292943Smarius	stx	%g3, [%g2 + KTR_PARM4]
3034292943Smarius	stx	%sp, [%g2 + KTR_PARM5]
303584186Sjake9:
303684186Sjake#endif
303784186Sjake
303884186Sjake	retry
303984186SjakeEND(tl1_intr)
304084186Sjake
/*
 * Marks the end of the trap handler text above.  NOTE(review): the
 * consumers of this symbol are not visible in this file section;
 * presumably it is paired with a matching start label for address-range
 * checks -- confirm against the rest of the file.
 */
3041155839Smarius	.globl	tl_text_end
3042155839Smariustl_text_end:
3043155839Smarius	nop
3044155839Smarius
304582906Sjake/*
304682906Sjake * Freshly forked processes come here when switched to for the first time.
304782906Sjake * The arguments to fork_exit() have been setup in the locals, we must move
304882906Sjake * them to the outs.
304982906Sjake */
305080709SjakeENTRY(fork_trampoline)
305184186Sjake#if KTR_COMPILE & KTR_PROC
305284186Sjake	CATR(KTR_PROC, "fork_trampoline: td=%p (%s) cwp=%#lx"
305382906Sjake	    , %g1, %g2, %g3, 7, 8, 9)
305483366Sjulian	ldx	[PCPU(CURTHREAD)], %g2
305582906Sjake	stx	%g2, [%g1 + KTR_PARM1]
305684186Sjake	ldx	[%g2 + TD_PROC], %g2
305782906Sjake	add	%g2, P_COMM, %g2
305882906Sjake	stx	%g2, [%g1 + KTR_PARM2]
305982906Sjake	rdpr	%cwp, %g2
306082906Sjake	stx	%g2, [%g1 + KTR_PARM3]
306182906Sjake9:
306282906Sjake#endif
	/*
	 * Move the fork_exit() arguments from the locals, where the fork
	 * code placed them, into the outs.  %l2 is moved in the delay
	 * slot of the call.
	 */
306380709Sjake	mov	%l0, %o0
306480709Sjake	mov	%l1, %o1
306580709Sjake	call	fork_exit
306688644Sjake	 mov	%l2, %o2
	/*
	 * Return to usermode through the common trap return path.
	 */
3067116589Sjake	ba,a	%xcc, tl0_ret
306884186Sjake	 nop
306980709SjakeEND(fork_trampoline)
3070