/*-
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	BSDI $Id: locore.s,v 1.36.2.15 1999/08/23 22:34:41 cp Exp $
 */
/*-
 * Copyright (c) 2001 Jake Burkholder.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <machine/asm.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"

#include <machine/asi.h>
#include <machine/asmacros.h>
#include <machine/frame.h>
#include <machine/fsr.h>
#include <machine/intr_machdep.h>
#include <machine/ktr.h>
#include <machine/pcb.h>
#include <machine/pstate.h>
#include <machine/trap.h>
#include <machine/tsb.h>
#include <machine/tstate.h>
#include <machine/utrap.h>
#include <machine/wstate.h>

#include "assym.s"

#define	TSB_ASI			0x0
#define	TSB_KERNEL		0x0
#define	TSB_KERNEL_MASK		0x0
#define	TSB_KERNEL_PHYS		0x0
#define	TSB_KERNEL_PHYS_END	0x0
#define	TSB_QUAD_LDD		0x0
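/*
 * The 0x0 values above are placeholders: as the "patched at startup"
 * comments below note, the kernel rewrites the corresponding sethi/or/
 * ldda/wr instructions in place once the real TSB address, mask and ASI
 * are known.
 */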

	.register %g2,#ignore
	.register %g3,#ignore
	.register %g6,#ignore
	.register %g7,#ignore

/*
 * Atomically set a bit in a TTE.
 */
#define	TTE_SET_BIT(r1, r2, r3, bit, a, asi) \
	add	r1, TTE_DATA, r1 ; \
	LD(x, a) [r1] asi, r2 ; \
9:	or	r2, bit, r3 ; \
	CAS(x, a) [r1] asi, r2, r3 ; \
	cmp	r2, r3 ; \
	bne,pn	%xcc, 9b ; \
	 mov	r3, r2

#define	TTE_SET_REF(r1, r2, r3, a, asi)	TTE_SET_BIT(r1, r2, r3, TD_REF, a, asi)
#define	TTE_SET_W(r1, r2, r3, a, asi)	TTE_SET_BIT(r1, r2, r3, TD_W, a, asi)
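/*
 * This is the usual compare-and-swap loop; in C-like pseudocode
 * (illustrative only):
 *
 *	old = tte->data;
 *	for (;;) {
 *		prev = casx(&tte->data, old, old | bit);
 *		if (prev == old)
 *			break;
 *		old = prev;
 *	}
 *
 * On exit r2 holds the TTE data as observed just before the successful
 * update, which is why callers re-check the valid bit in r2 afterwards.
 */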

/*
 * Macros for spilling and filling live windows.
 *
 * NOTE: These macros use exactly 16 instructions, and it is assumed that the
 * handler will not use more than 24 instructions total, to leave room for
 * resume vectors which occupy the last 8 instructions.
 */
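/*
 * That is, each 32-instruction (128-byte) spill/fill vector is laid out
 * roughly as:
 *
 *	insns  0-15	SPILL()/FILL() body, one store/load per register
 *	insns 16-23	epilogue (saved/restored, retry) plus padding
 *	insns 24-31	two 4-instruction resume vectors (RSF_TRAP and
 *			friends, each padded to 16 bytes with .align 16)
 */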

#define	SPILL(storer, base, size, asi) \
	storer	%l0, [base + (0 * size)] asi ; \
	storer	%l1, [base + (1 * size)] asi ; \
	storer	%l2, [base + (2 * size)] asi ; \
	storer	%l3, [base + (3 * size)] asi ; \
	storer	%l4, [base + (4 * size)] asi ; \
	storer	%l5, [base + (5 * size)] asi ; \
	storer	%l6, [base + (6 * size)] asi ; \
	storer	%l7, [base + (7 * size)] asi ; \
	storer	%i0, [base + (8 * size)] asi ; \
	storer	%i1, [base + (9 * size)] asi ; \
	storer	%i2, [base + (10 * size)] asi ; \
	storer	%i3, [base + (11 * size)] asi ; \
	storer	%i4, [base + (12 * size)] asi ; \
	storer	%i5, [base + (13 * size)] asi ; \
	storer	%i6, [base + (14 * size)] asi ; \
	storer	%i7, [base + (15 * size)] asi

#define	FILL(loader, base, size, asi) \
	loader	[base + (0 * size)] asi, %l0 ; \
	loader	[base + (1 * size)] asi, %l1 ; \
	loader	[base + (2 * size)] asi, %l2 ; \
	loader	[base + (3 * size)] asi, %l3 ; \
	loader	[base + (4 * size)] asi, %l4 ; \
	loader	[base + (5 * size)] asi, %l5 ; \
	loader	[base + (6 * size)] asi, %l6 ; \
	loader	[base + (7 * size)] asi, %l7 ; \
	loader	[base + (8 * size)] asi, %i0 ; \
	loader	[base + (9 * size)] asi, %i1 ; \
	loader	[base + (10 * size)] asi, %i2 ; \
	loader	[base + (11 * size)] asi, %i3 ; \
	loader	[base + (12 * size)] asi, %i4 ; \
	loader	[base + (13 * size)] asi, %i5 ; \
	loader	[base + (14 * size)] asi, %i6 ; \
	loader	[base + (15 * size)] asi, %i7

#define	ERRATUM50(reg)	mov reg, reg

#define	KSTACK_SLOP	1024

/*
 * Sanity check the kernel stack and bail out if it's wrong.
 * XXX: doesn't handle being on the panic stack.
 */
#define	KSTACK_CHECK \
	dec	16, ASP_REG ; \
	stx	%g1, [ASP_REG + 0] ; \
	stx	%g2, [ASP_REG + 8] ; \
	add	%sp, SPOFF, %g1 ; \
	andcc	%g1, (1 << PTR_SHIFT) - 1, %g0 ; \
	bnz,a	%xcc, tl1_kstack_fault ; \
	 inc	16, ASP_REG ; \
	ldx	[PCPU(CURTHREAD)], %g2 ; \
	ldx	[%g2 + TD_KSTACK], %g2 ; \
	add	%g2, KSTACK_SLOP, %g2 ; \
	subcc	%g1, %g2, %g1 ; \
	ble,a	%xcc, tl1_kstack_fault ; \
	 inc	16, ASP_REG ; \
	set	KSTACK_PAGES * PAGE_SIZE, %g2 ; \
	cmp	%g1, %g2 ; \
	bgt,a	%xcc, tl1_kstack_fault ; \
	 inc	16, ASP_REG ; \
	ldx	[ASP_REG + 8], %g2 ; \
	ldx	[ASP_REG + 0], %g1 ; \
	inc	16, ASP_REG
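/*
 * In short, the macro above branches to tl1_kstack_fault if any of the
 * following holds for the biased stack pointer (%sp + SPOFF):
 *	1. it is not (1 << PTR_SHIFT)-byte aligned,
 *	2. it is not more than KSTACK_SLOP bytes above the base of the
 *	   current thread's kernel stack, or
 *	3. it lies beyond the KSTACK_PAGES * PAGE_SIZE extent of that stack.
 */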

	.globl	tl_text_begin
tl_text_begin:
	nop

ENTRY(tl1_kstack_fault)
	rdpr	%tl, %g1
1:	cmp	%g1, 2
	be,a	2f
	 nop

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl1_kstack_fault: tl=%#lx tpc=%#lx tnpc=%#lx"
	    , %g2, %g3, %g4, 7, 8, 9)
	rdpr	%tl, %g3
	stx	%g3, [%g2 + KTR_PARM1]
	rdpr	%tpc, %g3
	stx	%g3, [%g2 + KTR_PARM2]
	rdpr	%tnpc, %g3
	stx	%g3, [%g2 + KTR_PARM3]
9:
#endif

	sub	%g1, 1, %g1
	wrpr	%g1, 0, %tl
	ba,a	%xcc, 1b
	 nop

2:
#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP,
	    "tl1_kstack_fault: sp=%#lx ks=%#lx cr=%#lx cs=%#lx ow=%#lx ws=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	add	%sp, SPOFF, %g2
	stx	%g2, [%g1 + KTR_PARM1]
	ldx	[PCPU(CURTHREAD)], %g2
	ldx	[%g2 + TD_KSTACK], %g2
	stx	%g2, [%g1 + KTR_PARM2]
	rdpr	%canrestore, %g2
	stx	%g2, [%g1 + KTR_PARM3]
	rdpr	%cansave, %g2
	stx	%g2, [%g1 + KTR_PARM4]
	rdpr	%otherwin, %g2
	stx	%g2, [%g1 + KTR_PARM5]
	rdpr	%wstate, %g2
	stx	%g2, [%g1 + KTR_PARM6]
9:
#endif

	wrpr	%g0, 0, %canrestore
	wrpr	%g0, 6, %cansave
	wrpr	%g0, 0, %otherwin
	wrpr	%g0, WSTATE_KERNEL, %wstate

	sub	ASP_REG, SPOFF + CCFSZ, %sp
	clr	%fp

	set	trap, %o2
	ba	%xcc, tl1_trap
	 mov	T_KSTACK_FAULT | T_KERNEL, %o0
END(tl1_kstack_fault)

/*
 * Magic to resume from a spill or fill trap.  If we get an alignment or an
 * MMU fault during a spill or a fill, this macro will detect the fault and
 * resume at a set instruction offset in the trap handler.
 *
 * To check if the previous trap was a spill/fill we convert the trapped pc
 * to a trap type and verify that it is in the range of spill/fill vectors.
 * The spill/fill vectors are types 0x80-0xff and 0x280-0x2ff; masking off the
 * tl bit allows us to detect both ranges with one test.
 *
 * This is:
 *	0x80 <= (((%tpc - %tba) >> 5) & ~0x200) < 0x100
 *
 * To calculate the new pc we take advantage of the xor feature of wrpr.
 * By forcing all the low bits of the trapped pc on, we can produce any offset
 * into the spill/fill vector.  The size of a spill/fill trap vector is 0x80.
 *
 *	0x7f ^ 0x1f == 0x60
 *	0x1f == (0x80 - 0x60) - 1
 *
 * These are the offset and xor value used to resume from alignment faults.
 */
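/*
 * Worked example for the alignment case: RSF_OFF_ALIGN is 0x60, so
 * RSF_XOR(0x60) == (0x80 - 0x60) - 1 == 0x1f.  For a fault anywhere inside
 * a vector, "or %g1, 0x7f, %g1" yields vector_base + 0x7f, and the wrpr
 * xor with 0x1f gives vector_base + 0x60 -- instruction slot 24, the first
 * of the two resume vectors at the end of the handler.
 */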

/*
 * Determine if we have trapped inside of a spill/fill vector, and if so resume
 * at a fixed instruction offset in the trap vector.  Must be called on
 * alternate globals.
 */
#define	RESUME_SPILLFILL_MAGIC(stxa_g0_sfsr, xor) \
	dec	16, ASP_REG ; \
	stx	%g1, [ASP_REG + 0] ; \
	stx	%g2, [ASP_REG + 8] ; \
	rdpr	%tpc, %g1 ; \
	ERRATUM50(%g1) ; \
	rdpr	%tba, %g2 ; \
	sub	%g1, %g2, %g2 ; \
	srlx	%g2, 5, %g2 ; \
	andn	%g2, 0x200, %g2 ; \
	cmp	%g2, 0x80 ; \
	blu,pt	%xcc, 9f ; \
	 cmp	%g2, 0x100 ; \
	bgeu,pt	%xcc, 9f ; \
	 or	%g1, 0x7f, %g1 ; \
	wrpr	%g1, xor, %tnpc ; \
	stxa_g0_sfsr ; \
	ldx	[ASP_REG + 8], %g2 ; \
	ldx	[ASP_REG + 0], %g1 ; \
	inc	16, ASP_REG ; \
	done ; \
9:	ldx	[ASP_REG + 8], %g2 ; \
	ldx	[ASP_REG + 0], %g1 ; \
	inc	16, ASP_REG

/*
 * For certain faults we need to clear the SFSR MMU register before returning.
 */
#define	RSF_CLR_SFSR \
	wr	%g0, ASI_DMMU, %asi ; \
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi

#define	RSF_XOR(off)	((0x80 - off) - 1)

/*
 * Instruction offsets in spill and fill trap handlers for handling certain
 * nested traps, and corresponding xor constants for wrpr.
 */
#define	RSF_OFF_ALIGN	0x60
#define	RSF_OFF_MMU	0x70

#define	RESUME_SPILLFILL_ALIGN \
	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_ALIGN))
#define	RESUME_SPILLFILL_MMU \
	RESUME_SPILLFILL_MAGIC(EMPTY, RSF_XOR(RSF_OFF_MMU))
#define	RESUME_SPILLFILL_MMU_CLR_SFSR \
	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_MMU))

/*
 * Constant to add to %tnpc when taking a fill trap just before returning to
 * user mode.
 */
#define	RSF_FILL_INC	tl0_ret_fill_end - tl0_ret_fill
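/*
 * tl0_ret_fill labels the restore in the return-to-usermode path (later in
 * this file); if the fill triggered by that restore faults, RSF_FILL_MAGIC
 * below advances %tnpc by RSF_FILL_INC so execution resumes just past it.
 */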

/*
 * Generate a T_SPILL or T_FILL trap if the window operation fails.
 */
#define	RSF_TRAP(type) \
	ba	%xcc, tl0_sftrap ; \
	 mov	type, %g2 ; \
	.align	16

/*
 * Game over if the window operation fails.
 */
#define	RSF_FATAL(type) \
	ba	%xcc, rsf_fatal ; \
	 mov	type, %g2 ; \
	.align	16

/*
 * Magic to resume from a failed fill a few instructions after the
 * corresponding restore.  This is used on return from the kernel to
 * usermode.
 */
#define	RSF_FILL_MAGIC \
	rdpr	%tnpc, %g1 ; \
	add	%g1, RSF_FILL_INC, %g1 ; \
	wrpr	%g1, 0, %tnpc ; \
	done ; \
	.align	16

/*
 * Spill to the pcb if a spill to the user stack in kernel mode fails.
 */
#define	RSF_SPILL_TOPCB \
	ba,a	%xcc, tl1_spill_topcb ; \
	 nop ; \
	.align	16

ENTRY(rsf_fatal)
#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "rsf_fatal: bad window trap tt=%#lx type=%#lx"
	    , %g1, %g3, %g4, 7, 8, 9)
	rdpr	%tt, %g3
	stx	%g3, [%g1 + KTR_PARM1]
	stx	%g2, [%g1 + KTR_PARM2]
9:
#endif

	KSTACK_CHECK

	sir
END(rsf_fatal)

	.data
	_ALIGN_DATA
	.globl	intrnames, sintrnames
intrnames:
	.space	(IV_MAX + PIL_MAX) * (MAXCOMLEN + 1)
sintrnames:
	.quad	(IV_MAX + PIL_MAX) * (MAXCOMLEN + 1)

	.globl	intrcnt, sintrcnt
intrcnt:
	.space	(IV_MAX + PIL_MAX) * 8
sintrcnt:
	.quad	(IV_MAX + PIL_MAX) * 8

	.text

/*
 * Trap table and associated macros
 *
 * Due to its size a trap table is an inherently hard thing to represent in
 * code in a clean way.  There are approximately 1024 vectors, of 8 or 32
 * instructions each, many of which are identical.  The way that this is
 * laid out is that the instructions (8 or 32) for the actual trap vector
 * appear as an AS macro.  In general this code branches to tl0_trap or
 * tl1_trap, but if not, supporting code can be placed just after the
 * definition of the macro.  The macros are then instantiated in a different
 * section (.trap), which is set up to be placed by the linker at the
 * beginning of .text, and the code around the macros is moved to the end of
 * the trap table.  In this way the code that must be sequential in memory
 * can be split up, and located near its supporting code so that it is
 * easier to follow.
 */
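/*
 * For example, a "tl0_gen T_DIVISION_BY_ZERO" instantiated below expands in
 * .trap to a short stub (split the windows, load trap and the type, branch
 * to tl0_utrap) padded with .align 32 to a single 8-instruction vector;
 * handlers that need more room, such as the MMU miss and spill/fill
 * handlers, use .align 128 and occupy four vector slots.
 */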

	/*
	 * Clean window traps occur when %cleanwin is zero to ensure that data
	 * is not leaked between address spaces in registers.
	 */
	.macro	clean_window
	clr	%o0
	clr	%o1
	clr	%o2
	clr	%o3
	clr	%o4
	clr	%o5
	clr	%o6
	clr	%o7
	clr	%l0
	clr	%l1
	clr	%l2
	clr	%l3
	clr	%l4
	clr	%l5
	clr	%l6
	rdpr	%cleanwin, %l7
	inc	%l7
	wrpr	%l7, 0, %cleanwin
	clr	%l7
	retry
	.align	128
	.endm

	/*
	 * Stack fixups for entry from user mode.  We are still running on the
	 * user stack, and with its live registers, so we must save soon.  We
	 * are on alternate globals so we do have some registers.  Set the
	 * transitional window state, and do the save.  If this traps we
	 * attempt to spill a window to the user stack.  If this fails, we
	 * spill the window to the pcb and continue.  Spilling to the pcb
	 * must not fail.
	 *
	 * NOTE: Must be called with alternate globals and clobbers %g1.
	 */

	.macro	tl0_split
	rdpr	%wstate, %g1
	wrpr	%g1, WSTATE_TRANSITION, %wstate
	save
	.endm

	.macro	tl0_setup	type
	tl0_split
	clr	%o1
	set	trap, %o2
	ba	%xcc, tl0_utrap
	 mov	\type, %o0
	.endm

	/*
	 * Generic trap type.  Call trap() with the specified type.
	 */
	.macro	tl0_gen		type
	tl0_setup \type
	.align	32
	.endm

	/*
	 * This is used to suck up the massive swaths of reserved trap types.
	 * Generates count "reserved" trap vectors.
	 */
	.macro	tl0_reserved	count
	.rept	\count
	tl0_gen	T_RESERVED
	.endr
	.endm

	.macro	tl1_split
	rdpr	%wstate, %g1
	wrpr	%g1, WSTATE_NESTED, %wstate
	save	%sp, -(CCFSZ + TF_SIZEOF), %sp
	.endm

	.macro	tl1_setup	type
	tl1_split
	clr	%o1
	set	trap, %o2
	ba	%xcc, tl1_trap
	 mov	\type | T_KERNEL, %o0
	.endm

	.macro	tl1_gen		type
	tl1_setup \type
	.align	32
	.endm

	.macro	tl1_reserved	count
	.rept	\count
	tl1_gen	T_RESERVED
	.endr
	.endm

	.macro	tl0_insn_excptn
	wrpr	%g0, PSTATE_ALT, %pstate
	wr	%g0, ASI_IMMU, %asi
	rdpr	%tpc, %g3
	ldxa	[%g0 + AA_IMMU_SFSR] %asi, %g4
	/*
	 * XXX in theory, a store to AA_IMMU_SFSR must be immediately
	 * followed by a DONE, FLUSH or RETRY for USIII.  In practice,
	 * this triggers a RED state exception though.
	 */
	stxa	%g0, [%g0 + AA_IMMU_SFSR] %asi
	membar	#Sync
	ba	%xcc, tl0_sfsr_trap
	 mov	T_INSTRUCTION_EXCEPTION, %g2
	.align	32
	.endm

	.macro	tl0_data_excptn
	wrpr	%g0, PSTATE_ALT, %pstate
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync
	ba	%xcc, tl0_sfsr_trap
	 mov	T_DATA_EXCEPTION, %g2
	.align	32
	.endm

	.macro	tl0_align
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync
	ba	%xcc, tl0_sfsr_trap
	 mov	T_MEM_ADDRESS_NOT_ALIGNED, %g2
	.align	32
	.endm

ENTRY(tl0_sfsr_trap)
	tl0_split
	clr	%o1
	set	trap, %o2
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl0_utrap
	 mov	%g2, %o0
END(tl0_sfsr_trap)

	.macro	tl0_intr level, mask
	tl0_split
	set	\mask, %o1
	ba	%xcc, tl0_intr
	 mov	\level, %o0
	.align	32
	.endm

#define	INTR(level, traplvl)						\
	tl ## traplvl ## _intr	level, 1 << level

#define	TICK(traplvl) \
	tl ## traplvl ## _intr	PIL_TICK, 0x10001

#define	INTR_LEVEL(tl)							\
	INTR(1, tl) ;							\
	INTR(2, tl) ;							\
	INTR(3, tl) ;							\
	INTR(4, tl) ;							\
	INTR(5, tl) ;							\
	INTR(6, tl) ;							\
	INTR(7, tl) ;							\
	INTR(8, tl) ;							\
	INTR(9, tl) ;							\
	INTR(10, tl) ;							\
	INTR(11, tl) ;							\
	INTR(12, tl) ;							\
	INTR(13, tl) ;							\
	TICK(tl) ;							\
	INTR(15, tl) ;

	.macro	tl0_intr_level
	INTR_LEVEL(0)
	.endm

	.macro	intr_vector
	ldxa	[%g0] ASI_INTR_RECEIVE, %g1
	andcc	%g1, IRSR_BUSY, %g0
	bnz,a,pt %xcc, intr_vector
	 nop
	ba,a,pt	%xcc, intr_vector_stray
	 nop
	.align	32
	.endm

	.macro	tl0_immu_miss
	/*
	 * Load the context and the virtual page number from the tag access
	 * register.  We ignore the context.
	 */
	wr	%g0, ASI_IMMU, %asi
	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g1

	/*
	 * Initialize the page size walker.
	 */
	mov	TS_MIN, %g2

	/*
	 * Loop over all supported page sizes.
	 */

	/*
	 * Compute the page shift for the page size we are currently looking
	 * for.
	 */
1:	add	%g2, %g2, %g3
	add	%g3, %g2, %g3
	add	%g3, PAGE_SHIFT, %g3
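	/*
	 * (The three adds compute %g3 = 3 * %g2 + PAGE_SHIFT: each
	 * successively larger TTE page size is 8 times the previous one,
	 * so the shift grows by 3 bits per step of the walker.)
	 */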

	/*
	 * Extract the virtual page number from the contents of the tag
	 * access register.
	 */
	srlx	%g1, %g3, %g3

	/*
	 * Compute the TTE bucket address.
	 */
	ldxa	[%g0 + AA_IMMU_TSB] %asi, %g5
	and	%g3, TSB_BUCKET_MASK, %g4
	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
	add	%g4, %g5, %g4

	/*
	 * Compute the TTE tag target.
	 */
	sllx	%g3, TV_SIZE_BITS, %g3
	or	%g3, %g2, %g3

	/*
	 * Loop over the TTEs in this bucket.
	 */

	/*
	 * Load the TTE.  Note that this instruction may fault, clobbering
	 * the contents of the tag access register, %g5, %g6, and %g7.  We
	 * do not use %g5, and %g6 and %g7 are not used until this instruction
	 * completes successfully.
	 */
2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and executable and that the TTE tags match.
	 */
	brgez,pn %g7, 3f
	 andcc	%g7, TD_EXEC, %g0
	bz,pn	%xcc, 3f
	 cmp	%g3, %g6
	bne,pn	%xcc, 3f
	 EMPTY

	/*
	 * We matched a TTE, load the TLB.
	 */

	/*
	 * Set the reference bit, if it's currently clear.
	 */
	 andcc	%g7, TD_REF, %g0
	bz,a,pn	%xcc, tl0_immu_miss_set_ref
	 nop

	/*
	 * Load the TTE tag and data into the TLB and retry the instruction.
	 */
	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
	stxa	%g7, [%g0] ASI_ITLB_DATA_IN_REG
	retry

	/*
	 * Advance to the next TTE in this bucket, and check the low bits
	 * of the bucket pointer to see if we've finished the bucket.
	 */
3:	add	%g4, 1 << TTE_SHIFT, %g4
	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
	bnz,pt	%xcc, 2b
	 EMPTY

	/*
	 * See if we just checked the largest page size, and advance to the
	 * next one if not.
	 */
	 cmp	%g2, TS_MAX
	bne,pt	%xcc, 1b
	 add	%g2, 1, %g2

	/*
	 * Not in user TSB, call C code.
	 */
	ba,a	%xcc, tl0_immu_miss_trap
	.align	128
	.endm

ENTRY(tl0_immu_miss_set_ref)
	/*
	 * Set the reference bit.
	 */
	TTE_SET_REF(%g4, %g2, %g3, a, ASI_N)

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g2, 1f
	 nop

	/*
	 * Load the TTE tag and data into the TLB and retry the instruction.
	 */
	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
	stxa	%g2, [%g0] ASI_ITLB_DATA_IN_REG
1:	retry
END(tl0_immu_miss_set_ref)

ENTRY(tl0_immu_miss_trap)
	/*
	 * Put back the contents of the tag access register, in case we
	 * faulted.
	 */
	sethi	%hi(KERNBASE), %g2
	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
	flush	%g2

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Reload the tag access register.
	 */
	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2

	/*
	 * Save the tag access register, and call common trap code.
	 */
	tl0_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	ba	%xcc, tl0_utrap
	 mov	T_INSTRUCTION_MISS, %o0
END(tl0_immu_miss_trap)

	.macro	tl0_dmmu_miss
	/*
	 * Load the context and the virtual page number from the tag access
	 * register.  We ignore the context.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1

	/*
	 * Initialize the page size walker.
	 */
tl1_dmmu_miss_user:
	mov	TS_MIN, %g2

	/*
	 * Loop over all supported page sizes.
	 */

	/*
	 * Compute the page shift for the page size we are currently looking
	 * for.
	 */
1:	add	%g2, %g2, %g3
	add	%g3, %g2, %g3
	add	%g3, PAGE_SHIFT, %g3

	/*
	 * Extract the virtual page number from the contents of the tag
	 * access register.
	 */
	srlx	%g1, %g3, %g3

	/*
	 * Compute the TTE bucket address.
	 */
	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g5
	and	%g3, TSB_BUCKET_MASK, %g4
	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
	add	%g4, %g5, %g4

	/*
	 * Compute the TTE tag target.
	 */
	sllx	%g3, TV_SIZE_BITS, %g3
	or	%g3, %g2, %g3

	/*
	 * Loop over the TTEs in this bucket.
	 */

	/*
	 * Load the TTE.  Note that this instruction may fault, clobbering
	 * the contents of the tag access register, %g5, %g6, and %g7.  We
	 * do not use %g5, and %g6 and %g7 are not used until this instruction
	 * completes successfully.
	 */
2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and that the virtual page numbers match.
	 */
	brgez,pn %g7, 3f
	 cmp	%g3, %g6
	bne,pn	%xcc, 3f
	 EMPTY

	/*
	 * We matched a TTE, load the TLB.
	 */

	/*
	 * Set the reference bit, if it's currently clear.
	 */
	 andcc	%g7, TD_REF, %g0
	bz,a,pn	%xcc, tl0_dmmu_miss_set_ref
	 nop

	/*
	 * Load the TTE tag and data into the TLB and retry the instruction.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	stxa	%g7, [%g0] ASI_DTLB_DATA_IN_REG
	retry

	/*
	 * Advance to the next TTE in this bucket, and check the low bits
	 * of the bucket pointer to see if we've finished the bucket.
	 */
3:	add	%g4, 1 << TTE_SHIFT, %g4
	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
	bnz,pt	%xcc, 2b
	 EMPTY

	/*
	 * See if we just checked the largest page size, and advance to the
	 * next one if not.
	 */
	 cmp	%g2, TS_MAX
	bne,pt	%xcc, 1b
	 add	%g2, 1, %g2

	/*
	 * Not in user TSB, call C code.
	 */
	ba,a	%xcc, tl0_dmmu_miss_trap
	.align	128
	.endm

ENTRY(tl0_dmmu_miss_set_ref)
	/*
	 * Set the reference bit.
	 */
	TTE_SET_REF(%g4, %g2, %g3, a, ASI_N)

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g2, 1f
	 nop

	/*
	 * Load the TTE tag and data into the TLB and retry the instruction.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	stxa	%g2, [%g0] ASI_DTLB_DATA_IN_REG
1:	retry
END(tl0_dmmu_miss_set_ref)

ENTRY(tl0_dmmu_miss_trap)
	/*
	 * Put back the contents of the tag access register, in case we
	 * faulted.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	membar	#Sync

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Check if we actually came from the kernel.
	 */
	rdpr	%tl, %g1
	cmp	%g1, 1
	bgt,a,pn %xcc, 1f
	 nop

	/*
	 * Reload the tag access register.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2

	/*
	 * Save the tag access register and call common trap code.
	 */
	tl0_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	ba	%xcc, tl0_utrap
	 mov	T_DATA_MISS, %o0

	/*
	 * Handle faults during window spill/fill.
	 */
1:	RESUME_SPILLFILL_MMU

	/*
	 * Reload the tag access register.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	ba	%xcc, tl1_trap
	 mov	T_DATA_MISS | T_KERNEL, %o0
END(tl0_dmmu_miss_trap)

	.macro	tl0_dmmu_prot
	ba,a	%xcc, tl0_dmmu_prot_1
	 nop
	.align	128
	.endm

ENTRY(tl0_dmmu_prot_1)
	/*
	 * Load the context and the virtual page number from the tag access
	 * register.  We ignore the context.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1

	/*
	 * Initialize the page size walker.
	 */
tl1_dmmu_prot_user:
	mov	TS_MIN, %g2

	/*
	 * Loop over all supported page sizes.
	 */

	/*
	 * Compute the page shift for the page size we are currently looking
	 * for.
	 */
1:	add	%g2, %g2, %g3
	add	%g3, %g2, %g3
	add	%g3, PAGE_SHIFT, %g3

	/*
	 * Extract the virtual page number from the contents of the tag
	 * access register.
	 */
	srlx	%g1, %g3, %g3

	/*
	 * Compute the TTE bucket address.
	 */
	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g5
	and	%g3, TSB_BUCKET_MASK, %g4
	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
	add	%g4, %g5, %g4

	/*
	 * Compute the TTE tag target.
	 */
	sllx	%g3, TV_SIZE_BITS, %g3
	or	%g3, %g2, %g3

	/*
	 * Loop over the TTEs in this bucket.
	 */

	/*
	 * Load the TTE.  Note that this instruction may fault, clobbering
	 * the contents of the tag access register, %g5, %g6, and %g7.  We
	 * do not use %g5, and %g6 and %g7 are not used until this instruction
	 * completes successfully.
	 */
2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and writable and that the virtual page
	 * numbers match.
	 */
	brgez,pn %g7, 4f
	 andcc	%g7, TD_SW, %g0
	bz,pn	%xcc, 4f
	 cmp	%g3, %g6
	bne,pn	%xcc, 4f
	 nop

	/*
	 * Set the hardware write bit.
	 */
	TTE_SET_W(%g4, %g2, %g3, a, ASI_N)

	/*
	 * Delete the old TLB entry and clear the SFSR.
	 */
	srlx	%g1, PAGE_SHIFT, %g3
	sllx	%g3, PAGE_SHIFT, %g3
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g2, 3f
	 or	%g2, TD_W, %g2

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	stxa	%g2, [%g0] ASI_DTLB_DATA_IN_REG
3:	retry

	/*
	 * Check the low bits to see if we've finished the bucket.
	 */
4:	add	%g4, 1 << TTE_SHIFT, %g4
	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
	bnz,pt	%xcc, 2b
	 EMPTY

	/*
	 * See if we just checked the largest page size, and advance to the
	 * next one if not.
	 */
	 cmp	%g2, TS_MAX
	bne,pt	%xcc, 1b
	 add	%g2, 1, %g2

	/*
	 * Not in user TSB, call C code.
	 */
	ba,a	%xcc, tl0_dmmu_prot_trap
	 nop
END(tl0_dmmu_prot_1)

ENTRY(tl0_dmmu_prot_trap)
	/*
	 * Put back the contents of the tag access register, in case we
	 * faulted.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	membar	#Sync

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Check if we actually came from the kernel.
	 */
	rdpr	%tl, %g1
	cmp	%g1, 1
	bgt,a,pn %xcc, 1f
	 nop

	/*
	 * Load the SFAR, SFSR and TAR.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	/*
	 * Save the MMU registers and call common trap code.
	 */
	tl0_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl0_utrap
	 mov	T_DATA_PROTECTION, %o0

	/*
	 * Handle faults during window spill/fill.
	 */
1:	RESUME_SPILLFILL_MMU_CLR_SFSR

	/*
	 * Load the SFAR, SFSR and TAR.  Clear the SFSR.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl1_trap
	 mov	T_DATA_PROTECTION | T_KERNEL, %o0
END(tl0_dmmu_prot_trap)

	.macro	tl0_spill_0_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_TRAP(T_SPILL)
	RSF_TRAP(T_SPILL)
	.endm

	.macro	tl0_spill_1_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_TRAP(T_SPILL)
	RSF_TRAP(T_SPILL)
	.endm

	.macro	tl0_fill_0_n
	wr	%g0, ASI_AIUP, %asi
	FILL(ldxa, %sp + SPOFF, 8, %asi)
	restored
	retry
	.align	32
	RSF_TRAP(T_FILL)
	RSF_TRAP(T_FILL)
	.endm

	.macro	tl0_fill_1_n
	wr	%g0, ASI_AIUP, %asi
	FILL(lduwa, %sp, 4, %asi)
	restored
	retry
	.align	32
	RSF_TRAP(T_FILL)
	RSF_TRAP(T_FILL)
	.endm
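/*
 * Note: the _0_n variants above spill/fill 64-bit registers at %sp + SPOFF
 * (the V9 stack bias), while the _1_n variants use 32-bit stores/loads at
 * the unbiased %sp, for processes using the 32-bit ABI.
 */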

ENTRY(tl0_sftrap)
	rdpr	%tstate, %g1
	and	%g1, TSTATE_CWP_MASK, %g1
	wrpr	%g1, 0, %cwp
	tl0_split
	clr	%o1
	set	trap, %o2
	ba	%xcc, tl0_trap
	 mov	%g2, %o0
END(tl0_sftrap)

	.macro	tl0_spill_bad	count
	.rept	\count
	sir
	.align	128
	.endr
	.endm

	.macro	tl0_fill_bad	count
	.rept	\count
	sir
	.align	128
	.endr
	.endm

	.macro	tl0_syscall
	tl0_split
	clr	%o1
	set	syscall, %o2
	ba	%xcc, tl0_trap
	 mov	T_SYSCALL, %o0
	.align	32
	.endm

	.macro	tl0_fp_restore
	ba,a	%xcc, tl0_fp_restore
	 nop
	.align	32
	.endm

ENTRY(tl0_fp_restore)
	ldx	[PCB_REG + PCB_FLAGS], %g1
	andn	%g1, PCB_FEF, %g1
	stx	%g1, [PCB_REG + PCB_FLAGS]

	wr	%g0, FPRS_FEF, %fprs
	wr	%g0, ASI_BLK_S, %asi
	ldda	[PCB_REG + PCB_UFP + (0 * 64)] %asi, %f0
	ldda	[PCB_REG + PCB_UFP + (1 * 64)] %asi, %f16
	ldda	[PCB_REG + PCB_UFP + (2 * 64)] %asi, %f32
	ldda	[PCB_REG + PCB_UFP + (3 * 64)] %asi, %f48
	membar	#Sync
	done
END(tl0_fp_restore)

	.macro	tl1_insn_excptn
	wrpr	%g0, PSTATE_ALT, %pstate
	wr	%g0, ASI_IMMU, %asi
	rdpr	%tpc, %g3
	ldxa	[%g0 + AA_IMMU_SFSR] %asi, %g4
	/*
	 * XXX in theory, a store to AA_IMMU_SFSR must be immediately
	 * followed by a DONE, FLUSH or RETRY for USIII.  In practice,
	 * this triggers a RED state exception though.
	 */
	stxa	%g0, [%g0 + AA_IMMU_SFSR] %asi
	membar	#Sync
	ba	%xcc, tl1_insn_exceptn_trap
	 mov	T_INSTRUCTION_EXCEPTION | T_KERNEL, %g2
	.align	32
	.endm

ENTRY(tl1_insn_exceptn_trap)
	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl1_trap
	 mov	%g2, %o0
END(tl1_insn_exceptn_trap)

	.macro	tl1_fp_disabled
	ba,a	%xcc, tl1_fp_disabled_1
	 nop
	.align	32
	.endm

ENTRY(tl1_fp_disabled_1)
	rdpr	%tpc, %g1
	set	fpu_fault_begin, %g2
	sub	%g1, %g2, %g1
	cmp	%g1, fpu_fault_size
	bgeu,a,pn %xcc, 1f
	 nop

	wr	%g0, FPRS_FEF, %fprs
	wr	%g0, ASI_BLK_S, %asi
	ldda	[PCB_REG + PCB_KFP + (0 * 64)] %asi, %f0
	ldda	[PCB_REG + PCB_KFP + (1 * 64)] %asi, %f16
	ldda	[PCB_REG + PCB_KFP + (2 * 64)] %asi, %f32
	ldda	[PCB_REG + PCB_KFP + (3 * 64)] %asi, %f48
	membar	#Sync
	retry

1:	tl1_split
	clr	%o1
	set	trap, %o2
	ba	%xcc, tl1_trap
	 mov	T_FP_DISABLED | T_KERNEL, %o0
END(tl1_fp_disabled_1)

	.macro	tl1_data_excptn
	wrpr	%g0, PSTATE_ALT, %pstate
	ba,a	%xcc, tl1_data_excptn_trap
	 nop
	.align	32
	.endm

ENTRY(tl1_data_excptn_trap)
	RESUME_SPILLFILL_MMU_CLR_SFSR
	ba	%xcc, tl1_sfsr_trap
	 mov	T_DATA_EXCEPTION | T_KERNEL, %g2
END(tl1_data_excptn_trap)

	.macro	tl1_align
	wrpr	%g0, PSTATE_ALT, %pstate
	ba,a	%xcc, tl1_align_trap
	 nop
	.align	32
	.endm

ENTRY(tl1_align_trap)
	RESUME_SPILLFILL_ALIGN
	ba	%xcc, tl1_sfsr_trap
	 mov	T_MEM_ADDRESS_NOT_ALIGNED | T_KERNEL, %g2
END(tl1_align_trap)

ENTRY(tl1_sfsr_trap)
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl1_trap
	 mov	%g2, %o0
END(tl1_sfsr_trap)

	.macro	tl1_intr level, mask
	tl1_split
	set	\mask, %o1
	ba	%xcc, tl1_intr
	 mov	\level, %o0
	.align	32
	.endm

	.macro	tl1_intr_level
	INTR_LEVEL(1)
	.endm

	.macro	tl1_immu_miss
	/*
	 * Load the context and the virtual page number from the tag access
	 * register.  We ignore the context.
	 */
	wr	%g0, ASI_IMMU, %asi
	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g5

	/*
	 * Compute the address of the TTE.  The TSB mask and address of the
	 * TSB are patched at startup.
	 */
	.globl	tl1_immu_miss_patch_tsb_1
tl1_immu_miss_patch_tsb_1:
	sethi	%uhi(TSB_KERNEL), %g6
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_immu_miss_patch_tsb_mask_1
tl1_immu_miss_patch_tsb_mask_1:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6

	srlx	%g5, TAR_VPN_SHIFT, %g5
	and	%g5, %g6, %g6
	sllx	%g6, TTE_SHIFT, %g6
	add	%g6, %g7, %g6

	/*
	 * Load the TTE.
	 */
	.globl	tl1_immu_miss_patch_quad_ldd_1
tl1_immu_miss_patch_quad_ldd_1:
	ldda	[%g6] TSB_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and executable and that the virtual page
	 * numbers match.
	 */
	brgez,pn %g7, tl1_immu_miss_trap
	 andcc	%g7, TD_EXEC, %g0
	bz,pn	%xcc, tl1_immu_miss_trap
	 srlx	%g6, TV_SIZE_BITS, %g6
	cmp	%g5, %g6
	bne,pn	%xcc, tl1_immu_miss_trap
	 EMPTY

	/*
	 * Set the reference bit if it's currently clear.
	 */
	 andcc	%g7, TD_REF, %g0
	bz,a,pn	%xcc, tl1_immu_miss_set_ref
	 nop

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g7, [%g0] ASI_ITLB_DATA_IN_REG
	retry
	.align	128
	.endm

ENTRY(tl1_immu_miss_set_ref)
	/*
	 * Recompute the TTE address, which we clobbered loading the TTE.
	 * The TSB mask and address of the TSB are patched at startup.
	 */
	.globl	tl1_immu_miss_patch_tsb_2
tl1_immu_miss_patch_tsb_2:
	sethi	%uhi(TSB_KERNEL), %g6
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_immu_miss_patch_tsb_mask_2
tl1_immu_miss_patch_tsb_mask_2:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6

	and	%g5, %g6, %g5
	sllx	%g5, TTE_SHIFT, %g5
	add	%g5, %g7, %g5

	/*
	 * Set the reference bit.
	 */
	.globl	tl1_immu_miss_patch_asi_1
tl1_immu_miss_patch_asi_1:
	wr	%g0, TSB_ASI, %asi
	TTE_SET_REF(%g5, %g6, %g7, a, %asi)

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g6, 1f
	 nop

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g6, [%g0] ASI_ITLB_DATA_IN_REG
1:	retry
END(tl1_immu_miss_set_ref)

ENTRY(tl1_immu_miss_trap)
	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	ba	%xcc, tl1_trap
	 mov	T_INSTRUCTION_MISS | T_KERNEL, %o0
END(tl1_immu_miss_trap)

	.macro	tl1_dmmu_miss
	/*
	 * Load the context and the virtual page number from the tag access
	 * register.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g5

	/*
	 * Extract the context from the contents of the tag access register.
	 * If it's non-zero this is a fault on a user address.  Note that the
	 * faulting address is passed in %g1.
	 */
	sllx	%g5, 64 - TAR_VPN_SHIFT, %g6
	brnz,a,pn %g6, tl1_dmmu_miss_user
	 mov	%g5, %g1

	/*
	 * Check for the direct mapped physical region.  These addresses have
	 * the high bit set so they are negative.
	 */
	brlz,pn %g5, tl1_dmmu_miss_direct
	 EMPTY

	/*
	 * Compute the address of the TTE.  The TSB mask and address of the
	 * TSB are patched at startup.
	 */
	.globl	tl1_dmmu_miss_patch_tsb_1
tl1_dmmu_miss_patch_tsb_1:
	sethi	%uhi(TSB_KERNEL), %g6
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_dmmu_miss_patch_tsb_mask_1
tl1_dmmu_miss_patch_tsb_mask_1:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6

	srlx	%g5, TAR_VPN_SHIFT, %g5
	and	%g5, %g6, %g6
	sllx	%g6, TTE_SHIFT, %g6
	add	%g6, %g7, %g6

	/*
	 * Load the TTE.
	 */
	.globl	tl1_dmmu_miss_patch_quad_ldd_1
tl1_dmmu_miss_patch_quad_ldd_1:
	ldda	[%g6] TSB_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and that the virtual page numbers match.
	 */
	brgez,pn %g7, tl1_dmmu_miss_trap
	 srlx	%g6, TV_SIZE_BITS, %g6
	cmp	%g5, %g6
	bne,pn %xcc, tl1_dmmu_miss_trap
	 EMPTY

	/*
	 * Set the reference bit if it's currently clear.
	 */
	 andcc	%g7, TD_REF, %g0
	bz,a,pt	%xcc, tl1_dmmu_miss_set_ref
	 nop

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g7, [%g0] ASI_DTLB_DATA_IN_REG
	retry
	.align	128
	.endm

ENTRY(tl1_dmmu_miss_set_ref)
	/*
	 * Recompute the TTE address, which we clobbered loading the TTE.
	 * The TSB mask and address of the TSB are patched at startup.
	 */
	.globl	tl1_dmmu_miss_patch_tsb_2
tl1_dmmu_miss_patch_tsb_2:
	sethi	%uhi(TSB_KERNEL), %g6
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_dmmu_miss_patch_tsb_mask_2
tl1_dmmu_miss_patch_tsb_mask_2:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6

	and	%g5, %g6, %g5
	sllx	%g5, TTE_SHIFT, %g5
	add	%g5, %g7, %g5

	/*
	 * Set the reference bit.
	 */
	.globl	tl1_dmmu_miss_patch_asi_1
tl1_dmmu_miss_patch_asi_1:
	wr	%g0, TSB_ASI, %asi
	TTE_SET_REF(%g5, %g6, %g7, a, %asi)

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g6, 1f
	 nop

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g6, [%g0] ASI_DTLB_DATA_IN_REG
1:	retry
END(tl1_dmmu_miss_set_ref)

ENTRY(tl1_dmmu_miss_trap)
	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2

	KSTACK_CHECK

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	ba	%xcc, tl1_trap
	 mov	T_DATA_MISS | T_KERNEL, %o0
END(tl1_dmmu_miss_trap)

ENTRY(tl1_dmmu_miss_direct)
	/*
	 * Mask off the high bits of the virtual address to get the physical
	 * address, and or in the TTE bits.  The virtual address bits that
	 * correspond to the TTE valid and page size bits are left set, so
	 * they don't have to be included in the TTE bits below.  We know they
	 * are set because the virtual address is in the upper va hole.
	 * NB: if we are taking advantage of the ASI_ATOMIC_QUAD_LDD_PHYS
	 * and we get a miss on the directly accessed kernel TSB, we must not
	 * set TD_CV, in order to access it uniformly, bypassing the D$.
	 */
	setx	TLB_DIRECT_ADDRESS_MASK, %g7, %g4
	and	%g5, %g4, %g4
	setx	TLB_DIRECT_TO_TTE_MASK, %g7, %g6
	and	%g5, %g6, %g5
	.globl	tl1_dmmu_miss_direct_patch_tsb_phys_1
tl1_dmmu_miss_direct_patch_tsb_phys_1:
	sethi	%uhi(TSB_KERNEL_PHYS), %g3
	or	%g3, %ulo(TSB_KERNEL_PHYS), %g3
	sllx	%g3, 32, %g3
	sethi	%hi(TSB_KERNEL_PHYS), %g7
	or	%g7, %g3, %g7
	cmp	%g4, %g7
	bl,pt	%xcc, 1f
	 or	%g5, TD_CP | TD_W, %g5
	.globl	tl1_dmmu_miss_direct_patch_tsb_phys_end_1
tl1_dmmu_miss_direct_patch_tsb_phys_end_1:
	sethi	%uhi(TSB_KERNEL_PHYS_END), %g3
	or	%g3, %ulo(TSB_KERNEL_PHYS_END), %g3
	sllx	%g3, 32, %g3
	sethi	%hi(TSB_KERNEL_PHYS_END), %g7
	or	%g7, %g3, %g7
	cmp	%g4, %g7
	bg,a,pt	%xcc, 1f
	 nop
	ba,pt	%xcc, 2f
	 nop
1:	or	%g5, TD_CV, %g5

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
2:	stxa	%g5, [%g0] ASI_DTLB_DATA_IN_REG
	retry
END(tl1_dmmu_miss_direct)

	.macro	tl1_dmmu_prot
	ba,a	%xcc, tl1_dmmu_prot_1
	 nop
	.align	128
	.endm

ENTRY(tl1_dmmu_prot_1)
	/*
	 * Load the context and the virtual page number from the tag access
	 * register.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g5

	/*
	 * Extract the context from the contents of the tag access register.
	 * If it's non-zero this is a fault on a user address.  Note that the
	 * faulting address is passed in %g1.
	 */
	sllx	%g5, 64 - TAR_VPN_SHIFT, %g6
	brnz,a,pn %g6, tl1_dmmu_prot_user
	 mov	%g5, %g1

	/*
	 * Compute the address of the TTE.  The TSB mask and address of the
	 * TSB are patched at startup.
	 */
	.globl	tl1_dmmu_prot_patch_tsb_1
tl1_dmmu_prot_patch_tsb_1:
	sethi	%uhi(TSB_KERNEL), %g6
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_dmmu_prot_patch_tsb_mask_1
tl1_dmmu_prot_patch_tsb_mask_1:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6

	srlx	%g5, TAR_VPN_SHIFT, %g5
	and	%g5, %g6, %g6
	sllx	%g6, TTE_SHIFT, %g6
	add	%g6, %g7, %g6

	/*
	 * Load the TTE.
	 */
	.globl	tl1_dmmu_prot_patch_quad_ldd_1
tl1_dmmu_prot_patch_quad_ldd_1:
	ldda	[%g6] TSB_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and writable and that the virtual page
	 * numbers match.
	 */
	brgez,pn %g7, tl1_dmmu_prot_trap
	 andcc	%g7, TD_SW, %g0
	bz,pn	%xcc, tl1_dmmu_prot_trap
	 srlx	%g6, TV_SIZE_BITS, %g6
	cmp	%g5, %g6
	bne,pn	%xcc, tl1_dmmu_prot_trap
	 EMPTY

	/*
	 * Delete the old TLB entry and clear the SFSR.
	 */
	 sllx	%g5, TAR_VPN_SHIFT, %g6
	or	%g6, TLB_DEMAP_NUCLEUS, %g6
	stxa	%g0, [%g6] ASI_DMMU_DEMAP
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	/*
	 * Recompute the TTE address, which we clobbered loading the TTE.
	 * The TSB mask and address of the TSB are patched at startup.
	 */
	.globl	tl1_dmmu_prot_patch_tsb_2
tl1_dmmu_prot_patch_tsb_2:
	sethi	%uhi(TSB_KERNEL), %g6
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_dmmu_prot_patch_tsb_mask_2
tl1_dmmu_prot_patch_tsb_mask_2:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6
	and	%g5, %g6, %g5
	sllx	%g5, TTE_SHIFT, %g5
	add	%g5, %g7, %g5

	/*
	 * Set the hardware write bit.
	 */
	.globl	tl1_dmmu_prot_patch_asi_1
tl1_dmmu_prot_patch_asi_1:
	wr	%g0, TSB_ASI, %asi
	TTE_SET_W(%g5, %g6, %g7, a, %asi)

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g6, 1f
	 or	%g6, TD_W, %g6

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g6, [%g0] ASI_DTLB_DATA_IN_REG
1:	retry
END(tl1_dmmu_prot_1)

ENTRY(tl1_dmmu_prot_trap)
	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Load the SFAR, SFSR and TAR.  Clear the SFSR.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl1_trap
	 mov	T_DATA_PROTECTION | T_KERNEL, %o0
END(tl1_dmmu_prot_trap)

	.macro	tl1_spill_0_n
	SPILL(stx, %sp + SPOFF, 8, EMPTY)
	saved
	retry
	.align	32
	RSF_FATAL(T_SPILL)
	RSF_FATAL(T_SPILL)
	.endm

	.macro	tl1_spill_2_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_3_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_7_n
	btst	1, %sp
	bnz,a,pn %xcc, tl1_spill_0_n
	 nop
	srl	%sp, 0, %sp
	SPILL(stw, %sp, 4, EMPTY)
	saved
	retry
	.align	32
	RSF_FATAL(T_SPILL)
	RSF_FATAL(T_SPILL)
	.endm

	.macro	tl1_spill_0_o
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_1_o
	wr	%g0, ASI_AIUP, %asi
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_2_o
	RSF_SPILL_TOPCB
	.align	128
	.endm

	.macro	tl1_fill_0_n
	FILL(ldx, %sp + SPOFF, 8, EMPTY)
	restored
	retry
	.align	32
	RSF_FATAL(T_FILL)
	RSF_FATAL(T_FILL)
	.endm

	.macro	tl1_fill_2_n
	wr	%g0, ASI_AIUP, %asi
	FILL(ldxa, %sp + SPOFF, 8, %asi)
	restored
	retry
	.align 32
	RSF_FILL_MAGIC
	RSF_FILL_MAGIC
	.endm

	.macro	tl1_fill_3_n
	wr	%g0, ASI_AIUP, %asi
	FILL(lduwa, %sp, 4, %asi)
	restored
	retry
	.align 32
	RSF_FILL_MAGIC
	RSF_FILL_MAGIC
	.endm

	.macro	tl1_fill_7_n
	btst	1, %sp
	bnz,a,pt %xcc, tl1_fill_0_n
	 nop
	srl	%sp, 0, %sp
	FILL(lduw, %sp, 4, EMPTY)
	restored
	retry
	.align	32
	RSF_FATAL(T_FILL)
	RSF_FATAL(T_FILL)
	.endm

/*
 * This is used on kernel entry to spill windows that are still occupied
 * with user data into the pcb.
 */
ENTRY(tl1_spill_topcb)
	wrpr	%g0, PSTATE_ALT, %pstate

	/* Free some globals for our use. */
	dec	24, ASP_REG
	stx	%g1, [ASP_REG + 0]
	stx	%g2, [ASP_REG + 8]
	stx	%g3, [ASP_REG + 16]

	ldx	[PCB_REG + PCB_NSAVED], %g1

	sllx	%g1, PTR_SHIFT, %g2
	add	%g2, PCB_REG, %g2
	stx	%sp, [%g2 + PCB_RWSP]

	sllx	%g1, RW_SHIFT, %g2
	add	%g2, PCB_REG, %g2
	SPILL(stx, %g2 + PCB_RW, 8, EMPTY)

	inc	%g1
	stx	%g1, [PCB_REG + PCB_NSAVED]

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl1_spill_topcb: pc=%#lx npc=%#lx sp=%#lx nsaved=%d"
	   , %g1, %g2, %g3, 7, 8, 9)
	rdpr	%tpc, %g2
	stx	%g2, [%g1 + KTR_PARM1]
	rdpr	%tnpc, %g2
	stx	%g2, [%g1 + KTR_PARM2]
	stx	%sp, [%g1 + KTR_PARM3]
	ldx	[PCB_REG + PCB_NSAVED], %g2
	stx	%g2, [%g1 + KTR_PARM4]
9:
#endif

	saved

	ldx	[ASP_REG + 16], %g3
	ldx	[ASP_REG + 8], %g2
	ldx	[ASP_REG + 0], %g1
	inc	24, ASP_REG
	retry
END(tl1_spill_topcb)

	.macro	tl1_spill_bad	count
	.rept	\count
	sir
	.align	128
	.endr
	.endm

	.macro	tl1_fill_bad	count
	.rept	\count
	sir
	.align	128
	.endr
	.endm

	.macro	tl1_soft	count
	.rept	\count
	tl1_gen	T_SOFT | T_KERNEL
	.endr
	.endm

	.sect	.trap
	.globl	tl_trap_begin
tl_trap_begin:
	nop

	.align	0x8000
	.globl	tl0_base

tl0_base:
	tl0_reserved	8				! 0x0-0x7
tl0_insn_excptn:
	tl0_insn_excptn					! 0x8
	tl0_reserved	1				! 0x9
tl0_insn_error:
	tl0_gen		T_INSTRUCTION_ERROR		! 0xa
	tl0_reserved	5				! 0xb-0xf
tl0_insn_illegal:
	tl0_gen		T_ILLEGAL_INSTRUCTION		! 0x10
tl0_priv_opcode:
	tl0_gen		T_PRIVILEGED_OPCODE		! 0x11
	tl0_reserved	14				! 0x12-0x1f
tl0_fp_disabled:
	tl0_gen		T_FP_DISABLED			! 0x20
tl0_fp_ieee:
	tl0_gen		T_FP_EXCEPTION_IEEE_754		! 0x21
tl0_fp_other:
	tl0_gen		T_FP_EXCEPTION_OTHER		! 0x22
tl0_tag_ovflw:
	tl0_gen		T_TAG_OVERFLOW			! 0x23
tl0_clean_window:
	clean_window					! 0x24
tl0_divide:
	tl0_gen		T_DIVISION_BY_ZERO		! 0x28
	tl0_reserved	7				! 0x29-0x2f
tl0_data_excptn:
	tl0_data_excptn					! 0x30
	tl0_reserved	1				! 0x31
tl0_data_error:
	tl0_gen		T_DATA_ERROR			! 0x32
	tl0_reserved	1				! 0x33
tl0_align:
	tl0_align					! 0x34
tl0_align_lddf:
	tl0_gen		T_RESERVED			! 0x35
tl0_align_stdf:
	tl0_gen		T_RESERVED			! 0x36
tl0_priv_action:
	tl0_gen		T_PRIVILEGED_ACTION		! 0x37
	tl0_reserved	9				! 0x38-0x40
tl0_intr_level:
	tl0_intr_level					! 0x41-0x4f
	tl0_reserved	16				! 0x50-0x5f
tl0_intr_vector:
	intr_vector					! 0x60
tl0_watch_phys:
	tl0_gen		T_PA_WATCHPOINT			! 0x61
tl0_watch_virt:
	tl0_gen		T_VA_WATCHPOINT			! 0x62
tl0_ecc:
	tl0_gen		T_CORRECTED_ECC_ERROR		! 0x63
tl0_immu_miss:
	tl0_immu_miss					! 0x64
tl0_dmmu_miss:
	tl0_dmmu_miss					! 0x68
tl0_dmmu_prot:
	tl0_dmmu_prot					! 0x6c
	tl0_reserved	16				! 0x70-0x7f
tl0_spill_0_n:
	tl0_spill_0_n					! 0x80
tl0_spill_1_n:
	tl0_spill_1_n					! 0x84
	tl0_spill_bad	14				! 0x88-0xbf
tl0_fill_0_n:
	tl0_fill_0_n					! 0xc0
tl0_fill_1_n:
	tl0_fill_1_n					! 0xc4
	tl0_fill_bad	14				! 0xc8-0xff
tl0_soft:
	tl0_gen		T_SYSCALL			! 0x100
	tl0_gen		T_BREAKPOINT			! 0x101
	tl0_gen		T_DIVISION_BY_ZERO		! 0x102
	tl0_reserved	1				! 0x103
	tl0_gen		T_CLEAN_WINDOW			! 0x104
	tl0_gen		T_RANGE_CHECK			! 0x105
	tl0_gen		T_FIX_ALIGNMENT			! 0x106
	tl0_gen		T_INTEGER_OVERFLOW		! 0x107
	tl0_gen		T_SYSCALL			! 0x108
	tl0_gen		T_SYSCALL			! 0x109
	tl0_fp_restore					! 0x10a
	tl0_reserved	5				! 0x10b-0x10f
	tl0_gen		T_TRAP_INSTRUCTION_16		! 0x110
	tl0_gen		T_TRAP_INSTRUCTION_17		! 0x111
	tl0_gen		T_TRAP_INSTRUCTION_18		! 0x112
	tl0_gen		T_TRAP_INSTRUCTION_19		! 0x113
	tl0_gen		T_TRAP_INSTRUCTION_20		! 0x114
	tl0_gen		T_TRAP_INSTRUCTION_21		! 0x115
	tl0_gen		T_TRAP_INSTRUCTION_22		! 0x116
	tl0_gen		T_TRAP_INSTRUCTION_23		! 0x117
	tl0_gen		T_TRAP_INSTRUCTION_24		! 0x118
	tl0_gen		T_TRAP_INSTRUCTION_25		! 0x119
	tl0_gen		T_TRAP_INSTRUCTION_26		! 0x11a
	tl0_gen		T_TRAP_INSTRUCTION_27		! 0x11b
	tl0_gen		T_TRAP_INSTRUCTION_28		! 0x11c
	tl0_gen		T_TRAP_INSTRUCTION_29		! 0x11d
	tl0_gen		T_TRAP_INSTRUCTION_30		! 0x11e
	tl0_gen		T_TRAP_INSTRUCTION_31		! 0x11f
	tl0_reserved	32				! 0x120-0x13f
	tl0_gen		T_SYSCALL			! 0x140
	tl0_syscall					! 0x141
	tl0_gen		T_SYSCALL			! 0x142
	tl0_gen		T_SYSCALL			! 0x143
	tl0_reserved	188				! 0x144-0x1ff

tl1_base:
	tl1_reserved	8				! 0x200-0x207
tl1_insn_excptn:
	tl1_insn_excptn					! 0x208
	tl1_reserved	1				! 0x209
tl1_insn_error:
	tl1_gen		T_INSTRUCTION_ERROR		! 0x20a
	tl1_reserved	5				! 0x20b-0x20f
tl1_insn_illegal:
	tl1_gen		T_ILLEGAL_INSTRUCTION		! 0x210
tl1_priv_opcode:
	tl1_gen		T_PRIVILEGED_OPCODE		! 0x211
	tl1_reserved	14				! 0x212-0x21f
tl1_fp_disabled:
	tl1_fp_disabled					! 0x220
tl1_fp_ieee:
	tl1_gen		T_FP_EXCEPTION_IEEE_754		! 0x221
tl1_fp_other:
	tl1_gen		T_FP_EXCEPTION_OTHER		! 0x222
tl1_tag_ovflw:
	tl1_gen		T_TAG_OVERFLOW			! 0x223
tl1_clean_window:
	clean_window					! 0x224
tl1_divide:
	tl1_gen		T_DIVISION_BY_ZERO		! 0x228
	tl1_reserved	7				! 0x229-0x22f
tl1_data_excptn:
	tl1_data_excptn					! 0x230
	tl1_reserved	1				! 0x231
tl1_data_error:
	tl1_gen		T_DATA_ERROR			! 0x232
	tl1_reserved	1				! 0x233
tl1_align:
	tl1_align					! 0x234
tl1_align_lddf:
	tl1_gen		T_RESERVED			! 0x235
tl1_align_stdf:
	tl1_gen		T_RESERVED			! 0x236
tl1_priv_action:
	tl1_gen		T_PRIVILEGED_ACTION		! 0x237
	tl1_reserved	9				! 0x238-0x240
tl1_intr_level:
	tl1_intr_level					! 0x241-0x24f
	tl1_reserved	16				! 0x250-0x25f
tl1_intr_vector:
	intr_vector					! 0x260
tl1_watch_phys:
	tl1_gen		T_PA_WATCHPOINT			! 0x261
tl1_watch_virt:
	tl1_gen		T_VA_WATCHPOINT			! 0x262
tl1_ecc:
	tl1_gen		T_CORRECTED_ECC_ERROR		! 0x263
tl1_immu_miss:
	tl1_immu_miss					! 0x264
tl1_dmmu_miss:
	tl1_dmmu_miss					! 0x268
tl1_dmmu_prot:
	tl1_dmmu_prot					! 0x26c
	tl1_reserved	16				! 0x270-0x27f
tl1_spill_0_n:
	tl1_spill_0_n					! 0x280
	tl1_spill_bad	1				! 0x284
tl1_spill_2_n:
	tl1_spill_2_n					! 0x288
tl1_spill_3_n:
	tl1_spill_3_n					! 0x28c
	tl1_spill_bad	3				! 0x290-0x29b
tl1_spill_7_n:
	tl1_spill_7_n					! 0x29c
tl1_spill_0_o:
	tl1_spill_0_o					! 0x2a0
tl1_spill_1_o:
	tl1_spill_1_o					! 0x2a4
tl1_spill_2_o:
	tl1_spill_2_o					! 0x2a8
	tl1_spill_bad	5				! 0x2ac-0x2bf
tl1_fill_0_n:
	tl1_fill_0_n					! 0x2c0
	tl1_fill_bad	1				! 0x2c4
tl1_fill_2_n:
	tl1_fill_2_n					! 0x2c8
tl1_fill_3_n:
	tl1_fill_3_n					! 0x2cc
	tl1_fill_bad	3				! 0x2d0-0x2db
tl1_fill_7_n:
	tl1_fill_7_n					! 0x2dc
	tl1_fill_bad	8				! 0x2e0-0x2ff
	tl1_reserved	1				! 0x300
tl1_breakpoint:
	tl1_gen		T_BREAKPOINT			! 0x301
	tl1_gen		T_RSTRWP_PHYS			! 0x302
	tl1_gen		T_RSTRWP_VIRT			! 0x303
	tl1_reserved	252				! 0x304-0x3ff

	.globl	tl_trap_end
tl_trap_end:
	nop

/*
 * User trap entry point
 *
 * void tl0_utrap(u_long type, u_long o1, u_long o2, u_long tar, u_long sfar,
 *     u_long sfsr)
 *
 * This handles redirecting a trap back to usermode as a user trap.  The user
 * program must have first registered a trap handler with the kernel using
 * sysarch(SPARC_UTRAP_INSTALL).  The trap handler is passed enough state
 * for it to return to the trapping code directly; it will not return through
 * the kernel.  The trap type is passed in %o0; all out registers must be
 * passed through to tl0_trap or to usermode untouched.  Note that the
 * parameters passed in out registers may be used by the user trap handler.
 * Do not change the registers they are passed in or you will break the ABI.
 *
 * If the trap type allows user traps, set up state to execute the user trap
 * handler and bounce back to usermode; otherwise branch to tl0_trap.
 */
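/*
 * Dispatch sketch: the code below looks up
 * curthread->td_proc->p_md.md_utrap[type] and, if non-NULL, returns to that
 * handler directly, with %l6/%l7 holding the trapping %tpc/%tnpc and %l4/%l5
 * carrying %fsr and %tstate as described in the comments that follow.
 */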
ENTRY(tl0_utrap)
	/*
	 * Check if the trap type allows user traps.
	 */
	cmp	%o0, UT_MAX
	bge,a,pt %xcc, tl0_trap
	 nop

	/*
	 * Load the user trap handler from the utrap table.
	 */
	ldx	[PCPU(CURTHREAD)], %l0
	ldx	[%l0 + TD_PROC], %l0
	ldx	[%l0 + P_MD + MD_UTRAP], %l0
	brz,pt	%l0, tl0_trap
	 sllx	%o0, PTR_SHIFT, %l1
	ldx	[%l0 + %l1], %l0
	brz,a,pt %l0, tl0_trap
	 nop

	/*
	 * If the save we did on entry to the kernel had to spill a window
	 * to the pcb, pretend we took a spill trap instead.  Any windows
	 * that are in the pcb must be copied out or the fill handler will
	 * not be able to find them, since the user trap handler returns
	 * directly to the trapping code.  Note that we only support precise
	 * user traps, which implies that the condition that caused the trap
	 * in the first place is still valid, so it will occur again when we
	 * re-execute the trapping instruction.
	 */
	ldx	[PCB_REG + PCB_NSAVED], %l1
	brnz,a,pn %l1, tl0_trap
	 mov	T_SPILL, %o0

	/*
	 * Pass %fsr in %l4, %tstate in %l5, %tpc in %l6 and %tnpc in %l7.
	 * The ABI specifies only %l6 and %l7, but we need to pass %fsr or
	 * it may be clobbered by an interrupt before the user trap code
	 * can read it, and we must pass %tstate in order to restore %ccr
	 * and %asi.  The %fsr must be stored to memory, so we use the
	 * temporary stack for that.
	 */
	rd	%fprs, %l1
	or	%l1, FPRS_FEF, %l2
	wr	%l2, 0, %fprs
	dec	8, ASP_REG
	stx	%fsr, [ASP_REG]
	ldx	[ASP_REG], %l4
	inc	8, ASP_REG
	wr	%l1, 0, %fprs

	rdpr	%tstate, %l5
	rdpr	%tpc, %l6
	rdpr	%tnpc, %l7

	/*
	 * Set up %tnpc to return to.
	 */
	wrpr	%l0, 0, %tnpc

	/*
	 * Set up %wstate for return and clear WSTATE_TRANSITION.
	 */
	rdpr	%wstate, %l1
	and	%l1, WSTATE_NORMAL_MASK, %l1
	wrpr	%l1, 0, %wstate

	/*
	 * Set up %tstate for return; change the saved cwp to point to the
	 * current window instead of the window at the time of the trap.
	 */
	andn	%l5, TSTATE_CWP_MASK, %l1
	rdpr	%cwp, %l2
	wrpr	%l1, %l2, %tstate

	/*
	 * Set up %sp.  Userland processes will crash if this is not set up.
	 */
	sub	%fp, CCFSZ, %sp

	/*
	 * Execute the user trap handler.
	 */
	done
END(tl0_utrap)

/*
 * (Real) User trap entry point
 *
 * void tl0_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
 *     u_int sfsr)
 *
 * The following setup has been performed:
 *	- the windows have been split and the active user window has been saved
 *	  (maybe just to the pcb)
 *	- we are on alternate globals and interrupts are disabled
 *
 * We switch to the kernel stack, build a trapframe, switch to normal
 * globals, enable interrupts and call trap.
 *
 * NOTE: We must be very careful setting up the per-cpu pointer.  We know that
 * it has been pre-set in alternate globals, so we read it from there and set
 * up the normal %g7 *before* enabling interrupts.  This avoids any
 * possibility of cpu migration and using the wrong pcpup.
 */
ENTRY(tl0_trap)
	/*
	 * Force kernel store order.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

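	/*
	 * Read the trap state; %tstate, %tpc and %tnpc are per-trap-level
	 * registers and would be clobbered by a nested trap, so capture
	 * them first.
	 */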
	rdpr	%tstate, %l0
	rdpr	%tpc, %l1
	rdpr	%tnpc, %l2
	rd	%y, %l3
	rd	%fprs, %l4
	rdpr	%wstate, %l5

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP,
	    "tl0_trap: td=%p type=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	stx	%o0, [%g1 + KTR_PARM2]
	rdpr	%pil, %g2
	stx	%g2, [%g1 + KTR_PARM3]
	stx	%l1, [%g1 + KTR_PARM4]
	stx	%l2, [%g1 + KTR_PARM5]
	stx	%i6, [%g1 + KTR_PARM6]
9:
#endif

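	/*
	 * Switch to the kernel window state.  The user's window state
	 * moves to the "other" field of %wstate so that any user windows
	 * still in the cpu are handled by the user spill/fill handlers,
	 * and %canrestore is transferred to %otherwin to match.
	 */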
1:	and	%l5, WSTATE_NORMAL_MASK, %l5
	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
	wrpr	%l5, WSTATE_KERNEL, %wstate
	rdpr	%canrestore, %l6
	wrpr	%l6, 0, %otherwin
	wrpr	%g0, 0, %canrestore

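	/*
	 * Carve out a trapframe on the kernel stack, directly below the
	 * pcb, and save the trap arguments and state registers in it.
	 */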
	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp

	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
	stx	%o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
	stx	%o3, [%sp + SPOFF + CCFSZ + TF_TAR]
	stx	%o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
	stx	%o5, [%sp + SPOFF + CCFSZ + TF_SFSR]

	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
	stx	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
	stx	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
	stx	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]

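	/*
	 * Access to %fsr and %gsr requires the FPU to be enabled, so set
	 * FPRS_FEF while saving them; the user's %fprs was saved in %l4
	 * above.
	 */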
	wr	%g0, FPRS_FEF, %fprs
	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
	rd	%gsr, %l6
	stx	%l6, [%sp + SPOFF + CCFSZ + TF_GSR]
	wr	%g0, 0, %fprs

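	/*
	 * PCB_REG and PCPU_REG live in %g6 and %g7 of the normal globals
	 * in the kernel.  Stash the pre-set values from the alternate
	 * globals in locals, switch to normal globals to save the user's
	 * %g6 and %g7, then re-establish them before interrupts can be
	 * enabled.
	 */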
	mov	PCB_REG, %l0
	mov	PCPU_REG, %l1
	wrpr	%g0, PSTATE_NORMAL, %pstate

	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]

	mov	%l0, PCB_REG
	mov	%l1, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate

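	/*
	 * The user's outs were renamed to our ins by the save performed
	 * at trap entry; save them, and the remaining globals, in the
	 * trapframe.
	 */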
	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]

	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]

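	/*
	 * Fake up a call: with %o7 set to tl0_ret - 8, the trap handler
	 * returns straight to tl0_ret.  The trapframe pointer is passed
	 * as the first argument.
	 */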
	set	tl0_ret - 8, %o7
	jmpl	%o2, %g0
	 add	%sp, CCFSZ + SPOFF, %o0
END(tl0_trap)

/*
 * void tl0_intr(u_int level, u_int mask)
 */
ENTRY(tl0_intr)
	/*
	 * Force kernel store order.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	rdpr	%tstate, %l0
	rdpr	%tpc, %l1
	rdpr	%tnpc, %l2
	rd	%y, %l3
	rd	%fprs, %l4
	rdpr	%wstate, %l5

#if KTR_COMPILE & KTR_INTR
	CATR(KTR_INTR,
	    "tl0_intr: td=%p level=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	stx	%o0, [%g1 + KTR_PARM2]
	rdpr	%pil, %g2
	stx	%g2, [%g1 + KTR_PARM3]
	stx	%l1, [%g1 + KTR_PARM4]
	stx	%l2, [%g1 + KTR_PARM5]
	stx	%i6, [%g1 + KTR_PARM6]
9:
#endif

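	/*
	 * Raise %pil to the level of this interrupt and clear its bit in
	 * the soft interrupt register so that it is not delivered again.
	 */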
	wrpr	%o0, 0, %pil
	wr	%o1, 0, %clear_softint

	and	%l5, WSTATE_NORMAL_MASK, %l5
	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
	wrpr	%l5, WSTATE_KERNEL, %wstate
	rdpr	%canrestore, %l6
	wrpr	%l6, 0, %otherwin
	wrpr	%g0, 0, %canrestore

	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp

	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
	stx	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
	stx	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
	stx	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]

	wr	%g0, FPRS_FEF, %fprs
	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
	rd	%gsr, %l6
	stx	%l6, [%sp + SPOFF + CCFSZ + TF_GSR]
	wr	%g0, 0, %fprs

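	/*
	 * Keep the level in %l3, which survives the handler call below,
	 * and record the level and trap type in the trapframe.
	 */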
	mov	%o0, %l3
	mov	T_INTERRUPT, %o1

	stx	%o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
	stx	%o1, [%sp + SPOFF + CCFSZ + TF_TYPE]

	mov	PCB_REG, %l0
	mov	PCPU_REG, %l1
	wrpr	%g0, PSTATE_NORMAL, %pstate

	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]

	mov	%l0, PCB_REG
	mov	%l1, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate

	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]

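	/*
	 * Look up the handler for this level in the intr_handlers table
	 * and call it with the trapframe as its argument.
	 */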
	SET(intr_handlers, %l1, %l0)
	sllx	%l3, IH_SHIFT, %l1
	ldx	[%l0 + %l1], %l1
	KASSERT(%l1, "tl0_intr: ih null")
	call	%l1
	 add	%sp, CCFSZ + SPOFF, %o0

	/* %l3 contains PIL */
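	/*
	 * pil_countp maps the PIL to an index into the intrcnt array of
	 * 64-bit event counters; bump the counter for this level, then
	 * the vmmeter interrupt count.
	 */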
	SET(intrcnt, %l1, %l2)
	prefetcha [%l2] ASI_N, 1
	SET(pil_countp, %l1, %l0)
	sllx	%l3, 1, %l1
	lduh	[%l0 + %l1], %l0
	sllx	%l0, 3, %l0
	add	%l0, %l2, %l0
	ldx	[%l0], %l1
	inc	%l1
	stx	%l1, [%l0]

	lduw	[PCPU(CNT) + V_INTR], %l0
	inc	%l0
	stw	%l0, [PCPU(CNT) + V_INTR]

	ba,a	%xcc, tl0_ret
	 nop
END(tl0_intr)

/*
 * Initiate return to usermode.
 *
 * Called with a trapframe on the stack.  The window that was setup in
 * tl0_trap may have been used by "fast" trap handlers that pretend to be
 * leaf functions, so all ins and locals may have been clobbered since
 * then.
 *
 * This code is rather long and complicated.
 */
ENTRY(tl0_ret)
	/*
	 * Check for pending asts atomically with returning.  We must raise
	 * the PIL before checking, and if no asts are found the PIL must
	 * remain raised until the retry is executed, or we risk missing asts
	 * caused by interrupts occurring after the test.  If the PIL is
	 * lowered, as it is when we call ast, the check must be re-executed.
	 */
	wrpr	%g0, PIL_TICK, %pil
	ldx	[PCPU(CURTHREAD)], %l0
	lduw	[%l0 + TD_FLAGS], %l1
	set	TDF_ASTPENDING | TDF_NEEDRESCHED, %l2
	and	%l1, %l2, %l1
	brz,a,pt %l1, 1f
	 nop

	/*
	 * We have an AST.  Re-enable interrupts and handle it, then restart
	 * the return sequence.
	 */
	wrpr	%g0, 0, %pil
	call	ast
	 add	%sp, CCFSZ + SPOFF, %o0
	ba,a	%xcc, tl0_ret
	 nop

	/*
	 * Check for windows that were spilled to the pcb and need to be
	 * copied out.  This must be the last thing that is done before the
	 * return to usermode.  If there are still user windows in the cpu
	 * and we call a nested function after this, which causes them to be
	 * spilled to the pcb, they will not be copied out and the stack will
	 * be inconsistent.
	 */
1:	ldx	[PCB_REG + PCB_NSAVED], %l1
	brz,a,pt %l1, 2f
	 nop
	wrpr	%g0, 0, %pil
	mov	T_SPILL, %o0
	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
	call	trap
	 add	%sp, SPOFF + CCFSZ, %o0
	ba,a	%xcc, tl0_ret
	 nop

	/*
	 * Restore the out and most global registers from the trapframe.
	 * The ins will become the outs when we restore below.
	 */
2:	ldx	[%sp + SPOFF + CCFSZ + TF_O0], %i0
	ldx	[%sp + SPOFF + CCFSZ + TF_O1], %i1
	ldx	[%sp + SPOFF + CCFSZ + TF_O2], %i2
	ldx	[%sp + SPOFF + CCFSZ + TF_O3], %i3
	ldx	[%sp + SPOFF + CCFSZ + TF_O4], %i4
	ldx	[%sp + SPOFF + CCFSZ + TF_O5], %i5
	ldx	[%sp + SPOFF + CCFSZ + TF_O6], %i6
	ldx	[%sp + SPOFF + CCFSZ + TF_O7], %i7

	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5

	/*
	 * Load everything we need to restore below before disabling
	 * interrupts.
	 */
	ldx	[%sp + SPOFF + CCFSZ + TF_FPRS], %l0
	ldx	[%sp + SPOFF + CCFSZ + TF_GSR], %l1
	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l3
	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l4
	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l5
	ldx	[%sp + SPOFF + CCFSZ + TF_WSTATE], %l6

	/*
	 * Disable interrupts to restore the special globals.  They are not
	 * saved and restored for all kernel traps, so an interrupt at the
	 * wrong time would clobber them.
	 */
	wrpr	%g0, PSTATE_NORMAL, %pstate

	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6
	ldx	[%sp + SPOFF + CCFSZ + TF_G7], %g7

	/*
	 * Switch to alternate globals.  This frees up some registers we
	 * can use after the restore changes our window.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Drop %pil to zero.  It must have been zero at the time of the
	 * trap, since we were in usermode, but it was raised above in
	 * order to check for asts atomically.  We have interrupts disabled,
	 * so none will be serviced until we complete the return to
	 * usermode.
	 */
	wrpr	%g0, 0, %pil

	/*
	 * Save %fprs in an alternate global so it can be restored after the
	 * restore instruction below.  If we restored it before the restore
	 * and the restore trapped, we might run for a while with floating
	 * point enabled in the kernel, which we want to avoid.
	 */
	mov	%l0, %g1

	/*
	 * Restore %fsr and %gsr.  These need floating point enabled in %fprs,
	 * so we set it temporarily and then clear it.
	 */
	wr	%g0, FPRS_FEF, %fprs
	ldx	[%sp + SPOFF + CCFSZ + TF_FSR], %fsr
	wr	%l1, 0, %gsr
	wr	%g0, 0, %fprs

	/*
	 * Restore program counters.  This could be done after the restore
	 * but we're out of alternate globals to store them in...
	 */
	wrpr	%l2, 0, %tnpc
	wrpr	%l3, 0, %tpc

	/*
	 * Save %tstate in an alternate global and clear the %cwp field.  %cwp
	 * will be affected by the restore below and we need to make sure it
	 * points to the current window at that time, not the window that was
	 * active at the time of the trap.
	 */
	andn	%l4, TSTATE_CWP_MASK, %g2

	/*
	 * Restore %y.  Could also be below if we had more alternate globals.
	 */
	wr	%l5, 0, %y

	/*
	 * Set up %wstate for return.  We need to restore the user window
	 * state which we saved in wstate.other when we trapped.  We also
	 * need to set the transition bit so the restore will be handled
	 * specially if it traps; the xor feature of wrpr is used to do that.
	 */
	srlx	%l6, WSTATE_OTHER_SHIFT, %g3
	wrpr	%g3, WSTATE_TRANSITION, %wstate

	/*
	 * Set up the window management registers for return.  If not all
	 * user windows were spilled in the kernel, %otherwin will be
	 * non-zero, so we need to transfer it to %canrestore to correctly
	 * restore those windows.  Otherwise everything gets set to zero and
	 * the restore below will fill a window directly from the user stack.
	 */
	rdpr	%otherwin, %o0
	wrpr	%o0, 0, %canrestore
	wrpr	%g0, 0, %otherwin
	wrpr	%o0, 0, %cleanwin

	/*
	 * Now do the restore.  If this instruction causes a fill trap which
	 * fails to fill a window from the user stack, we will resume at
	 * tl0_ret_fill_end and call back into the kernel.
	 */
	restore
tl0_ret_fill:

	/*
	 * We made it.  We're back in the window that was active at the time
	 * of the trap, and ready to return to usermode.
	 */

	/*
	 * Restore %fprs.  This was saved in an alternate global above.
	 */
	wr	%g1, 0, %fprs

	/*
	 * Fix up %tstate so the saved %cwp points to the current window and
	 * restore it.
	 */
	rdpr	%cwp, %g4
	wrpr	%g2, %g4, %tstate

	/*
	 * Restore the user window state.  The transition bit was set above
	 * for special handling of the restore; this clears it.
	 */
	wrpr	%g3, 0, %wstate

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl0_ret: td=%#lx pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
	    , %g2, %g3, %g4, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g3
	stx	%g3, [%g2 + KTR_PARM1]
	rdpr	%pil, %g3
	stx	%g3, [%g2 + KTR_PARM2]
	rdpr	%tpc, %g3
	stx	%g3, [%g2 + KTR_PARM3]
	rdpr	%tnpc, %g3
	stx	%g3, [%g2 + KTR_PARM4]
	stx	%sp, [%g2 + KTR_PARM5]
9:
#endif

	/*
	 * Return to usermode.
	 */
	retry
tl0_ret_fill_end:

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl0_ret: fill magic ps=%#lx ws=%#lx sp=%#lx"
	    , %l0, %l1, %l2, 7, 8, 9)
	rdpr	%pstate, %l1
	stx	%l1, [%l0 + KTR_PARM1]
	stx	%l6, [%l0 + KTR_PARM2]
	stx	%sp, [%l0 + KTR_PARM3]
9:
#endif

	/*
	 * The restore above caused a fill trap and the fill handler was
	 * unable to fill a window from the user stack.  The special fill
	 * handler recognized this and punted, sending us here.  We need
	 * to carefully undo any state that was restored before the restore
	 * was executed and call trap again.  Trap will copyin a window
	 * from the user stack, which will fault in the page we need, so
	 * the restore above will succeed when we try again.  If this fails
	 * the process has trashed its stack, so we kill it.
	 */

	/*
	 * Restore the kernel window state.  This was saved in %l6 above, and
	 * since the restore failed we're back in the same window.
	 */
	wrpr	%l6, 0, %wstate

	/*
	 * Restore the normal globals, which have predefined values in the
	 * kernel.  We clobbered them above while restoring the user's
	 * globals, so this is very important.
	 * XXX PSTATE_ALT must already be set.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate
	mov	PCB_REG, %o0
	mov	PCPU_REG, %o1
	wrpr	%g0, PSTATE_NORMAL, %pstate
	mov	%o0, PCB_REG
	mov	%o1, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate

	/*
	 * Simulate a fill trap and then start the whole return sequence over
	 * again.  This is special because it only copies in 1 window, not 2
	 * as we would for a normal failed fill.  This may be the first time
	 * the process has been run, so there may not be 2 windows worth of
	 * stack to copyin.
	 */
	mov	T_FILL_RET, %o0
	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
	call	trap
	 add	%sp, SPOFF + CCFSZ, %o0
	ba,a	%xcc, tl0_ret
	 nop
END(tl0_ret)

/*
 * Kernel trap entry point
 *
 * void tl1_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
 *     u_int sfsr)
 *
 * This is easy because the stack is already set up and the windows don't
 * need to be split.  We build a trapframe and call trap(), the same as
 * above, but the outs don't need to be saved.
 */
ENTRY(tl1_trap)
	rdpr	%tstate, %l0
	rdpr	%tpc, %l1
	rdpr	%tnpc, %l2
	rdpr	%pil, %l3
	rd	%y, %l4
	rdpr	%wstate, %l5

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl1_trap: td=%p type=%#lx pil=%#lx pc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	stx	%o0, [%g1 + KTR_PARM2]
	stx	%l3, [%g1 + KTR_PARM3]
	stx	%l1, [%g1 + KTR_PARM4]
	stx	%i6, [%g1 + KTR_PARM5]
9:
#endif

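	/*
	 * The trap state registers have been read, so drop from the trap
	 * level we were vectored at (normally 2) back to TL 1 before
	 * building the trapframe.
	 */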
	wrpr	%g0, 1, %tl

	and	%l5, WSTATE_OTHER_MASK, %l5
	wrpr	%l5, WSTATE_KERNEL, %wstate

	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
	stx	%o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
	stx	%o3, [%sp + SPOFF + CCFSZ + TF_TAR]
	stx	%o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
	stx	%o5, [%sp + SPOFF + CCFSZ + TF_SFSR]

	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
	stx	%l3, [%sp + SPOFF + CCFSZ + TF_PIL]
	stx	%l4, [%sp + SPOFF + CCFSZ + TF_Y]

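	/*
	 * Switch to normal globals to save the interrupted context's %g6
	 * and %g7.  These normally hold PCB_REG and PCPU_REG, but if the
	 * trap came from the PROM they are the firmware's values, which
	 * tl1_ret restores.
	 */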
	mov	PCB_REG, %l0
	mov	PCPU_REG, %l1
	wrpr	%g0, PSTATE_NORMAL, %pstate

	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]

	mov	%l0, PCB_REG
	mov	%l1, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate

	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]

	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]

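	/*
	 * Call the trap handler with the trapframe as its argument,
	 * arranging for it to return to tl1_ret.
	 */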
	set	tl1_ret - 8, %o7
	jmpl	%o2, %g0
	 add	%sp, CCFSZ + SPOFF, %o0
END(tl1_trap)

ENTRY(tl1_ret)
	ldx	[%sp + SPOFF + CCFSZ + TF_O0], %i0
	ldx	[%sp + SPOFF + CCFSZ + TF_O1], %i1
	ldx	[%sp + SPOFF + CCFSZ + TF_O2], %i2
	ldx	[%sp + SPOFF + CCFSZ + TF_O3], %i3
	ldx	[%sp + SPOFF + CCFSZ + TF_O4], %i4
	ldx	[%sp + SPOFF + CCFSZ + TF_O5], %i5
	ldx	[%sp + SPOFF + CCFSZ + TF_O6], %i6
	ldx	[%sp + SPOFF + CCFSZ + TF_O7], %i7

	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5

	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l0
	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l1
	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
	ldx	[%sp + SPOFF + CCFSZ + TF_PIL], %l3
	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l4

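	/*
	 * Only restore %g6 and %g7 if we are returning into the PROM; for
	 * returns to the kernel they must continue to hold PCB_REG and
	 * PCPU_REG, so the normal globals are left untouched.
	 */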
	set	VM_MIN_PROM_ADDRESS, %l5
	cmp	%l1, %l5
	bl,a,pt	%xcc, 1f
	 nop
	set	VM_MAX_PROM_ADDRESS, %l5
	cmp	%l1, %l5
	bg,a,pt	%xcc, 1f
	 nop

	wrpr	%g0, PSTATE_NORMAL, %pstate

	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6
	ldx	[%sp + SPOFF + CCFSZ + TF_G7], %g7

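	/*
	 * Switch back to the alternate globals and stage the return state
	 * in them, since they survive the window restore below; restore
	 * %pil and %y while we are at it.
	 */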
1:	wrpr	%g0, PSTATE_ALT, %pstate

	andn	%l0, TSTATE_CWP_MASK, %g1
	mov	%l1, %g2
	mov	%l2, %g3

	wrpr	%l3, 0, %pil
	wr	%l4, 0, %y

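	/*
	 * Return to the previous window, then raise the trap level back
	 * to 2 so that %tstate, %tpc and %tnpc can be rewritten for the
	 * retry.
	 */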
	restore

	wrpr	%g0, 2, %tl

	rdpr	%cwp, %g4
	wrpr	%g1, %g4, %tstate
	wrpr	%g2, 0, %tpc
	wrpr	%g3, 0, %tnpc

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl1_ret: td=%#lx pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
	    , %g2, %g3, %g4, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g3
	stx	%g3, [%g2 + KTR_PARM1]
	rdpr	%pil, %g3
	stx	%g3, [%g2 + KTR_PARM2]
	rdpr	%tstate, %g3
	stx	%g3, [%g2 + KTR_PARM3]
	rdpr	%tpc, %g3
	stx	%g3, [%g2 + KTR_PARM4]
	stx	%sp, [%g2 + KTR_PARM5]
9:
#endif

	retry
END(tl1_ret)

/*
 * void tl1_intr(u_int level, u_int mask)
 */
ENTRY(tl1_intr)
	rdpr	%tstate, %l0
	rdpr	%tpc, %l1
	rdpr	%tnpc, %l2
	rdpr	%pil, %l3
	rd	%y, %l4
	rdpr	%wstate, %l5

#if KTR_COMPILE & KTR_INTR
	CATR(KTR_INTR,
	    "tl1_intr: td=%p level=%#x pil=%#lx pc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	stx	%o0, [%g1 + KTR_PARM2]
	stx	%l3, [%g1 + KTR_PARM3]
	stx	%l1, [%g1 + KTR_PARM4]
	stx	%i6, [%g1 + KTR_PARM5]
9:
#endif

	wrpr	%o0, 0, %pil
	wr	%o1, 0, %clear_softint

	wrpr	%g0, 1, %tl

	and	%l5, WSTATE_OTHER_MASK, %l5
	wrpr	%l5, WSTATE_KERNEL, %wstate

	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
	stx	%l3, [%sp + SPOFF + CCFSZ + TF_PIL]
	stx	%l4, [%sp + SPOFF + CCFSZ + TF_Y]

	mov	%o0, %l7
	mov	T_INTERRUPT | T_KERNEL, %o1

	stx	%o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
	stx	%o1, [%sp + SPOFF + CCFSZ + TF_TYPE]

	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]

	mov	PCB_REG, %l4
	mov	PCPU_REG, %l5
	wrpr	%g0, PSTATE_NORMAL, %pstate

	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]

	mov	%l4, PCB_REG
	mov	%l5, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate

	SET(intr_handlers, %l5, %l4)
	sllx	%l7, IH_SHIFT, %l5
	ldx	[%l4 + %l5], %l5
	KASSERT(%l5, "tl1_intr: ih null")
	call	%l5
	 add	%sp, CCFSZ + SPOFF, %o0

	/* %l7 contains PIL */
	SET(intrcnt, %l5, %l4)
	prefetcha [%l4] ASI_N, 1
	SET(pil_countp, %l5, %l6)
	sllx	%l7, 1, %l5
	lduh	[%l5 + %l6], %l5
	sllx	%l5, 3, %l5
	add	%l5, %l4, %l4
	ldx	[%l4], %l5
	inc	%l5
	stx	%l5, [%l4]

	lduw	[PCPU(CNT) + V_INTR], %l4
	inc	%l4
	stw	%l4, [PCPU(CNT) + V_INTR]


	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l4

	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5

	wrpr	%g0, PSTATE_ALT, %pstate

	andn	%l0, TSTATE_CWP_MASK, %g1
	mov	%l1, %g2
	mov	%l2, %g3
	wrpr	%l3, 0, %pil
	wr	%l4, 0, %y

	restore

	wrpr	%g0, 2, %tl

	rdpr	%cwp, %g4
	wrpr	%g1, %g4, %tstate
	wrpr	%g2, 0, %tpc
	wrpr	%g3, 0, %tnpc

#if KTR_COMPILE & KTR_INTR
	CATR(KTR_INTR, "tl1_intr: td=%#x pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
	    , %g2, %g3, %g4, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g3
	stx	%g3, [%g2 + KTR_PARM1]
	rdpr	%pil, %g3
	stx	%g3, [%g2 + KTR_PARM2]
	rdpr	%tstate, %g3
	stx	%g3, [%g2 + KTR_PARM3]
	rdpr	%tpc, %g3
	stx	%g3, [%g2 + KTR_PARM4]
	stx	%sp, [%g2 + KTR_PARM5]
9:
#endif

	retry
END(tl1_intr)

	.globl	tl_text_end
tl_text_end:
	nop

/*
 * Freshly forked processes come here when switched to for the first time.
 * The arguments to fork_exit() have been set up in the locals; we must move
 * them to the outs.
 */
ENTRY(fork_trampoline)
#if KTR_COMPILE & KTR_PROC
	CATR(KTR_PROC, "fork_trampoline: td=%p (%s) cwp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	ldx	[%g2 + TD_PROC], %g2
	add	%g2, P_COMM, %g2
	stx	%g2, [%g1 + KTR_PARM2]
	rdpr	%cwp, %g2
	stx	%g2, [%g1 + KTR_PARM3]
9:
#endif
	mov	%l0, %o0
	mov	%l1, %o1
	call	fork_exit
	 mov	%l2, %o2
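	/*
	 * fork_exit() returns here; leave for usermode via the normal
	 * trap return path.
	 */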
	ba,a	%xcc, tl0_ret
	 nop
END(fork_trampoline)
