/*-
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	BSDI $Id: locore.s,v 1.36.2.15 1999/08/23 22:34:41 cp Exp $
 */
/*-
 * Copyright (c) 2001 Jake Burkholder.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <machine/asm.h>
__FBSDID("$FreeBSD: head/sys/sparc64/sparc64/exception.S 222828 2011-06-07 17:33:39Z marius $");

#include "opt_compat.h"
#include "opt_ddb.h"

#include <machine/asi.h>
#include <machine/asmacros.h>
#include <machine/frame.h>
#include <machine/fsr.h>
#include <machine/intr_machdep.h>
#include <machine/ktr.h>
#include <machine/pcb.h>
#include <machine/pstate.h>
#include <machine/trap.h>
#include <machine/tsb.h>
#include <machine/tstate.h>
#include <machine/utrap.h>
#include <machine/wstate.h>

#include "assym.s"

#define	TSB_ASI			0x0
#define	TSB_KERNEL		0x0
#define	TSB_KERNEL_MASK		0x0
#define	TSB_KERNEL_PHYS		0x0
#define	TSB_KERNEL_PHYS_END	0x0
#define	TSB_QUAD_LDD		0x0

	.register %g2,#ignore
	.register %g3,#ignore
	.register %g6,#ignore
	.register %g7,#ignore

/*
 * Atomically set a bit in a TTE.
 */
#define	TTE_SET_BIT(r1, r2, r3, bit, a, asi) \
	add	r1, TTE_DATA, r1 ; \
	LD(x, a) [r1] asi, r2 ; \
9:	or	r2, bit, r3 ; \
	CAS(x, a) [r1] asi, r2, r3 ; \
	cmp	r2, r3 ; \
	bne,pn	%xcc, 9b ; \
	 mov	r3, r2

#define	TTE_SET_REF(r1, r2, r3, a, asi)	TTE_SET_BIT(r1, r2, r3, TD_REF, a, asi)
#define	TTE_SET_W(r1, r2, r3, a, asi)	TTE_SET_BIT(r1, r2, r3, TD_W, a, asi)

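/*
 * For exposition only (this sketch is not part of the build; casx() here
 * stands for the casxa instruction, which returns the value found in
 * memory): the retry loop in TTE_SET_BIT is equivalent to
 *
 *	old = tte->tte_data;
 *	for (;;) {
 *		new = old | bit;
 *		seen = casx(&tte->tte_data, old, new);
 *		if (seen == old)
 *			break;
 *		old = seen;
 *	}
 */
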
/*
 * Macros for spilling and filling live windows.
 *
 * NOTE: These macros use exactly 16 instructions, and it is assumed that the
 * handler will not use more than 24 instructions total, to leave room for
 * resume vectors which occupy the last 8 instructions.
 */

#define	SPILL(storer, base, size, asi) \
	storer	%l0, [base + (0 * size)] asi ; \
	storer	%l1, [base + (1 * size)] asi ; \
	storer	%l2, [base + (2 * size)] asi ; \
	storer	%l3, [base + (3 * size)] asi ; \
	storer	%l4, [base + (4 * size)] asi ; \
	storer	%l5, [base + (5 * size)] asi ; \
	storer	%l6, [base + (6 * size)] asi ; \
	storer	%l7, [base + (7 * size)] asi ; \
	storer	%i0, [base + (8 * size)] asi ; \
	storer	%i1, [base + (9 * size)] asi ; \
	storer	%i2, [base + (10 * size)] asi ; \
	storer	%i3, [base + (11 * size)] asi ; \
	storer	%i4, [base + (12 * size)] asi ; \
	storer	%i5, [base + (13 * size)] asi ; \
	storer	%i6, [base + (14 * size)] asi ; \
	storer	%i7, [base + (15 * size)] asi

#define	FILL(loader, base, size, asi) \
	loader	[base + (0 * size)] asi, %l0 ; \
	loader	[base + (1 * size)] asi, %l1 ; \
	loader	[base + (2 * size)] asi, %l2 ; \
	loader	[base + (3 * size)] asi, %l3 ; \
	loader	[base + (4 * size)] asi, %l4 ; \
	loader	[base + (5 * size)] asi, %l5 ; \
	loader	[base + (6 * size)] asi, %l6 ; \
	loader	[base + (7 * size)] asi, %l7 ; \
	loader	[base + (8 * size)] asi, %i0 ; \
	loader	[base + (9 * size)] asi, %i1 ; \
	loader	[base + (10 * size)] asi, %i2 ; \
	loader	[base + (11 * size)] asi, %i3 ; \
	loader	[base + (12 * size)] asi, %i4 ; \
	loader	[base + (13 * size)] asi, %i5 ; \
	loader	[base + (14 * size)] asi, %i6 ; \
	loader	[base + (15 * size)] asi, %i7

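/*
 * As an illustration (the expansion below is not itself assembled
 * anywhere), SPILL(stxa, %sp + SPOFF, 8, %asi) as used by tl0_spill_0_n
 * emits 16 stores saving %l0-%l7 and %i0-%i7 at 8-byte intervals:
 *
 *	stxa	%l0, [%sp + SPOFF + 0] %asi
 *	stxa	%l1, [%sp + SPOFF + 8] %asi
 *	...
 *	stxa	%i7, [%sp + SPOFF + 120] %asi
 */
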
#define	ERRATUM50(reg)	mov reg, reg

#define	KSTACK_SLOP	1024

/*
 * Sanity check the kernel stack and bail out if it's wrong.
 * XXX: doesn't handle being on the panic stack.
 */
#define	KSTACK_CHECK \
	dec	16, ASP_REG ; \
	stx	%g1, [ASP_REG + 0] ; \
	stx	%g2, [ASP_REG + 8] ; \
	add	%sp, SPOFF, %g1 ; \
	andcc	%g1, (1 << PTR_SHIFT) - 1, %g0 ; \
	bnz,a	%xcc, tl1_kstack_fault ; \
	 inc	16, ASP_REG ; \
	ldx	[PCPU(CURTHREAD)], %g2 ; \
	ldx	[%g2 + TD_KSTACK], %g2 ; \
	add	%g2, KSTACK_SLOP, %g2 ; \
	subcc	%g1, %g2, %g1 ; \
	ble,a	%xcc, tl1_kstack_fault ; \
	 inc	16, ASP_REG ; \
	set	KSTACK_PAGES * PAGE_SIZE, %g2 ; \
	cmp	%g1, %g2 ; \
	bgt,a	%xcc, tl1_kstack_fault ; \
	 inc	16, ASP_REG ; \
	ldx	[ASP_REG + 8], %g2 ; \
	ldx	[ASP_REG + 0], %g1 ; \
	inc	16, ASP_REG

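/*
 * Roughly, as a C sketch for exposition (not compiled; the real macro also
 * saves and restores %g1 and %g2 on the alternate stack):
 *
 *	sp = %sp + SPOFF;
 *	if ((sp & ((1 << PTR_SHIFT) - 1)) != 0)
 *		goto tl1_kstack_fault;		! misaligned stack pointer
 *	if (sp <= td->td_kstack + KSTACK_SLOP)
 *		goto tl1_kstack_fault;		! into the slop area or below
 *	if (sp - (td->td_kstack + KSTACK_SLOP) > KSTACK_PAGES * PAGE_SIZE)
 *		goto tl1_kstack_fault;		! above the top of the stack
 */
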
	.globl	tl_text_begin
tl_text_begin:
	nop

ENTRY(tl1_kstack_fault)
	rdpr	%tl, %g1
1:	cmp	%g1, 2
	be,a	2f
	 nop

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl1_kstack_fault: tl=%#lx tpc=%#lx tnpc=%#lx"
	    , %g2, %g3, %g4, 7, 8, 9)
	rdpr	%tl, %g3
	stx	%g3, [%g2 + KTR_PARM1]
	rdpr	%tpc, %g3
	stx	%g3, [%g2 + KTR_PARM2]
	rdpr	%tnpc, %g3
	stx	%g3, [%g2 + KTR_PARM3]
9:
#endif

	sub	%g1, 1, %g1
	wrpr	%g1, 0, %tl
	ba,a	%xcc, 1b
	 nop

2:
#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP,
	    "tl1_kstack_fault: sp=%#lx ks=%#lx cr=%#lx cs=%#lx ow=%#lx ws=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	add	%sp, SPOFF, %g2
	stx	%g2, [%g1 + KTR_PARM1]
	ldx	[PCPU(CURTHREAD)], %g2
	ldx	[%g2 + TD_KSTACK], %g2
	stx	%g2, [%g1 + KTR_PARM2]
	rdpr	%canrestore, %g2
	stx	%g2, [%g1 + KTR_PARM3]
	rdpr	%cansave, %g2
	stx	%g2, [%g1 + KTR_PARM4]
	rdpr	%otherwin, %g2
	stx	%g2, [%g1 + KTR_PARM5]
	rdpr	%wstate, %g2
	stx	%g2, [%g1 + KTR_PARM6]
9:
#endif

	wrpr	%g0, 0, %canrestore
	wrpr	%g0, 6, %cansave
	wrpr	%g0, 0, %otherwin
	wrpr	%g0, WSTATE_KERNEL, %wstate

	sub	ASP_REG, SPOFF + CCFSZ, %sp
	clr	%fp

	set	trap, %o2
	ba	%xcc, tl1_trap
	 mov	T_KSTACK_FAULT | T_KERNEL, %o0
END(tl1_kstack_fault)

/*
 * Magic to resume from a spill or fill trap.  If we get an alignment or an
 * MMU fault during a spill or a fill, this macro will detect the fault and
 * resume at a set instruction offset in the trap handler.
 *
 * To check if the previous trap was a spill/fill we convert the trapped pc
 * to a trap type and verify that it is in the range of spill/fill vectors.
 * The spill/fill vectors are types 0x80-0xff and 0x280-0x2ff, masking off the
 * tl bit allows us to detect both ranges with one test.
 *
 * This is:
 *	0x80 <= (((%tpc - %tba) >> 5) & ~0x200) < 0x100
 *
 * To calculate the new pc we take advantage of the xor feature of wrpr.
 * Forcing all the low bits of the trapped pc on we can produce any offset
 * into the spill/fill vector.  The size of a spill/fill trap vector is 0x80.
 *
 *	0x7f ^ 0x1f == 0x60
 *	0x1f == (0x80 - 0x60) - 1
 *
 * Which are the offset and xor value used to resume from alignment faults.
 */

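/*
 * A worked example (illustrative values): take a fault in the third
 * instruction of a spill vector with trap type 0x80, so that
 * %tpc = %tba + (0x80 << 5) + 0x8.  Then ((%tpc - %tba) >> 5) & ~0x200
 * is 0x80, which falls in [0x80, 0x100), so this was a spill/fill trap.
 * Or-ing %tpc with 0x7f gives %tba + (0x80 << 5) + 0x7f, and the xor of
 * 0x1f applied by wrpr turns the low bits 0x7f into 0x60, leaving %tnpc
 * at instruction 24 of the vector, where the alignment resume code lives.
 */
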
/*
 * Determine if we have trapped inside of a spill/fill vector, and if so resume
 * at a fixed instruction offset in the trap vector.  Must be called on
 * alternate globals.
 */
#define	RESUME_SPILLFILL_MAGIC(stxa_g0_sfsr, xor) \
	dec	16, ASP_REG ; \
	stx	%g1, [ASP_REG + 0] ; \
	stx	%g2, [ASP_REG + 8] ; \
	rdpr	%tpc, %g1 ; \
	ERRATUM50(%g1) ; \
	rdpr	%tba, %g2 ; \
	sub	%g1, %g2, %g2 ; \
	srlx	%g2, 5, %g2 ; \
	andn	%g2, 0x200, %g2 ; \
	cmp	%g2, 0x80 ; \
	blu,pt	%xcc, 9f ; \
	 cmp	%g2, 0x100 ; \
	bgeu,pt	%xcc, 9f ; \
	 or	%g1, 0x7f, %g1 ; \
	wrpr	%g1, xor, %tnpc ; \
	stxa_g0_sfsr ; \
	ldx	[ASP_REG + 8], %g2 ; \
	ldx	[ASP_REG + 0], %g1 ; \
	inc	16, ASP_REG ; \
	done ; \
9:	ldx	[ASP_REG + 8], %g2 ; \
	ldx	[ASP_REG + 0], %g1 ; \
	inc	16, ASP_REG

/*
 * For certain faults we need to clear the SFSR MMU register before returning.
 */
#define	RSF_CLR_SFSR \
	wr	%g0, ASI_DMMU, %asi ; \
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi

#define	RSF_XOR(off)	((0x80 - off) - 1)

/*
 * Instruction offsets in spill and fill trap handlers for handling certain
 * nested traps, and corresponding xor constants for wrpr.
 */
#define	RSF_OFF_ALIGN	0x60
#define	RSF_OFF_MMU	0x70

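/*
 * For example, RSF_XOR(RSF_OFF_ALIGN) = (0x80 - 0x60) - 1 = 0x1f and
 * RSF_XOR(RSF_OFF_MMU) = (0x80 - 0x70) - 1 = 0xf; xor-ing a pc whose low
 * bits were forced to 0x7f with these constants lands on offset 0x60
 * (alignment) or 0x70 (MMU) of the faulting spill/fill vector.
 */
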
#define	RESUME_SPILLFILL_ALIGN \
	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_ALIGN))
#define	RESUME_SPILLFILL_MMU \
	RESUME_SPILLFILL_MAGIC(EMPTY, RSF_XOR(RSF_OFF_MMU))
#define	RESUME_SPILLFILL_MMU_CLR_SFSR \
	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_MMU))

/*
 * Constant to add to %tnpc when taking a fill trap just before returning to
 * user mode.
 */
#define	RSF_FILL_INC	tl0_ret_fill_end - tl0_ret_fill

/*
 * Generate a T_SPILL or T_FILL trap if the window operation fails.
 */
#define	RSF_TRAP(type) \
	ba	%xcc, tl0_sftrap ; \
	 mov	type, %g2 ; \
	.align	16

/*
 * Game over if the window operation fails.
 */
#define	RSF_FATAL(type) \
	ba	%xcc, rsf_fatal ; \
	 mov	type, %g2 ; \
	.align	16

/*
 * Magic to resume from a failed fill a few instructions after the corresponding
 * restore.  This is used on return from the kernel to usermode.
 */
#define	RSF_FILL_MAGIC \
	rdpr	%tnpc, %g1 ; \
	add	%g1, RSF_FILL_INC, %g1 ; \
	wrpr	%g1, 0, %tnpc ; \
	done ; \
	.align	16

/*
 * Spill to the pcb if a spill to the user stack in kernel mode fails.
 */
#define	RSF_SPILL_TOPCB \
	ba,a	%xcc, tl1_spill_topcb ; \
	 nop ; \
	.align	16

ENTRY(rsf_fatal)
#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "rsf_fatal: bad window trap tt=%#lx type=%#lx"
	    , %g1, %g3, %g4, 7, 8, 9)
	rdpr	%tt, %g3
	stx	%g3, [%g1 + KTR_PARM1]
	stx	%g2, [%g1 + KTR_PARM2]
9:
#endif

	KSTACK_CHECK

	sir
END(rsf_fatal)

	.comm	intrnames, IV_MAX * (MAXCOMLEN + 1)
	.comm	eintrnames, 0

	.comm	intrcnt, IV_MAX * 8
	.comm	eintrcnt, 0

/*
 * Trap table and associated macros
 *
 * Due to its size a trap table is an inherently hard thing to represent in
 * code in a clean way.  There are approximately 1024 vectors, of 8 or 32
 * instructions each, many of which are identical.  The way that this is
 * laid out is the instructions (8 or 32) for the actual trap vector appear
 * as an AS macro.  In general this code branches to tl0_trap or tl1_trap,
 * but if not, supporting code can be placed just after the definition of the
 * macro.  The macros are then instantiated in a different section (.trap),
 * which is set up to be placed by the linker at the beginning of .text, and
 * the code around the macros is moved to the end of the trap table.  In this
 * way the code that must be sequential in memory can be split up, and located
 * near its supporting code so that it is easier to follow.
 */
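
/*
 * As a concrete illustration of the layout: each trap vector is 32 bytes
 * (8 instructions), so vector tt starts at %tba + (tt << 5); for example,
 * trap type 0x68 (tl0_dmmu_miss) begins at offset 0xd00, and a
 * 32-instruction handler simply occupies four consecutive vector slots.
 */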

	/*
	 * Clean window traps occur when %cleanwin is zero to ensure that data
	 * is not leaked between address spaces in registers.
	 */
	.macro	clean_window
	clr	%o0
	clr	%o1
	clr	%o2
	clr	%o3
	clr	%o4
	clr	%o5
	clr	%o6
	clr	%o7
	clr	%l0
	clr	%l1
	clr	%l2
	clr	%l3
	clr	%l4
	clr	%l5
	clr	%l6
	rdpr	%cleanwin, %l7
	inc	%l7
	wrpr	%l7, 0, %cleanwin
	clr	%l7
	retry
	.align	128
	.endm

	/*
	 * Stack fixups for entry from user mode.  We are still running on the
	 * user stack, and with its live registers, so we must save soon.  We
	 * are on alternate globals so we do have some registers.  Set the
	 * transitional window state, and do the save.  If this traps we
	 * attempt to spill a window to the user stack.  If this fails, we
	 * spill the window to the pcb and continue.  Spilling to the pcb
	 * must not fail.
	 *
	 * NOTE: Must be called with alternate globals and clobbers %g1.
	 */

	.macro	tl0_split
	rdpr	%wstate, %g1
	wrpr	%g1, WSTATE_TRANSITION, %wstate
	save
	.endm

	.macro	tl0_setup	type
	tl0_split
	clr	%o1
	set	trap, %o2
	ba	%xcc, tl0_utrap
	 mov	\type, %o0
	.endm

	/*
	 * Generic trap type.  Call trap() with the specified type.
	 */
	.macro	tl0_gen		type
	tl0_setup \type
	.align	32
	.endm

	/*
	 * This is used to suck up the massive swaths of reserved trap types.
	 * Generates count "reserved" trap vectors.
	 */
	.macro	tl0_reserved	count
	.rept	\count
	tl0_gen	T_RESERVED
	.endr
	.endm

	.macro	tl1_split
	rdpr	%wstate, %g1
	wrpr	%g1, WSTATE_NESTED, %wstate
	save	%sp, -(CCFSZ + TF_SIZEOF), %sp
	.endm

	.macro	tl1_setup	type
	tl1_split
	clr	%o1
	set	trap, %o2
	ba	%xcc, tl1_trap
	 mov	\type | T_KERNEL, %o0
	.endm

	.macro	tl1_gen		type
	tl1_setup \type
	.align	32
	.endm

	.macro	tl1_reserved	count
	.rept	\count
	tl1_gen	T_RESERVED
	.endr
	.endm

	.macro	tl0_insn_excptn
	wrpr	%g0, PSTATE_ALT, %pstate
	wr	%g0, ASI_IMMU, %asi
	rdpr	%tpc, %g3
	ldxa	[%g0 + AA_IMMU_SFSR] %asi, %g4
	/*
	 * XXX in theory, a store to AA_IMMU_SFSR must be immediately
	 * followed by a DONE, FLUSH or RETRY for USIII.  In practice,
	 * this triggers a RED state exception though.
	 */
	stxa	%g0, [%g0 + AA_IMMU_SFSR] %asi
	membar	#Sync
	ba	%xcc, tl0_sfsr_trap
	 mov	T_INSTRUCTION_EXCEPTION, %g2
	.align	32
	.endm

	.macro	tl0_data_excptn
	wrpr	%g0, PSTATE_ALT, %pstate
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync
	ba	%xcc, tl0_sfsr_trap
	 mov	T_DATA_EXCEPTION, %g2
	.align	32
	.endm

	.macro	tl0_align
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync
	ba	%xcc, tl0_sfsr_trap
	 mov	T_MEM_ADDRESS_NOT_ALIGNED, %g2
	.align	32
	.endm

ENTRY(tl0_sfsr_trap)
	tl0_split
	clr	%o1
	set	trap, %o2
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl0_utrap
	 mov	%g2, %o0
END(tl0_sfsr_trap)

	.macro	tl0_intr level, mask
	tl0_split
	set	\mask, %o1
	ba	%xcc, tl0_intr
	 mov	\level, %o0
	.align	32
	.endm

#define	INTR(level, traplvl)						\
	tl ## traplvl ## _intr	level, 1 << level

#define	TICK(traplvl) \
	tl ## traplvl ## _intr	PIL_TICK, 0x10001

#define	INTR_LEVEL(tl)							\
	INTR(1, tl) ;							\
	INTR(2, tl) ;							\
	INTR(3, tl) ;							\
	INTR(4, tl) ;							\
	INTR(5, tl) ;							\
	INTR(6, tl) ;							\
	INTR(7, tl) ;							\
	INTR(8, tl) ;							\
	INTR(9, tl) ;							\
	INTR(10, tl) ;							\
	INTR(11, tl) ;							\
	INTR(12, tl) ;							\
	INTR(13, tl) ;							\
	TICK(tl) ;							\
	INTR(15, tl) ;

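/*
 * For instance (expansion shown for illustration only), INTR(1, 0)
 * instantiates "tl0_intr 1, 1 << 1", and INTR_LEVEL(0) as a whole emits
 * the fifteen interrupt-level vectors 0x41-0x4f, with the PIL_TICK level
 * produced by TICK(0) and its special 0x10001 mask.
 */
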
	.macro	tl0_intr_level
	INTR_LEVEL(0)
	.endm

	.macro	intr_vector
	ldxa	[%g0] ASI_INTR_RECEIVE, %g1
	andcc	%g1, IRSR_BUSY, %g0
	bnz,a,pt %xcc, intr_vector
	 nop
	sir
	.align	32
	.endm

	.macro	tl0_immu_miss
	/*
	 * Load the context and the virtual page number from the tag access
	 * register.  We ignore the context.
	 */
	wr	%g0, ASI_IMMU, %asi
	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g1

	/*
	 * Initialize the page size walker.
	 */
	mov	TS_MIN, %g2

	/*
	 * Loop over all supported page sizes.
	 */

	/*
	 * Compute the page shift for the page size we are currently looking
	 * for.
	 */
1:	add	%g2, %g2, %g3
	add	%g3, %g2, %g3
	add	%g3, PAGE_SHIFT, %g3

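	/*
	 * That is, %g3 = 3 * %g2 + PAGE_SHIFT: each supported page size is
	 * 8 times the previous one, so (assuming 8K base pages, for which
	 * PAGE_SHIFT is 13) the walker tries shifts of 13, 16, 19 and 22,
	 * corresponding to 8K, 64K, 512K and 4M pages.
	 */
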
	/*
	 * Extract the virtual page number from the contents of the tag
	 * access register.
	 */
	srlx	%g1, %g3, %g3

	/*
	 * Compute the TTE bucket address.
	 */
	ldxa	[%g0 + AA_IMMU_TSB] %asi, %g5
	and	%g3, TSB_BUCKET_MASK, %g4
	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
	add	%g4, %g5, %g4

	/*
	 * Compute the TTE tag target.
	 */
	sllx	%g3, TV_SIZE_BITS, %g3
	or	%g3, %g2, %g3

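	/*
	 * In other words (illustrative): the low TSB_BUCKET_MASK bits of
	 * the virtual page number pick a bucket of 1 << TSB_BUCKET_SHIFT
	 * TTEs in the TSB at %g5, and the tag to match is
	 * (vpn << TV_SIZE_BITS) | page_size, so equal page numbers of
	 * different page sizes cannot alias.
	 */
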
	/*
	 * Loop over the TTEs in this bucket.
	 */

	/*
	 * Load the TTE.  Note that this instruction may fault, clobbering
	 * the contents of the tag access register, %g5, %g6, and %g7.  We
	 * do not use %g5, and %g6 and %g7 are not used until this instruction
	 * completes successfully.
	 */
2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and executable and that the TTE tags match.
	 */
	brgez,pn %g7, 3f
	 andcc	%g7, TD_EXEC, %g0
	bz,pn	%xcc, 3f
	 cmp	%g3, %g6
	bne,pn	%xcc, 3f
	 EMPTY

	/*
	 * We matched a TTE, load the TLB.
	 */

	/*
	 * Set the reference bit, if it's currently clear.
	 */
	 andcc	%g7, TD_REF, %g0
	bz,a,pn	%xcc, tl0_immu_miss_set_ref
	 nop

	/*
	 * Load the TTE tag and data into the TLB and retry the instruction.
	 */
	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
	stxa	%g7, [%g0] ASI_ITLB_DATA_IN_REG
	retry

	/*
	 * Advance to the next TTE in this bucket, and check the low bits
	 * of the bucket pointer to see if we've finished the bucket.
	 */
3:	add	%g4, 1 << TTE_SHIFT, %g4
	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
	bnz,pt	%xcc, 2b
	 EMPTY

	/*
	 * See if we just checked the largest page size, and advance to the
	 * next one if not.
	 */
	 cmp	%g2, TS_MAX
	bne,pt	%xcc, 1b
	 add	%g2, 1, %g2

	/*
	 * Not in user TSB, call C code.
	 */
	ba,a	%xcc, tl0_immu_miss_trap
	.align	128
	.endm

ENTRY(tl0_immu_miss_set_ref)
	/*
	 * Set the reference bit.
	 */
	TTE_SET_REF(%g4, %g2, %g3, a, ASI_N)

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g2, 1f
	 nop

	/*
	 * Load the TTE tag and data into the TLB and retry the instruction.
	 */
	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
	stxa	%g2, [%g0] ASI_ITLB_DATA_IN_REG
1:	retry
END(tl0_immu_miss_set_ref)

ENTRY(tl0_immu_miss_trap)
	/*
	 * Put back the contents of the tag access register, in case we
	 * faulted.
	 */
	sethi	%hi(KERNBASE), %g2
	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
	flush	%g2

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Reload the tag access register.
	 */
	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2

	/*
	 * Save the tag access register, and call common trap code.
	 */
	tl0_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	ba	%xcc, tl0_utrap
	 mov	T_INSTRUCTION_MISS, %o0
END(tl0_immu_miss_trap)

	.macro	tl0_dmmu_miss
	/*
	 * Load the context and the virtual page number from the tag access
	 * register.  We ignore the context.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1

	/*
	 * Initialize the page size walker.
	 */
tl1_dmmu_miss_user:
	mov	TS_MIN, %g2

	/*
	 * Loop over all supported page sizes.
	 */

	/*
	 * Compute the page shift for the page size we are currently looking
	 * for.
	 */
1:	add	%g2, %g2, %g3
	add	%g3, %g2, %g3
	add	%g3, PAGE_SHIFT, %g3

	/*
	 * Extract the virtual page number from the contents of the tag
	 * access register.
	 */
	srlx	%g1, %g3, %g3

	/*
	 * Compute the TTE bucket address.
	 */
	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g5
	and	%g3, TSB_BUCKET_MASK, %g4
	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
	add	%g4, %g5, %g4

	/*
	 * Compute the TTE tag target.
	 */
	sllx	%g3, TV_SIZE_BITS, %g3
	or	%g3, %g2, %g3

	/*
	 * Loop over the TTEs in this bucket.
	 */

	/*
	 * Load the TTE.  Note that this instruction may fault, clobbering
	 * the contents of the tag access register, %g5, %g6, and %g7.  We
	 * do not use %g5, and %g6 and %g7 are not used until this instruction
	 * completes successfully.
	 */
2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and that the virtual page numbers match.
	 */
	brgez,pn %g7, 3f
	 cmp	%g3, %g6
	bne,pn	%xcc, 3f
	 EMPTY

	/*
	 * We matched a TTE, load the TLB.
	 */

	/*
	 * Set the reference bit, if it's currently clear.
	 */
	 andcc	%g7, TD_REF, %g0
	bz,a,pn	%xcc, tl0_dmmu_miss_set_ref
	 nop

	/*
	 * Load the TTE tag and data into the TLB and retry the instruction.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	stxa	%g7, [%g0] ASI_DTLB_DATA_IN_REG
	retry

	/*
	 * Advance to the next TTE in this bucket, and check the low bits
	 * of the bucket pointer to see if we've finished the bucket.
	 */
3:	add	%g4, 1 << TTE_SHIFT, %g4
	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
	bnz,pt	%xcc, 2b
	 EMPTY

	/*
	 * See if we just checked the largest page size, and advance to the
	 * next one if not.
	 */
	 cmp	%g2, TS_MAX
	bne,pt	%xcc, 1b
	 add	%g2, 1, %g2

	/*
	 * Not in user TSB, call C code.
	 */
	ba,a	%xcc, tl0_dmmu_miss_trap
	.align	128
	.endm

ENTRY(tl0_dmmu_miss_set_ref)
	/*
	 * Set the reference bit.
	 */
	TTE_SET_REF(%g4, %g2, %g3, a, ASI_N)

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g2, 1f
	 nop

	/*
	 * Load the TTE tag and data into the TLB and retry the instruction.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	stxa	%g2, [%g0] ASI_DTLB_DATA_IN_REG
1:	retry
END(tl0_dmmu_miss_set_ref)

ENTRY(tl0_dmmu_miss_trap)
	/*
	 * Put back the contents of the tag access register, in case we
	 * faulted.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	membar	#Sync

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Check if we actually came from the kernel.
	 */
	rdpr	%tl, %g1
	cmp	%g1, 1
	bgt,a,pn %xcc, 1f
	 nop

	/*
	 * Reload the tag access register.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2

	/*
	 * Save the tag access register and call common trap code.
	 */
	tl0_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	ba	%xcc, tl0_utrap
	 mov	T_DATA_MISS, %o0

	/*
	 * Handle faults during window spill/fill.
	 */
1:	RESUME_SPILLFILL_MMU

	/*
	 * Reload the tag access register.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	ba	%xcc, tl1_trap
	 mov	T_DATA_MISS | T_KERNEL, %o0
END(tl0_dmmu_miss_trap)

	.macro	tl0_dmmu_prot
	ba,a	%xcc, tl0_dmmu_prot_1
	 nop
	.align	128
	.endm

ENTRY(tl0_dmmu_prot_1)
	/*
	 * Load the context and the virtual page number from the tag access
	 * register.  We ignore the context.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1

	/*
	 * Initialize the page size walker.
	 */
tl1_dmmu_prot_user:
	mov	TS_MIN, %g2

	/*
	 * Loop over all supported page sizes.
	 */

	/*
	 * Compute the page shift for the page size we are currently looking
	 * for.
	 */
1:	add	%g2, %g2, %g3
	add	%g3, %g2, %g3
	add	%g3, PAGE_SHIFT, %g3

	/*
	 * Extract the virtual page number from the contents of the tag
	 * access register.
	 */
	srlx	%g1, %g3, %g3

	/*
	 * Compute the TTE bucket address.
	 */
	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g5
	and	%g3, TSB_BUCKET_MASK, %g4
	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
	add	%g4, %g5, %g4

	/*
	 * Compute the TTE tag target.
	 */
	sllx	%g3, TV_SIZE_BITS, %g3
	or	%g3, %g2, %g3

	/*
	 * Loop over the TTEs in this bucket.
	 */

	/*
	 * Load the TTE.  Note that this instruction may fault, clobbering
	 * the contents of the tag access register, %g5, %g6, and %g7.  We
	 * do not use %g5, and %g6 and %g7 are not used until this instruction
	 * completes successfully.
	 */
2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and writable and that the virtual page
	 * numbers match.
	 */
	brgez,pn %g7, 4f
	 andcc	%g7, TD_SW, %g0
	bz,pn	%xcc, 4f
	 cmp	%g3, %g6
	bne,pn	%xcc, 4f
	 nop

	/*
	 * Set the hardware write bit.
	 */
	TTE_SET_W(%g4, %g2, %g3, a, ASI_N)

	/*
	 * Delete the old TLB entry and clear the SFSR.
	 */
	srlx	%g1, PAGE_SHIFT, %g3
	sllx	%g3, PAGE_SHIFT, %g3
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g2, 3f
	 or	%g2, TD_W, %g2

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	stxa	%g2, [%g0] ASI_DTLB_DATA_IN_REG
3:	retry

	/*
	 * Check the low bits to see if we've finished the bucket.
	 */
4:	add	%g4, 1 << TTE_SHIFT, %g4
	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
	bnz,pt	%xcc, 2b
	 EMPTY

	/*
	 * See if we just checked the largest page size, and advance to the
	 * next one if not.
	 */
	 cmp	%g2, TS_MAX
	bne,pt	%xcc, 1b
	 add	%g2, 1, %g2

	/*
	 * Not in user TSB, call C code.
	 */
	ba,a	%xcc, tl0_dmmu_prot_trap
	 nop
END(tl0_dmmu_prot_1)

ENTRY(tl0_dmmu_prot_trap)
	/*
	 * Put back the contents of the tag access register, in case we
	 * faulted.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	membar	#Sync

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Check if we actually came from the kernel.
	 */
	rdpr	%tl, %g1
	cmp	%g1, 1
	bgt,a,pn %xcc, 1f
	 nop

	/*
	 * Load the SFAR, SFSR and TAR.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	/*
	 * Save the MMU registers and call common trap code.
	 */
	tl0_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl0_utrap
	 mov	T_DATA_PROTECTION, %o0

	/*
	 * Handle faults during window spill/fill.
	 */
1:	RESUME_SPILLFILL_MMU_CLR_SFSR

	/*
	 * Load the SFAR, SFSR and TAR.  Clear the SFSR.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl1_trap
	 mov	T_DATA_PROTECTION | T_KERNEL, %o0
END(tl0_dmmu_prot_trap)

	.macro	tl0_spill_0_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_TRAP(T_SPILL)
	RSF_TRAP(T_SPILL)
	.endm

	.macro	tl0_spill_1_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_TRAP(T_SPILL)
	RSF_TRAP(T_SPILL)
	.endm

	.macro	tl0_fill_0_n
	wr	%g0, ASI_AIUP, %asi
	FILL(ldxa, %sp + SPOFF, 8, %asi)
	restored
	retry
	.align	32
	RSF_TRAP(T_FILL)
	RSF_TRAP(T_FILL)
	.endm

	.macro	tl0_fill_1_n
	wr	%g0, ASI_AIUP, %asi
	FILL(lduwa, %sp, 4, %asi)
	restored
	retry
	.align	32
	RSF_TRAP(T_FILL)
	RSF_TRAP(T_FILL)
	.endm

ENTRY(tl0_sftrap)
	rdpr	%tstate, %g1
	and	%g1, TSTATE_CWP_MASK, %g1
	wrpr	%g1, 0, %cwp
	tl0_split
	clr	%o1
	set	trap, %o2
	ba	%xcc, tl0_trap
	 mov	%g2, %o0
END(tl0_sftrap)

	.macro	tl0_spill_bad	count
	.rept	\count
	sir
	.align	128
	.endr
	.endm

	.macro	tl0_fill_bad	count
	.rept	\count
	sir
	.align	128
	.endr
	.endm

	.macro	tl0_syscall
	tl0_split
	clr	%o1
	set	syscall, %o2
	ba	%xcc, tl0_trap
	 mov	T_SYSCALL, %o0
	.align	32
	.endm

	.macro	tl0_fp_restore
	ba,a	%xcc, tl0_fp_restore
	 nop
	.align	32
	.endm

ENTRY(tl0_fp_restore)
	ldx	[PCB_REG + PCB_FLAGS], %g1
	andn	%g1, PCB_FEF, %g1
	stx	%g1, [PCB_REG + PCB_FLAGS]

	wr	%g0, FPRS_FEF, %fprs
	wr	%g0, ASI_BLK_S, %asi
	ldda	[PCB_REG + PCB_UFP + (0 * 64)] %asi, %f0
	ldda	[PCB_REG + PCB_UFP + (1 * 64)] %asi, %f16
	ldda	[PCB_REG + PCB_UFP + (2 * 64)] %asi, %f32
	ldda	[PCB_REG + PCB_UFP + (3 * 64)] %asi, %f48
	membar	#Sync
	done
END(tl0_fp_restore)

	.macro	tl1_insn_excptn
	wrpr	%g0, PSTATE_ALT, %pstate
	wr	%g0, ASI_IMMU, %asi
	rdpr	%tpc, %g3
	ldxa	[%g0 + AA_IMMU_SFSR] %asi, %g4
	/*
	 * XXX in theory, a store to AA_IMMU_SFSR must be immediately
	 * followed by a DONE, FLUSH or RETRY for USIII.  In practice,
	 * this triggers a RED state exception though.
	 */
	stxa	%g0, [%g0 + AA_IMMU_SFSR] %asi
	membar	#Sync
	ba	%xcc, tl1_insn_exceptn_trap
	 mov	T_INSTRUCTION_EXCEPTION | T_KERNEL, %g2
	.align	32
	.endm

ENTRY(tl1_insn_exceptn_trap)
	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl1_trap
	 mov	%g2, %o0
END(tl1_insn_exceptn_trap)

	.macro	tl1_fp_disabled
	ba,a	%xcc, tl1_fp_disabled_1
	 nop
	.align	32
	.endm

ENTRY(tl1_fp_disabled_1)
	rdpr	%tpc, %g1
	set	fpu_fault_begin, %g2
	sub	%g1, %g2, %g1
	cmp	%g1, fpu_fault_size
	bgeu,a,pn %xcc, 1f
	 nop

	wr	%g0, FPRS_FEF, %fprs
	wr	%g0, ASI_BLK_S, %asi
	ldda	[PCB_REG + PCB_KFP + (0 * 64)] %asi, %f0
	ldda	[PCB_REG + PCB_KFP + (1 * 64)] %asi, %f16
	ldda	[PCB_REG + PCB_KFP + (2 * 64)] %asi, %f32
	ldda	[PCB_REG + PCB_KFP + (3 * 64)] %asi, %f48
	membar	#Sync
	retry

1:	tl1_split
	clr	%o1
	set	trap, %o2
	ba	%xcc, tl1_trap
	 mov	T_FP_DISABLED | T_KERNEL, %o0
END(tl1_fp_disabled_1)

	.macro	tl1_data_excptn
	wrpr	%g0, PSTATE_ALT, %pstate
	ba,a	%xcc, tl1_data_excptn_trap
	 nop
	.align	32
	.endm

ENTRY(tl1_data_excptn_trap)
	RESUME_SPILLFILL_MMU_CLR_SFSR
	ba	%xcc, tl1_sfsr_trap
	 mov	T_DATA_EXCEPTION | T_KERNEL, %g2
END(tl1_data_excptn_trap)

	.macro	tl1_align
	ba,a	%xcc, tl1_align_trap
	 nop
	.align	32
	.endm

ENTRY(tl1_align_trap)
	RESUME_SPILLFILL_ALIGN
	ba	%xcc, tl1_sfsr_trap
	 mov	T_MEM_ADDRESS_NOT_ALIGNED | T_KERNEL, %g2
END(tl1_align_trap)

ENTRY(tl1_sfsr_trap)
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl1_trap
	 mov	%g2, %o0
END(tl1_sfsr_trap)

	.macro	tl1_intr level, mask
	tl1_split
	set	\mask, %o1
	ba	%xcc, tl1_intr
	 mov	\level, %o0
	.align	32
	.endm

	.macro	tl1_intr_level
	INTR_LEVEL(1)
	.endm

	.macro	tl1_immu_miss
	/*
	 * Load the context and the virtual page number from the tag access
	 * register.  We ignore the context.
	 */
	wr	%g0, ASI_IMMU, %asi
	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g5

	/*
	 * Compute the address of the TTE.  The TSB mask and address of the
	 * TSB are patched at startup.
	 */
	.globl	tl1_immu_miss_patch_tsb_1
tl1_immu_miss_patch_tsb_1:
	sethi	%uhi(TSB_KERNEL), %g6
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_immu_miss_patch_tsb_mask_1
tl1_immu_miss_patch_tsb_mask_1:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6

	srlx	%g5, TAR_VPN_SHIFT, %g5
	and	%g5, %g6, %g6
	sllx	%g6, TTE_SHIFT, %g6
	add	%g6, %g7, %g6

	/*
	 * Load the TTE.
	 */
	.globl	tl1_immu_miss_patch_quad_ldd_1
tl1_immu_miss_patch_quad_ldd_1:
	ldda	[%g6] TSB_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and executable and that the virtual page
	 * numbers match.
	 */
	brgez,pn %g7, tl1_immu_miss_trap
	 andcc	%g7, TD_EXEC, %g0
	bz,pn	%xcc, tl1_immu_miss_trap
	 srlx	%g6, TV_SIZE_BITS, %g6
	cmp	%g5, %g6
	bne,pn	%xcc, tl1_immu_miss_trap
	 EMPTY

	/*
	 * Set the reference bit if it's currently clear.
	 */
	 andcc	%g7, TD_REF, %g0
	bz,a,pn	%xcc, tl1_immu_miss_set_ref
	 nop

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g7, [%g0] ASI_ITLB_DATA_IN_REG
	retry
	.align	128
	.endm

ENTRY(tl1_immu_miss_set_ref)
	/*
	 * Recompute the TTE address, which we clobbered loading the TTE.
	 * The TSB mask and address of the TSB are patched at startup.
	 */
	.globl	tl1_immu_miss_patch_tsb_2
tl1_immu_miss_patch_tsb_2:
	sethi	%uhi(TSB_KERNEL), %g6
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_immu_miss_patch_tsb_mask_2
tl1_immu_miss_patch_tsb_mask_2:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6

	and	%g5, %g6, %g5
	sllx	%g5, TTE_SHIFT, %g5
	add	%g5, %g7, %g5

	/*
	 * Set the reference bit.
	 */
	.globl	tl1_immu_miss_patch_asi_1
tl1_immu_miss_patch_asi_1:
	wr	%g0, TSB_ASI, %asi
	TTE_SET_REF(%g5, %g6, %g7, a, %asi)

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g6, 1f
	 nop

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g6, [%g0] ASI_ITLB_DATA_IN_REG
1:	retry
END(tl1_immu_miss_set_ref)

ENTRY(tl1_immu_miss_trap)
	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	ba	%xcc, tl1_trap
	 mov	T_INSTRUCTION_MISS | T_KERNEL, %o0
END(tl1_immu_miss_trap)

	.macro	tl1_dmmu_miss
	/*
	 * Load the context and the virtual page number from the tag access
	 * register.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g5

	/*
	 * Extract the context from the contents of the tag access register.
	 * If it's non-zero this is a fault on a user address.  Note that the
	 * faulting address is passed in %g1.
	 */
	sllx	%g5, 64 - TAR_VPN_SHIFT, %g6
	brnz,a,pn %g6, tl1_dmmu_miss_user
	 mov	%g5, %g1

	/*
	 * Check for the direct mapped physical region.  These addresses have
	 * the high bit set so they are negative.
	 */
	brlz,pn %g5, tl1_dmmu_miss_direct
	 EMPTY

	/*
	 * Compute the address of the TTE.  The TSB mask and address of the
	 * TSB are patched at startup.
	 */
	.globl	tl1_dmmu_miss_patch_tsb_1
tl1_dmmu_miss_patch_tsb_1:
	sethi	%uhi(TSB_KERNEL), %g6
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_dmmu_miss_patch_tsb_mask_1
tl1_dmmu_miss_patch_tsb_mask_1:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6

	srlx	%g5, TAR_VPN_SHIFT, %g5
	and	%g5, %g6, %g6
	sllx	%g6, TTE_SHIFT, %g6
	add	%g6, %g7, %g6

	/*
	 * Load the TTE.
	 */
	.globl	tl1_dmmu_miss_patch_quad_ldd_1
tl1_dmmu_miss_patch_quad_ldd_1:
	ldda	[%g6] TSB_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and that the virtual page numbers match.
	 */
	brgez,pn %g7, tl1_dmmu_miss_trap
	 srlx	%g6, TV_SIZE_BITS, %g6
	cmp	%g5, %g6
	bne,pn %xcc, tl1_dmmu_miss_trap
	 EMPTY

	/*
	 * Set the reference bit if it's currently clear.
	 */
	 andcc	%g7, TD_REF, %g0
	bz,a,pt	%xcc, tl1_dmmu_miss_set_ref
	 nop

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g7, [%g0] ASI_DTLB_DATA_IN_REG
	retry
	.align	128
	.endm

ENTRY(tl1_dmmu_miss_set_ref)
	/*
	 * Recompute the TTE address, which we clobbered loading the TTE.
	 * The TSB mask and address of the TSB are patched at startup.
	 */
	.globl	tl1_dmmu_miss_patch_tsb_2
tl1_dmmu_miss_patch_tsb_2:
	sethi	%uhi(TSB_KERNEL), %g6
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_dmmu_miss_patch_tsb_mask_2
tl1_dmmu_miss_patch_tsb_mask_2:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6

	and	%g5, %g6, %g5
	sllx	%g5, TTE_SHIFT, %g5
	add	%g5, %g7, %g5

	/*
	 * Set the reference bit.
	 */
	.globl	tl1_dmmu_miss_patch_asi_1
tl1_dmmu_miss_patch_asi_1:
	wr	%g0, TSB_ASI, %asi
	TTE_SET_REF(%g5, %g6, %g7, a, %asi)

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g6, 1f
	 nop

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g6, [%g0] ASI_DTLB_DATA_IN_REG
1:	retry
END(tl1_dmmu_miss_set_ref)

ENTRY(tl1_dmmu_miss_trap)
	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2

	KSTACK_CHECK

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	ba	%xcc, tl1_trap
	 mov	T_DATA_MISS | T_KERNEL, %o0
END(tl1_dmmu_miss_trap)

ENTRY(tl1_dmmu_miss_direct)
	/*
	 * Mask off the high bits of the virtual address to get the physical
	 * address, and or in the TTE bits.  The virtual address bits that
	 * correspond to the TTE valid and page size bits are left set, so
	 * they don't have to be included in the TTE bits below.  We know they
	 * are set because the virtual address is in the upper va hole.
	 * NB: if we are taking advantage of the ASI_ATOMIC_QUAD_LDD_PHYS
	 * and we get a miss on the directly accessed kernel TSB we must not
	 * set TD_CV in order to access it uniformly bypassing the D$.
	 */
	setx	TLB_DIRECT_ADDRESS_MASK, %g7, %g4
	and	%g5, %g4, %g4
	setx	TLB_DIRECT_TO_TTE_MASK, %g7, %g6
	and	%g5, %g6, %g5
	.globl	tl1_dmmu_miss_direct_patch_tsb_phys_1
tl1_dmmu_miss_direct_patch_tsb_phys_1:
	sethi	%uhi(TSB_KERNEL_PHYS), %g3
	or	%g3, %ulo(TSB_KERNEL_PHYS), %g3
	sllx	%g3, 32, %g3
	sethi	%hi(TSB_KERNEL_PHYS), %g7
	or	%g7, %g3, %g7
	cmp	%g4, %g7
	bl,pt	%xcc, 1f
	 or	%g5, TD_CP | TD_W, %g5
	.globl	tl1_dmmu_miss_direct_patch_tsb_phys_end_1
tl1_dmmu_miss_direct_patch_tsb_phys_end_1:
	sethi	%uhi(TSB_KERNEL_PHYS_END), %g3
	or	%g3, %ulo(TSB_KERNEL_PHYS_END), %g3
	sllx	%g3, 32, %g3
	sethi	%hi(TSB_KERNEL_PHYS_END), %g7
	or	%g7, %g3, %g7
	cmp	%g4, %g7
	bg,a,pt	%xcc, 1f
	 nop
	ba,pt	%xcc, 2f
	 nop
1:	or	%g5, TD_CV, %g5

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
2:	stxa	%g5, [%g0] ASI_DTLB_DATA_IN_REG
	retry
END(tl1_dmmu_miss_direct)

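/*
 * By way of example (addresses illustrative): a direct-mapped virtual
 * address has the high bits of the va hole set, so masking it with
 * TLB_DIRECT_ADDRESS_MASK recovers the physical address, while
 * TLB_DIRECT_TO_TTE_MASK keeps just the bits that double as the TTE
 * valid and page size fields; only TD_CP, TD_W and, when the address
 * does not cover the physical TSB, TD_CV remain to be or-ed in.
 */
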
	.macro	tl1_dmmu_prot
	ba,a	%xcc, tl1_dmmu_prot_1
	 nop
	.align	128
	.endm

ENTRY(tl1_dmmu_prot_1)
	/*
	 * Load the context and the virtual page number from the tag access
	 * register.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g5

	/*
	 * Extract the context from the contents of the tag access register.
	 * If it's non-zero this is a fault on a user address.  Note that the
	 * faulting address is passed in %g1.
	 */
	sllx	%g5, 64 - TAR_VPN_SHIFT, %g6
	brnz,a,pn %g6, tl1_dmmu_prot_user
	 mov	%g5, %g1

	/*
	 * Compute the address of the TTE.  The TSB mask and address of the
	 * TSB are patched at startup.
	 */
	.globl	tl1_dmmu_prot_patch_tsb_1
tl1_dmmu_prot_patch_tsb_1:
	sethi	%uhi(TSB_KERNEL), %g6
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_dmmu_prot_patch_tsb_mask_1
tl1_dmmu_prot_patch_tsb_mask_1:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6

	srlx	%g5, TAR_VPN_SHIFT, %g5
	and	%g5, %g6, %g6
	sllx	%g6, TTE_SHIFT, %g6
	add	%g6, %g7, %g6

	/*
	 * Load the TTE.
	 */
	.globl	tl1_dmmu_prot_patch_quad_ldd_1
tl1_dmmu_prot_patch_quad_ldd_1:
	ldda	[%g6] TSB_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and writeable and that the virtual page
	 * numbers match.
	 */
	brgez,pn %g7, tl1_dmmu_prot_trap
	 andcc	%g7, TD_SW, %g0
	bz,pn	%xcc, tl1_dmmu_prot_trap
	 srlx	%g6, TV_SIZE_BITS, %g6
	cmp	%g5, %g6
	bne,pn	%xcc, tl1_dmmu_prot_trap
	 EMPTY

	/*
	 * Delete the old TLB entry and clear the SFSR.
	 */
	 sllx	%g5, TAR_VPN_SHIFT, %g6
	or	%g6, TLB_DEMAP_NUCLEUS, %g6
	stxa	%g0, [%g6] ASI_DMMU_DEMAP
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	/*
	 * Recompute the TTE address, which we clobbered loading the TTE.
	 * The TSB mask and address of the TSB are patched at startup.
	 */
	.globl	tl1_dmmu_prot_patch_tsb_2
tl1_dmmu_prot_patch_tsb_2:
	sethi	%uhi(TSB_KERNEL), %g6
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_dmmu_prot_patch_tsb_mask_2
tl1_dmmu_prot_patch_tsb_mask_2:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6
	and	%g5, %g6, %g5
	sllx	%g5, TTE_SHIFT, %g5
	add	%g5, %g7, %g5

	/*
	 * Set the hardware write bit.
	 */
	.globl	tl1_dmmu_prot_patch_asi_1
tl1_dmmu_prot_patch_asi_1:
	wr	%g0, TSB_ASI, %asi
	TTE_SET_W(%g5, %g6, %g7, a, %asi)

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g6, 1f
	 or	%g6, TD_W, %g6

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g6, [%g0] ASI_DTLB_DATA_IN_REG
1:	retry
END(tl1_dmmu_prot_1)

ENTRY(tl1_dmmu_prot_trap)
	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Load the SFAR, SFSR and TAR.  Clear the SFSR.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl1_trap
	 mov	T_DATA_PROTECTION | T_KERNEL, %o0
END(tl1_dmmu_prot_trap)

	.macro	tl1_spill_0_n
	SPILL(stx, %sp + SPOFF, 8, EMPTY)
	saved
	retry
	.align	32
	RSF_FATAL(T_SPILL)
	RSF_FATAL(T_SPILL)
	.endm

	.macro	tl1_spill_2_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_3_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_7_n
	btst	1, %sp
	bnz,a,pn %xcc, tl1_spill_0_n
	 nop
	srl	%sp, 0, %sp
	SPILL(stw, %sp, 4, EMPTY)
	saved
	retry
	.align	32
	RSF_FATAL(T_SPILL)
	RSF_FATAL(T_SPILL)
	.endm

	.macro	tl1_spill_0_o
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_1_o
	wr	%g0, ASI_AIUP, %asi
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_2_o
	RSF_SPILL_TOPCB
	.align	128
	.endm

	.macro	tl1_fill_0_n
	FILL(ldx, %sp + SPOFF, 8, EMPTY)
	restored
	retry
	.align	32
	RSF_FATAL(T_FILL)
	RSF_FATAL(T_FILL)
	.endm

	.macro	tl1_fill_2_n
	wr	%g0, ASI_AIUP, %asi
	FILL(ldxa, %sp + SPOFF, 8, %asi)
	restored
	retry
	.align 32
	RSF_FILL_MAGIC
	RSF_FILL_MAGIC
	.endm

	.macro	tl1_fill_3_n
	wr	%g0, ASI_AIUP, %asi
	FILL(lduwa, %sp, 4, %asi)
	restored
	retry
	.align 32
	RSF_FILL_MAGIC
	RSF_FILL_MAGIC
	.endm

	.macro	tl1_fill_7_n
	btst	1, %sp
	bnz,a,pt %xcc, tl1_fill_0_n
	 nop
	srl	%sp, 0, %sp
	FILL(lduw, %sp, 4, EMPTY)
	restored
	retry
	.align	32
	RSF_FATAL(T_FILL)
	RSF_FATAL(T_FILL)
	.endm

/*
 * This is used to spill windows that are still occupied with user
 * data on kernel entry to the pcb.
 */
ENTRY(tl1_spill_topcb)
	wrpr	%g0, PSTATE_ALT, %pstate

	/* Free some globals for our use. */
	dec	24, ASP_REG
	stx	%g1, [ASP_REG + 0]
	stx	%g2, [ASP_REG + 8]
	stx	%g3, [ASP_REG + 16]

	ldx	[PCB_REG + PCB_NSAVED], %g1

	sllx	%g1, PTR_SHIFT, %g2
	add	%g2, PCB_REG, %g2
	stx	%sp, [%g2 + PCB_RWSP]

	sllx	%g1, RW_SHIFT, %g2
	add	%g2, PCB_REG, %g2
	SPILL(stx, %g2 + PCB_RW, 8, EMPTY)

	inc	%g1
	stx	%g1, [PCB_REG + PCB_NSAVED]

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl1_spill_topcb: pc=%#lx npc=%#lx sp=%#lx nsaved=%d"
	   , %g1, %g2, %g3, 7, 8, 9)
	rdpr	%tpc, %g2
	stx	%g2, [%g1 + KTR_PARM1]
	rdpr	%tnpc, %g2
	stx	%g2, [%g1 + KTR_PARM2]
	stx	%sp, [%g1 + KTR_PARM3]
	ldx	[PCB_REG + PCB_NSAVED], %g2
	stx	%g2, [%g1 + KTR_PARM4]
9:
#endif

	saved

	ldx	[ASP_REG + 16], %g3
	ldx	[ASP_REG + 8], %g2
	ldx	[ASP_REG + 0], %g1
	inc	24, ASP_REG
	retry
END(tl1_spill_topcb)

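/*
 * In effect (a C sketch for exposition, not compiled; field names follow
 * the PCB_* offsets used above):
 *
 *	n = pcb->pcb_nsaved;
 *	pcb->pcb_rwsp[n] = %sp;
 *	pcb->pcb_rw[n] = the window's %l0-%l7 and %i0-%i7;
 *	pcb->pcb_nsaved = n + 1;
 *
 * tl0_utrap below checks PCB_NSAVED and forces a T_SPILL trap so these
 * windows are copied out before returning directly to user code.
 */
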
1919	.macro	tl1_spill_bad	count
1920	.rept	\count
1921	sir
1922	.align	128
1923	.endr
1924	.endm
1925
1926	.macro	tl1_fill_bad	count
1927	.rept	\count
1928	sir
1929	.align	128
1930	.endr
1931	.endm
1932
1933	.macro	tl1_soft	count
1934	.rept	\count
1935	tl1_gen	T_SOFT | T_KERNEL
1936	.endr
1937	.endm
1938
1939	.sect	.trap
1940	.globl	tl_trap_begin
1941tl_trap_begin:
1942	nop
1943
1944	.align	0x8000
1945	.globl	tl0_base
1946
1947tl0_base:
1948	tl0_reserved	8				! 0x0-0x7
1949tl0_insn_excptn:
1950	tl0_insn_excptn					! 0x8
1951	tl0_reserved	1				! 0x9
1952tl0_insn_error:
1953	tl0_gen		T_INSTRUCTION_ERROR		! 0xa
1954	tl0_reserved	5				! 0xb-0xf
1955tl0_insn_illegal:
1956	tl0_gen		T_ILLEGAL_INSTRUCTION		! 0x10
1957tl0_priv_opcode:
1958	tl0_gen		T_PRIVILEGED_OPCODE		! 0x11
1959	tl0_reserved	14				! 0x12-0x1f
1960tl0_fp_disabled:
1961	tl0_gen		T_FP_DISABLED			! 0x20
1962tl0_fp_ieee:
1963	tl0_gen		T_FP_EXCEPTION_IEEE_754		! 0x21
1964tl0_fp_other:
1965	tl0_gen		T_FP_EXCEPTION_OTHER		! 0x22
1966tl0_tag_ovflw:
1967	tl0_gen		T_TAG_OVERFLOW			! 0x23
1968tl0_clean_window:
1969	clean_window					! 0x24
1970tl0_divide:
1971	tl0_gen		T_DIVISION_BY_ZERO		! 0x28
1972	tl0_reserved	7				! 0x29-0x2f
1973tl0_data_excptn:
1974	tl0_data_excptn					! 0x30
1975	tl0_reserved	1				! 0x31
1976tl0_data_error:
1977	tl0_gen		T_DATA_ERROR			! 0x32
1978	tl0_reserved	1				! 0x33
1979tl0_align:
1980	tl0_align					! 0x34
1981tl0_align_lddf:
1982	tl0_gen		T_RESERVED			! 0x35
1983tl0_align_stdf:
1984	tl0_gen		T_RESERVED			! 0x36
1985tl0_priv_action:
1986	tl0_gen		T_PRIVILEGED_ACTION		! 0x37
1987	tl0_reserved	9				! 0x38-0x40
1988tl0_intr_level:
1989	tl0_intr_level					! 0x41-0x4f
1990	tl0_reserved	16				! 0x50-0x5f
1991tl0_intr_vector:
1992	intr_vector					! 0x60
1993tl0_watch_phys:
1994	tl0_gen		T_PA_WATCHPOINT			! 0x61
1995tl0_watch_virt:
1996	tl0_gen		T_VA_WATCHPOINT			! 0x62
1997tl0_ecc:
1998	tl0_gen		T_CORRECTED_ECC_ERROR		! 0x63
1999tl0_immu_miss:
2000	tl0_immu_miss					! 0x64
2001tl0_dmmu_miss:
2002	tl0_dmmu_miss					! 0x68
2003tl0_dmmu_prot:
2004	tl0_dmmu_prot					! 0x6c
2005	tl0_reserved	16				! 0x70-0x7f
2006tl0_spill_0_n:
2007	tl0_spill_0_n					! 0x80
2008tl0_spill_1_n:
2009	tl0_spill_1_n					! 0x84
2010	tl0_spill_bad	14				! 0x88-0xbf
2011tl0_fill_0_n:
2012	tl0_fill_0_n					! 0xc0
2013tl0_fill_1_n:
2014	tl0_fill_1_n					! 0xc4
2015	tl0_fill_bad	14				! 0xc8-0xff
2016tl0_soft:
2017	tl0_gen		T_SYSCALL			! 0x100
2018	tl0_gen		T_BREAKPOINT			! 0x101
2019	tl0_gen		T_DIVISION_BY_ZERO		! 0x102
2020	tl0_reserved	1				! 0x103
2021	tl0_gen		T_CLEAN_WINDOW			! 0x104
2022	tl0_gen		T_RANGE_CHECK			! 0x105
2023	tl0_gen		T_FIX_ALIGNMENT			! 0x106
2024	tl0_gen		T_INTEGER_OVERFLOW		! 0x107
2025	tl0_gen		T_SYSCALL			! 0x108
2026	tl0_gen		T_SYSCALL			! 0x109
2027	tl0_fp_restore					! 0x10a
2028	tl0_reserved	5				! 0x10b-0x10f
2029	tl0_gen		T_TRAP_INSTRUCTION_16		! 0x110
2030	tl0_gen		T_TRAP_INSTRUCTION_17		! 0x111
2031	tl0_gen		T_TRAP_INSTRUCTION_18		! 0x112
2032	tl0_gen		T_TRAP_INSTRUCTION_19		! 0x113
2033	tl0_gen		T_TRAP_INSTRUCTION_20		! 0x114
2034	tl0_gen		T_TRAP_INSTRUCTION_21		! 0x115
2035	tl0_gen		T_TRAP_INSTRUCTION_22		! 0x116
2036	tl0_gen		T_TRAP_INSTRUCTION_23		! 0x117
2037	tl0_gen		T_TRAP_INSTRUCTION_24		! 0x118
2038	tl0_gen		T_TRAP_INSTRUCTION_25		! 0x119
2039	tl0_gen		T_TRAP_INSTRUCTION_26		! 0x11a
2040	tl0_gen		T_TRAP_INSTRUCTION_27		! 0x11b
2041	tl0_gen		T_TRAP_INSTRUCTION_28		! 0x11c
2042	tl0_gen		T_TRAP_INSTRUCTION_29		! 0x11d
2043	tl0_gen		T_TRAP_INSTRUCTION_30		! 0x11e
2044	tl0_gen		T_TRAP_INSTRUCTION_31		! 0x11f
2045	tl0_reserved	32				! 0x120-0x13f
2046	tl0_gen		T_SYSCALL			! 0x140
2047	tl0_syscall					! 0x141
2048	tl0_gen		T_SYSCALL			! 0x142
2049	tl0_gen		T_SYSCALL			! 0x143
2050	tl0_reserved	188				! 0x144-0x1ff
2051
2052tl1_base:
2053	tl1_reserved	8				! 0x200-0x207
2054tl1_insn_excptn:
2055	tl1_insn_excptn					! 0x208
2056	tl1_reserved	1				! 0x209
2057tl1_insn_error:
2058	tl1_gen		T_INSTRUCTION_ERROR		! 0x20a
2059	tl1_reserved	5				! 0x20b-0x20f
2060tl1_insn_illegal:
2061	tl1_gen		T_ILLEGAL_INSTRUCTION		! 0x210
2062tl1_priv_opcode:
2063	tl1_gen		T_PRIVILEGED_OPCODE		! 0x211
2064	tl1_reserved	14				! 0x212-0x21f
2065tl1_fp_disabled:
2066	tl1_fp_disabled					! 0x220
2067tl1_fp_ieee:
2068	tl1_gen		T_FP_EXCEPTION_IEEE_754		! 0x221
2069tl1_fp_other:
2070	tl1_gen		T_FP_EXCEPTION_OTHER		! 0x222
2071tl1_tag_ovflw:
2072	tl1_gen		T_TAG_OVERFLOW			! 0x223
2073tl1_clean_window:
2074	clean_window					! 0x224
2075tl1_divide:
2076	tl1_gen		T_DIVISION_BY_ZERO		! 0x228
2077	tl1_reserved	7				! 0x229-0x22f
2078tl1_data_excptn:
2079	tl1_data_excptn					! 0x230
2080	tl1_reserved	1				! 0x231
2081tl1_data_error:
2082	tl1_gen		T_DATA_ERROR			! 0x232
2083	tl1_reserved	1				! 0x233
2084tl1_align:
2085	tl1_align					! 0x234
2086tl1_align_lddf:
2087	tl1_gen		T_RESERVED			! 0x235
2088tl1_align_stdf:
2089	tl1_gen		T_RESERVED			! 0x236
2090tl1_priv_action:
2091	tl1_gen		T_PRIVILEGED_ACTION		! 0x237
2092	tl1_reserved	9				! 0x238-0x240
2093tl1_intr_level:
2094	tl1_intr_level					! 0x241-0x24f
2095	tl1_reserved	16				! 0x250-0x25f
2096tl1_intr_vector:
2097	intr_vector					! 0x260
2098tl1_watch_phys:
2099	tl1_gen		T_PA_WATCHPOINT			! 0x261
2100tl1_watch_virt:
2101	tl1_gen		T_VA_WATCHPOINT			! 0x262
2102tl1_ecc:
2103	tl1_gen		T_CORRECTED_ECC_ERROR		! 0x263
2104tl1_immu_miss:
2105	tl1_immu_miss					! 0x264
2106tl1_dmmu_miss:
2107	tl1_dmmu_miss					! 0x268
2108tl1_dmmu_prot:
2109	tl1_dmmu_prot					! 0x26c
2110	tl1_reserved	16				! 0x270-0x27f
2111tl1_spill_0_n:
2112	tl1_spill_0_n					! 0x280
2113	tl1_spill_bad	1				! 0x284
2114tl1_spill_2_n:
2115	tl1_spill_2_n					! 0x288
2116tl1_spill_3_n:
2117	tl1_spill_3_n					! 0x28c
2118	tl1_spill_bad	3				! 0x290-0x29b
2119tl1_spill_7_n:
2120	tl1_spill_7_n					! 0x29c
2121tl1_spill_0_o:
2122	tl1_spill_0_o					! 0x2a0
2123tl1_spill_1_o:
2124	tl1_spill_1_o					! 0x2a4
2125tl1_spill_2_o:
2126	tl1_spill_2_o					! 0x2a8
2127	tl1_spill_bad	5				! 0x2ac-0x2bf
2128tl1_fill_0_n:
2129	tl1_fill_0_n					! 0x2c0
2130	tl1_fill_bad	1				! 0x2c4
2131tl1_fill_2_n:
2132	tl1_fill_2_n					! 0x2c8
2133tl1_fill_3_n:
2134	tl1_fill_3_n					! 0x2cc
2135	tl1_fill_bad	3				! 0x2d0-0x2db
2136tl1_fill_7_n:
2137	tl1_fill_7_n					! 0x2dc
2138	tl1_fill_bad	8				! 0x2e0-0x2ff
2139	tl1_reserved	1				! 0x300
2140tl1_breakpoint:
2141	tl1_gen		T_BREAKPOINT			! 0x301
2142	tl1_gen		T_RSTRWP_PHYS			! 0x302
2143	tl1_gen		T_RSTRWP_VIRT			! 0x303
2144	tl1_reserved	252				! 0x304-0x3ff
2145
2146	.globl	tl_trap_end
2147tl_trap_end:
2148	nop
2149
2150/*
2151 * User trap entry point
2152 *
2153 * void tl0_utrap(u_long type, u_long o1, u_long o2, u_long tar, u_long sfar,
2154 *     u_long sfsr)
2155 *
2156 * This handles redirecting a trap back to usermode as a user trap.  The user
2157 * program must have first registered a trap handler with the kernel using
2158 * sysarch(SPARC_UTRAP_INSTALL).  The trap handler is passed enough state
2159 * for it to return to the trapping code directly, it will not return through
2160 * the kernel.  The trap type is passed in %o0, all out registers must be
2161 * passed through to tl0_trap or to usermode untouched.  Note that the
2162 * parameters passed in out registers may be used by the user trap handler.
2163 * Do not change the registers they are passed in or you will break the ABI.
2164 *
2165 * If the trap type allows user traps, setup state to execute the user trap
2166 * handler and bounce back to usermode, otherwise branch to tl0_trap.
2167 */
ENTRY(tl0_utrap)
	/*
	 * Check if the trap type allows user traps.
	 */
	cmp	%o0, UT_MAX
	bge,a,pt %xcc, tl0_trap
	 nop

	/*
	 * Load the user trap handler from the utrap table.
	 */
	ldx	[PCPU(CURTHREAD)], %l0
	ldx	[%l0 + TD_PROC], %l0
	ldx	[%l0 + P_MD + MD_UTRAP], %l0
	brz,pt	%l0, tl0_trap
	 sllx	%o0, PTR_SHIFT, %l1
	ldx	[%l0 + %l1], %l0
	brz,a,pt %l0, tl0_trap
	 nop

	/*
	 * If the save we did on entry to the kernel had to spill a window
	 * to the pcb, pretend we took a spill trap instead.  Any windows
	 * that are in the pcb must be copied out or the fill handler will
	 * not be able to find them, since the user trap handler returns
	 * directly to the trapping code.  Note that we only support precise
	 * user traps, which implies that the condition that caused the trap
	 * in the first place is still valid, so it will occur again when we
	 * re-execute the trapping instruction.
	 */
	ldx	[PCB_REG + PCB_NSAVED], %l1
	brnz,a,pn %l1, tl0_trap
	 mov	T_SPILL, %o0

	/*
	 * Pass %fsr in %l4, %tstate in %l5, %tpc in %l6 and %tnpc in %l7.
	 * The ABI specifies only %l6 and %l7, but we need to pass %fsr or
	 * it may be clobbered by an interrupt before the user trap code
	 * can read it, and we must pass %tstate in order to restore %ccr
	 * and %asi.  The %fsr must be stored to memory, so we use the
	 * temporary stack for that.
	 */
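	/*
	 * In effect (illustrative C only; "asp" stands for the temporary
	 * stack pointer kept in ASP_REG):
	 *
	 *	*--asp = fsr;		// %fsr can only be accessed via
	 *	l4 = *asp++;		// loads and stores, so it is
	 *				// bounced through memory into %l4
	 */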
	rd	%fprs, %l1
	or	%l1, FPRS_FEF, %l2
	wr	%l2, 0, %fprs
	dec	8, ASP_REG
	stx	%fsr, [ASP_REG]
	ldx	[ASP_REG], %l4
	inc	8, ASP_REG
	wr	%l1, 0, %fprs

	rdpr	%tstate, %l5
	rdpr	%tpc, %l6
	rdpr	%tnpc, %l7

	/*
	 * Set up %tnpc to return to.
	 */
	wrpr	%l0, 0, %tnpc

	/*
	 * Set up %wstate for return, clear WSTATE_TRANSITION.
	 */
	rdpr	%wstate, %l1
	and	%l1, WSTATE_NORMAL_MASK, %l1
	wrpr	%l1, 0, %wstate

	/*
	 * Set up %tstate for return, change the saved cwp to point to the
	 * current window instead of the window at the time of the trap.
	 */
	andn	%l5, TSTATE_CWP_MASK, %l1
	rdpr	%cwp, %l2
	wrpr	%l1, %l2, %tstate

	/*
	 * Set up %sp.  Userland processes will crash if this is not set up.
	 */
	sub	%fp, CCFSZ, %sp

	/*
	 * Execute the user trap handler.
	 */
	done
END(tl0_utrap)

/*
 * (Real) User trap entry point
 *
 * void tl0_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
 *     u_int sfsr)
 *
 * The following setup has been performed:
 *	- the windows have been split and the active user window has been saved
 *	  (maybe just to the pcb)
 *	- we are on alternate globals and interrupts are disabled
 *
 * We switch to the kernel stack, build a trapframe, switch to normal
 * globals, enable interrupts and call trap.
 *
 * NOTE: We must be very careful setting up the per-cpu pointer.  We know that
 * it has been pre-set in alternate globals, so we read it from there and set
 * up the normal %g7 *before* enabling interrupts.  This avoids any
 * possibility of cpu migration and using the wrong pcpup.
 */
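
/*
 * For orientation: the TF_* offsets used below are generated by genassym.c
 * from struct trapframe in <machine/frame.h>.  A rough sketch (member names
 * and order here are assumptions; the header is authoritative):
 *
 *	struct trapframe {
 *		uint64_t tf_global[8];	// TF_G0 .. TF_G7
 *		uint64_t tf_out[8];	// TF_O0 .. TF_O7, the user's %o0-%o7
 *		uint64_t tf_fprs, tf_fsr, tf_gsr, tf_level, tf_pil;
 *		uint64_t tf_sfar, tf_sfsr, tf_tar, tf_tnpc, tf_tpc;
 *		uint64_t tf_tstate, tf_type, tf_y, tf_wstate;
 *	};	// TF_SIZEOF bytes, carved out below the pcb
 */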
ENTRY(tl0_trap)
	/*
	 * Force kernel store order.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	rdpr	%tstate, %l0
	rdpr	%tpc, %l1
	rdpr	%tnpc, %l2
	rd	%y, %l3
	rd	%fprs, %l4
	rdpr	%wstate, %l5

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP,
	    "tl0_trap: td=%p type=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	stx	%o0, [%g1 + KTR_PARM2]
	rdpr	%pil, %g2
	stx	%g2, [%g1 + KTR_PARM3]
	stx	%l1, [%g1 + KTR_PARM4]
	stx	%l2, [%g1 + KTR_PARM5]
	stx	%i6, [%g1 + KTR_PARM6]
9:
#endif

1:	and	%l5, WSTATE_NORMAL_MASK, %l5
	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
	wrpr	%l5, WSTATE_KERNEL, %wstate
	rdpr	%canrestore, %l6
	wrpr	%l6, 0, %otherwin
	wrpr	%g0, 0, %canrestore

	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp

	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
	stx	%o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
	stx	%o3, [%sp + SPOFF + CCFSZ + TF_TAR]
	stx	%o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
	stx	%o5, [%sp + SPOFF + CCFSZ + TF_SFSR]

	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
	stx	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
	stx	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
	stx	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]

	wr	%g0, FPRS_FEF, %fprs
	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
	rd	%gsr, %l6
	stx	%l6, [%sp + SPOFF + CCFSZ + TF_GSR]
	wr	%g0, 0, %fprs

	mov	PCB_REG, %l0
	mov	PCPU_REG, %l1
	wrpr	%g0, PSTATE_NORMAL, %pstate

	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]

	mov	%l0, PCB_REG
	mov	%l1, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate

	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]

	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]

	set	tl0_ret - 8, %o7
	jmpl	%o2, %g0
	 add	%sp, CCFSZ + SPOFF, %o0
END(tl0_trap)

/*
 * void tl0_intr(u_int level, u_int mask)
 */
ENTRY(tl0_intr)
	/*
	 * Force kernel store order.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	rdpr	%tstate, %l0
	rdpr	%tpc, %l1
	rdpr	%tnpc, %l2
	rd	%y, %l3
	rd	%fprs, %l4
	rdpr	%wstate, %l5

#if KTR_COMPILE & KTR_INTR
	CATR(KTR_INTR,
	    "tl0_intr: td=%p level=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	stx	%o0, [%g1 + KTR_PARM2]
	rdpr	%pil, %g2
	stx	%g2, [%g1 + KTR_PARM3]
	stx	%l1, [%g1 + KTR_PARM4]
	stx	%l2, [%g1 + KTR_PARM5]
	stx	%i6, [%g1 + KTR_PARM6]
9:
#endif

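	/*
	 * Raise the PIL to the level of this interrupt and acknowledge it:
	 * %o1 carries a softint mask, and writing it to %clear_softint
	 * clears the corresponding bit(s) in %softint.
	 */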
	wrpr	%o0, 0, %pil
	wr	%o1, 0, %clear_softint

	and	%l5, WSTATE_NORMAL_MASK, %l5
	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
	wrpr	%l5, WSTATE_KERNEL, %wstate
	rdpr	%canrestore, %l6
	wrpr	%l6, 0, %otherwin
	wrpr	%g0, 0, %canrestore

	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp

	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
	stx	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
	stx	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
	stx	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]

	wr	%g0, FPRS_FEF, %fprs
	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
	rd	%gsr, %l6
	stx	%l6, [%sp + SPOFF + CCFSZ + TF_GSR]
	wr	%g0, 0, %fprs

	mov	%o0, %l3
	mov	T_INTERRUPT, %o1

	stx	%o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
	stx	%o1, [%sp + SPOFF + CCFSZ + TF_TYPE]

	mov	PCB_REG, %l0
	mov	PCPU_REG, %l1
	wrpr	%g0, PSTATE_NORMAL, %pstate

	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]

	mov	%l0, PCB_REG
	mov	%l1, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate

	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]

	SET(intr_handlers, %l1, %l0)
	sllx	%l3, IH_SHIFT, %l1
	ldx	[%l0 + %l1], %l1
	KASSERT(%l1, "tl0_intr: ih null")
	call	%l1
	 add	%sp, CCFSZ + SPOFF, %o0

	/* %l3 contains PIL */
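	/*
	 * Bump the counter for this PIL: pil_countp[] holds 16-bit indices
	 * (hence the sllx by 1) into the 64-bit intrcnt[] array (hence the
	 * sllx by 3).
	 */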
	SET(intrcnt, %l1, %l2)
	prefetcha [%l2] ASI_N, 1
	SET(pil_countp, %l1, %l0)
	sllx	%l3, 1, %l1
	lduh	[%l0 + %l1], %l0
	sllx	%l0, 3, %l0
	add	%l0, %l2, %l0
	ldx	[%l0], %l1
	inc	%l1
	stx	%l1, [%l0]

	lduw	[PCPU(CNT) + V_INTR], %l0
	inc	%l0
	stw	%l0, [PCPU(CNT) + V_INTR]

	ba,a	%xcc, tl0_ret
	 nop
END(tl0_intr)

/*
 * Initiate return to usermode.
 *
 * Called with a trapframe on the stack.  The window that was set up in
 * tl0_trap may have been used by "fast" trap handlers that pretend to be
 * leaf functions, so all ins and locals may have been clobbered since
 * then.
 *
 * This code is rather long and complicated.
 */
ENTRY(tl0_ret)
	/*
	 * Check for pending ASTs atomically with returning.  We must raise
	 * the PIL before checking, and if no ASTs are found the PIL must
	 * remain raised until the retry is executed, or we risk missing ASTs
	 * caused by interrupts occurring after the test.  If the PIL is
	 * lowered, as it is when we call ast, the check must be re-executed.
	 */
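	/*
	 * In rough C pseudocode (the control flow is the point, not the
	 * names):
	 *
	 *	pil = PIL_TICK;
	 *	if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) {
	 *		pil = 0;
	 *		ast(tf);	// may reopen the window for new ASTs,
	 *		goto tl0_ret;	// so restart the whole check
	 *	}
	 *	// no AST: PIL stays at PIL_TICK until the final retry
	 */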
	wrpr	%g0, PIL_TICK, %pil
	ldx	[PCPU(CURTHREAD)], %l0
	lduw	[%l0 + TD_FLAGS], %l1
	set	TDF_ASTPENDING | TDF_NEEDRESCHED, %l2
	and	%l1, %l2, %l1
	brz,a,pt %l1, 1f
	 nop

	/*
	 * We have an AST.  Re-enable interrupts and handle it, then restart
	 * the return sequence.
	 */
	wrpr	%g0, 0, %pil
	call	ast
	 add	%sp, CCFSZ + SPOFF, %o0
	ba,a	%xcc, tl0_ret
	 nop

	/*
	 * Check for windows that were spilled to the pcb and need to be
	 * copied out.  This must be the last thing that is done before the
	 * return to usermode.  If there are still user windows in the cpu
	 * and we call a nested function after this, which causes them to be
	 * spilled to the pcb, they will not be copied out and the stack will
	 * be inconsistent.
	 */
1:	ldx	[PCB_REG + PCB_NSAVED], %l1
	brz,a,pt %l1, 2f
	 nop
	wrpr	%g0, 0, %pil
	mov	T_SPILL, %o0
	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
	call	trap
	 add	%sp, SPOFF + CCFSZ, %o0
	ba,a	%xcc, tl0_ret
	 nop

	/*
	 * Restore the out and most global registers from the trapframe.
	 * The ins will become the outs when we restore below.
	 */
2:	ldx	[%sp + SPOFF + CCFSZ + TF_O0], %i0
	ldx	[%sp + SPOFF + CCFSZ + TF_O1], %i1
	ldx	[%sp + SPOFF + CCFSZ + TF_O2], %i2
	ldx	[%sp + SPOFF + CCFSZ + TF_O3], %i3
	ldx	[%sp + SPOFF + CCFSZ + TF_O4], %i4
	ldx	[%sp + SPOFF + CCFSZ + TF_O5], %i5
	ldx	[%sp + SPOFF + CCFSZ + TF_O6], %i6
	ldx	[%sp + SPOFF + CCFSZ + TF_O7], %i7

	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5

	/*
	 * Load everything we need to restore below before disabling
	 * interrupts.
	 */
	ldx	[%sp + SPOFF + CCFSZ + TF_FPRS], %l0
	ldx	[%sp + SPOFF + CCFSZ + TF_GSR], %l1
	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l3
	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l4
	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l5
	ldx	[%sp + SPOFF + CCFSZ + TF_WSTATE], %l6

	/*
	 * Disable interrupts to restore the special globals.  They are not
	 * saved and restored for all kernel traps, so an interrupt at the
	 * wrong time would clobber them.
	 */
	wrpr	%g0, PSTATE_NORMAL, %pstate

	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6
	ldx	[%sp + SPOFF + CCFSZ + TF_G7], %g7

	/*
	 * Switch to alternate globals.  This frees up some registers we
	 * can use after the restore changes our window.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Drop %pil to zero.  It must have been zero at the time of the
	 * trap, since we were in usermode, but it was raised above in
	 * order to check for ASTs atomically.  We have interrupts disabled
	 * so any interrupts will not be serviced until we complete the
	 * return to usermode.
	 */
	wrpr	%g0, 0, %pil

	/*
	 * Save %fprs in an alternate global so it can be restored after the
	 * restore instruction below.  If we restored it before the restore,
	 * and the restore trapped, we might run for a while with floating
	 * point enabled in the kernel, which we want to avoid.
	 */
	mov	%l0, %g1

	/*
	 * Restore %fsr and %gsr.  These need floating point enabled in %fprs,
	 * so we set it temporarily and then clear it.
	 */
	wr	%g0, FPRS_FEF, %fprs
	ldx	[%sp + SPOFF + CCFSZ + TF_FSR], %fsr
	wr	%l1, 0, %gsr
	wr	%g0, 0, %fprs

	/*
	 * Restore the program counters.  This could be done after the
	 * restore, but we're out of alternate globals to store them in...
	 */
	wrpr	%l2, 0, %tnpc
	wrpr	%l3, 0, %tpc

	/*
	 * Save %tstate in an alternate global and clear the %cwp field.  %cwp
	 * will be affected by the restore below and we need to make sure it
	 * points to the current window at that time, not the window that was
	 * active at the time of the trap.
	 */
	andn	%l4, TSTATE_CWP_MASK, %g2

	/*
	 * Save %y in an alternate global.
	 */
	mov	%l5, %g4

	/*
	 * Set up %wstate for return.  We need to restore the user window
	 * state which we saved in wstate.other when we trapped.  We also
	 * need to set the transition bit so the restore will be handled
	 * specially if it traps; the xor feature of wrpr (the operands are
	 * xor'ed together) is used to do that.
	 */
	srlx	%l6, WSTATE_OTHER_SHIFT, %g3
	wrpr	%g3, WSTATE_TRANSITION, %wstate

	/*
	 * Set up window management registers for return.  If not all user
	 * windows were spilled in the kernel %otherwin will be non-zero,
	 * so we need to transfer it to %canrestore to correctly restore
	 * those windows.  Otherwise everything gets set to zero and the
	 * restore below will fill a window directly from the user stack.
	 */
	rdpr	%otherwin, %o0
	wrpr	%o0, 0, %canrestore
	wrpr	%g0, 0, %otherwin
	wrpr	%o0, 0, %cleanwin

	/*
	 * Now do the restore.  If this instruction causes a fill trap which
	 * fails to fill a window from the user stack, we will resume at
	 * tl0_ret_fill_end and call back into the kernel.
	 */
	restore
tl0_ret_fill:

	/*
	 * We made it.  We're back in the window that was active at the time
	 * of the trap, and ready to return to usermode.
	 */

	/*
	 * Restore %fprs.  This was saved in an alternate global above.
	 */
	wr	%g1, 0, %fprs

	/*
	 * Fix up %tstate so the saved %cwp points to the current window and
	 * restore it.
	 */
	rdpr	%cwp, %g1
	wrpr	%g2, %g1, %tstate

	/*
	 * Restore the user window state.  The transition bit was set above
	 * for special handling of the restore; this clears it.
	 */
	wrpr	%g3, 0, %wstate

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl0_ret: td=%#lx pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	rdpr	%pil, %g2
	stx	%g2, [%g1 + KTR_PARM2]
	rdpr	%tpc, %g2
	stx	%g2, [%g1 + KTR_PARM3]
	rdpr	%tnpc, %g2
	stx	%g2, [%g1 + KTR_PARM4]
	stx	%sp, [%g1 + KTR_PARM5]
9:
#endif

	/*
	 * Restore %y.  Note that the CATR above clobbered it.
	 */
	wr	%g4, 0, %y

	/*
	 * Return to usermode.
	 */
	retry
tl0_ret_fill_end:

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl0_ret: fill magic ps=%#lx ws=%#lx sp=%#lx"
	    , %l0, %l1, %l2, 7, 8, 9)
	rdpr	%pstate, %l1
	stx	%l1, [%l0 + KTR_PARM1]
	stx	%l5, [%l0 + KTR_PARM2]
	stx	%sp, [%l0 + KTR_PARM3]
9:

	/*
	 * Restore %y clobbered by the CATR.  This was saved in %l5 above.
	 */
	wr	%l5, 0, %y
#endif

	/*
	 * The restore above caused a fill trap and the fill handler was
	 * unable to fill a window from the user stack.  The special fill
	 * handler recognized this and punted, sending us here.  We need
	 * to carefully undo any state that was restored before the restore
	 * was executed and call trap again.  Trap will copyin a window
	 * from the user stack, which will fault in the page we need, so the
	 * restore above will succeed when we try again.  If this fails,
	 * the process has trashed its stack, so we kill it.
	 */

	/*
	 * Restore the kernel window state.  This was saved in %l6 above, and
	 * since the restore failed we're back in the same window.
	 */
	wrpr	%l6, 0, %wstate

	/*
	 * Restore the normal globals, which have predefined values in the
	 * kernel.  We clobbered them above restoring the user's globals,
	 * so this is very important.
	 * XXX PSTATE_ALT must already be set.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate
	mov	PCB_REG, %o0
	mov	PCPU_REG, %o1
	wrpr	%g0, PSTATE_NORMAL, %pstate
	mov	%o0, PCB_REG
	mov	%o1, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate

	/*
	 * Simulate a fill trap and then start the whole return sequence over
	 * again.  This is special because it only copies in 1 window, not 2
	 * as we would for a normal failed fill.  This may be the first time
	 * the process has been run, so there may not be 2 windows worth of
	 * stack to copyin.
	 */
	mov	T_FILL_RET, %o0
	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
	call	trap
	 add	%sp, SPOFF + CCFSZ, %o0
	ba,a	%xcc, tl0_ret
	 nop
END(tl0_ret)

/*
 * Kernel trap entry point
 *
 * void tl1_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
 *     u_int sfsr)
 *
 * This is easy because the stack is already set up and the windows don't
 * need to be split.  We build a trapframe and call trap(), the same as
 * above, but the outs don't need to be saved.
 */
ENTRY(tl1_trap)
	rdpr	%tstate, %l0
	rdpr	%tpc, %l1
	rdpr	%tnpc, %l2
	rdpr	%pil, %l3
	rd	%y, %l4
	rdpr	%wstate, %l5

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl1_trap: td=%p type=%#lx pil=%#lx pc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	stx	%o0, [%g1 + KTR_PARM2]
	stx	%l3, [%g1 + KTR_PARM3]
	stx	%l1, [%g1 + KTR_PARM4]
	stx	%i6, [%g1 + KTR_PARM5]
9:
#endif

	wrpr	%g0, 1, %tl

	and	%l5, WSTATE_OTHER_MASK, %l5
	wrpr	%l5, WSTATE_KERNEL, %wstate

	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
	stx	%o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
	stx	%o3, [%sp + SPOFF + CCFSZ + TF_TAR]
	stx	%o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
	stx	%o5, [%sp + SPOFF + CCFSZ + TF_SFSR]

	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
	stx	%l3, [%sp + SPOFF + CCFSZ + TF_PIL]
	stx	%l4, [%sp + SPOFF + CCFSZ + TF_Y]

	mov	PCB_REG, %l0
	mov	PCPU_REG, %l1
	wrpr	%g0, PSTATE_NORMAL, %pstate

	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]

	mov	%l0, PCB_REG
	mov	%l1, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate

	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]

	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]

	set	tl1_ret - 8, %o7
	jmpl	%o2, %g0
	 add	%sp, CCFSZ + SPOFF, %o0
END(tl1_trap)

ENTRY(tl1_ret)
	ldx	[%sp + SPOFF + CCFSZ + TF_O0], %i0
	ldx	[%sp + SPOFF + CCFSZ + TF_O1], %i1
	ldx	[%sp + SPOFF + CCFSZ + TF_O2], %i2
	ldx	[%sp + SPOFF + CCFSZ + TF_O3], %i3
	ldx	[%sp + SPOFF + CCFSZ + TF_O4], %i4
	ldx	[%sp + SPOFF + CCFSZ + TF_O5], %i5
	ldx	[%sp + SPOFF + CCFSZ + TF_O6], %i6
	ldx	[%sp + SPOFF + CCFSZ + TF_O7], %i7

	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5

	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l0
	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l1
	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
	ldx	[%sp + SPOFF + CCFSZ + TF_PIL], %l3
	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l4

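	/*
	 * Restore %g6 and %g7 only when returning into the OpenFirmware PROM
	 * ([VM_MIN_PROM_ADDRESS, VM_MAX_PROM_ADDRESS]): within the kernel
	 * they are invariant (the PCB and per-CPU pointers), while the PROM
	 * runs with its own values, which must be put back.
	 */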
	set	VM_MIN_PROM_ADDRESS, %l5
	cmp	%l1, %l5
	bl,a,pt	%xcc, 1f
	 nop
	set	VM_MAX_PROM_ADDRESS, %l5
	cmp	%l1, %l5
	bg,a,pt	%xcc, 1f
	 nop

	wrpr	%g0, PSTATE_NORMAL, %pstate

	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6
	ldx	[%sp + SPOFF + CCFSZ + TF_G7], %g7

1:	wrpr	%g0, PSTATE_ALT, %pstate

	andn	%l0, TSTATE_CWP_MASK, %g1
	mov	%l1, %g2
	mov	%l2, %g3
	mov	%l4, %g4

	wrpr	%l3, 0, %pil

	restore

	wrpr	%g0, 2, %tl

	wrpr	%g2, 0, %tpc
	wrpr	%g3, 0, %tnpc
	rdpr	%cwp, %g2
	wrpr	%g1, %g2, %tstate

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl1_ret: td=%#lx pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	rdpr	%pil, %g2
	stx	%g2, [%g1 + KTR_PARM2]
	rdpr	%tstate, %g2
	stx	%g2, [%g1 + KTR_PARM3]
	rdpr	%tpc, %g2
	stx	%g2, [%g1 + KTR_PARM4]
	stx	%sp, [%g1 + KTR_PARM5]
9:
#endif

	wr	%g4, 0, %y

	retry
END(tl1_ret)

/*
 * void tl1_intr(u_int level, u_int mask)
 */
ENTRY(tl1_intr)
	rdpr	%tstate, %l0
	rdpr	%tpc, %l1
	rdpr	%tnpc, %l2
	rdpr	%pil, %l3
	rd	%y, %l4
	rdpr	%wstate, %l5

#if KTR_COMPILE & KTR_INTR
	CATR(KTR_INTR,
	    "tl1_intr: td=%p level=%#x pil=%#lx pc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	stx	%o0, [%g1 + KTR_PARM2]
	stx	%l3, [%g1 + KTR_PARM3]
	stx	%l1, [%g1 + KTR_PARM4]
	stx	%i6, [%g1 + KTR_PARM5]
9:
#endif

	wrpr	%o0, 0, %pil
	wr	%o1, 0, %clear_softint

	wrpr	%g0, 1, %tl

	and	%l5, WSTATE_OTHER_MASK, %l5
	wrpr	%l5, WSTATE_KERNEL, %wstate

	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
	stx	%l3, [%sp + SPOFF + CCFSZ + TF_PIL]
	stx	%l4, [%sp + SPOFF + CCFSZ + TF_Y]

	mov	%o0, %l7
	mov	T_INTERRUPT | T_KERNEL, %o1

	stx	%o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
	stx	%o1, [%sp + SPOFF + CCFSZ + TF_TYPE]

	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]

	mov	PCB_REG, %l4
	mov	PCPU_REG, %l5
	wrpr	%g0, PSTATE_NORMAL, %pstate

	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]

	mov	%l4, PCB_REG
	mov	%l5, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate

	SET(intr_handlers, %l5, %l4)
	sllx	%l7, IH_SHIFT, %l5
	ldx	[%l4 + %l5], %l5
	KASSERT(%l5, "tl1_intr: ih null")
	call	%l5
	 add	%sp, CCFSZ + SPOFF, %o0

	/* %l7 contains PIL */
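	/*
	 * As in tl0_intr: bump intrcnt[pil_countp[PIL]]; the shifts by 1 and
	 * 3 scale for the 16-bit index array and the 64-bit counters.
	 */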
	SET(intrcnt, %l5, %l4)
	prefetcha [%l4] ASI_N, 1
	SET(pil_countp, %l5, %l6)
	sllx	%l7, 1, %l5
	lduh	[%l5 + %l6], %l5
	sllx	%l5, 3, %l5
	add	%l5, %l4, %l4
	ldx	[%l4], %l5
	inc	%l5
	stx	%l5, [%l4]

	lduw	[PCPU(CNT) + V_INTR], %l4
	inc	%l4
	stw	%l4, [PCPU(CNT) + V_INTR]

	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l4

	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5

	wrpr	%g0, PSTATE_ALT, %pstate

	andn	%l0, TSTATE_CWP_MASK, %g1
	mov	%l1, %g2
	mov	%l2, %g3
	mov	%l4, %g4
	wrpr	%l3, 0, %pil

	restore

	wrpr	%g0, 2, %tl

	wrpr	%g2, 0, %tpc
	wrpr	%g3, 0, %tnpc
	rdpr	%cwp, %g2
	wrpr	%g1, %g2, %tstate

#if KTR_COMPILE & KTR_INTR
	CATR(KTR_INTR, "tl1_intr: td=%#x pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	rdpr	%pil, %g2
	stx	%g2, [%g1 + KTR_PARM2]
	rdpr	%tstate, %g2
	stx	%g2, [%g1 + KTR_PARM3]
	rdpr	%tpc, %g2
	stx	%g2, [%g1 + KTR_PARM4]
	stx	%sp, [%g1 + KTR_PARM5]
9:
#endif

	wr	%g4, 0, %y

	retry
END(tl1_intr)

	.globl	tl_text_end
tl_text_end:
	nop

/*
 * Freshly forked processes come here when switched to for the first time.
 * The arguments to fork_exit() have been set up in the locals; we must move
 * them to the outs.
 */
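
/*
 * For reference, the callee in sys/kern/kern_fork.c is:
 *
 *	void fork_exit(void (*callout)(void *, struct trapframe *),
 *	    void *arg, struct trapframe *frame);
 *
 * %l0, %l1 and %l2 were seeded by cpu_fork()/cpu_set_fork_handler() to
 * become these three arguments.
 */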
ENTRY(fork_trampoline)
#if KTR_COMPILE & KTR_PROC
	CATR(KTR_PROC, "fork_trampoline: td=%p (%s) cwp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	ldx	[%g2 + TD_PROC], %g2
	add	%g2, P_COMM, %g2
	stx	%g2, [%g1 + KTR_PARM2]
	rdpr	%cwp, %g2
	stx	%g2, [%g1 + KTR_PARM3]
9:
#endif
	mov	%l0, %o0
	mov	%l1, %o1
	call	fork_exit
	 mov	%l2, %o2
	ba,a	%xcc, tl0_ret
	 nop
END(fork_trampoline)
