/*-
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI: locore.s,v 1.36.2.15 1999/08/23 22:34:41 cp Exp
 */
/*-
 * Copyright (c) 2001 Jake Burkholder.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/sparc64/sparc64/exception.S 88781 2002-01-01 20:26:46Z jake $
 */

#include "opt_ddb.h"

#include <machine/asi.h>
#include <machine/asmacros.h>
#include <machine/ktr.h>
#include <machine/pstate.h>
#include <machine/trap.h>
#include <machine/tstate.h>
#include <machine/wstate.h>

#include "assym.s"

	.register %g2,#ignore
	.register %g3,#ignore
	.register %g6,#ignore
	.register %g7,#ignore

/*
 * Atomically increment an integer in memory.
 */
#define	ATOMIC_INC_INT(r1, r2, r3) \
	lduw	[r1], r2 ; \
9:	add	r2, 1, r3 ; \
	casa	[r1] ASI_N, r2, r3 ; \
	cmp	r2, r3 ; \
	bne,pn	%xcc, 9b ; \
	 mov	r3, r2

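/*
 * A rough C sketch of the loop above, where cas() stands in for casa's
 * compare-and-swap and returns the value found in memory:
 *
 *	old = *p;
 *	while ((found = cas(p, old, old + 1)) != old)
 *		old = found;
 *
 * The mov in the delay slot is what carries the freshly observed value
 * back around the loop, so memory is only loaded once.
 */
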
/*
 * Atomically set the reference bit in a tte.
 */
#define	TTE_SET_BIT(r1, r2, r3, bit) \
	add	r1, TTE_DATA, r1 ; \
	ldx	[r1], r2 ; \
9:	or	r2, bit, r3 ; \
	casxa	[r1] ASI_N, r2, r3 ; \
	cmp	r2, r3 ; \
	bne,pn	%xcc, 9b ; \
	 mov	r3, r2

#define	TTE_SET_REF(r1, r2, r3)		TTE_SET_BIT(r1, r2, r3, TD_REF)
#define	TTE_SET_W(r1, r2, r3)		TTE_SET_BIT(r1, r2, r3, TD_W)

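/*
 * TTE_SET_BIT is the same compare-and-swap pattern on the 64-bit tte data
 * word; roughly, in C (field names illustrative):
 *
 *	old = tp->tte_data;
 *	while ((found = casx(&tp->tte_data, old, old | bit)) != old)
 *		old = found;
 *
 * A racing update simply causes another pass with the value that won.
 */
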
/*
 * Macros for spilling and filling live windows.
 *
 * NOTE: These macros use exactly 16 instructions, and it is assumed that the
 * handler will not use more than 24 instructions total, to leave room for
 * resume vectors which occupy the last 8 instructions.
 */

#define	SPILL(storer, base, size, asi) \
	storer	%l0, [base + (0 * size)] asi ; \
	storer	%l1, [base + (1 * size)] asi ; \
	storer	%l2, [base + (2 * size)] asi ; \
	storer	%l3, [base + (3 * size)] asi ; \
	storer	%l4, [base + (4 * size)] asi ; \
	storer	%l5, [base + (5 * size)] asi ; \
	storer	%l6, [base + (6 * size)] asi ; \
	storer	%l7, [base + (7 * size)] asi ; \
	storer	%i0, [base + (8 * size)] asi ; \
	storer	%i1, [base + (9 * size)] asi ; \
	storer	%i2, [base + (10 * size)] asi ; \
	storer	%i3, [base + (11 * size)] asi ; \
	storer	%i4, [base + (12 * size)] asi ; \
	storer	%i5, [base + (13 * size)] asi ; \
	storer	%i6, [base + (14 * size)] asi ; \
	storer	%i7, [base + (15 * size)] asi

#define	FILL(loader, base, size, asi) \
	loader	[base + (0 * size)] asi, %l0 ; \
	loader	[base + (1 * size)] asi, %l1 ; \
	loader	[base + (2 * size)] asi, %l2 ; \
	loader	[base + (3 * size)] asi, %l3 ; \
	loader	[base + (4 * size)] asi, %l4 ; \
	loader	[base + (5 * size)] asi, %l5 ; \
	loader	[base + (6 * size)] asi, %l6 ; \
	loader	[base + (7 * size)] asi, %l7 ; \
	loader	[base + (8 * size)] asi, %i0 ; \
	loader	[base + (9 * size)] asi, %i1 ; \
	loader	[base + (10 * size)] asi, %i2 ; \
	loader	[base + (11 * size)] asi, %i3 ; \
	loader	[base + (12 * size)] asi, %i4 ; \
	loader	[base + (13 * size)] asi, %i5 ; \
	loader	[base + (14 * size)] asi, %i6 ; \
	loader	[base + (15 * size)] asi, %i7

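/*
 * As a usage example, SPILL(stxa, %sp + SPOFF, 8, %asi) expands to the 16
 * stores
 *
 *	stxa	%l0, [%sp + SPOFF + (0 * 8)] %asi
 *	...
 *	stxa	%i7, [%sp + SPOFF + (15 * 8)] %asi
 *
 * writing the window's locals and ins to the stack in one straight run.
 */
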
#define	ERRATUM50(reg)	mov reg, reg

#define	KSTACK_SLOP	1024

#define	KSTACK_CHECK \
	dec	16, ASP_REG ; \
	stx	%g1, [ASP_REG + 0] ; \
	stx	%g2, [ASP_REG + 8] ; \
	add	%sp, SPOFF, %g1 ; \
	andcc	%g1, (1 << PTR_SHIFT) - 1, %g0 ; \
	bnz,a	%xcc, tl1_kstack_fault ; \
	 inc	16, ASP_REG ; \
	ldx	[PCPU(CURTHREAD)], %g2 ; \
	ldx	[%g2 + TD_KSTACK], %g2 ; \
	add	%g2, KSTACK_SLOP, %g2 ; \
	subcc	%g1, %g2, %g1 ; \
	ble,a	%xcc, tl1_kstack_fault ; \
	 inc	16, ASP_REG ; \
	set	KSTACK_PAGES * PAGE_SIZE, %g2 ; \
	cmp	%g1, %g2 ; \
	bgt,a	%xcc, tl1_kstack_fault ; \
	 inc	16, ASP_REG ; \
	ldx	[ASP_REG + 8], %g2 ; \
	ldx	[ASP_REG + 0], %g1 ; \
	inc	16, ASP_REG

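/*
 * In rough C, the checks above are (names illustrative):
 *
 *	sp = %sp + SPOFF;
 *	base = curthread->td_kstack + KSTACK_SLOP;
 *	if ((sp & ((1 << PTR_SHIFT) - 1)) != 0 ||
 *	    sp <= base || sp - base > KSTACK_PAGES * PAGE_SIZE)
 *		goto tl1_kstack_fault;
 *
 * i.e. fault if the stack pointer is misaligned, has run down past the
 * slop area, or does not point into this thread's kernel stack at all.
 */
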
ENTRY(tl1_kstack_fault)
	rdpr	%tl, %g1
	cmp	%g1, 3
	beq	%xcc, 1f
	 nop
	blt	%xcc, 2f
	 nop
	sir

1:
#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl1_kstack_fault: tl=%#lx tpc=%#lx tnpc=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	rdpr	%tl, %g2
	stx	%g2, [%g1 + KTR_PARM1]
	rdpr	%tpc, %g2
	stx	%g2, [%g1 + KTR_PARM2]
	rdpr	%tnpc, %g2
	stx	%g2, [%g1 + KTR_PARM3]
9:
#endif
	wrpr	%g0, 2, %tl

2:
#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP,
	    "tl1_kstack_fault: sp=%#lx ks=%#lx cr=%#lx cs=%#lx ow=%#lx ws=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	add	%sp, SPOFF, %g2
	stx	%g2, [%g1 + KTR_PARM1]
	ldx	[PCPU(CURTHREAD)], %g2
	ldx	[%g2 + TD_KSTACK], %g2
	stx	%g2, [%g1 + KTR_PARM2]
	rdpr	%canrestore, %g2
	stx	%g2, [%g1 + KTR_PARM3]
	rdpr	%cansave, %g2
	stx	%g2, [%g1 + KTR_PARM4]
	rdpr	%otherwin, %g2
	stx	%g2, [%g1 + KTR_PARM5]
	rdpr	%wstate, %g2
	stx	%g2, [%g1 + KTR_PARM6]
9:
#endif

	wrpr	%g0, 0, %canrestore
	wrpr	%g0, 6, %cansave
	wrpr	%g0, 0, %otherwin
	wrpr	%g0, WSTATE_KERNEL, %wstate

	SET(panic_stack + PANIC_STACK_PAGES * PAGE_SIZE, %g2, %g1)
	sub	%g1, SPOFF + CCFSZ, %sp
	clr	%fp

	rdpr	%pil, %o1
	b	%xcc, tl1_trap
	 mov	T_KSTACK_FAULT | T_KERNEL, %o0
END(tl1_kstack_fault)

/*
 * Magic to resume from a spill or fill trap.  If we get an alignment or an
 * mmu fault during a spill or a fill, this macro will detect the fault and
 * resume at a set instruction offset in the trap handler.
 *
 * To check if the previous trap was a spill/fill we convert the trapped pc
 * to a trap type and verify that it is in the range of spill/fill vectors.
 * The spill/fill vectors are types 0x80-0xff and 0x280-0x2ff; masking off
 * the tl bit allows us to detect both ranges with one test.
 *
 * This is:
 *	0x80 <= (((%tpc - %tba) >> 5) & ~0x200) < 0x100
 *
 * To calculate the new pc we take advantage of the xor feature of wrpr.
 * Forcing all the low bits of the trapped pc on, we can produce any offset
 * into the spill/fill vector.  The size of a spill/fill trap vector is 0x80.
 *
 *	0x7f ^ 0x1f == 0x60
 *	0x1f == (0x80 - 0x60) - 1
 *
 * Which are the offset and xor value used to resume from alignment faults.
 */

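/*
 * A concrete check of the arithmetic: spill/fill vectors are 0x80 bytes
 * long and 0x80-byte aligned, so for a fault anywhere in the spill vector
 * for trap type 0x84, ((%tpc - %tba) >> 5) & ~0x200 yields a value in
 * [0x84, 0x87], inside the [0x80, 0x100) window tested below.  Then
 * %tpc | 0x7f points at the last byte of that vector, and xoring with
 * 0x1f == RSF_XOR(0x60) produces offset 0x60 into the vector, where the
 * alignment recovery code sits.
 */
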
/*
 * Determine if we have trapped inside of a spill/fill vector, and if so resume
 * at a fixed instruction offset in the trap vector.  Must be called on
 * alternate globals.
 */
#define	RESUME_SPILLFILL_MAGIC(stxa_g0_sfsr, xor) \
	dec	16, ASP_REG ; \
	stx	%g1, [ASP_REG + 0] ; \
	stx	%g2, [ASP_REG + 8] ; \
	rdpr	%tpc, %g1 ; \
	ERRATUM50(%g1) ; \
	rdpr	%tba, %g2 ; \
	sub	%g1, %g2, %g2 ; \
	srlx	%g2, 5, %g2 ; \
	andn	%g2, 0x200, %g2 ; \
	cmp	%g2, 0x80 ; \
	blu,pt	%xcc, 9f ; \
	 cmp	%g2, 0x100 ; \
	bgeu,pt	%xcc, 9f ; \
	 or	%g1, 0x7f, %g1 ; \
	wrpr	%g1, xor, %tnpc ; \
	stxa_g0_sfsr ; \
	ldx	[ASP_REG + 8], %g2 ; \
	ldx	[ASP_REG + 0], %g1 ; \
	inc	16, ASP_REG ; \
	done ; \
9:	ldx	[ASP_REG + 8], %g2 ; \
	ldx	[ASP_REG + 0], %g1 ; \
	inc	16, ASP_REG

/*
 * For certain faults we need to clear the sfsr mmu register before returning.
 */
#define	RSF_CLR_SFSR \
	wr	%g0, ASI_DMMU, %asi ; \
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi

#define	RSF_XOR(off)	((0x80 - off) - 1)

/*
 * Instruction offsets in spill and fill trap handlers for handling certain
 * nested traps, and corresponding xor constants for wrpr.
 */
#define	RSF_OFF_ALIGN	0x60
#define	RSF_OFF_MMU	0x70

#define	RESUME_SPILLFILL_ALIGN \
	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_ALIGN))
#define	RESUME_SPILLFILL_MMU \
	RESUME_SPILLFILL_MAGIC(EMPTY, RSF_XOR(RSF_OFF_MMU))
#define	RESUME_SPILLFILL_MMU_CLR_SFSR \
	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_MMU))

/*
 * Constant to add to %tnpc when taking a fill trap just before returning to
 * user mode.
 */
#define	RSF_FILL_INC	tl0_ret_fill_end - tl0_ret_fill

/*
 * Retry a spill or fill with a different wstate due to an alignment fault.
 * We may just be using the wrong stack offset.
 */
#define	RSF_ALIGN_RETRY(ws) \
	wrpr	%g0, (ws), %wstate ; \
	retry ; \
	.align	16

/*
 * Generate a T_SPILL or T_FILL trap if the window operation fails.
 */
#define	RSF_TRAP(type) \
	b	%xcc, tl0_sftrap ; \
	 mov	type, %g2 ; \
	.align	16

/*
 * Game over if the window operation fails.
 */
#define	RSF_FATAL(type) \
	b	%xcc, rsf_fatal ; \
	 mov	type, %g2 ; \
	.align	16

/*
 * Magic to resume from a failed fill a few instructions after the
 * corresponding restore.  This is used on return from the kernel to user
 * mode.
 */
#define	RSF_FILL_MAGIC \
	rdpr	%tnpc, %g1 ; \
	add	%g1, RSF_FILL_INC, %g1 ; \
	wrpr	%g1, 0, %tnpc ; \
	done ; \
	.align	16

/*
 * Spill to the pcb if a spill to the user stack in kernel mode fails.
 */
#define	RSF_SPILL_TOPCB \
	b,a	%xcc, tl1_spill_topcb ; \
	 nop ; \
	.align	16

ENTRY(rsf_fatal)
#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "rsf_fatal: bad window trap tt=%#lx type=%#lx"
	    , %g1, %g3, %g4, 7, 8, 9)
	rdpr	%tt, %g3
	stx	%g3, [%g1 + KTR_PARM1]
	stx	%g2, [%g1 + KTR_PARM2]
9:
#endif

	KSTACK_CHECK

	sir
END(rsf_fatal)

	.comm	intrnames, NIV * 8
	.comm	eintrnames, 0

	.comm	intrcnt, NIV * 8
	.comm	eintrcnt, 0

/*
 * Trap table and associated macros
 *
 * Due to its size a trap table is an inherently hard thing to represent in
 * code in a clean way.  There are approximately 1024 vectors, of 8 or 32
 * instructions each, many of which are identical.  The way that this is
 * laid out is that the instructions (8 or 32) for the actual trap vector
 * appear as an AS macro.  In general this code branches to tl0_trap or
 * tl1_trap, but if not, supporting code can be placed just after the
 * definition of the macro.  The macros are then instantiated in a different
 * section (.trap), which is set up to be placed by the linker at the
 * beginning of .text, and the code around the macros is moved to the end of
 * the trap table.  In this way the code that must be sequential in memory
 * can be split up, and located near its supporting code so that it is
 * easier to follow.
 */

	/*
	 * Clean window traps occur when %cleanwin is zero to ensure that data
	 * is not leaked between address spaces in registers.
	 */
	.macro	clean_window
	clr	%o0
	clr	%o1
	clr	%o2
	clr	%o3
	clr	%o4
	clr	%o5
	clr	%o6
	clr	%o7
	clr	%l0
	clr	%l1
	clr	%l2
	clr	%l3
	clr	%l4
	clr	%l5
	clr	%l6
	rdpr	%cleanwin, %l7
	inc	%l7
	wrpr	%l7, 0, %cleanwin
	clr	%l7
	retry
	.align	128
	.endm

	/*
	 * Stack fixups for entry from user mode.  We are still running on the
	 * user stack, and with its live registers, so we must save soon.  We
	 * are on alternate globals so we do have some registers.  Set the
	 * transitional window state, and do the save.  If this traps, we
	 * attempt to spill a window to the user stack.  If this fails, we
	 * spill the window to the pcb and continue.  Spilling to the pcb
	 * must not fail.
	 *
	 * NOTE: Must be called with alternate globals and clobbers %g1.
	 */

	.macro	tl0_split
	rdpr	%wstate, %g1
	wrpr	%g1, WSTATE_TRANSITION, %wstate
	save
	.endm

	.macro	tl0_setup	type
	tl0_split
	b	%xcc, tl0_trap
	 mov	\type, %o0
	.endm

	/*
	 * Generic trap type.  Call trap() with the specified type.
	 */
	.macro	tl0_gen		type
	tl0_setup \type
	.align	32
	.endm

	/*
	 * This is used to suck up the massive swaths of reserved trap types.
	 * Generates count "reserved" trap vectors.
	 */
	.macro	tl0_reserved	count
	.rept	\count
	tl0_gen	T_RESERVED
	.endr
	.endm

	.macro	tl0_fp_restore
	wr	%g0, FPRS_FEF, %fprs
	wr	%g0, ASI_BLK_S, %asi
	ldda	[PCB_REG + PCB_FPSTATE + FP_FB0] %asi, %f0
	ldda	[PCB_REG + PCB_FPSTATE + FP_FB1] %asi, %f16
	ldda	[PCB_REG + PCB_FPSTATE + FP_FB2] %asi, %f32
	ldda	[PCB_REG + PCB_FPSTATE + FP_FB3] %asi, %f48
	membar	#Sync
	done
	.align	32
	.endm

	.macro	tl0_insn_excptn
	wr	%g0, ASI_IMMU, %asi
	rdpr	%tpc, %g3
	ldxa	[%g0 + AA_IMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_IMMU_SFSR] %asi
	membar	#Sync
	b	%xcc, tl0_sfsr_trap
	 mov	T_INSTRUCTION_EXCEPTION, %g2
	.align	32
	.endm

	.macro	tl0_data_excptn
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync
	b	%xcc, tl0_sfsr_trap
	 mov	T_DATA_EXCEPTION, %g2
	.align	32
	.endm

	.macro	tl0_align
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync
	b	%xcc, tl0_sfsr_trap
	 mov	T_MEM_ADDRESS_NOT_ALIGNED, %g2
	.align	32
	.endm

ENTRY(tl0_sfsr_trap)
	tl0_split
	mov	%g3, %o4
	mov	%g4, %o5
	b	%xcc, tl0_trap
	 mov	%g2, %o0
END(tl0_sfsr_trap)

	.macro	tl0_intr level, mask
	wrpr	%g0, \level, %pil
	set	\mask, %g1
	wr	%g1, 0, %asr21
	tl0_split
	b	%xcc, tl0_intr
	 mov	\level, %o2
	.align	32
	.endm

#define	INTR(level, traplvl)						\
	tl ## traplvl ## _intr	level, 1 << level

#define	TICK(traplvl) \
	tl ## traplvl ## _intr	PIL_TICK, 1

#define	INTR_LEVEL(tl)							\
	INTR(1, tl) ;							\
	INTR(2, tl) ;							\
	INTR(3, tl) ;							\
	INTR(4, tl) ;							\
	INTR(5, tl) ;							\
	INTR(6, tl) ;							\
	INTR(7, tl) ;							\
	INTR(8, tl) ;							\
	INTR(9, tl) ;							\
	INTR(10, tl) ;							\
	INTR(11, tl) ;							\
	INTR(12, tl) ;							\
	INTR(13, tl) ;							\
	TICK(tl) ;							\
	INTR(15, tl) ;

	.macro	tl0_intr_level
	INTR_LEVEL(0)
	.endm

	.macro	tl0_intr_vector
	b,a	%xcc, intr_enqueue
	.align	32
	.endm

	.macro	tl0_immu_miss
	/*
	 * Force kernel store order.
	 */
	wrpr	%g0, PSTATE_MMU, %pstate

	/*
	 * Extract the 8KB pointer.
	 */
	ldxa	[%g0] ASI_IMMU_TSB_8KB_PTR_REG, %g6
	srax	%g6, TTE_SHIFT, %g6

	/*
	 * Compute the tte address in the primary user tsb.
	 */
	and	%g6, (1 << TSB_BUCKET_ADDRESS_BITS) - 1, %g1
	sllx	%g1, TSB_BUCKET_SHIFT + TTE_SHIFT, %g1
	add	%g1, TSB_REG, %g1

	/*
	 * Compute low bits of faulting va to check inside bucket loop.
	 */
	and	%g6, TD_VA_LOW_MASK >> TD_VA_LOW_SHIFT, %g2
	sllx	%g2, TD_VA_LOW_SHIFT, %g2
	or	%g2, TD_EXEC, %g2

	/*
	 * Load the tte tag target.
	 */
	ldxa	[%g0] ASI_IMMU_TAG_TARGET_REG, %g6

	/*
	 * Load mask for tte data check.
	 */
	mov	TD_VA_LOW_MASK >> TD_VA_LOW_SHIFT, %g3
	sllx	%g3, TD_VA_LOW_SHIFT, %g3
	or	%g3, TD_EXEC, %g3

	/*
	 * Loop over the ttes in this bucket
	 */

	/*
	 * Load the tte.
	 */
1:	ldda	[%g1] ASI_NUCLEUS_QUAD_LDD, %g4 /*, %g5 */

	/*
	 * Compare the tag.
	 */
	cmp	%g4, %g6
	bne,pn	%xcc, 2f
	 EMPTY

	/*
	 * Compare the data.
	 */
	 xor	%g2, %g5, %g4
	brgez,pn %g5, 2f
	 andcc	%g3, %g4, %g0
	bnz,pn	%xcc, 2f
	 EMPTY

	/*
	 * We matched a tte, load the tlb.
	 */

	/*
	 * Set the reference bit, if it's currently clear.
	 */
	 andcc	%g5, TD_REF, %g0
	bz,a,pn	%xcc, tl0_immu_miss_set_ref
	 nop

	/*
	 * Load the tte data into the tlb and retry the instruction.
	 */
	stxa	%g5, [%g0] ASI_ITLB_DATA_IN_REG
	retry

	/*
	 * Check the low bits to see if we've finished the bucket.
	 */
2:	add	%g1, 1 << TTE_SHIFT, %g1
	andcc	%g1, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
	bnz,a,pt %xcc, 1b
	 nop
	b,a	%xcc, tl0_immu_miss_trap
	.align	128
	.endm
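
/*
 * A rough C rendering of the bucket walk above (names illustrative):
 *
 *	b = tsb + bucket_index(va);
 *	for (tp = b; tp < b + (1 << TSB_BUCKET_SHIFT); tp++) {
 *		if (tp->tte_tag == tag_target &&
 *		    (long)tp->tte_data < 0 &&
 *		    ((tp->tte_data ^ want) & mask) == 0)
 *			return (tp);
 *	}
 *
 * want and mask both carry the low va bits plus TD_EXEC, and the sign
 * test is the brgez check of the valid bit.  On a hit the tte data is
 * stuffed straight into the itlb; otherwise we fall through to the trap.
 */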

ENTRY(tl0_immu_miss_set_ref)
	/*
	 * Set the reference bit.
	 */
	TTE_SET_REF(%g1, %g2, %g3)

	/*
	 * May have become invalid, in which case start over.
	 */
	brgez,pn %g2, 2f
	 or	%g2, TD_REF, %g2

	/*
	 * Load the tte data into the tlb and retry the instruction.
	 */
	stxa	%g2, [%g0] ASI_ITLB_DATA_IN_REG
2:	retry
END(tl0_immu_miss_set_ref)

ENTRY(tl0_immu_miss_trap)
	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Load the tar.  The sfar and sfsr aren't valid.
	 */
	wr	%g0, ASI_IMMU, %asi
	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g3

	/*
	 * Save the mmu registers on the stack, and call common trap code.
	 */
	tl0_split
	mov	%g3, %o3
	b	%xcc, tl0_trap
	 mov	T_INSTRUCTION_MISS, %o0
END(tl0_immu_miss_trap)

	.macro	dmmu_miss_user
	/*
	 * Extract the 8KB pointer and convert to an index.
	 */
	ldxa	[%g0] ASI_DMMU_TSB_8KB_PTR_REG, %g6
	srax	%g6, TTE_SHIFT, %g6

	/*
	 * Compute the tte bucket address.
	 */
	and	%g6, (1 << TSB_BUCKET_ADDRESS_BITS) - 1, %g1
	sllx	%g1, TSB_BUCKET_SHIFT + TTE_SHIFT, %g1
	add	%g1, TSB_REG, %g1

	/*
	 * Compute low bits of faulting va to check inside bucket loop.
	 */
	and	%g6, TD_VA_LOW_MASK >> TD_VA_LOW_SHIFT, %g2
	sllx	%g2, TD_VA_LOW_SHIFT, %g2

	/*
	 * Preload the tte tag target.
	 */
	ldxa	[%g0] ASI_DMMU_TAG_TARGET_REG, %g6

	/*
	 * Load mask for tte data check.
	 */
	mov	TD_VA_LOW_MASK >> TD_VA_LOW_SHIFT, %g3
	sllx	%g3, TD_VA_LOW_SHIFT, %g3

	/*
	 * Loop over the ttes in this bucket
	 */

	/*
	 * Load the tte.
	 */
1:	ldda	[%g1] ASI_NUCLEUS_QUAD_LDD, %g4 /*, %g5 */

	/*
	 * Compare the tag.
	 */
	cmp	%g4, %g6
	bne,pn	%xcc, 2f
	 EMPTY

	/*
	 * Compare the data.
	 */
	 xor	%g2, %g5, %g4
	brgez,pn %g5, 2f
	 andcc	%g3, %g4, %g0
	bnz,pn	%xcc, 2f
	 EMPTY

	/*
	 * We matched a tte, load the tlb.
	 */

	/*
	 * Set the reference bit, if it's currently clear.
	 */
	 andcc	%g5, TD_REF, %g0
	bz,a,pn	%xcc, dmmu_miss_user_set_ref
	 nop

	/*
	 * Load the tte data into the tlb and retry the instruction.
	 */
	stxa	%g5, [%g0] ASI_DTLB_DATA_IN_REG
	retry

	/*
	 * Check the low bits to see if we've finished the bucket.
	 */
2:	add	%g1, 1 << TTE_SHIFT, %g1
	andcc	%g1, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
	bnz,a,pt %xcc, 1b
	 nop
	.endm

ENTRY(dmmu_miss_user_set_ref)
	/*
	 * Set the reference bit.
	 */
	TTE_SET_REF(%g1, %g2, %g3)

	/*
	 * May have become invalid, in which case start over.
	 */
	brgez,pn %g2, 2f
	 or	%g2, TD_REF, %g2

	/*
	 * Load the tte data into the tlb and retry the instruction.
	 */
	stxa	%g2, [%g0] ASI_DTLB_DATA_IN_REG
2:	retry
END(dmmu_miss_user_set_ref)

	.macro	tl0_dmmu_miss
	/*
	 * Force kernel store order.
	 */
	wrpr	%g0, PSTATE_MMU, %pstate

	/*
	 * Try a fast inline lookup of the primary tsb.
	 */
	dmmu_miss_user

	/*
	 * Not in primary tsb, call c code.  Nothing else fits inline.
	 */
	b,a	tl0_dmmu_miss_trap
	.align	128
	.endm

ENTRY(tl0_dmmu_miss_trap)
	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Load the tar.  The sfar and sfsr aren't valid.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g3

	/*
	 * Save the mmu registers on the stack and call common trap code.
	 */
	tl0_split
	mov	%g3, %o3
	b	%xcc, tl0_trap
	 mov	T_DATA_MISS, %o0
END(tl0_dmmu_miss_trap)

	.macro	dmmu_prot_user
	/*
	 * Extract the 8KB pointer and convert to an index.
	 */
	ldxa	[%g0] ASI_DMMU_TSB_8KB_PTR_REG, %g6
	srax	%g6, TTE_SHIFT, %g6

	/*
	 * Compute the tte bucket address.
	 */
	and	%g6, (1 << TSB_BUCKET_ADDRESS_BITS) - 1, %g1
	sllx	%g1, TSB_BUCKET_SHIFT + TTE_SHIFT, %g1
	add	%g1, TSB_REG, %g1

	/*
	 * Compute low bits of faulting va to check inside bucket loop.
	 */
	and	%g6, TD_VA_LOW_MASK >> TD_VA_LOW_SHIFT, %g2
	sllx	%g2, TD_VA_LOW_SHIFT, %g2
	or	%g2, TD_SW, %g2

	/*
	 * Preload the tte tag target.
	 */
	ldxa	[%g0] ASI_DMMU_TAG_TARGET_REG, %g6

	/*
	 * Load mask for tte data check.
	 */
	mov	TD_VA_LOW_MASK >> TD_VA_LOW_SHIFT, %g3
	sllx	%g3, TD_VA_LOW_SHIFT, %g3
	or	%g3, TD_SW, %g3

	/*
	 * Loop over the ttes in this bucket
	 */

	/*
	 * Load the tte.
	 */
1:	ldda	[%g1] ASI_NUCLEUS_QUAD_LDD, %g4 /*, %g5 */

	/*
	 * Compare the tag.
	 */
	cmp	%g4, %g6
	bne,pn	%xcc, 2f
	 EMPTY

	/*
	 * Compare the data.
	 */
	 xor	%g2, %g5, %g4
	brgez,pn %g5, 2f
	 andcc	%g3, %g4, %g0
	bnz,a,pn %xcc, 2f
	 nop

	b,a	%xcc, dmmu_prot_set_w
	 nop

	/*
	 * Check the low bits to see if we've finished the bucket.
	 */
2:	add	%g1, 1 << TTE_SHIFT, %g1
	andcc	%g1, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
	bnz,a,pn %xcc, 1b
	 nop
	.endm

	.macro	tl0_dmmu_prot
	/*
	 * Force kernel store order.
	 */
	wrpr	%g0, PSTATE_MMU, %pstate

	/*
	 * Try a fast inline lookup of the tsb.
	 */
	dmmu_prot_user

	/*
	 * Not in tsb.  Call c code.
	 */
	b,a	%xcc, tl0_dmmu_prot_trap
	 nop
	.align	128
	.endm

ENTRY(dmmu_prot_set_w)
	/*
	 * Set the hardware write bit in the tte.
	 */
	TTE_SET_W(%g1, %g2, %g3)

	/*
	 * Delete the old TLB entry.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1
	srlx	%g1, PAGE_SHIFT, %g1
	sllx	%g1, PAGE_SHIFT, %g1
	stxa	%g0, [%g1] ASI_DMMU_DEMAP
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi

	brgez,pn %g2, 1f
	 or	%g2, TD_W, %g2

	/*
	 * Load the tte data into the tlb and retry the instruction.
	 */
	stxa	%g2, [%g0] ASI_DTLB_DATA_IN_REG
1:	retry
END(dmmu_prot_set_w)

ENTRY(tl0_dmmu_prot_trap)
	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Load the tar, sfar and sfsr.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	/*
	 * Save the mmu registers on the stack and call common trap code.
	 */
	tl0_split
	mov	%g2, %o3
	mov	%g3, %o4
	mov	%g4, %o5
	b	%xcc, tl0_trap
	 mov	T_DATA_PROTECTION, %o0
END(tl0_dmmu_prot_trap)

	.macro	tl0_spill_0_n
	andcc	%sp, 1, %g0
	bz,pn	%xcc, 2f
	 wr	%g0, ASI_AIUP, %asi
1:	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	wrpr	%g0, WSTATE_ASSUME64, %wstate
	retry
	.align	32
	RSF_TRAP(T_SPILL)
	RSF_TRAP(T_SPILL)
	.endm

	.macro	tl0_spill_1_n
	andcc	%sp, 1, %g0
	bnz,pt	%xcc, 1b
	 wr	%g0, ASI_AIUP, %asi
2:	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	SPILL(stwa, %sp, 4, %asi)
	saved
	wrpr	%g0, WSTATE_ASSUME32, %wstate
	retry
	.align	32
	RSF_TRAP(T_SPILL)
	RSF_TRAP(T_SPILL)
	.endm

	.macro	tl0_spill_2_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_ALIGN_RETRY(WSTATE_TEST32)
	RSF_TRAP(T_SPILL)
	.endm

	.macro	tl0_spill_3_n
	wr	%g0, ASI_AIUP, %asi
	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_ALIGN_RETRY(WSTATE_TEST64)
	RSF_TRAP(T_SPILL)
	.endm

	.macro	tl0_fill_0_n
	andcc	%sp, 1, %g0
	bz,pn	%xcc, 2f
	 wr	%g0, ASI_AIUP, %asi
1:	FILL(ldxa, %sp + SPOFF, 8, %asi)
	restored
	wrpr	%g0, WSTATE_ASSUME64, %wstate
	retry
	.align	32
	RSF_TRAP(T_FILL)
	RSF_TRAP(T_FILL)
	.endm

	.macro	tl0_fill_1_n
	andcc	%sp, 1, %g0
	bnz	%xcc, 1b
	 wr	%g0, ASI_AIUP, %asi
2:	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	FILL(lduwa, %sp, 4, %asi)
	restored
	wrpr	%g0, WSTATE_ASSUME32, %wstate
	retry
	.align	32
	RSF_TRAP(T_FILL)
	RSF_TRAP(T_FILL)
	.endm

	.macro	tl0_fill_2_n
	wr	%g0, ASI_AIUP, %asi
	FILL(ldxa, %sp + SPOFF, 8, %asi)
	restored
	retry
	.align	32
	RSF_ALIGN_RETRY(WSTATE_TEST32)
	RSF_TRAP(T_FILL)
	.endm

	.macro	tl0_fill_3_n
	wr	%g0, ASI_AIUP, %asi
	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	FILL(lduwa, %sp, 4, %asi)
	restored
	retry
	.align	32
	RSF_ALIGN_RETRY(WSTATE_TEST64)
	RSF_TRAP(T_FILL)
	.endm

ENTRY(tl0_sftrap)
	rdpr	%tstate, %g1
	and	%g1, TSTATE_CWP_MASK, %g1
	wrpr	%g1, 0, %cwp
	tl0_split
	b	%xcc, tl0_trap
	 mov	%g2, %o0
END(tl0_sftrap)

	.macro	tl0_spill_bad	count
	.rept	\count
	sir
	.align	128
	.endr
	.endm

	.macro	tl0_fill_bad	count
	.rept	\count
	sir
	.align	128
	.endr
	.endm

	.macro	tl0_syscall
	tl0_split
	b	%xcc, tl0_syscall
	 mov	T_SYSCALL, %o0
	.endm

	.macro	tl0_soft	count
	.rept	\count
	tl0_gen	T_SOFT
	.endr
	.endm

	.macro	tl1_kstack
	save	%sp, -CCFSZ, %sp
	.endm

	.macro	tl1_setup	type
	tl1_kstack
	rdpr	%pil, %o1
	b	%xcc, tl1_trap
	 mov	\type | T_KERNEL, %o0
	.endm

	.macro	tl1_gen		type
	tl1_setup \type
	.align	32
	.endm

	.macro	tl1_reserved	count
	.rept	\count
	tl1_gen	T_RESERVED
	.endr
	.endm

	.macro	tl1_insn_excptn
	wr	%g0, ASI_IMMU, %asi
	rdpr	%tpc, %g3
	ldxa	[%g0 + AA_IMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_IMMU_SFSR] %asi
	membar	#Sync
	b	%xcc, tl1_insn_exceptn_trap
	 mov	T_INSTRUCTION_EXCEPTION | T_KERNEL, %g2
	.align	32
	.endm

ENTRY(tl1_insn_exceptn_trap)
	tl1_kstack
	rdpr	%pil, %o1
	mov	%g3, %o4
	mov	%g4, %o5
	b	%xcc, tl1_trap
	 mov	%g2, %o0
END(tl1_insn_exceptn_trap)

	.macro	tl1_data_excptn
	b,a	%xcc, tl1_data_excptn_trap
	 nop
	.align	32
	.endm

ENTRY(tl1_data_excptn_trap)
	wrpr	%g0, PSTATE_ALT, %pstate
	RESUME_SPILLFILL_MMU_CLR_SFSR
	b	%xcc, tl1_sfsr_trap
	 mov	T_DATA_EXCEPTION | T_KERNEL, %g2
END(tl1_data_excptn_trap)

	.macro	tl1_align
	b,a	%xcc, tl1_align_trap
	 nop
	.align	32
	.endm

ENTRY(tl1_align_trap)
	wrpr	%g0, PSTATE_ALT, %pstate
	RESUME_SPILLFILL_ALIGN
	b	%xcc, tl1_sfsr_trap
	 mov	T_MEM_ADDRESS_NOT_ALIGNED | T_KERNEL, %g2
END(tl1_align_trap)

ENTRY(tl1_sfsr_trap)
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	tl1_kstack
	rdpr	%pil, %o1
	mov	%g3, %o4
	mov	%g4, %o5
	b	%xcc, tl1_trap
	 mov	%g2, %o0
END(tl1_sfsr_trap)

	.macro	tl1_intr level, mask
	tl1_kstack
	rdpr	%pil, %o1
	wrpr	%g0, \level, %pil
	set	\mask, %o2
	wr	%o2, 0, %asr21
	b	%xcc, tl1_intr
	 mov	\level, %o2
	.align	32
	.endm

	.macro	tl1_intr_level
	INTR_LEVEL(1)
	.endm

	.macro	tl1_intr_vector
	b,a	intr_enqueue
	.align	32
	.endm

ENTRY(intr_enqueue)
	/*
	 * Find the head of the queue and advance it.
	 */
	ldx	[IQ_REG + IQ_HEAD], %g1
	add	%g1, 1, %g2
	and	%g2, IQ_MASK, %g2
	stx	%g2, [IQ_REG + IQ_HEAD]

#ifdef INVARIANTS
	/*
	 * If the new head is the same as the tail, the next interrupt will
	 * overwrite unserviced packets.  This is bad.
	 */
	ldx	[IQ_REG + IQ_TAIL], %g3
	cmp	%g3, %g2
	be	%xcc, 3f
	 nop
#endif

	/*
	 * Find the iqe.
	 */
	sllx	%g1, IQE_SHIFT, %g1
	add	%g1, IQ_REG, %g1

	/*
	 * Load the interrupt packet from the hardware.
	 */
	wr	%g0, ASI_SDB_INTR_R, %asi
	ldxa	[%g0] ASI_INTR_RECEIVE, %g2
	ldxa	[%g0 + AA_SDB_INTR_D0] %asi, %g3
	ldxa	[%g0 + AA_SDB_INTR_D1] %asi, %g4
	ldxa	[%g0 + AA_SDB_INTR_D2] %asi, %g5
	stxa	%g0, [%g0] ASI_INTR_RECEIVE
	membar	#Sync

	/*
	 * Store the tag and first data word in the iqe.  These are always
	 * valid.
	 */
	stw	%g2, [%g1 + IQE_TAG]
	stx	%g3, [%g1 + IQE_VEC]

	/*
	 * Load the function and argument, if not supplied in iqe.
	 */
	sllx	%g3, IV_SHIFT, %g3
	brnz,pn %g4, 1f
	 add	%g3, IV_REG, %g3
	ldx	[%g3 + IV_FUNC], %g4
	ldx	[%g3 + IV_ARG], %g5

	/*
	 * Save the priority and the two remaining data words in the iqe.
	 */
1:	lduw	[%g3 + IV_PRI], %g3
	stw	%g3, [%g1 + IQE_PRI]
	stx	%g4, [%g1 + IQE_FUNC]
	stx	%g5, [%g1 + IQE_ARG]

#if KTR_COMPILE & KTR_INTR
	CATR(KTR_INTR, "intr_enqueue: head=%d tail=%d pri=%p tag=%#x vec=%#x"
	    , %g2, %g4, %g5, 7, 8, 9)
	ldx	[IQ_REG + IQ_HEAD], %g4
	stx	%g4, [%g2 + KTR_PARM1]
	ldx	[IQ_REG + IQ_TAIL], %g4
	stx	%g4, [%g2 + KTR_PARM2]
	lduw	[%g1 + IQE_PRI], %g4
	stx	%g4, [%g2 + KTR_PARM3]
	lduw	[%g1 + IQE_TAG], %g4
	stx	%g4, [%g2 + KTR_PARM4]
	ldx	[%g1 + IQE_VEC], %g4
	stx	%g4, [%g2 + KTR_PARM5]
9:
#endif

	/*
	 * Trigger a softint at the level indicated by the priority.
	 */
	mov	1, %g2
	sllx	%g2, %g3, %g2
	wr	%g2, 0, %asr20

	retry

#ifdef INVARIANTS
	/*
	 * The interrupt queue is about to overflow.  We are in big trouble.
	 */
3:	sir
#endif
END(intr_enqueue)
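
/*
 * In effect the queue manipulation above is a power-of-two ring buffer;
 * a rough C sketch (names illustrative):
 *
 *	i = iq_head;
 *	iq_head = (i + 1) & IQ_MASK;
 *	iqe[i] = packet read from the interrupt receive ASIs;
 *	softint(1 << iqe[i].iqe_pri);
 *
 * The INVARIANTS check fires when the advanced head catches the tail,
 * i.e. when the next packet would overwrite one not yet serviced.
 */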

	.macro	tl1_immu_miss
	ldxa	[%g0] ASI_IMMU_TAG_TARGET_REG, %g1
	sllx	%g1, TT_VA_SHIFT - (PAGE_SHIFT - TTE_SHIFT), %g2

	set	TSB_KERNEL_VA_MASK, %g3
	and	%g2, %g3, %g2

	ldxa	[%g0] ASI_IMMU_TSB_8KB_PTR_REG, %g4
	add	%g2, %g4, %g2

	/*
	 * Load the tte, check that it's valid and that the tags match.
	 */
	ldda	[%g2] ASI_NUCLEUS_QUAD_LDD, %g4 /*, %g5 */
	brgez,pn %g5, 2f
	 cmp	%g4, %g1
	bne,pn	%xcc, 2f
	 andcc	%g5, TD_EXEC, %g0
	bz,pn	%xcc, 2f
	 EMPTY

	/*
	 * Set the reference bit, if it's currently clear.
	 */
	 andcc	%g5, TD_REF, %g0
	bnz,pt	%xcc, 1f
	 EMPTY

	TTE_SET_REF(%g2, %g3, %g4)

	/*
	 * Load the tte data into the TLB and retry the instruction.
	 */
1:	stxa	%g5, [%g0] ASI_ITLB_DATA_IN_REG
	retry

	/*
	 * Switch to alternate globals.
	 */
2:	wrpr	%g0, PSTATE_ALT, %pstate

	wr	%g0, ASI_IMMU, %asi
	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g3

	tl1_kstack
	rdpr	%pil, %o1
	mov	%g3, %o3
	b	%xcc, tl1_trap
	 mov	T_INSTRUCTION_MISS | T_KERNEL, %o0
	.align	128
	.endm

	.macro	tl1_dmmu_miss
	ldxa	[%g0] ASI_DMMU_TAG_TARGET_REG, %g1
	srlx	%g1, TT_CTX_SHIFT, %g2
	brnz,pn	%g2, tl1_dmmu_miss_user
	 sllx	%g1, TT_VA_SHIFT - (PAGE_SHIFT - TTE_SHIFT), %g2

	set	TSB_KERNEL_VA_MASK, %g3
	and	%g2, %g3, %g2

	ldxa	[%g0] ASI_DMMU_TSB_8KB_PTR_REG, %g4
	add	%g2, %g4, %g2

	/*
	 * Load the tte, check that it's valid and that the tags match.
	 */
	ldda	[%g2] ASI_NUCLEUS_QUAD_LDD, %g4 /*, %g5 */
	brgez,pn %g5, 2f
	 cmp	%g4, %g1
	bne,pn	%xcc, 2f
	 EMPTY

	/*
	 * Set the reference bit, if it's currently clear.
	 */
	 andcc	%g5, TD_REF, %g0
	bnz,pt	%xcc, 1f
	 EMPTY

	TTE_SET_REF(%g2, %g3, %g4)

	/*
	 * Load the tte data into the TLB and retry the instruction.
	 */
1:	stxa	%g5, [%g0] ASI_DTLB_DATA_IN_REG
	retry

	/*
	 * Switch to alternate globals.
	 */
2:	wrpr	%g0, PSTATE_ALT, %pstate

	b,a	%xcc, tl1_dmmu_miss_trap
	 nop
	.align	128
	.endm

ENTRY(tl1_dmmu_miss_trap)
#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl1_dmmu_miss_trap: tar=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	mov	AA_DMMU_TAR, %g2
	ldxa	[%g2] ASI_DMMU, %g2
	stx	%g2, [%g1 + KTR_PARM1]
9:
#endif

	KSTACK_CHECK

	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g3

	tl1_kstack
	rdpr	%pil, %o1
	mov	%g3, %o3
	b	%xcc, tl1_trap
	 mov	T_DATA_MISS | T_KERNEL, %o0
END(tl1_dmmu_miss_trap)

ENTRY(tl1_dmmu_miss_user)
	/*
	 * Try a fast inline lookup of the user tsb.
	 */
	dmmu_miss_user

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/* Handle faults during window spill/fill. */
	RESUME_SPILLFILL_MMU

	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g3

	tl1_kstack
	rdpr	%pil, %o1
	mov	%g3, %o3
	b	%xcc, tl1_trap
	 mov	T_DATA_MISS | T_KERNEL, %o0
END(tl1_dmmu_miss_user)

	.macro	tl1_dmmu_prot
	ldxa	[%g0] ASI_DMMU_TAG_TARGET_REG, %g1
	srlx	%g1, TT_CTX_SHIFT, %g2
	brnz,pn	%g2, tl1_dmmu_prot_user
	 sllx	%g1, TT_VA_SHIFT - (PAGE_SHIFT - TTE_SHIFT), %g2

	set	TSB_KERNEL_VA_MASK, %g3
	and	%g2, %g3, %g2

	ldxa	[%g0] ASI_DMMU_TSB_8KB_PTR_REG, %g4
	add	%g2, %g4, %g2

	/*
	 * Load the tte, check that it's valid and that the tags match.
	 */
	ldda	[%g2] ASI_NUCLEUS_QUAD_LDD, %g4 /*, %g5 */
	brgez,pn %g5, 1f
	 cmp	%g4, %g1
	bne,pn	%xcc, 1f
	 andcc	%g5, TD_SW, %g0
	bz,pn	%xcc, 1f
	 EMPTY

	TTE_SET_W(%g2, %g3, %g4)

	/*
	 * Delete the old TLB entry.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1
	stxa	%g0, [%g1] ASI_DMMU_DEMAP
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi

	/*
	 * Load the tte data into the TLB and retry the instruction.
	 */
	or	%g3, TD_W, %g3
	stxa	%g3, [%g0] ASI_DTLB_DATA_IN_REG
	retry

1:	b	%xcc, tl1_dmmu_prot_trap
	 wrpr	%g0, PSTATE_ALT, %pstate
	.align	128
	.endm

ENTRY(tl1_dmmu_prot_user)
	/*
	 * Try a fast inline lookup of the user tsb.
	 */
	dmmu_prot_user

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/* Handle faults during window spill/fill. */
	RESUME_SPILLFILL_MMU_CLR_SFSR

	b,a	%xcc, tl1_dmmu_prot_trap
	 nop
END(tl1_dmmu_prot_user)

ENTRY(tl1_dmmu_prot_trap)
	/*
	 * Load the sfar, sfsr and tar.  Clear the sfsr.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	tl1_kstack
	rdpr	%pil, %o1
	mov	%g2, %o3
	mov	%g3, %o4
	mov	%g4, %o5
	b	%xcc, tl1_trap
	 mov	T_DATA_PROTECTION | T_KERNEL, %o0
END(tl1_dmmu_prot_trap)

	.macro	tl1_spill_0_n
	SPILL(stx, %sp + SPOFF, 8, EMPTY)
	saved
	retry
	.align	32
	RSF_FATAL(T_SPILL)
	RSF_FATAL(T_SPILL)
	.endm

	.macro	tl1_spill_4_n
	andcc	%sp, 1, %g0
	bz,pn	%xcc, 2f
	 wr	%g0, ASI_AIUP, %asi
1:	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_5_n
	andcc	%sp, 1, %g0
	bnz,pt	%xcc, 1b
	 wr	%g0, ASI_AIUP, %asi
2:	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_6_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_ALIGN_RETRY(WSTATE_TRANSITION | WSTATE_TEST32)
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_7_n
	wr	%g0, ASI_AIUP, %asi
	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_ALIGN_RETRY(WSTATE_TRANSITION | WSTATE_TEST64)
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_0_o
	andcc	%sp, 1, %g0
	bz,pn	%xcc, 2f
	 wr	%g0, ASI_AIUP, %asi
1:	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	wrpr	%g0, WSTATE_ASSUME64 << WSTATE_OTHER_SHIFT, %wstate
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_1_o
	andcc	%sp, 1, %g0
	bnz,pt	%xcc, 1b
	 wr	%g0, ASI_AIUP, %asi
2:	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	SPILL(stwa, %sp, 4, %asi)
	saved
	wrpr	%g0, WSTATE_ASSUME32 << WSTATE_OTHER_SHIFT, %wstate
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_2_o
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_ALIGN_RETRY(WSTATE_TEST32 << WSTATE_OTHER_SHIFT)
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_3_o
	wr	%g0, ASI_AIUP, %asi
	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_ALIGN_RETRY(WSTATE_TEST64 << WSTATE_OTHER_SHIFT)
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_fill_0_n
	FILL(ldx, %sp + SPOFF, 8, EMPTY)
	restored
	retry
	.align	32
	RSF_FATAL(T_FILL)
	RSF_FATAL(T_FILL)
	.endm

	.macro	tl1_fill_4_n
	andcc	%sp, 1, %g0
	bz,pn	%xcc, 2f
	 wr	%g0, ASI_AIUP, %asi
1:	FILL(ldxa, %sp + SPOFF, 8, %asi)
	restored
	retry
	.align 32
	RSF_FILL_MAGIC
	RSF_FILL_MAGIC
	.endm

	.macro	tl1_fill_5_n
	andcc	%sp, 1, %g0
	bnz,pn	%xcc, 1b
	 wr	%g0, ASI_AIUP, %asi
2:	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	FILL(lduwa, %sp, 4, %asi)
	restored
	retry
	.align 32
	RSF_FILL_MAGIC
	RSF_FILL_MAGIC
	.endm

	.macro	tl1_fill_6_n
	wr	%g0, ASI_AIUP, %asi
	FILL(ldxa, %sp + SPOFF, 8, %asi)
	restored
	retry
	.align 32
	RSF_ALIGN_RETRY(WSTATE_TEST32 | WSTATE_TRANSITION)
	RSF_FILL_MAGIC
	.endm

	.macro	tl1_fill_7_n
	wr	%g0, ASI_AIUP, %asi
	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	FILL(lduwa, %sp, 4, %asi)
	restored
	retry
	.align 32
	RSF_ALIGN_RETRY(WSTATE_TEST64 | WSTATE_TRANSITION)
	RSF_FILL_MAGIC
	.endm

/*
 * This is used to spill windows that are still occupied with user
 * data on kernel entry to the pcb.
 */
ENTRY(tl1_spill_topcb)
	wrpr	%g0, PSTATE_ALT, %pstate

	/* Free some globals for our use. */
	dec	24, ASP_REG
	stx	%g1, [ASP_REG + 0]
	stx	%g2, [ASP_REG + 8]
	stx	%g3, [ASP_REG + 16]

	ldx	[PCB_REG + PCB_NSAVED], %g1

	sllx	%g1, PTR_SHIFT, %g2
	add	%g2, PCB_REG, %g2
	stx	%sp, [%g2 + PCB_RWSP]

	sllx	%g1, RW_SHIFT, %g2
	add	%g2, PCB_REG, %g2
	SPILL(stx, %g2 + PCB_RW, 8, EMPTY)

	inc	%g1
	stx	%g1, [PCB_REG + PCB_NSAVED]

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl1_spill_topcb: pc=%lx sp=%#lx nsaved=%d"
	   , %g1, %g2, %g3, 7, 8, 9)
	rdpr	%tpc, %g2
	stx	%g2, [%g1 + KTR_PARM1]
	stx	%sp, [%g1 + KTR_PARM2]
	ldx	[PCB_REG + PCB_NSAVED], %g2
	stx	%g2, [%g1 + KTR_PARM3]
9:
#endif

	saved

	ldx	[ASP_REG + 16], %g3
	ldx	[ASP_REG + 8], %g2
	ldx	[ASP_REG + 0], %g1
	inc	24, ASP_REG
	retry
END(tl1_spill_topcb)
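
/*
 * In effect (rough C, names illustrative):
 *
 *	n = pcb->pcb_nsaved++;
 *	pcb->pcb_rwsp[n] = %sp;
 *	pcb->pcb_rw[n] = the window's %l0-%l7 and %i0-%i7;
 *
 * The user windows are parked in the pcb; tl0_ret later sees a non-zero
 * pcb_nsaved and re-enters trap() with T_SPILL to copy them out to the
 * user stack before returning.
 */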

	.macro	tl1_spill_bad	count
	.rept	\count
	sir
	.align	128
	.endr
	.endm

	.macro	tl1_fill_bad	count
	.rept	\count
	sir
	.align	128
	.endr
	.endm

	.macro	tl1_soft	count
	.rept	\count
	tl1_gen	T_SOFT | T_KERNEL
	.endr
	.endm

	.sect	.trap
	.align	0x8000
	.globl	tl0_base

tl0_base:
	tl0_reserved	8				! 0x0-0x7
tl0_insn_excptn:
	tl0_insn_excptn					! 0x8
	tl0_reserved	1				! 0x9
tl0_insn_error:
	tl0_gen		T_INSTRUCTION_ERROR		! 0xa
	tl0_reserved	5				! 0xb-0xf
tl0_insn_illegal:
	tl0_gen		T_ILLEGAL_INSTRUCTION		! 0x10
tl0_priv_opcode:
	tl0_gen		T_PRIVILEGED_OPCODE		! 0x11
	tl0_reserved	14				! 0x12-0x1f
tl0_fp_disabled:
	tl0_gen		T_FP_DISABLED			! 0x20
tl0_fp_ieee:
	tl0_gen		T_FP_EXCEPTION_IEEE_754		! 0x21
tl0_fp_other:
	tl0_gen		T_FP_EXCEPTION_OTHER		! 0x22
tl0_tag_ovflw:
	tl0_gen		T_TAG_OFERFLOW			! 0x23
tl0_clean_window:
	clean_window					! 0x24
tl0_divide:
	tl0_gen		T_DIVISION_BY_ZERO		! 0x28
	tl0_reserved	7				! 0x29-0x2f
tl0_data_excptn:
	tl0_data_excptn					! 0x30
	tl0_reserved	1				! 0x31
tl0_data_error:
	tl0_gen		T_DATA_ERROR			! 0x32
	tl0_reserved	1				! 0x33
tl0_align:
	tl0_align					! 0x34
tl0_align_lddf:
	tl0_gen		T_RESERVED			! 0x35
tl0_align_stdf:
	tl0_gen		T_RESERVED			! 0x36
tl0_priv_action:
	tl0_gen		T_PRIVILEGED_ACTION		! 0x37
	tl0_reserved	9				! 0x38-0x40
tl0_intr_level:
	tl0_intr_level					! 0x41-0x4f
	tl0_reserved	16				! 0x50-0x5f
tl0_intr_vector:
	tl0_intr_vector					! 0x60
tl0_watch_phys:
	tl0_gen		T_PA_WATCHPOINT			! 0x61
tl0_watch_virt:
	tl0_gen		T_VA_WATCHPOINT			! 0x62
tl0_ecc:
	tl0_gen		T_CORRECTED_ECC_ERROR		! 0x63
tl0_immu_miss:
	tl0_immu_miss					! 0x64
tl0_dmmu_miss:
	tl0_dmmu_miss					! 0x68
tl0_dmmu_prot:
	tl0_dmmu_prot					! 0x6c
	tl0_reserved	16				! 0x70-0x7f
tl0_spill_0_n:
	tl0_spill_0_n					! 0x80
tl0_spill_1_n:
	tl0_spill_1_n					! 0x84
tl0_spill_2_n:
	tl0_spill_2_n					! 0x88
tl0_spill_3_n:
	tl0_spill_3_n					! 0x8c
	tl0_spill_bad	12				! 0x90-0xbf
tl0_fill_0_n:
	tl0_fill_0_n					! 0xc0
tl0_fill_1_n:
	tl0_fill_1_n					! 0xc4
tl0_fill_2_n:
	tl0_fill_2_n					! 0xc8
tl0_fill_3_n:
	tl0_fill_3_n					! 0xcc
	tl0_fill_bad	12				! 0xd0-0xff
tl0_soft:
	tl0_reserved	1				! 0x100
	tl0_gen		T_BREAKPOINT			! 0x101
	tl0_gen		T_DIVISION_BY_ZERO		! 0x102
	tl0_reserved	1				! 0x103
	tl0_gen		T_CLEAN_WINDOW			! 0x104
	tl0_gen		T_RANGE_CHECK			! 0x105
	tl0_gen		T_FIX_ALIGNMENT			! 0x106
	tl0_gen		T_INTEGER_OVERFLOW		! 0x107
	tl0_reserved	1				! 0x108
	tl0_syscall					! 0x109
	tl0_fp_restore					! 0x10a
	tl0_reserved	5				! 0x10b-0x10f
	tl0_gen		T_TRAP_INSTRUCTION_16		! 0x110
	tl0_gen		T_TRAP_INSTRUCTION_17		! 0x111
	tl0_gen		T_TRAP_INSTRUCTION_18		! 0x112
	tl0_gen		T_TRAP_INSTRUCTION_19		! 0x113
	tl0_gen		T_TRAP_INSTRUCTION_20		! 0x114
	tl0_gen		T_TRAP_INSTRUCTION_21		! 0x115
	tl0_gen		T_TRAP_INSTRUCTION_22		! 0x116
	tl0_gen		T_TRAP_INSTRUCTION_23		! 0x117
	tl0_gen		T_TRAP_INSTRUCTION_24		! 0x118
	tl0_gen		T_TRAP_INSTRUCTION_25		! 0x119
	tl0_gen		T_TRAP_INSTRUCTION_26		! 0x11a
	tl0_gen		T_TRAP_INSTRUCTION_27		! 0x11b
	tl0_gen		T_TRAP_INSTRUCTION_28		! 0x11c
	tl0_gen		T_TRAP_INSTRUCTION_29		! 0x11d
	tl0_gen		T_TRAP_INSTRUCTION_30		! 0x11e
	tl0_gen		T_TRAP_INSTRUCTION_31		! 0x11f
	tl0_reserved	224				! 0x120-0x1ff

tl1_base:
	tl1_reserved	8				! 0x200-0x207
tl1_insn_excptn:
	tl1_insn_excptn					! 0x208
	tl1_reserved	1				! 0x209
tl1_insn_error:
	tl1_gen		T_INSTRUCTION_ERROR		! 0x20a
	tl1_reserved	5				! 0x20b-0x20f
tl1_insn_illegal:
	tl1_gen		T_ILLEGAL_INSTRUCTION		! 0x210
tl1_priv_opcode:
	tl1_gen		T_PRIVILEGED_OPCODE		! 0x211
	tl1_reserved	14				! 0x212-0x21f
tl1_fp_disabled:
	tl1_gen		T_FP_DISABLED			! 0x220
tl1_fp_ieee:
	tl1_gen		T_FP_EXCEPTION_IEEE_754		! 0x221
tl1_fp_other:
	tl1_gen		T_FP_EXCEPTION_OTHER		! 0x222
tl1_tag_ovflw:
	tl1_gen		T_TAG_OFERFLOW			! 0x223
tl1_clean_window:
	clean_window					! 0x224
tl1_divide:
	tl1_gen		T_DIVISION_BY_ZERO		! 0x228
	tl1_reserved	7				! 0x229-0x22f
tl1_data_excptn:
	tl1_data_excptn					! 0x230
	tl1_reserved	1				! 0x231
tl1_data_error:
	tl1_gen		T_DATA_ERROR			! 0x232
	tl1_reserved	1				! 0x233
tl1_align:
	tl1_align					! 0x234
tl1_align_lddf:
	tl1_gen		T_RESERVED			! 0x235
tl1_align_stdf:
	tl1_gen		T_RESERVED			! 0x236
tl1_priv_action:
	tl1_gen		T_PRIVILEGED_ACTION		! 0x237
	tl1_reserved	9				! 0x238-0x240
tl1_intr_level:
	tl1_intr_level					! 0x241-0x24f
	tl1_reserved	16				! 0x250-0x25f
tl1_intr_vector:
	tl1_intr_vector					! 0x260
tl1_watch_phys:
	tl1_gen		T_PA_WATCHPOINT			! 0x261
tl1_watch_virt:
	tl1_gen		T_VA_WATCHPOINT			! 0x262
tl1_ecc:
	tl1_gen		T_CORRECTED_ECC_ERROR		! 0x263
tl1_immu_miss:
	tl1_immu_miss					! 0x264
tl1_dmmu_miss:
	tl1_dmmu_miss					! 0x268
tl1_dmmu_prot:
	tl1_dmmu_prot					! 0x26c
	tl1_reserved	16				! 0x270-0x27f
tl1_spill_0_n:
	tl1_spill_0_n					! 0x280
	tl1_spill_bad	3				! 0x284-0x28f
tl1_spill_4_n:
	tl1_spill_4_n					! 0x290
tl1_spill_5_n:
	tl1_spill_5_n					! 0x294
tl1_spill_6_n:
	tl1_spill_6_n					! 0x298
tl1_spill_7_n:
	tl1_spill_7_n					! 0x29c
tl1_spill_0_o:
	tl1_spill_0_o					! 0x2a0
tl1_spill_1_o:
	tl1_spill_1_o					! 0x2a4
tl1_spill_2_o:
	tl1_spill_2_o					! 0x2a8
tl1_spill_3_o:
	tl1_spill_3_o					! 0x2ac
	tl1_spill_bad	4				! 0x2b0-0x2bf
tl1_fill_0_n:
	tl1_fill_0_n					! 0x2c0
	tl1_fill_bad	3				! 0x2c4-0x2cf
tl1_fill_4_n:
	tl1_fill_4_n					! 0x2d0
tl1_fill_5_n:
	tl1_fill_5_n					! 0x2d4
tl1_fill_6_n:
	tl1_fill_6_n					! 0x2d8
tl1_fill_7_n:
	tl1_fill_7_n					! 0x2dc
	tl1_fill_bad	8				! 0x2e0-0x2ff
	tl1_reserved	1				! 0x300
tl1_breakpoint:
	tl1_gen		T_BREAKPOINT			! 0x301
	tl1_gen		T_RSTRWP_PHYS			! 0x302
	tl1_gen		T_RSTRWP_VIRT			! 0x303
	tl1_reserved	252				! 0x304-0x3ff

/*
 * User trap entry point.
 *
 * void tl0_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
 *		 u_int sfsr)
 *
 * The following setup has been performed:
 *	- the windows have been split and the active user window has been saved
 *	  (maybe just to the pcb)
 *	- we are on alternate globals and interrupts are disabled
 *
 * We switch to the kernel stack, build a trapframe, switch to normal
 * globals, enable interrupts and call trap.
 *
 * NOTE: We must be very careful setting up the per-cpu pointer.  We know that
 * it has been pre-set in alternate globals, so we read it from there and set
 * up the normal %g7 *before* enabling interrupts.  This avoids any
 * possibility of cpu migration and using the wrong pcpup.
 */
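/*
 * The frame built below corresponds to a layout along these lines (a
 * sketch only; the real offsets come from assym.s):
 *
 *	struct trapframe {
 *		u_long	tf_tstate, tf_tpc, tf_tnpc, tf_tar, tf_sfar, tf_fsr;
 *		u_long	tf_g1, ..., tf_g7;
 *		u_long	tf_o0, ..., tf_o7;
 *		u_int	tf_type, tf_y, tf_sfsr;
 *		u_char	tf_fprs, tf_wstate;
 *	};
 *
 * carved out of the kernel stack just below the pcb.
 */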
1978ENTRY(tl0_trap)
1979	/*
1980	 * Force kernel store order.
1981	 */
1982	wrpr	%g0, PSTATE_ALT, %pstate
1983
1984	rdpr	%tstate, %l0
1985	rdpr	%tpc, %l1
1986	rdpr	%tnpc, %l2
1987	rd	%y, %l3
1988	rd	%fprs, %l4
1989	rdpr	%wstate, %l5
1990
1991#if KTR_COMPILE & KTR_TRAP
1992	CATR(KTR_TRAP,
1993	    "tl0_trap: td=%p type=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
1994	    , %g1, %g2, %g3, 7, 8, 9)
1995	ldx	[PCPU(CURTHREAD)], %g2
1996	stx	%g2, [%g1 + KTR_PARM1]
1997	stx	%o0, [%g1 + KTR_PARM2]
1998	rdpr	%pil, %g2
1999	stx	%g2, [%g1 + KTR_PARM3]
2000	stx	%l1, [%g1 + KTR_PARM4]
2001	stx	%l2, [%g1 + KTR_PARM5]
2002	stx	%i6, [%g1 + KTR_PARM6]
20039:
2004#endif
2005
2006	and	%l5, WSTATE_NORMAL_MASK, %l5
2007	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
2008	wrpr	%l5, WSTATE_KERNEL, %wstate
2009	rdpr	%canrestore, %l6
2010	wrpr	%l6, 0, %otherwin
2011	wrpr	%g0, 0, %canrestore
2012
2013	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp
2014
2015	stw	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2016	stx	%o3, [%sp + SPOFF + CCFSZ + TF_TAR]
2017	stx	%o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
2018	stw	%o5, [%sp + SPOFF + CCFSZ + TF_SFSR]
2019
2020	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
2021	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
2022	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2023	stw	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
2024	stb	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
2025	stb	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]
2026
2027	wr	%g0, FPRS_FEF, %fprs
2028	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
2029	wr	%g0, 0, %fprs
2030
2031	mov	PCPU_REG, %o0
2032	wrpr	%g0, PSTATE_NORMAL, %pstate
2033
2034	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
2035	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
2036	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
2037	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
2038	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
2039	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
2040	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]
2041
2042	mov	%o0, PCPU_REG
2043	wrpr	%g0, PSTATE_KERNEL, %pstate
2044
2045	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
2046	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
2047	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
2048	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
2049	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
2050	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
2051	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
2052	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
2053
2054.Ltl0_trap_reenter:
2055	call	trap
2056	 add	%sp, CCFSZ + SPOFF, %o0
2057	b,a	%xcc, tl0_ret
2058	 nop
2059END(tl0_trap)
2060
2061/*
2062 * System call entry point.
2063 *
2064 * Essentially the same as tl0_trap but calls syscall.
2065 */
2066ENTRY(tl0_syscall)
2067	/*
2068	 * Force kernel store order.
2069	 */
2070	wrpr	%g0, PSTATE_ALT, %pstate
2071
2072	rdpr	%tstate, %l0
2073	rdpr	%tpc, %l1
2074	rdpr	%tnpc, %l2
2075	rd	%y, %l3
2076	rd	%fprs, %l4
2077	rdpr	%wstate, %l5
2078
2079#if KTR_COMPILE & KTR_SYSC
2080	CATR(KTR_SYSC,
2081	    "tl0_syscall: td=%p type=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
2082	    , %g1, %g2, %g3, 7, 8, 9)
2083	ldx	[PCPU(CURTHREAD)], %g2
2084	stx	%g2, [%g1 + KTR_PARM1]
2085	stx	%o0, [%g1 + KTR_PARM2]
2086	rdpr	%pil, %g2
2087	stx	%g2, [%g1 + KTR_PARM3]
2088	stx	%l1, [%g1 + KTR_PARM4]
2089	stx	%l2, [%g1 + KTR_PARM5]
2090	stx	%i6, [%g1 + KTR_PARM6]
20919:
2092#endif
2093
2094	and	%l5, WSTATE_NORMAL_MASK, %l5
2095	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
2096	wrpr	%l5, WSTATE_KERNEL, %wstate
2097	rdpr	%canrestore, %l6
2098	wrpr	%l6, 0, %otherwin
2099	wrpr	%g0, 0, %canrestore
2100
2101	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp
2102
2103	stw	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2104
2105	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
2106	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
2107	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2108	stw	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
2109	stb	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
2110	stb	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]
2111
2112	wr	%g0, FPRS_FEF, %fprs
2113	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
2114	wr	%g0, 0, %fprs
2115
2116	mov	PCPU_REG, %o0
2117	wrpr	%g0, PSTATE_NORMAL, %pstate
2118
2119	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
2120	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
2121	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
2122	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
2123	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
2124	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
2125	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]
2126
2127	mov	%o0, PCPU_REG
2128	wrpr	%g0, PSTATE_KERNEL, %pstate
2129
2130	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
2131	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
2132	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
2133	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
2134	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
2135	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
2136	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
2137	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
2138
2139	call	syscall
2140	 add	%sp, CCFSZ + SPOFF, %o0
2141	b,a	%xcc, tl0_ret
2142	 nop
2143END(tl0_syscall)
2144
2145ENTRY(tl0_intr)
2146	/*
2147	 * Force kernel store order.
2148	 */
2149	wrpr	%g0, PSTATE_ALT, %pstate
2150
2151	rdpr	%tstate, %l0
2152	rdpr	%tpc, %l1
2153	rdpr	%tnpc, %l2
2154	rd	%y, %l3
2155	rd	%fprs, %l4
2156	rdpr	%wstate, %l5
2157
2158#if KTR_COMPILE & KTR_INTR
2159	CATR(KTR_INTR,
2160	    "tl0_intr: td=%p type=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
2161	    , %g1, %g2, %g3, 7, 8, 9)
2162	ldx	[PCPU(CURTHREAD)], %g2
2163	stx	%g2, [%g1 + KTR_PARM1]
2164	stx	%o0, [%g1 + KTR_PARM2]
2165	rdpr	%pil, %g2
2166	stx	%g2, [%g1 + KTR_PARM3]
2167	stx	%l1, [%g1 + KTR_PARM4]
2168	stx	%l2, [%g1 + KTR_PARM5]
2169	stx	%i6, [%g1 + KTR_PARM6]
21709:
2171#endif
2172
	and	%l5, WSTATE_NORMAL_MASK, %l5
	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
	wrpr	%l5, WSTATE_KERNEL, %wstate
	rdpr	%canrestore, %l6
	wrpr	%l6, 0, %otherwin
	wrpr	%g0, 0, %canrestore

	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp

	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
	stw	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
	stb	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
	stb	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]

	wr	%g0, FPRS_FEF, %fprs
	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
	wr	%g0, 0, %fprs

	mov	T_INTERRUPT, %o0
	stw	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
	stw	%o2, [%sp + SPOFF + CCFSZ + TF_LEVEL]

	mov	PCPU_REG, %o0
	wrpr	%g0, PSTATE_NORMAL, %pstate

	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]

	mov	%o0, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate

	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]

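	/*
	 * Count the interrupt and dispatch it through the intr_handlers
	 * table, indexed by the interrupt level saved in the frame above.
	 * Roughly, in C (a sketch only; the entry layout is hidden behind
	 * IH_SHIFT):
	 *
	 *	cnt.v_intr++;
	 *	(*intr_handlers[level])(tf);
	 */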
	SET(cnt+V_INTR, %l1, %l0)
	ATOMIC_INC_INT(%l0, %l1, %l2)

	SET(intr_handlers, %l1, %l0)
	sllx	%o2, IH_SHIFT, %l1
	ldx	[%l0 + %l1], %l1
	call	%l1
	 add	%sp, CCFSZ + SPOFF, %o0
	b,a	%xcc, tl0_ret
	 nop
END(tl0_intr)

ENTRY(tl0_ret)
#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl0_ret: check ast td=%p (%s) pil=%#lx sflag=%#x"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	ldx	[%g2 + TD_PROC], %g2
	add	%g2, P_COMM, %g3
	stx	%g3, [%g1 + KTR_PARM2]
	rdpr	%pil, %g3
	stx	%g3, [%g1 + KTR_PARM3]
	lduw	[%g2 + P_SFLAG], %g3
	stx	%g3, [%g1 + KTR_PARM4]
9:
#endif

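	/*
	 * Raise the pil before testing the AST flags so that an interrupt
	 * cannot set them after the test; call ast() if an AST is pending
	 * or a reschedule is needed.
	 */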
	wrpr	%g0, PIL_TICK, %pil
	ldx	[PCPU(CURTHREAD)], %l0
	ldx	[%l0 + TD_KSE], %l1
	lduw	[%l1 + KE_FLAGS], %l2
	and	%l2, KEF_ASTPENDING | KEF_NEEDRESCHED, %l2
	brz,pt	%l2, 1f
	 nop
	call	ast
	 add	%sp, CCFSZ + SPOFF, %o0

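	/*
	 * If user windows were saved in the pcb while we were in the
	 * kernel, re-enter trap() with T_SPILL so they are written out to
	 * the user stack before returning.
	 */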
1:	ldx	[%l0 + TD_PCB], %l1
	ldx	[%l1 + PCB_NSAVED], %l2
	mov	T_SPILL, %o0
	brnz,a,pn %l2, .Ltl0_trap_reenter
	 stw	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]

	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6
	ldx	[%sp + SPOFF + CCFSZ + TF_G7], %g7

	ldx	[%sp + SPOFF + CCFSZ + TF_O0], %i0
	ldx	[%sp + SPOFF + CCFSZ + TF_O1], %i1
	ldx	[%sp + SPOFF + CCFSZ + TF_O2], %i2
	ldx	[%sp + SPOFF + CCFSZ + TF_O3], %i3
	ldx	[%sp + SPOFF + CCFSZ + TF_O4], %i4
	ldx	[%sp + SPOFF + CCFSZ + TF_O5], %i5
	ldx	[%sp + SPOFF + CCFSZ + TF_O6], %i6
	ldx	[%sp + SPOFF + CCFSZ + TF_O7], %i7

	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l0
	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l1
	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
	lduw	[%sp + SPOFF + CCFSZ + TF_Y], %l3
	ldub	[%sp + SPOFF + CCFSZ + TF_FPRS], %l4
	ldub	[%sp + SPOFF + CCFSZ + TF_WSTATE], %l5

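	/*
	 * Sanity checks: interrupts (TSTATE_IE) and the FPU (TSTATE_PEF)
	 * must be enabled in the %tstate we are about to return with;
	 * trap immediately if either bit is clear.
	 */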
	set	TSTATE_IE, %o0
	andcc	%l0, %o0, %g0
	bnz	%xcc, 1f
	 nop
	ta	%xcc, 1
1:	set	TSTATE_PEF, %o0
	andcc	%l0, %o0, %g0
	bnz	%xcc, 1f
	 nop
	ta	%xcc, 1
1:

	wrpr	%g0, PSTATE_ALT, %pstate

	wrpr	%g0, 0, %pil
	wrpr	%l1, 0, %tpc
	wrpr	%l2, 0, %tnpc
	wr	%l3, 0, %y

	andn	%l0, TSTATE_CWP_MASK, %g1
	mov	%l4, %g2

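	/*
	 * Give the windows back to the user: reinstate the user's wstate
	 * (via WSTATE_TRANSITION while the windows are moved) and turn
	 * the "other" windows back into restorable windows.
	 */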
	srlx	%l5, WSTATE_OTHER_SHIFT, %g3
	wrpr	%g3, WSTATE_TRANSITION, %wstate
	rdpr	%otherwin, %o0
	wrpr	%o0, 0, %canrestore
	wrpr	%g0, 0, %otherwin
	wrpr	%o0, 0, %cleanwin

	/*
	 * If this instruction causes a fill trap which fails to fill a window
	 * from the user stack, we will resume at tl0_ret_fill_end and call
	 * back into the kernel.
	 */
	restore
tl0_ret_fill:

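	/*
	 * The restore may have changed the current window, so merge the
	 * live %cwp into the saved %tstate before the retry.
	 */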
	rdpr	%cwp, %g4
	wrpr	%g1, %g4, %tstate
	wr	%g2, 0, %fprs
	wrpr	%g3, 0, %wstate

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl0_ret: td=%#lx pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
	    , %g2, %g3, %g4, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g3
	stx	%g3, [%g2 + KTR_PARM1]
	rdpr	%pil, %g3
	stx	%g3, [%g2 + KTR_PARM2]
	rdpr	%tpc, %g3
	stx	%g3, [%g2 + KTR_PARM3]
	rdpr	%tnpc, %g3
	stx	%g3, [%g2 + KTR_PARM4]
	stx	%sp, [%g2 + KTR_PARM5]
9:
#endif

	retry
tl0_ret_fill_end:

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl0_ret: fill magic wstate=%#lx sp=%#lx"
	    , %l0, %l1, %l2, 7, 8, 9)
	stx	%l5, [%l0 + KTR_PARM1]
	stx	%sp, [%l0 + KTR_PARM2]
9:
#endif

	/*
	 * The fill failed and magic has been performed.  Call trap again,
	 * which will copyin the window on the user's behalf.
	 */
	wrpr	%l5, 0, %wstate
	mov	PCPU_REG, %o0
	wrpr	%g0, PSTATE_KERNEL, %pstate
	mov	%o0, PCPU_REG
	mov	T_FILL, %o0
	b	%xcc, .Ltl0_trap_reenter
	 stw	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
END(tl0_ret)

/*
 * Kernel trap entry point
 *
 * void tl1_trap(u_int type, u_char pil, u_long o2, u_long tar, u_long sfar,
 *		 u_int sfsr)
 *
 * This is easy because the stack is already set up and the windows don't
 * need to be split.  We build a trapframe and call trap(), the same as
 * above, but the outs don't need to be saved.
 */
ENTRY(tl1_trap)
	sub	%sp, TF_SIZEOF, %sp

	rdpr	%tstate, %l0
	rdpr	%tpc, %l1
	rdpr	%tnpc, %l2
	mov	%o1, %l3

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl1_trap: td=%p type=%#lx pil=%#lx pc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	andn	%o0, T_KERNEL, %g2
	stx	%g2, [%g1 + KTR_PARM2]
	stx	%o1, [%g1 + KTR_PARM3]
	stx	%l1, [%g1 + KTR_PARM4]
	stx	%i6, [%g1 + KTR_PARM5]
9:
#endif

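	/*
	 * Lower the trap level while trap() runs; it is raised again just
	 * before the saved trap state is written back for the retry.
	 */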
	wrpr	%g0, 1, %tl

	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]

	stw	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
	stb	%o1, [%sp + SPOFF + CCFSZ + TF_PIL]
	stx	%o3, [%sp + SPOFF + CCFSZ + TF_TAR]
	stx	%o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
	stw	%o5, [%sp + SPOFF + CCFSZ + TF_SFSR]

	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]

	mov	PCPU_REG, %o0
	wrpr	%g0, PSTATE_NORMAL, %pstate

	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]

	mov	%o0, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate

	call	trap
	 add	%sp, CCFSZ + SPOFF, %o0

	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l0
	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l1
	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
	ldub	[%sp + SPOFF + CCFSZ + TF_PIL], %l3

	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6

	wrpr	%g0, PSTATE_ALT, %pstate

	andn	%l0, TSTATE_CWP_MASK, %g1
	mov	%l1, %g2
	mov	%l2, %g3

	wrpr	%l3, 0, %pil

	restore

	wrpr	%g0, 2, %tl

	rdpr	%cwp, %g4
	wrpr	%g1, %g4, %tstate
	wrpr	%g2, 0, %tpc
	wrpr	%g3, 0, %tnpc

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl1_trap: td=%#lx pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
	    , %g2, %g3, %g4, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g3
	stx	%g3, [%g2 + KTR_PARM1]
	rdpr	%pil, %g3
	stx	%g3, [%g2 + KTR_PARM2]
	rdpr	%tstate, %g3
	stx	%g3, [%g2 + KTR_PARM3]
	rdpr	%tpc, %g3
	stx	%g3, [%g2 + KTR_PARM4]
	stx	%sp, [%g2 + KTR_PARM5]
9:
#endif

	retry
END(tl1_trap)

ENTRY(tl1_intr)
	sub	%sp, TF_SIZEOF, %sp

	rdpr	%tstate, %l0
	rdpr	%tpc, %l1
	rdpr	%tnpc, %l2
	mov	%o1, %l3

#if KTR_COMPILE & KTR_INTR
	CATR(KTR_INTR, "tl1_intr: td=%p type=%#lx pil=%#lx pc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	andn	%o0, T_KERNEL, %g2
	stx	%g2, [%g1 + KTR_PARM2]
	stx	%o1, [%g1 + KTR_PARM3]
	stx	%l1, [%g1 + KTR_PARM4]
	stx	%i6, [%g1 + KTR_PARM5]
9:
#endif

	wrpr	%g0, 1, %tl

	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]

	mov	T_INTERRUPT | T_KERNEL, %o0
	stw	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
	stb	%o1, [%sp + SPOFF + CCFSZ + TF_PIL]
	stw	%o2, [%sp + SPOFF + CCFSZ + TF_LEVEL]

	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]

	mov	PCPU_REG, %o0
	wrpr	%g0, PSTATE_NORMAL, %pstate

	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]

	mov	%o0, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate

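	/*
	 * Count and dispatch the interrupt exactly as in tl0_intr above.
	 */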
	SET(cnt+V_INTR, %l5, %l4)
	ATOMIC_INC_INT(%l4, %l5, %l6)

	SET(intr_handlers, %l5, %l4)
	sllx	%o2, IH_SHIFT, %l5
	ldx	[%l4 + %l5], %l5
	call	%l5
	 add	%sp, CCFSZ + SPOFF, %o0

	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6

	wrpr	%g0, PSTATE_ALT, %pstate

	andn	%l0, TSTATE_CWP_MASK, %g1
	mov	%l1, %g2
	mov	%l2, %g3
	wrpr	%l3, 0, %pil

	restore

	wrpr	%g0, 2, %tl

	rdpr	%cwp, %g4
	wrpr	%g1, %g4, %tstate
	wrpr	%g2, 0, %tpc
	wrpr	%g3, 0, %tnpc

#if KTR_COMPILE & KTR_INTR
	CATR(KTR_INTR, "tl1_intr: td=%#lx pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
	    , %g2, %g3, %g4, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g3
	stx	%g3, [%g2 + KTR_PARM1]
	rdpr	%pil, %g3
	stx	%g3, [%g2 + KTR_PARM2]
	rdpr	%tstate, %g3
	stx	%g3, [%g2 + KTR_PARM3]
	rdpr	%tpc, %g3
	stx	%g3, [%g2 + KTR_PARM4]
	stx	%sp, [%g2 + KTR_PARM5]
9:
#endif

	retry
END(tl1_intr)

/*
 * Freshly forked processes come here when switched to for the first time.
 * The arguments to fork_exit() have been set up in the locals; we must move
 * them to the outs.
 */
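/*
 * Roughly, in C (a sketch, assuming the locals were set up by cpu_fork()
 * to hold the callout function, its argument and the trapframe pointer):
 *
 *	fork_exit(callout, arg, tf);
 *
 * When fork_exit() returns, control continues to tl0_ret to complete the
 * return to user mode.
 */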
ENTRY(fork_trampoline)
#if KTR_COMPILE & KTR_PROC
	CATR(KTR_PROC, "fork_trampoline: td=%p (%s) cwp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	ldx	[%g2 + TD_PROC], %g2
	add	%g2, P_COMM, %g2
	stx	%g2, [%g1 + KTR_PARM2]
	rdpr	%cwp, %g2
	stx	%g2, [%g1 + KTR_PARM3]
9:
#endif
	mov	%l0, %o0
	mov	%l1, %o1
	call	fork_exit
	 mov	%l2, %o2
	b,a	%xcc, tl0_ret
	 nop
END(fork_trampoline)
