exception.S revision 83366
1/*-
2 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 * 3. Berkeley Software Design Inc's name may not be used to endorse or
13 *    promote products derived from this software without specific prior
14 *    written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 *	from BSDI: locore.s,v 1.36.2.15 1999/08/23 22:34:41 cp Exp
29 */
30/*-
31 * Copyright (c) 2001 Jake Burkholder.
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 * 1. Redistributions of source code must retain the above copyright
38 *    notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 *    notice, this list of conditions and the following disclaimer in the
41 *    documentation and/or other materials provided with the distribution.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
47 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 * SUCH DAMAGE.
54 *
55 * $FreeBSD: head/sys/sparc64/sparc64/exception.S 83366 2001-09-12 08:38:13Z julian $
56 */
57
58#include "opt_ddb.h"
59
60#include <machine/asi.h>
61#include <machine/asmacros.h>
62#include <machine/ktr.h>
63#include <machine/pstate.h>
64#include <machine/trap.h>
65#include <machine/tstate.h>
66#include <machine/wstate.h>
67
68#include "assym.s"
69
70/*
71 * Macros for spilling and filling live windows.
72 *
73 * NOTE: These macros use exactly 16 instructions, and it is assumed that the
74 * handler will not use more than 24 instructions total, to leave room for
75 * resume vectors which occupy the last 8 instructions.
76 */
77
/*
 * SPILL: store the 16 windowed registers (%l0-%l7, %i0-%i7) to 16
 * consecutive slots of "size" bytes starting at "base", using the given
 * store instruction and address space identifier.
 */
#define	SPILL(storer, base, size, asi) \
	storer	%l0, [base + (0 * size)] asi ; \
	storer	%l1, [base + (1 * size)] asi ; \
	storer	%l2, [base + (2 * size)] asi ; \
	storer	%l3, [base + (3 * size)] asi ; \
	storer	%l4, [base + (4 * size)] asi ; \
	storer	%l5, [base + (5 * size)] asi ; \
	storer	%l6, [base + (6 * size)] asi ; \
	storer	%l7, [base + (7 * size)] asi ; \
	storer	%i0, [base + (8 * size)] asi ; \
	storer	%i1, [base + (9 * size)] asi ; \
	storer	%i2, [base + (10 * size)] asi ; \
	storer	%i3, [base + (11 * size)] asi ; \
	storer	%i4, [base + (12 * size)] asi ; \
	storer	%i5, [base + (13 * size)] asi ; \
	storer	%i6, [base + (14 * size)] asi ; \
	storer	%i7, [base + (15 * size)] asi

/*
 * FILL: the inverse of SPILL; reload %l0-%l7 and %i0-%i7 from 16
 * consecutive slots of "size" bytes starting at "base".
 */
#define	FILL(loader, base, size, asi) \
	loader	[base + (0 * size)] asi, %l0 ; \
	loader	[base + (1 * size)] asi, %l1 ; \
	loader	[base + (2 * size)] asi, %l2 ; \
	loader	[base + (3 * size)] asi, %l3 ; \
	loader	[base + (4 * size)] asi, %l4 ; \
	loader	[base + (5 * size)] asi, %l5 ; \
	loader	[base + (6 * size)] asi, %l6 ; \
	loader	[base + (7 * size)] asi, %l7 ; \
	loader	[base + (8 * size)] asi, %i0 ; \
	loader	[base + (9 * size)] asi, %i1 ; \
	loader	[base + (10 * size)] asi, %i2 ; \
	loader	[base + (11 * size)] asi, %i3 ; \
	loader	[base + (12 * size)] asi, %i4 ; \
	loader	[base + (13 * size)] asi, %i5 ; \
	loader	[base + (14 * size)] asi, %i6 ; \
	loader	[base + (15 * size)] asi, %i7

/*
 * No-op register move.  The name suggests a workaround for a CPU erratum
 * (presumably UltraSPARC erratum #50, forcing a dependency on the
 * register) -- TODO confirm the exact erratum.
 */
#define	ERRATUM50(reg)	mov reg, reg
115
116/*
117 * Magic to resume from a spill or fill trap.  If we get an alignment or an
118 * mmu fault during a spill or a fill, this macro will detect the fault and
119 * resume at a set instruction offset in the trap handler, which will try to
120 * get help.
121 *
 * To check if the previous trap was a spill/fill we convert the trapped
123 * pc to a trap type and verify that it is in the range of spill/fill vectors.
124 * The spill/fill vectors are types 0x80-0xff and 0x280-0x2ff, masking off the
125 * tl bit allows us to detect both ranges with one test.
126 *
127 * This is:
128 *	(((%tpc - %tba) >> 5) & ~0x200) >= 0x80 && <= 0xff
129 *
130 * Values outside of the trap table will produce negative or large positive
131 * results.
132 *
133 * To calculate the new pc we take advantage of the xor feature of wrpr.
134 * Forcing all the low bits of the trapped pc on we can produce any offset
135 * into the spill/fill vector.  The size of a spill/fill trap vector is 0x80.
136 *
137 *	0x7f ^ 0x1f == 0x60
138 *	0x1f == (0x80 - 0x60) - 1
139 *
140 * Which are the offset and xor value used to resume from mmu faults.
141 */
142
143/*
144 * If a spill/fill trap is not detected this macro will branch to the label l1.
 * Otherwise the caller should do any necessary cleanup and execute a done.
146 */
/*
 * See the large comment above: computes the trap type of the previous
 * trap from %tpc/%tba; if it was not a spill/fill vector, branch to l1.
 * Otherwise rewrite %tnpc (via the wrpr xor trick) so that a later
 * "done" resumes at a fixed offset inside the spill/fill vector.
 * Clobbers r1 and r2.
 */
#define	RESUME_SPILLFILL_MAGIC(r1, r2, xor, l1) \
	rdpr	%tpc, r1 ; \
	ERRATUM50(r1) ; \
	rdpr	%tba, r2 ; \
	sub	r1, r2, r2 ; \
	srlx	r2, 5, r2 ; \
	andn	r2, 0x200, r2 ; \
	sub	r2, 0x80, r2 ; \
	brlz	r2, l1 ; \
	 sub	r2, 0x7f, r2 ; \
	brgz	r2, l1 ; \
	 or	r1, 0x7f, r1 ; \
	wrpr	r1, xor, %tnpc ; \

/* Xor constant that maps (tpc | 0x7f) to vector offset "off". */
#define	RSF_XOR(off)	((0x80 - off) - 1)

/*
 * Instruction offsets in spill and fill trap handlers for handling certain
 * nested traps, and corresponding xor constants for wrpr.
 */
#define	RSF_OFF_MMU	0x60
#define	RSF_OFF_ALIGN	0x70

#define	RSF_MMU		RSF_XOR(RSF_OFF_MMU)
#define	RSF_ALIGN	RSF_XOR(RSF_OFF_ALIGN)

/*
 * Constant to add to %tnpc when taking a fill trap just before returning to
 * user mode.  The instruction sequence looks like restore, wrpr, retry; we
 * want to skip over the wrpr and retry and execute code to call back into the
 * kernel.  It is useful to add tracing between these instructions, which would
 * change the size of the sequence, so we demark with labels and subtract.
 */
#define	RSF_FILL_INC	tl0_ret_fill_end - tl0_ret_fill
181
182/*
183 * Retry a spill or fill with a different wstate due to an alignment fault.
184 * We may just be using the wrong stack offset.
185 */
/*
 * Retry a spill or fill with a different wstate due to an alignment fault.
 * We may just be using the wrong stack offset.
 */
#define	RSF_ALIGN_RETRY(ws) \
	wrpr	%g0, (ws), %wstate ; \
	retry ; \
	.align	16

/*
 * Generate a T_SPILL or T_FILL trap if the window operation fails.
 */
#define	RSF_TRAP(type) \
	b	%xcc, tl0_sftrap ; \
	 mov	type, %g2 ; \
	.align	16

/*
 * Game over if the window operation fails.
 */
#define	RSF_FATAL(type) \
	sir	type ; \
	.align	16

/*
 * Magic to resume from a failed fill a few instructions after the
 * corresponding restore.  This is used on return from the kernel to
 * usermode.
 */
#define	RSF_FILL_MAGIC \
	rdpr	%tnpc, %g1 ; \
	add	%g1, RSF_FILL_INC, %g1 ; \
	wrpr	%g1, 0, %tnpc ; \
	done ; \
	.align	16

/*
 * Spill to the pcb if a spill to the user stack in kernel mode fails.
 */
#define	RSF_SPILL_TOPCB \
	b,a	%xcc, tl1_spill_topcb ; \
	 nop ; \
	.align	16
224
/*
 * Interrupt name and counter tables, delimited by the e* end labels.
 * NOTE(review): these look like placeholders (".asciz foo", one zero
 * counter) -- presumably to be filled in by later interrupt code.
 */
DATA(intrnames)
	.asciz	"foo"
DATA(eintrnames)

DATA(intrcnt)
	.long	0
DATA(eintrcnt)
232
233/*
234 * Trap table and associated macros
235 *
236 * Due to its size a trap table is an inherently hard thing to represent in
237 * code in a clean way.  There are approximately 1024 vectors, of 8 or 32
238 * instructions each, many of which are identical.  The way that this is
 * laid out is the instructions (8 or 32) for the actual trap vector appear
240 * as an AS macro.  In general this code branches to tl0_trap or tl1_trap,
241 * but if not supporting code can be placed just after the definition of the
242 * macro.  The macros are then instantiated in a different section (.trap),
243 * which is setup to be placed by the linker at the beginning of .text, and the
244 * code around the macros is moved to the end of trap table.  In this way the
245 * code that must be sequential in memory can be split up, and located near
246 * its supporting code so that it is easier to follow.
247 */
248
249	/*
250	 * Clean window traps occur when %cleanwin is zero to ensure that data
251	 * is not leaked between address spaces in registers.
252	 */
	.macro	clean_window
	/*
	 * Zero all out and local registers of the new window so no kernel
	 * (or other address space) data leaks to the trapped context, bump
	 * %cleanwin to mark one more window clean, and retry the trapped
	 * instruction.
	 */
	clr	%o0
	clr	%o1
	clr	%o2
	clr	%o3
	clr	%o4
	clr	%o5
	clr	%o6
	clr	%o7
	clr	%l0
	clr	%l1
	clr	%l2
	clr	%l3
	clr	%l4
	clr	%l5
	clr	%l6
	rdpr	%cleanwin, %l7
	inc	%l7
	wrpr	%l7, 0, %cleanwin
	clr	%l7
	retry
	.align	128
	.endm
276
277	/*
278	 * Stack fixups for entry from user mode.  We are still running on the
279	 * user stack, and with its live registers, so we must save soon.  We
280	 * are on alternate globals so we do have some registers.  Set the
281	 * transitional window state, save, and call a routine to get onto
282	 * the kernel stack.  If the save traps we attempt to spill a window
283	 * to the user stack.  If this fails, we spill the window to the pcb
284	 * and continue.
285	 *
286	 * NOTE: Must be called with alternate globals and clobbers %g1.
287	 */
288
	.macro	tl0_kstack
	/*
	 * Enter a transitional window state, open a new window with save,
	 * and call tl0_kstack_fixup to split the windows and switch to the
	 * kernel stack.  %canrestore is passed in %o0 (delay slot), the old
	 * %wstate in (alternate) %g1.  Clobbers %g1.
	 */
	rdpr	%wstate, %g1
	wrpr	%g1, WSTATE_TRANSITION, %wstate
	save
	call	tl0_kstack_fixup
	 rdpr	%canrestore, %o0
	.endm

	/*
	 * Common tl0 trap setup: get onto the kernel stack and branch to
	 * tl0_trap with the trap type in %o0 and the saved %pil in %o2.
	 */
	.macro	tl0_setup	type
	tl0_kstack
	rdpr	%pil, %o2
	b	%xcc, tl0_trap
	 mov	\type, %o0
	.endm
303
304/*
305 * Setup the kernel stack and split the register windows when faulting from
306 * user space.
307 * %canrestore is passed in %o0 and %wstate in (alternate) %g1.
308 */
ENTRY(tl0_kstack_fixup)
	/*
	 * Mark the previously-restorable windows as belonging to the other
	 * (user) context via %otherwin, clear %canrestore, and point %sp at
	 * the top of curthread's kernel stack (leaving room for the stack
	 * bias and a C call frame).
	 */
	mov	%g1, %o3
	and	%o3, WSTATE_MASK, %o1
	/*
	 * NOTE(review): the masked wstate computed into %o1 above is dead --
	 * it is immediately overwritten by the sllx below.  Verify whether
	 * the two values were meant to be or'd together before the wrpr.
	 */
	sllx	%o0, WSTATE_USERSHIFT, %o1
	wrpr	%o1, 0, %wstate
	wrpr	%o0, 0, %otherwin
	wrpr	%g0, 0, %canrestore
	ldx	[PCPU(CURTHREAD)], %o0
	ldx	[%o0 + TD_KSTACK], %o0
	set	KSTACK_PAGES * PAGE_SIZE - SPOFF - CCFSZ, %o1
	retl
	 add	%o0, %o1, %sp
END(tl0_kstack_fixup)
322
323	/*
324	 * Generic trap type.  Call trap() with the specified type.
325	 */
	.macro	tl0_gen		type
	tl0_setup \type
	.align	32
	.endm

	/* As tl0_gen, but for 128-byte (4-slot) trap vectors. */
	.macro	tl0_wide	type
	tl0_setup \type
	.align	128
	.endm

	/*
	 * This is used to suck up the massive swaths of reserved trap types.
	 * Generates count "reserved" trap vectors.
	 */
	.macro	tl0_reserved	count
	.rept	\count
	tl0_gen	T_RESERVED
	.endr
	.endm
345
346	/*
347	 * NOTE: we cannot use mmu globals here because tl0_kstack may cause
348	 * an mmu fault.
349	 */
	.macro	tl0_data_excptn
	/*
	 * Read the fault address, status and tag access registers from the
	 * D-MMU and hand off to tl0_sfsr_trap with the trap type in %g2.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g5
	b	%xcc, tl0_sfsr_trap
	 mov	T_DATA_EXCPTN, %g2
	.align	32
	.endm

	/*
	 * Alignment fault: like tl0_data_excptn but only the sfar and sfsr
	 * are loaded (%g5/tar is not set here -- see note in tl0_sfsr_trap).
	 */
	.macro	tl0_align
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	b	%xcc, tl0_sfsr_trap
	 mov	T_ALIGN, %g2
	.align	32
	.endm
369
ENTRY(tl0_sfsr_trap)
	/*
	 * Common tail for tl0 faults that carry mmu state: %g2 = trap type,
	 * %g3 = sfar, %g4 = sfsr, %g5 = tar (NOTE(review): the tl0_align
	 * path does not set %g5, so MF_TAR holds a stale value there --
	 * verify callers).  %asi must already select ASI_DMMU.
	 */

	/*
	 * Clear the sfsr.
	 */
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	/*
	 * Get onto the kernel stack, save the mmu registers, and call
	 * common code.
	 */
	tl0_kstack
	sub	%sp, MF_SIZEOF, %sp
	stx	%g3, [%sp + SPOFF + CCFSZ + MF_SFAR]
	stx	%g4, [%sp + SPOFF + CCFSZ + MF_SFSR]
	stx	%g5, [%sp + SPOFF + CCFSZ + MF_TAR]
	rdpr	%pil, %o2
	add	%sp, SPOFF + CCFSZ, %o1
	b	%xcc, tl0_trap
	 mov	%g2, %o0
END(tl0_sfsr_trap)
391
	/*
	 * Interrupt from user mode at the given pil level; the mask (for
	 * the clear-softint register) goes in %o2 and the level in %o1.
	 */
	.macro	tl0_intr level, mask
	tl0_kstack
	set	\mask, %o2
	b	%xcc, tl0_intr_call_trap
	 mov	\level, %o1
	.align	32
	.endm
399
400/*
401 * Actually call tl0_trap, and do some work that cannot be done in tl0_intr
402 * because of space constraints.
403 */
ENTRY(tl0_intr_call_trap)
	/*
	 * Clear the softint (mask in %o2, written to %asr21), save the old
	 * %pil, raise %pil to the interrupt level, and enter tl0_trap with
	 * T_INTR.
	 */
	wr	%o2, 0, %asr21
	rdpr	%pil, %o2
	wrpr	%g0, %o1, %pil
	b	%xcc, tl0_trap
	 mov	T_INTR, %o0
END(tl0_intr_call_trap)
411
/* Instantiate one interrupt vector for "level" at trap level "traplvl". */
#define	INTR(level, traplvl)						\
	tl ## traplvl ## _intr	level, 1 << level

/* PIL_TICK uses mask 1 (tick softint) rather than 1 << level. */
#define	TICK(traplvl) \
	tl ## traplvl ## _intr	PIL_TICK, 1

/* The 15 interrupt-level vectors (level 14 is the tick interrupt). */
#define	INTR_LEVEL(tl)							\
	INTR(1, tl) ;							\
	INTR(2, tl) ;							\
	INTR(3, tl) ;							\
	INTR(4, tl) ;							\
	INTR(5, tl) ;							\
	INTR(6, tl) ;							\
	INTR(7, tl) ;							\
	INTR(8, tl) ;							\
	INTR(9, tl) ;							\
	INTR(10, tl) ;							\
	INTR(11, tl) ;							\
	INTR(12, tl) ;							\
	INTR(13, tl) ;							\
	TICK(tl) ;							\
	INTR(15, tl) ;

	.macro	tl0_intr_level
	INTR_LEVEL(0)
	.endm

	/* Interrupt vector trap: queue the packet via intr_enqueue. */
	.macro	tl0_intr_vector
	b,a	intr_enqueue
	.align	32
	.endm
443
	.macro	tl0_immu_miss
	/*
	 * Inline user instruction-tlb miss handler: look the faulting page
	 * up in the primary tsb and load the tlb on a hit; otherwise fall
	 * back to tl0_immu_miss_trap.
	 */

	/*
	 * Force kernel store order.
	 */
	wrpr	%g0, PSTATE_MMU, %pstate

	/*
	 * Extract the 8KB pointer and convert to an index.
	 */
	ldxa	[%g0] ASI_IMMU_TSB_8KB_PTR_REG, %g1
	srax	%g1, TTE_SHIFT, %g1

	/*
	 * Compute the stte address in the primary used tsb.
	 */
	and	%g1, (1 << TSB_PRIMARY_MASK_WIDTH) - 1, %g2
	sllx	%g2, TSB_PRIMARY_STTE_SHIFT, %g2
	setx	TSB_USER_MIN_ADDRESS, %g4, %g3
	add	%g2, %g3, %g2

	/*
	 * Preload the tte tag target.
	 */
	ldxa	[%g0] ASI_IMMU_TAG_TARGET_REG, %g3

	/*
	 * Preload tte data bits to check inside the bucket loop.
	 */
	and	%g1, TD_VA_LOW_MASK >> TD_VA_LOW_SHIFT, %g4
	sllx	%g4, TD_VA_LOW_SHIFT, %g4
	or	%g4, TD_EXEC, %g4

	/*
	 * Preload mask for tte data check.
	 */
	setx	TD_VA_LOW_MASK, %g5, %g1
	or	%g1, TD_EXEC, %g1

	/*
	 * Loop over the sttes in this bucket
	 */

	/*
	 * Load the tte (tag into %g6, data into %g7).
	 */
1:	ldda	[%g2] ASI_NUCLEUS_QUAD_LDD, %g6

	/*
	 * Compare the tag.
	 */
	cmp	%g6, %g3
	bne,pn	%xcc, 2f

	/*
	 * Compare the data (valid bit checked via brgez on %g7).
	 */
	 xor	%g7, %g4, %g5
	brgez,pn %g7, 2f
	 andcc	%g5, %g1, %g0
	bnz,pn	%xcc, 2f

	/*
	 * We matched a tte, load the tlb.
	 */

	/*
	 * Set the reference bit, if it's currently clear.
	 */
	 andcc	%g7, TD_REF, %g0
	bz,a,pn	%xcc, tl0_immu_miss_set_ref
	 nop

	/*
	 * Load the tte data into the tlb and retry the instruction.
	 */
	stxa	%g7, [%g0] ASI_ITLB_DATA_IN_REG
	retry

	/*
	 * Check the low bits to see if we've finished the bucket.
	 */
2:	add	%g2, STTE_SIZEOF, %g2
	andcc	%g2, TSB_PRIMARY_STTE_MASK, %g0
	bnz	%xcc, 1b
	 nop
	b,a	%xcc, tl0_immu_miss_trap
	.align	128
	.endm
532
ENTRY(tl0_immu_miss_set_ref)
	/*
	 * Set the reference bit in the matched tte (%g2 = stte address,
	 * %g7 = tte data) with an atomic compare-and-swap loop, then load
	 * the tlb and retry, unless the tte was invalidated under us.
	 */
	add	%g2, TTE_DATA, %g2
1:	or	%g7, TD_REF, %g1
	casxa	[%g2] ASI_N, %g7, %g1
	cmp	%g1, %g7
	bne,a,pn %xcc, 1b
	 mov	%g1, %g7

#if KTR_COMPILE & KTR_CT1
	CATR(KTR_CT1, "tl0_immu_miss: set ref"
	    , %g2, %g3, %g4, 7, 8, 9)
9:
#endif

	/*
	 * May have become invalid, in which case start over.
	 */
	brgez,pn %g1, 2f
	 nop

	/*
	 * Load the tte data into the tlb and retry the instruction.
	 */
	stxa	%g1, [%g0] ASI_ITLB_DATA_IN_REG
2:	retry
END(tl0_immu_miss_set_ref)
562
ENTRY(tl0_immu_miss_trap)
	/*
	 * Primary tsb miss for a user instruction access: hand off to the
	 * common C trap code with T_IMMU_MISS and the tag access register.
	 */

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Load the tar, sfar and sfsr aren't valid.
	 */
	wr	%g0, ASI_IMMU, %asi
	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2

#if KTR_COMPILE & KTR_CT1
	CATR(KTR_CT1, "tl0_immu_miss: trap sp=%#lx tar=%#lx"
	    , %g3, %g4, %g5, 7, 8, 9)
	stx	%sp, [%g3 + KTR_PARM1]
	stx	%g2, [%g3 + KTR_PARM2]
9:
#endif

	/*
	 * Save the mmu registers on the stack, and call common trap code.
	 */
	tl0_kstack
	sub	%sp, MF_SIZEOF, %sp
	stx	%g2, [%sp + SPOFF + CCFSZ + MF_TAR]
	rdpr	%pil, %o2
	add	%sp, SPOFF + CCFSZ, %o1
	b	%xcc, tl0_trap
	 mov	T_IMMU_MISS, %o0
END(tl0_immu_miss_trap)
594
	.macro	dmmu_miss_user
	/*
	 * Inline user data-tlb miss lookup in the primary tsb.  On a hit
	 * the tlb is loaded and the instruction retried; on a miss this
	 * macro falls through (callers append their own miss handling).
	 * Mirrors tl0_immu_miss but for the D-MMU, without TD_EXEC.
	 */

	/*
	 * Extract the 8KB pointer and convert to an index.
	 */
	ldxa	[%g0] ASI_DMMU_TSB_8KB_PTR_REG, %g1
	srax	%g1, TTE_SHIFT, %g1

	/*
	 * Compute the stte address in the primary used tsb.
	 */
	and	%g1, (1 << TSB_PRIMARY_MASK_WIDTH) - 1, %g2
	sllx	%g2, TSB_PRIMARY_STTE_SHIFT, %g2
	setx	TSB_USER_MIN_ADDRESS, %g4, %g3
	add	%g2, %g3, %g2

	/*
	 * Preload the tte tag target.
	 */
	ldxa	[%g0] ASI_DMMU_TAG_TARGET_REG, %g3

	/*
	 * Preload tte data bits to check inside the bucket loop.
	 */
	and	%g1, TD_VA_LOW_MASK >> TD_VA_LOW_SHIFT, %g4
	sllx	%g4, TD_VA_LOW_SHIFT, %g4

	/*
	 * Preload mask for tte data check.
	 */
	setx	TD_VA_LOW_MASK, %g5, %g1

	/*
	 * Loop over the sttes in this bucket
	 */

	/*
	 * Load the tte (tag into %g6, data into %g7).
	 */
1:	ldda	[%g2] ASI_NUCLEUS_QUAD_LDD, %g6

	/*
	 * Compare the tag.
	 */
	cmp	%g6, %g3
	bne,pn	%xcc, 2f

	/*
	 * Compare the data.
	 */
	 xor	%g7, %g4, %g5
	brgez,pn %g7, 2f
	 andcc	%g5, %g1, %g0
	bnz,pn	%xcc, 2f

	/*
	 * We matched a tte, load the tlb.
	 */

	/*
	 * Set the reference bit, if it's currently clear.
	 */
	 andcc	%g7, TD_REF, %g0
	bz,a,pn	%xcc, dmmu_miss_user_set_ref
	 nop

	/*
	 * Load the tte data into the tlb and retry the instruction.
	 */
	stxa	%g7, [%g0] ASI_DTLB_DATA_IN_REG
	retry

	/*
	 * Check the low bits to see if we've finished the bucket.
	 */
2:	add	%g2, STTE_SIZEOF, %g2
	andcc	%g2, TSB_PRIMARY_STTE_MASK, %g0
	bnz	%xcc, 1b
	 nop
	.endm
674
ENTRY(dmmu_miss_user_set_ref)
	/*
	 * Set the reference bit in the matched tte (%g2 = stte address,
	 * %g7 = tte data) with an atomic compare-and-swap loop, then load
	 * the tlb and retry, unless the tte was invalidated under us.
	 */
	add	%g2, TTE_DATA, %g2
1:	or	%g7, TD_REF, %g1
	casxa	[%g2] ASI_N, %g7, %g1
	cmp	%g1, %g7
	bne,a,pn %xcc, 1b
	 mov	%g1, %g7

#if KTR_COMPILE & KTR_CT1
	CATR(KTR_CT1, "tl0_dmmu_miss: set ref"
	    , %g2, %g3, %g4, 7, 8, 9)
9:
#endif

	/*
	 * May have become invalid, in which case start over.
	 */
	brgez,pn %g1, 2f
	 nop

	/*
	 * Load the tte data into the tlb and retry the instruction.
	 */
	stxa	%g1, [%g0] ASI_DTLB_DATA_IN_REG
2:	retry
END(dmmu_miss_user_set_ref)
704
	.macro	tl0_dmmu_miss
	/*
	 * Force kernel store order.
	 */
	wrpr	%g0, PSTATE_MMU, %pstate

	/*
	 * Try a fast inline lookup of the primary tsb.
	 */
	dmmu_miss_user

	/*
	 * Not in primary tsb, call c code.  Nothing else fits inline.
	 */
	b,a	tl0_dmmu_miss_trap
	.align	128
	.endm
722
ENTRY(tl0_dmmu_miss_trap)
	/*
	 * Primary tsb miss for a user data access: hand off to the common
	 * C trap code with T_DMMU_MISS and the tag access register.
	 */

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Load the tar, sfar and sfsr aren't valid.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2

#if KTR_COMPILE & KTR_CT1
	CATR(KTR_CT1, "tl0_dmmu_miss: trap sp=%#lx tar=%#lx"
	    , %g3, %g4, %g5, 7, 8, 9)
	stx	%sp, [%g3 + KTR_PARM1]
	stx	%g2, [%g3 + KTR_PARM2]
9:
#endif

	/*
	 * Save the mmu registers on the stack and call common trap code.
	 */
	tl0_kstack
	sub	%sp, MF_SIZEOF, %sp
	stx	%g2, [%sp + SPOFF + CCFSZ + MF_TAR]
	rdpr	%pil, %o2
	add	%sp, SPOFF + CCFSZ, %o1
	b	%xcc, tl0_trap
	 mov	T_DMMU_MISS, %o0
END(tl0_dmmu_miss_trap)
754
755	.macro	tl0_dmmu_prot
756	/*
757	 * Switch to alternate globals.
758	 */
759	wrpr	%g0, PSTATE_ALT, %pstate
760
761	/*
762	 * Load the tar, sfar and sfsr.
763	 */
764	wr	%g0, ASI_DMMU, %asi
765	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g2
766	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g3
767	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g4
768	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
769	membar	#Sync
770
771	/*
772	 * Save the mmu registers on the stack, switch to alternate globals,
773	 * and call common trap code.
774	 */
775	tl0_kstack
776	sub	%sp, MF_SIZEOF, %sp
777	stx	%g2, [%sp + SPOFF + CCFSZ + MF_TAR]
778	stx	%g3, [%sp + SPOFF + CCFSZ + MF_SFAR]
779	stx	%g4, [%sp + SPOFF + CCFSZ + MF_SFSR]
780	rdpr	%pil, %o2
781	add	%sp, SPOFF + CCFSZ, %o1
782	b	%xcc, tl0_trap
783	 mov	T_DMMU_PROT, %o0
784	.align	128
785	.endm
786
	/*
	 * Spill handlers for user windows.  The _0/_1 variants test the
	 * stack-pointer low bit to detect 64-bit vs 32-bit frames and fall
	 * into each other's vectors via the numeric labels (these macros
	 * are instantiated adjacently in the trap table); the _2/_3
	 * variants assume the wstate already proved the frame size.  The
	 * trailing RSF_* entries are the nested-fault resume vectors (see
	 * RESUME_SPILLFILL_MAGIC).
	 */
	.macro	tl0_spill_0_n
	andcc	%sp, 1, %g0
	bz,pn	%xcc, 2f
	 wr	%g0, ASI_AIUP, %asi
1:	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	wrpr	%g0, WSTATE_ASSUME64, %wstate
	retry
	.align	32
	RSF_TRAP(T_SPILL)
	RSF_TRAP(T_SPILL)
	.endm

	.macro	tl0_spill_1_n
	andcc	%sp, 1, %g0
	bnz	%xcc, 1b
	 wr	%g0, ASI_AIUP, %asi
2:	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	SPILL(stwa, %sp, 4, %asi)
	saved
	wrpr	%g0, WSTATE_ASSUME32, %wstate
	retry
	.align	32
	RSF_TRAP(T_SPILL)
	RSF_TRAP(T_SPILL)
	.endm

	.macro	tl0_spill_2_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_ALIGN_RETRY(WSTATE_TEST32)
	RSF_TRAP(T_SPILL)
	.endm

	.macro	tl0_spill_3_n
	wr	%g0, ASI_AIUP, %asi
	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_ALIGN_RETRY(WSTATE_TEST64)
	RSF_TRAP(T_SPILL)
	.endm
834
	/*
	 * Fill handlers for user windows; the exact mirror of the spill
	 * handlers above, using FILL/restored instead of SPILL/saved.
	 */
	.macro	tl0_fill_0_n
	andcc	%sp, 1, %g0
	bz,pn	%xcc, 2f
	 wr	%g0, ASI_AIUP, %asi
1:	FILL(ldxa, %sp + SPOFF, 8, %asi)
	restored
	wrpr	%g0, WSTATE_ASSUME64, %wstate
	retry
	.align	32
	RSF_TRAP(T_FILL)
	RSF_TRAP(T_FILL)
	.endm

	.macro	tl0_fill_1_n
	andcc	%sp, 1, %g0
	bnz	%xcc, 1b
	 wr	%g0, ASI_AIUP, %asi
2:	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	FILL(lduwa, %sp, 4, %asi)
	restored
	wrpr	%g0, WSTATE_ASSUME32, %wstate
	retry
	.align	32
	RSF_TRAP(T_FILL)
	RSF_TRAP(T_FILL)
	.endm

	.macro	tl0_fill_2_n
	wr	%g0, ASI_AIUP, %asi
	FILL(ldxa, %sp + SPOFF, 8, %asi)
	restored
	retry
	.align	32
	RSF_ALIGN_RETRY(WSTATE_TEST32)
	RSF_TRAP(T_FILL)
	.endm

	.macro	tl0_fill_3_n
	wr	%g0, ASI_AIUP, %asi
	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	FILL(lduwa, %sp, 4, %asi)
	restored
	retry
	.align	32
	RSF_ALIGN_RETRY(WSTATE_TEST64)
	RSF_TRAP(T_FILL)
	.endm
882
ENTRY(tl0_sftrap)
	/*
	 * A user spill/fill failed (see RSF_TRAP): restore the trapped
	 * window's %cwp from %tstate and enter tl0_trap with the trap type
	 * passed in %g2.
	 */
	rdpr	%tstate, %g1
	and	%g1, TSTATE_CWP_MASK, %g1
	wrpr	%g1, 0, %cwp
	tl0_kstack
	rdpr	%pil, %o2
	b	%xcc, tl0_trap
	 mov	%g2, %o0
END(tl0_sftrap)
892
	/* Generate count fatal (128-byte) spill vectors. */
	.macro	tl0_spill_bad	count
	.rept	\count
	tl0_wide T_SPILL
	.endr
	.endm

	/* Generate count fatal (128-byte) fill vectors. */
	.macro	tl0_fill_bad	count
	.rept	\count
	tl0_wide T_FILL
	.endr
	.endm

	/* Generate count soft-trap vectors. */
	.macro	tl0_soft	count
	.rept	\count
	tl0_gen	T_SOFT
	.endr
	.endm
910
	/*
	 * At tl1 we are already on the kernel stack; just open a new
	 * register window with a fresh C call frame.
	 */
	.macro	tl1_kstack
	save	%sp, -CCFSZ, %sp
	.endm

	/*
	 * Common tl1 trap setup: branch to tl1_trap with the trap type
	 * (or'd with T_KERNEL) in %o0 and the saved %pil in %o2.
	 */
	.macro	tl1_setup	type
	tl1_kstack
	mov	\type | T_KERNEL, %o0
	b	%xcc, tl1_trap
	 rdpr	%pil, %o2
	.endm

	.macro	tl1_gen		type
	tl1_setup \type
	.align	32
	.endm

	/* As tl1_gen, but for 128-byte (4-slot) trap vectors. */
	.macro	tl1_wide	type
	tl1_setup \type
	.align	128
	.endm

	.macro	tl1_reserved	count
	.rept	\count
	tl1_gen	T_RESERVED
	.endr
	.endm
937
	/* Kernel instruction access exception: straight into tl1_trap. */
	.macro	tl1_insn_excptn
	tl1_kstack
	wrpr	%g0, PSTATE_ALT, %pstate
	rdpr	%pil, %o2
	b	%xcc, tl1_trap
	 mov	T_INSN_EXCPTN | T_KERNEL, %o0
	.align	32
	.endm

	/*
	 * Kernel data access exception: too big for the vector, branch to
	 * the out-of-line handler.
	 */
	.macro	tl1_data_excptn
	b,a	%xcc, tl1_data_exceptn_trap
	 nop
	.align	32
	.endm
952
ENTRY(tl1_data_exceptn_trap)
	/*
	 * Kernel data access exception.  If the fault happened during a
	 * window spill/fill, clear the sfsr and resume inside the vector
	 * (RESUME_SPILLFILL_MAGIC branches to 1f when it was NOT a
	 * spill/fill); otherwise hand off to tl1_sfsr_trap with the trap
	 * type in %g1.
	 */
	wr	%g0, ASI_DMMU, %asi
	RESUME_SPILLFILL_MAGIC(%g1, %g2, RSF_MMU, 1f)
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	done

1:	wrpr	%g0, PSTATE_ALT, %pstate
	b	%xcc, tl1_sfsr_trap
	 mov	T_DATA_EXCPTN | T_KERNEL, %g1
/*
 * Fix: END must name the same symbol as ENTRY; this previously read
 * END(tl1_data_exceptn), a symbol that does not exist.
 */
END(tl1_data_exceptn_trap)
963
964	/*
965	 * NOTE: We switch to mmu globals here, to avoid needing to save
966	 * alternates, which may be live.
967	 */
	/* Kernel alignment fault: branch out of line on mmu globals. */
	.macro	tl1_align
	b	%xcc, tl1_align_trap
	 wrpr	%g0, PSTATE_MMU, %pstate
	.align	32
	.endm
973
ENTRY(tl1_align_trap)
	/*
	 * Kernel alignment fault.  If it happened during a window
	 * spill/fill, clear the sfsr and resume inside the vector at the
	 * alignment-retry offset; otherwise hand off to tl1_sfsr_trap with
	 * the trap type in %g1.
	 */
	wr	%g0, ASI_DMMU, %asi
	RESUME_SPILLFILL_MAGIC(%g1, %g2, RSF_ALIGN, 1f)
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	done

1:	wrpr	%g0, PSTATE_ALT, %pstate
	b	%xcc, tl1_sfsr_trap
	 mov	T_ALIGN | T_KERNEL, %g1
END(tl1_align_trap)
984
ENTRY(tl1_sfsr_trap)
	/*
	 * Common tail for tl1 data faults: %g1 = trap type, %asi already
	 * selects ASI_DMMU (both callers set it, hence the commented-out
	 * wr below).  Read and clear the sfsr, save the fault state in an
	 * mmuframe, and enter tl1_trap.
	 */
!	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g2
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g3
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	tl1_kstack
	sub	%sp, MF_SIZEOF, %sp
	stx	%g2, [%sp + SPOFF + CCFSZ + MF_SFAR]
	stx	%g3, [%sp + SPOFF + CCFSZ + MF_SFSR]
	rdpr	%pil, %o2
	add	%sp, SPOFF + CCFSZ, %o1
	b	%xcc, tl1_trap
	 mov	%g1, %o0
/*
 * Fix: END must name the same symbol as ENTRY; this previously read
 * END(tl1_align_trap), duplicating the END of the preceding function.
 */
END(tl1_sfsr_trap)
1001
	/*
	 * Interrupt at tl1: clear the softint, raise %pil to the interrupt
	 * level, and enter tl1_trap with T_INTR | T_KERNEL.
	 */
	.macro	tl1_intr level, mask, type
	tl1_kstack
	rdpr	%pil, %o2
	wrpr	%g0, \level, %pil
	set	\mask, %o3
	wr	%o3, 0, %asr21
	mov	T_INTR | T_KERNEL, %o0
	b	%xcc, tl1_trap
	 mov	\level, %o1
	.align	32
	.endm

	.macro	tl1_intr_level
	INTR_LEVEL(1)
	.endm

	/* Interrupt vector trap at tl1: queue the packet via intr_enqueue. */
	.macro	tl1_intr_vector
	b,a	intr_enqueue
	.align	32
	.endm
1022
/*
 * Read an interrupt packet from the interrupt receive registers, store it
 * in the per-cpu interrupt queue, and post a softint at the priority found
 * in the interrupt vector table.  Runs entirely on globals; exits with
 * retry.
 */
ENTRY(intr_enqueue)
#if KTR_COMPILE & KTR_CT1
	CATR(KTR_CT1, "intr_enqueue: td=%p (%s) tl=%#lx pc=%#lx sp=%#lx"
	   , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	add	%g2, P_COMM, %g2
	stx	%g2, [%g1 + KTR_PARM2]
	rdpr	%tl, %g2
	stx	%g2, [%g1 + KTR_PARM3]
	rdpr	%tpc, %g2
	stx	%g2, [%g1 + KTR_PARM4]
	stx	%sp, [%g1 + KTR_PARM5]
9:
#endif

	/*
	 * Find the head of the queue and advance it.
	 */
	ldx	[PCPU(IQ)], %g1
	ldx	[%g1 + IQ_HEAD], %g2
	add	%g2, 1, %g3
	and	%g3, IQ_MASK, %g3
	stx	%g3, [%g1 + IQ_HEAD]

#if KTR_COMPILE & KTR_CT1
	CATR(KTR_CT1, "intr_enqueue: cpu=%d head=%d tail=%d iqe=%d"
	    , %g4, %g5, %g6, 7, 8, 9)
	lduw	[PCPU(CPUID)], %g5
	stx	%g5, [%g4 + KTR_PARM1]
	stx	%g3, [%g4 + KTR_PARM2]
	ldx	[%g1 + IQ_TAIL], %g5
	stx	%g5, [%g4 + KTR_PARM3]
	stx	%g2, [%g4 + KTR_PARM4]
9:
#endif

#ifdef INVARIANTS
	/*
	 * If the new head is the same as the tail, the next interrupt will
	 * overwrite unserviced packets.  This is bad.
	 */
	ldx	[%g1 + IQ_TAIL], %g4
	cmp	%g4, %g3
	be	%xcc, 3f
	 nop
#endif

	/*
	 * Load the interrupt packet from the hardware.
	 */
	wr	%g0, ASI_SDB_INTR_R, %asi
	ldxa	[%g0] ASI_INTR_RECEIVE, %g3
	ldxa	[%g0 + AA_SDB_INTR_D0] %asi, %g4
	ldxa	[%g0 + AA_SDB_INTR_D1] %asi, %g5
	ldxa	[%g0 + AA_SDB_INTR_D2] %asi, %g6
	stxa	%g0, [%g0] ASI_INTR_RECEIVE
	membar	#Sync

	/*
	 * Store the tag and first data word in the iqe.  These are always
	 * valid.
	 */
	sllx	%g2, IQE_SHIFT, %g2
	add	%g2, %g1, %g2
	stw	%g3, [%g2 + IQE_TAG]
	stx	%g4, [%g2 + IQE_VEC]

	/*
	 * Find the interrupt vector associated with this source.
	 */
	ldx	[PCPU(IVT)], %g3
	sllx	%g4, IV_SHIFT, %g4

	/*
	 * If the 2nd data word, the function, is zero the actual function
	 * and argument are in the interrupt vector table, so retrieve them.
	 * The function is used as a lock on the vector data.  If it can be
	 * read atomically as non-zero, the argument and priority are valid.
	 * Otherwise this is either a true stray interrupt, or someone is
	 * trying to deregister the source as we speak.  In either case,
	 * bail and log a stray.
	 */
	brnz,pn %g5, 1f
	 add	%g3, %g4, %g3
	casxa	[%g3] ASI_N, %g0, %g5
	brz,pn	%g5, 2f
	 ldx	[%g3 + IV_ARG], %g6

	/*
	 * Save the priority and the two remaining data words in the iqe.
	 */
1:	lduw	[%g3 + IV_PRI], %g4
	stw	%g4, [%g2 + IQE_PRI]
	stx	%g5, [%g2 + IQE_FUNC]
	stx	%g6, [%g2 + IQE_ARG]

	/*
	 * Trigger a softint at the level indicated by the priority.
	 */
	mov	1, %g3
	sllx	%g3, %g4, %g3
	wr	%g3, 0, %asr20

#if KTR_COMPILE & KTR_CT1
	CATR(KTR_CT1, "intr_enqueue: tag=%#lx vec=%#lx pri=%d func=%p arg=%p"
	    , %g1, %g3, %g4, 7, 8, 9)
	lduw	[%g2 + IQE_TAG], %g3
	stx	%g3, [%g1 + KTR_PARM1]
	ldx	[%g2 + IQE_VEC], %g3
	stx	%g3, [%g1 + KTR_PARM2]
	lduw	[%g2 + IQE_PRI], %g3
	stx	%g3, [%g1 + KTR_PARM3]
	stx	%g5, [%g1 + KTR_PARM4]
	stx	%g6, [%g1 + KTR_PARM5]
9:
#endif

	retry

	/*
	 * Either this is a true stray interrupt, or an interrupt occurred
	 * while the source was being deregistered.  In either case, just
	 * log the stray and return.  XXX
	 */
2:	DEBUGGER()

#ifdef INVARIANTS
	/*
	 * The interrupt queue is about to overflow.  We are in big trouble.
	 */
3:	DEBUGGER()
#endif
END(intr_enqueue)
1157
	/* Kernel instruction-tlb miss: no inline lookup, go to tl1_trap. */
	.macro	tl1_immu_miss
	wrpr	%g0, PSTATE_ALT, %pstate
	tl1_kstack
	rdpr	%pil, %o2
	b	%xcc, tl1_trap
	 mov	T_IMMU_MISS | T_KERNEL, %o0
	.align	128
	.endm
1166
	.macro	tl1_dmmu_miss
	/*
	 * Load the target tte tag, and extract the context.  If the context
	 * is non-zero handle as user space access.  In either case, load the
	 * tsb 8k pointer.
	 */
	ldxa	[%g0] ASI_DMMU_TAG_TARGET_REG, %g1
	srlx	%g1, TT_CTX_SHIFT, %g2
	brnz,pn	%g2, tl1_dmmu_miss_user
	 ldxa	[%g0] ASI_DMMU_TSB_8KB_PTR_REG, %g2

	/*
	 * Convert the tte pointer to an stte pointer, and add extra bits to
	 * accommodate for large tsb.
	 */
	sllx	%g2, STTE_SHIFT - TTE_SHIFT, %g2
#ifdef notyet
	mov	AA_DMMU_TAR, %g3
	ldxa	[%g3] ASI_DMMU, %g3
	srlx	%g3, TSB_1M_STTE_SHIFT, %g3
	and	%g3, TSB_KERNEL_MASK >> TSB_1M_STTE_SHIFT, %g3
	sllx	%g3, TSB_1M_STTE_SHIFT, %g3
	add	%g2, %g3, %g2
#endif

	/*
	 * Load the tte, check that it's valid and that the tags match.
	 */
	ldda	[%g2] ASI_NUCLEUS_QUAD_LDD, %g4 /*, %g5 */
	brgez,pn %g5, 2f
	 cmp	%g4, %g1
	bne	%xcc, 2f
	 EMPTY

	/*
	 * Set the reference bit, if it's currently clear.
	 * NOTE(review): unlike the tl0 paths this is a plain stx, not a
	 * casxa loop -- presumably safe at tl1; confirm.
	 */
	andcc	%g5, TD_REF, %g0
	bnz	%xcc, 1f
	 or	%g5, TD_REF, %g1
	stx	%g1, [%g2 + ST_TTE + TTE_DATA]

	/*
	 * Load the tte data into the TLB and retry the instruction.
	 */
1:	stxa	%g5, [%g0] ASI_DTLB_DATA_IN_REG
	retry

	/*
	 * Switch to alternate globals.
	 */
2:	wrpr	%g0, PSTATE_ALT, %pstate

	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1

	tl1_kstack
	sub	%sp, MF_SIZEOF, %sp
	stx	%g1, [%sp + SPOFF + CCFSZ + MF_TAR]
	wrpr	%g0, PSTATE_ALT, %pstate
	rdpr	%pil, %o2
	add	%sp, SPOFF + CCFSZ, %o1
	b	%xcc, tl1_trap
	 mov	T_DMMU_MISS | T_KERNEL, %o0
	.align	128
	.endm
1233
ENTRY(tl1_dmmu_miss_user)
	/*
	 * Data MMU miss on a user address taken at TL=1 (e.g. while
	 * spilling/filling a window to/from the user stack).
	 * Try a fast inline lookup of the primary tsb.
	 */
	dmmu_miss_user

	/* Handle faults during window spill/fill. */
	RESUME_SPILLFILL_MAGIC(%g1, %g2, RSF_MMU, 1f)
#if KTR_COMPILE & KTR_CT1
	CATR(KTR_CT1, "tl1_dmmu_miss_user: resume spillfill npc=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	rdpr	%tnpc, %g2
	stx	%g2, [%g1 + KTR_PARM1]
9:
#endif
	done
1:

#if KTR_COMPILE & KTR_CT1
	CATR(KTR_CT1, "tl1_dmmu_miss_user: trap", %g1, %g2, %g3, 7, 8, 9)
9:
#endif

	/*
	 * The fast lookup failed; fall back to the full trap handler.
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/* Fetch the faulting target address from the MMU. */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1
#if KTR_COMPILE & KTR_CT1
	CATR(KTR_CT1, "tl1_dmmu_miss: trap sp=%#lx tar=%#lx"
	    , %g2, %g3, %g4, 7, 8, 9)
	stx	%sp, [%g2 + KTR_PARM1]
	stx	%g1, [%g2 + KTR_PARM2]
9:
#endif

	/* Build an mmu frame on the kernel stack and call tl1_trap. */
	tl1_kstack
	sub	%sp, MF_SIZEOF, %sp
	stx	%g1, [%sp + SPOFF + CCFSZ + MF_TAR]
	rdpr	%pil, %o2
	add	%sp, SPOFF + CCFSZ, %o1
	b	%xcc, tl1_trap
	 mov	T_DMMU_MISS | T_KERNEL, %o0
END(tl1_dmmu_miss_user)
1280
	.macro	tl1_dmmu_prot
	/*
	 * Kernel data protection fault (TL=1).  If the fault occurred
	 * during a window spill/fill, perform the resume magic, clear the
	 * sfsr and return; otherwise take the full trap path below.
	 */
	wr	%g0, ASI_DMMU, %asi
	RESUME_SPILLFILL_MAGIC(%g1, %g2, RSF_MMU, 1f)
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	done

	/*
	 * Switch to alternate globals.
	 */
1:	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Load the sfar, sfsr and tar.  Clear the sfsr.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g2
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g3
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	/* Build an mmu frame and hand off to the generic TL1 handler. */
	tl1_kstack
	sub	%sp, MF_SIZEOF, %sp
	stx	%g1, [%sp + SPOFF + CCFSZ + MF_TAR]
	stx	%g2, [%sp + SPOFF + CCFSZ + MF_SFAR]
	stx	%g3, [%sp + SPOFF + CCFSZ + MF_SFSR]
	rdpr	%pil, %o2
	add	%sp, SPOFF + CCFSZ, %o1
	b	%xcc, tl1_trap
	 mov	T_DMMU_PROT | T_KERNEL, %o0
	.align	128
	.endm
1312
	.macro	tl1_spill_0_n
	/*
	 * Spill a 64-bit kernel window to the kernel stack.  Faults here
	 * are fatal.
	 */
	SPILL(stx, %sp + SPOFF, 8, EMPTY)
	saved
	retry
	.align	32
	RSF_FATAL(T_SPILL)
	RSF_FATAL(T_SPILL)
	.endm
1321
	.macro	tl1_spill_4_n
	/*
	 * Spill a user window while in kernel mode.  Bit 0 of the stack
	 * pointer distinguishes a 64-bit (stack-biased) frame from a
	 * 32-bit one; the 32-bit case branches to the 2: label inside
	 * tl1_spill_5_n below.  The cross-macro 1:/2: references only
	 * resolve because these two macros are instantiated in adjacent
	 * trap table slots.
	 */
	andcc	%sp, 1, %g0
	bz,pn	%xcc, 2f
	 wr	%g0, ASI_AIUP, %asi
1:	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_5_n
	/*
	 * 32-bit variant of the user window spill, entered from the branch
	 * in tl1_spill_4_n; a 64-bit frame branches back to 1: above.
	 * PSTATE_AM masks the upper 32 address bits for the 32-bit stores.
	 */
	andcc	%sp, 1, %g0
	bnz	%xcc, 1b
	 wr	%g0, ASI_AIUP, %asi
2:	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm
1346
	.macro	tl1_spill_6_n
	/*
	 * Spill a 64-bit user window in a transition window state; an
	 * alignment fault retries through the 32-bit wstate test.
	 */
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_ALIGN_RETRY(WSTATE_TRANSITION | WSTATE_TEST32)
	RSF_SPILL_TOPCB
	.endm
1356
	.macro	tl1_spill_7_n
	/*
	 * Spill a 32-bit user window in a transition window state; an
	 * alignment fault retries through the 64-bit wstate test.
	 * PSTATE_AM masks the upper 32 address bits.
	 */
	wr	%g0, ASI_AIUP, %asi
	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_ALIGN_RETRY(WSTATE_TRANSITION | WSTATE_TEST64)
	RSF_SPILL_TOPCB
	.endm
1367
	.macro	tl1_spill_0_o
	/*
	 * Spill a user window from the "other" window state.  As with the
	 * _n pair above, bit 0 of %sp selects 64-bit vs 32-bit handling,
	 * and the 1:/2: labels cross into tl1_spill_1_o, which must occupy
	 * the adjacent trap table slot.  On success the wstate is updated
	 * to assume the frame size just detected.
	 */
	andcc	%sp, 1, %g0
	bz,pn	%xcc, 2f
	 wr	%g0, ASI_AIUP, %asi
1:	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	wrpr	%g0, WSTATE_ASSUME64 << WSTATE_USERSHIFT, %wstate
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_1_o
	/*
	 * 32-bit counterpart of tl1_spill_0_o, entered from its branch;
	 * a 64-bit frame branches back to 1: above.
	 */
	andcc	%sp, 1, %g0
	bnz	%xcc, 1b
	 wr	%g0, ASI_AIUP, %asi
2:	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	SPILL(stwa, %sp, 4, %asi)
	saved
	wrpr	%g0, WSTATE_ASSUME32 << WSTATE_USERSHIFT, %wstate
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm
1394
	.macro	tl1_spill_2_o
	/*
	 * Spill a 64-bit user "other" window; an alignment fault retries
	 * through the user 32-bit wstate test.
	 */
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_ALIGN_RETRY(WSTATE_TEST32 << WSTATE_USERSHIFT)
	RSF_SPILL_TOPCB
	.endm
1404
	.macro	tl1_spill_3_o
	/*
	 * Spill a 32-bit user "other" window; an alignment fault retries
	 * through the user 64-bit wstate test.
	 */
	wr	%g0, ASI_AIUP, %asi
	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_ALIGN_RETRY(WSTATE_TEST64 << WSTATE_USERSHIFT)
	RSF_SPILL_TOPCB
	.endm
1415
	.macro	tl1_fill_0_n
	/*
	 * Fill a 64-bit kernel window from the kernel stack.  Faults here
	 * are fatal.
	 */
	FILL(ldx, %sp + SPOFF, 8, EMPTY)
	restored
	retry
	.align	32
	RSF_FATAL(T_FILL)
	RSF_FATAL(T_FILL)
	.endm
1424
	.macro	tl1_fill_4_n
	/*
	 * Fill a user window while in kernel mode.  Bit 0 of the stack
	 * pointer selects 64-bit vs 32-bit handling; the 32-bit case
	 * branches to the 2: label inside tl1_fill_5_n below.  The
	 * cross-macro 1:/2: references only resolve because these two
	 * macros are instantiated in adjacent trap table slots.
	 */
	andcc	%sp, 1, %g0
	bz,pn	%xcc, 2f
	 wr	%g0, ASI_AIUP, %asi
1:	FILL(ldxa, %sp + SPOFF, 8, %asi)
	restored
	retry
	.align 32
	RSF_FILL_MAGIC
	RSF_FILL_MAGIC
	.endm

	.macro	tl1_fill_5_n
	/*
	 * 32-bit variant of the user window fill, entered from the branch
	 * in tl1_fill_4_n; a 64-bit frame branches back to 1: above.
	 * PSTATE_AM masks the upper 32 address bits for the 32-bit loads.
	 */
	andcc	%sp, 1, %g0
	bnz,pn	%xcc, 1b
	 wr	%g0, ASI_AIUP, %asi
2:	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	FILL(lduwa, %sp, 4, %asi)
	restored
	retry
	.align 32
	RSF_FILL_MAGIC
	RSF_FILL_MAGIC
	.endm
1449
	.macro	tl1_fill_6_n
	/*
	 * Fill a 64-bit user window in a transition window state; an
	 * alignment fault retries through the 32-bit wstate test.
	 */
	wr	%g0, ASI_AIUP, %asi
	FILL(ldxa, %sp + SPOFF, 8, %asi)
	restored
	retry
	.align 32
	RSF_ALIGN_RETRY(WSTATE_TEST32 | WSTATE_TRANSITION)
	RSF_FILL_MAGIC
	.endm
1459
	.macro	tl1_fill_7_n
	/*
	 * Fill a 32-bit user window in a transition window state; an
	 * alignment fault retries through the 64-bit wstate test.
	 * PSTATE_AM masks the upper 32 address bits.
	 */
	wr	%g0, ASI_AIUP, %asi
	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	FILL(lduwa, %sp, 4, %asi)
	restored
	retry
	.align 32
	RSF_ALIGN_RETRY(WSTATE_TEST64 | WSTATE_TRANSITION)
	RSF_FILL_MAGIC
	.endm
1470
1471/*
1472 * This is used to spill windows that are still occupied with user
1473 * data on kernel entry to the pcb.
1474 */
1475ENTRY(tl1_spill_topcb)
1476	wrpr	%g0, PSTATE_ALT, %pstate
1477
1478	/* Free some globals for our use. */
1479	sub	%g6, 24, %g6
1480	stx	%g1, [%g6]
1481	stx	%g2, [%g6 + 8]
1482	stx	%g3, [%g6 + 16]
1483
1484	ldx	[PCPU(CURPCB)], %g1
1485	ldx	[%g1 + PCB_NSAVED], %g2
1486
1487	sllx	%g2, 3, %g3
1488	add	%g3, %g1, %g3
1489	stx	%sp, [%g3 + PCB_RWSP]
1490
1491	sllx	%g2, 7, %g3
1492	add	%g3, %g1, %g3
1493	SPILL(stx, %g3 + PCB_RW, 8, EMPTY)
1494
1495	inc	%g2
1496	stx	%g2, [%g1 + PCB_NSAVED]
1497
1498#if KTR_COMPILE & KTR_CT1
1499	CATR(KTR_CT1, "tl1_spill_topcb: pc=%lx sp=%#lx nsaved=%d"
1500	   , %g1, %g2, %g3, 7, 8, 9)
1501	rdpr	%tpc, %g2
1502	stx	%g2, [%g1 + KTR_PARM1]
1503	stx	%sp, [%g1 + KTR_PARM2]
1504	ldx	[PCPU(CURPCB)], %g2
1505	ldx	[%g2 + PCB_NSAVED], %g2
1506	stx	%g2, [%g1 + KTR_PARM3]
15079:
1508#endif
1509
1510	saved
1511
1512	ldx	[%g6 + 16], %g3
1513	ldx	[%g6 + 8], %g2
1514	ldx	[%g6], %g1
1515	add	%g6, 24, %g6
1516	retry
1517END(tl1_spill_topcb)
1518
	.macro	tl1_spill_bad	count
	/* Fill \count trap table slots with the generic wide spill vector. */
	.rept	\count
	tl1_wide T_SPILL
	.endr
	.endm
1524
	.macro	tl1_fill_bad	count
	/* Fill \count trap table slots with the generic wide fill vector. */
	.rept	\count
	tl1_wide T_FILL
	.endr
	.endm
1530
	.macro	tl1_breakpoint
	/* Kernel breakpoint: vector straight to the out-of-line handler. */
	b,a	%xcc, tl1_breakpoint_trap
	.align	32
	.endm
1535
ENTRY(tl1_breakpoint_trap)
	/*
	 * Kernel breakpoint handler.  Flush the register windows and save
	 * the frame pointer in a kernel frame so the debugger can walk the
	 * stack, then hand off to the generic TL1 trap handler.
	 */
	tl1_kstack
	sub	%sp, KF_SIZEOF, %sp
	flushw
	stx	%fp, [%sp + SPOFF + CCFSZ + KF_FP]
	mov	T_BREAKPOINT | T_KERNEL, %o0
	add	%sp, SPOFF + CCFSZ, %o1
	b	%xcc, tl1_trap
	 rdpr	%pil, %o2
END(tl1_breakpoint_trap)
1546
	.macro	tl1_soft	count
	/* Fill \count slots with the generic kernel soft-trap vector. */
	.rept	\count
	tl1_gen	T_SOFT | T_KERNEL
	.endr
	.endm
1552
	.sect	.trap
	.align	0x8000
	.globl	tl0_base

/*
 * Trap table for traps taken from user mode (TL=0).  Each vector below
 * expands one of the macros defined earlier in this file; the `!'
 * comments give the hardware trap type numbers.
 */
tl0_base:
	tl0_reserved	1		! 0x0 unused
tl0_power_on:
	tl0_gen		T_POWER_ON	! 0x1 power on reset
tl0_watchdog:
	tl0_gen		T_WATCHDOG	! 0x2 watchdog reset
tl0_reset_ext:
	tl0_gen		T_RESET_EXT	! 0x3 externally initiated reset
tl0_reset_soft:
	tl0_gen		T_RESET_SOFT	! 0x4 software initiated reset
tl0_red_state:
	tl0_gen		T_RED_STATE	! 0x5 red state exception
	tl0_reserved	2		! 0x6-0x7 reserved
tl0_insn_excptn:
	tl0_gen		T_INSN_EXCPTN	! 0x8 instruction access exception
	tl0_reserved	1		! 0x9 reserved
tl0_insn_error:
	tl0_gen		T_INSN_ERROR	! 0xa instruction access error
	tl0_reserved	5		! 0xb-0xf reserved
tl0_insn_illegal:
	tl0_gen		T_INSN_ILLEGAL	! 0x10 illegal instruction
tl0_priv_opcode:
	tl0_gen		T_PRIV_OPCODE	! 0x11 privileged opcode
	tl0_reserved	14		! 0x12-0x1f reserved
tl0_fp_disabled:
	tl0_gen		T_FP_DISABLED	! 0x20 floating point disabled
tl0_fp_ieee:
	tl0_gen		T_FP_IEEE	! 0x21 floating point exception ieee
tl0_fp_other:
	tl0_gen		T_FP_OTHER	! 0x22 floating point exception other
tl0_tag_ovflw:
	tl0_gen		T_TAG_OVFLW	! 0x23 tag overflow
tl0_clean_window:
	clean_window			! 0x24 clean window
tl0_divide:
	tl0_gen		T_DIVIDE	! 0x28 division by zero
	tl0_reserved	7		! 0x29-0x2f reserved
tl0_data_excptn:
	tl0_data_excptn			! 0x30 data access exception
	tl0_reserved	1		! 0x31 reserved
tl0_data_error:
	tl0_gen		T_DATA_ERROR	! 0x32 data access error
	tl0_reserved	1		! 0x33 reserved
tl0_align:
	tl0_align			! 0x34 memory address not aligned
tl0_align_lddf:
	tl0_gen		T_ALIGN_LDDF	! 0x35 lddf memory address not aligned
tl0_align_stdf:
	tl0_gen		T_ALIGN_STDF	! 0x36 stdf memory address not aligned
tl0_priv_action:
	tl0_gen		T_PRIV_ACTION	! 0x37 privileged action
	tl0_reserved	9		! 0x38-0x40 reserved
tl0_intr_level:
	tl0_intr_level			! 0x41-0x4f interrupt level 1 to 15
	tl0_reserved	16		! 0x50-0x5f reserved
tl0_intr_vector:
	tl0_intr_vector			! 0x60 interrupt vector
tl0_watch_phys:
	tl0_gen		T_WATCH_PHYS	! 0x61 physical address watchpoint
tl0_watch_virt:
	tl0_gen		T_WATCH_VIRT	! 0x62 virtual address watchpoint
tl0_ecc:
	tl0_gen		T_ECC		! 0x63 corrected ecc error
tl0_immu_miss:
	tl0_immu_miss			! 0x64 fast instruction access mmu miss
tl0_dmmu_miss:
	tl0_dmmu_miss			! 0x68 fast data access mmu miss
tl0_dmmu_prot:
	tl0_dmmu_prot			! 0x6c fast data access protection
	tl0_reserved	16		! 0x70-0x7f reserved
tl0_spill_0_n:
	tl0_spill_0_n			! 0x80 spill 0 normal
tl0_spill_1_n:
	tl0_spill_1_n			! 0x84 spill 1 normal
tl0_spill_2_n:
	tl0_spill_2_n			! 0x88 spill 2 normal
tl0_spill_3_n:
	tl0_spill_3_n			! 0x8c spill 3 normal
	tl0_spill_bad	12		! 0x90-0xbf spill normal, other
tl0_fill_0_n:
	tl0_fill_0_n			! 0xc0 fill 0 normal
tl0_fill_1_n:
	tl0_fill_1_n			! 0xc4 fill 1 normal
tl0_fill_2_n:
	tl0_fill_2_n			! 0xc8 fill 2 normal
tl0_fill_3_n:
	tl0_fill_3_n			! 0xcc fill 3 normal
	tl0_fill_bad	12		! 0xd0-0xff fill normal, other
tl0_sun_syscall:
	tl0_reserved	1		! 0x100 sun system call
tl0_breakpoint:
	tl0_gen		T_BREAKPOINT	! 0x101 breakpoint
	tl0_soft	6		! 0x102-0x107 trap instruction
	tl0_soft	1		! 0x108 SVr4 syscall
	tl0_gen		T_SYSCALL	! 0x109 BSD syscall
	tl0_soft	118		! 0x110-0x17f trap instruction
	tl0_reserved	128		! 0x180-0x1ff reserved
1654
/*
 * Trap table for traps taken from kernel mode (TL=1); vectors are the
 * TL=0 trap numbers offset by 0x200.
 */
tl1_base:
	tl1_reserved	1		! 0x200 unused
tl1_power_on:
	tl1_gen		T_POWER_ON	! 0x201 power on reset
tl1_watchdog:
	tl1_gen		T_WATCHDOG	! 0x202 watchdog reset
tl1_reset_ext:
	tl1_gen		T_RESET_EXT	! 0x203 externally initiated reset
tl1_reset_soft:
	tl1_gen		T_RESET_SOFT	! 0x204 software initiated reset
tl1_red_state:
	tl1_gen		T_RED_STATE	! 0x205 red state exception
	tl1_reserved	2		! 0x206-0x207 reserved
tl1_insn_excptn:
	tl1_insn_excptn			! 0x208 instruction access exception
	tl1_reserved	1		! 0x209 reserved
tl1_insn_error:
	tl1_gen		T_INSN_ERROR	! 0x20a instruction access error
	tl1_reserved	5		! 0x20b-0x20f reserved
tl1_insn_illegal:
	tl1_gen		T_INSN_ILLEGAL	! 0x210 illegal instruction
tl1_priv_opcode:
	tl1_gen		T_PRIV_OPCODE	! 0x211 privileged opcode
	tl1_reserved	14		! 0x212-0x21f reserved
tl1_fp_disabled:
	tl1_gen		T_FP_DISABLED	! 0x220 floating point disabled
tl1_fp_ieee:
	tl1_gen		T_FP_IEEE	! 0x221 floating point exception ieee
tl1_fp_other:
	tl1_gen		T_FP_OTHER	! 0x222 floating point exception other
tl1_tag_ovflw:
	tl1_gen		T_TAG_OVFLW	! 0x223 tag overflow
tl1_clean_window:
	clean_window			! 0x224 clean window
tl1_divide:
	tl1_gen		T_DIVIDE	! 0x228 division by zero
	tl1_reserved	7		! 0x229-0x22f reserved
tl1_data_excptn:
	tl1_data_excptn			! 0x230 data access exception
	tl1_reserved	1		! 0x231 reserved
tl1_data_error:
	tl1_gen		T_DATA_ERROR	! 0x232 data access error
	tl1_reserved	1		! 0x233 reserved
tl1_align:
	tl1_align			! 0x234 memory address not aligned
tl1_align_lddf:
	tl1_gen		T_ALIGN_LDDF	! 0x235 lddf memory address not aligned
tl1_align_stdf:
	tl1_gen		T_ALIGN_STDF	! 0x236 stdf memory address not aligned
tl1_priv_action:
	tl1_gen		T_PRIV_ACTION	! 0x237 privileged action
	tl1_reserved	9		! 0x238-0x240 reserved
tl1_intr_level:
	tl1_intr_level			! 0x241-0x24f interrupt level 1 to 15
	tl1_reserved	16		! 0x250-0x25f reserved
tl1_intr_vector:
	tl1_intr_vector			! 0x260 interrupt vector
tl1_watch_phys:
	tl1_gen		T_WATCH_PHYS	! 0x261 physical address watchpoint
tl1_watch_virt:
	tl1_gen		T_WATCH_VIRT	! 0x262 virtual address watchpoint
tl1_ecc:
	tl1_gen		T_ECC		! 0x263 corrected ecc error
tl1_immu_miss:
	tl1_immu_miss			! 0x264 fast instruction access mmu miss
tl1_dmmu_miss:
	tl1_dmmu_miss			! 0x268 fast data access mmu miss
tl1_dmmu_prot:
	tl1_dmmu_prot			! 0x26c fast data access protection
	tl1_reserved	16		! 0x270-0x27f reserved
tl1_spill_0_n:
	tl1_spill_0_n			! 0x280 spill 0 normal
	tl1_spill_bad	3		! 0x284-0x28f spill normal
tl1_spill_4_n:
	tl1_spill_4_n			! 0x290 spill 4 normal
tl1_spill_5_n:
	tl1_spill_5_n			! 0x294 spill 5 normal
tl1_spill_6_n:
	tl1_spill_6_n			! 0x298 spill 6 normal
tl1_spill_7_n:
	tl1_spill_7_n			! 0x29c spill 7 normal
tl1_spill_0_o:
	tl1_spill_0_o			! 0x2a0 spill 0 other
tl1_spill_1_o:
	tl1_spill_1_o			! 0x2a4 spill 1 other
tl1_spill_2_o:
	tl1_spill_2_o			! 0x2a8 spill 2 other
tl1_spill_3_o:
	tl1_spill_3_o			! 0x2ac spill 3 other
	tl1_spill_bad	4		! 0x2b0-0x2bf spill other
tl1_fill_0_n:
	tl1_fill_0_n			! 0x2c0 fill 0 normal
	tl1_fill_bad	3		! 0x2c4-0x2cf fill normal
tl1_fill_4_n:
	tl1_fill_4_n			! 0x2d0 fill 4 normal
tl1_fill_5_n:
	tl1_fill_5_n			! 0x2d4 fill 5 normal
tl1_fill_6_n:
	tl1_fill_6_n			! 0x2d8 fill 6 normal
tl1_fill_7_n:
	tl1_fill_7_n			! 0x2dc fill 7 normal
	tl1_fill_bad	8		! 0x2e0-0x2ff fill other
	tl1_reserved	1		! 0x300 trap instruction
tl1_breakpoint:
	tl1_breakpoint			! 0x301 breakpoint
	tl1_gen		T_RESTOREWP	! 0x302 restore watchpoint (debug)
	tl1_soft	125		! 0x303-0x37f trap instruction
	tl1_reserved	128		! 0x380-0x3ff reserved
1763
1764/*
1765 * User trap entry point.
1766 *
1767 * void tl0_trap(u_long type, u_long arg, u_long pil, u_long wstate)
1768 *
1769 * The following setup has been performed:
1770 *	- the windows have been split and the active user window has been saved
1771 *	  (maybe just to the pcb)
1772 *	- we are on the current kernel stack and a frame has been setup, there
1773 *	  may be extra trap specific stuff below the frame
1774 *	- we are on alternate globals and interrupts are disabled
1775 *
1776 * We build a trapframe, switch to normal globals, enable interrupts and call
1777 * trap.
1778 *
1779 * NOTE: Due to a chip bug, we must save the trap state registers in memory
1780 * early.
1781 *
1782 * NOTE: We must be very careful setting up the per-cpu pointer.  We know that
1783 * it has been pre-set in alternate globals, so we read it from there and setup
1784 * the normal %g7 *before* enabling interrupts.  This avoids any possibility
1785 * of cpu migration and using the wrong globalp.
1786 */
1787ENTRY(tl0_trap)
1788	/*
1789	 * Force kernel store order.
1790	 */
1791	wrpr	%g0, PSTATE_ALT, %pstate
1792
1793	sub	%sp, TF_SIZEOF, %sp
1794
1795	rdpr	%tstate, %l0
1796	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
1797	rdpr	%tpc, %l1
1798	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
1799	rdpr	%tnpc, %l2
1800	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
1801
1802	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
1803	stx	%o1, [%sp + SPOFF + CCFSZ + TF_ARG]
1804	stx	%o2, [%sp + SPOFF + CCFSZ + TF_PIL]
1805	stx	%o3, [%sp + SPOFF + CCFSZ + TF_WSTATE]
1806
1807.Ltl0_trap_fill:
1808	mov	%g7, %l0
1809	wrpr	%g0, PSTATE_NORMAL, %pstate
1810	mov	%l0, %g7	/* set up the normal %g7 */
1811	wrpr	%g0, PSTATE_KERNEL, %pstate
1812
1813	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
1814	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
1815	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
1816	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
1817	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
1818	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
1819	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]
1820
1821#if KTR_COMPILE & KTR_CT1
1822	CATR(KTR_CT1, "tl0_trap: td=%p type=%#x arg=%#lx pil=%#lx ws=%#lx"
1823	    , %g1, %g2, %g3, 7, 8, 9)
1824	ldx	[PCPU(CURTHREAD)], %g2
1825	stx	%g2, [%g1 + KTR_PARM1]
1826	stx	%o0, [%g1 + KTR_PARM2]
1827	stx	%o1, [%g1 + KTR_PARM3]
1828	stx	%o2, [%g1 + KTR_PARM4]
1829	stx	%o3, [%g1 + KTR_PARM5]
18309:
1831#endif
1832
1833	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
1834	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
1835	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
1836	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
1837	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
1838	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
1839	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
1840	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
1841
1842.Ltl0_trap_spill:
1843	call	trap
1844	 add	%sp, CCFSZ + SPOFF, %o0
1845
1846	/* Fallthough. */
1847END(tl0_trap)
1848
/* Return to tl0 (user process). */
ENTRY(tl0_ret)
#if KTR_COMPILE & KTR_CT1
	CATR(KTR_CT1, "tl0_ret: td=%p (%s) pil=%#lx sflag=%#x"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	add	%g2, P_COMM, %g3
	stx	%g3, [%g1 + KTR_PARM2]
	rdpr	%pil, %g3
	stx	%g3, [%g1 + KTR_PARM3]
	lduw	[%g2 + P_SFLAG], %g3
	stx	%g3, [%g1 + KTR_PARM4]
9:
#endif

	/* Check for a pending AST or reschedule request. */
	wrpr	%g0, PIL_TICK, %pil
	ldx	[PCPU(CURTHREAD)], %o0
	ldx	[%o0 + TD_KSE], %o0
	lduw	[%o0 + KE_FLAGS], %o1
	and	%o1, KEF_ASTPENDING | KEF_NEEDRESCHED, %o1
	brz,pt	%o1, 1f
	 nop
	call	ast
	 add	%sp, CCFSZ + SPOFF, %o0

	/*
	 * If user windows were saved to the pcb, re-enter trap() as a
	 * T_SPILL so they get copied out to the user stack.
	 */
1:	ldx	[PCPU(CURPCB)], %o0
	ldx	[%o0 + PCB_NSAVED], %o1
	mov	T_SPILL, %o0
	brnz,a,pn %o1, .Ltl0_trap_spill
	 stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]

	/* Restore the globals from the trapframe. */
	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6
	ldx	[%sp + SPOFF + CCFSZ + TF_G7], %g7

	/* Restore the user outs (into our ins for the restore below). */
	ldx	[%sp + SPOFF + CCFSZ + TF_O0], %i0
	ldx	[%sp + SPOFF + CCFSZ + TF_O1], %i1
	ldx	[%sp + SPOFF + CCFSZ + TF_O2], %i2
	ldx	[%sp + SPOFF + CCFSZ + TF_O3], %i3
	ldx	[%sp + SPOFF + CCFSZ + TF_O4], %i4
	ldx	[%sp + SPOFF + CCFSZ + TF_O5], %i5
	ldx	[%sp + SPOFF + CCFSZ + TF_O6], %i6
	ldx	[%sp + SPOFF + CCFSZ + TF_O7], %i7

	/* Reload the saved trap state. */
	ldx	[%sp + SPOFF + CCFSZ + TF_PIL], %l0
	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l1
	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l2
	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l3
	ldx	[%sp + SPOFF + CCFSZ + TF_WSTATE], %l4

	wrpr	%g0, PSTATE_ALT, %pstate

	wrpr	%l0, 0, %pil

	wrpr	%l1, 0, %tstate
	wrpr	%l2, 0, %tpc
	wrpr	%l3, 0, %tnpc

	/*
	 * Restore the user window state.
	 * NOTE: whenever we come here, it should be with %canrestore = 0.
	 */
	srlx	%l4, WSTATE_USERSHIFT, %g1
	wrpr	%g1, WSTATE_TRANSITION, %wstate
	rdpr	%otherwin, %g2
	wrpr	%g2, 0, %canrestore
	wrpr	%g0, 0, %otherwin
	wrpr	%g2, 0, %cleanwin

	/*
	 * If this instruction causes a fill trap which fails to fill a window
	 * from the user stack, we will resume at tl0_ret_fill_end and call
	 * back into the kernel.
	 */
	restore
tl0_ret_fill:

#if KTR_COMPILE & KTR_CT1
	CATR(KTR_CT1, "tl0_ret: return td=%#lx pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
	    , %g2, %g3, %g4, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g3
	stx	%g3, [%g2 + KTR_PARM1]
	rdpr	%tstate, %g3
	stx	%g3, [%g2 + KTR_PARM2]
	rdpr	%tpc, %g3
	stx	%g3, [%g2 + KTR_PARM3]
	stx	%sp, [%g2 + KTR_PARM4]
	stx	%g1, [%g2 + KTR_PARM5]
9:
#endif

	/* Drop the transition bit and return to the user. */
	wrpr	%g1, 0, %wstate
	retry
tl0_ret_fill_end:

#if KTR_COMPILE & KTR_CT1
	CATR(KTR_CT1, "tl0_ret: fill magic wstate=%#lx sp=%#lx"
	    , %l0, %l1, %l2, 7, 8, 9)
	stx	%l4, [%l0 + KTR_PARM1]
	stx	%sp, [%l0 + KTR_PARM2]
9:
#endif

	/*
	 * The fill failed and magic has been performed.  Call trap again,
	 * which will copyin the window on the user's behalf.
	 */
	wrpr	%l4, 0, %wstate
	mov	T_FILL, %o0
	b	%xcc, .Ltl0_trap_fill
	 stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
END(tl0_ret)
1966
1967/*
1968 * Kernel trap entry point
1969 *
1970 * void tl1_trap(u_long type, u_long arg, u_long pil)
1971 *
1972 * This is easy because the stack is already setup and the windows don't need
1973 * to be split.  We build a trapframe and call trap(), the same as above, but
1974 * the outs don't need to be saved.
1975 *
1976 * NOTE: See comments above tl0_trap for song and dance about chip bugs and
1977 * setting up globalp.
1978 */
1979ENTRY(tl1_trap)
1980	sub	%sp, TF_SIZEOF, %sp
1981
1982	rdpr	%tstate, %l0
1983	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
1984	rdpr	%tpc, %l1
1985	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
1986	rdpr	%tnpc, %l2
1987	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
1988
1989#if KTR_COMPILE & KTR_CT1
1990	setx	trap_mask, %l4, %l3
1991	andn	%o1, T_KERNEL, %l4
1992	mov	1, %l5
1993	sllx	%l5, %l4, %l4
1994	ldx	[%l3], %l5
1995	and	%l4, %l5, %l4
1996	brz	%l4, 9f
1997	 nop
1998	CATR(KTR_CT1, "tl1_trap: td=%p pil=%#lx type=%#lx arg=%#lx pc=%#lx"
1999	    , %l3, %l4, %l5, 7, 8, 9)
2000	ldx	[PCPU(CURTHREAD)], %l4
2001	stx	%l4, [%l3 + KTR_PARM1]
2002#if 0
2003	add	%l4, P_COMM, %l4
2004	stx	%l4, [%l3 + KTR_PARM2]
2005#else
2006	stx	%o2, [%l3 + KTR_PARM2]
2007#endif
2008	andn	%o0, T_KERNEL, %l4
2009	stx	%l4, [%l3 + KTR_PARM3]
2010	stx	%o1, [%l3 + KTR_PARM4]
2011	stx	%l1, [%l3 + KTR_PARM5]
20129:
2013#endif
2014
2015	wrpr	%g0, 1, %tl
2016	/* We may have trapped before %g7 was set up correctly. */
2017	mov	%g7, %l0
2018	wrpr	%g0, PSTATE_NORMAL, %pstate
2019	mov	%l0, %g7
2020	wrpr	%g0, PSTATE_KERNEL, %pstate
2021
2022	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2023	stx	%o1, [%sp + SPOFF + CCFSZ + TF_ARG]
2024	stx	%o2, [%sp + SPOFF + CCFSZ + TF_PIL]
2025
2026	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
2027	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
2028	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
2029	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
2030	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
2031	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
2032
2033	call	trap
2034	 add	%sp, CCFSZ + SPOFF, %o0
2035
2036	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
2037	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
2038	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
2039	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
2040	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
2041	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6
2042
2043	ldx	[%sp + SPOFF + CCFSZ + TF_PIL], %l0
2044	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l1
2045	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l2
2046	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l3
2047
2048	wrpr	%g0, PSTATE_ALT, %pstate
2049
2050	wrpr	%l0, 0, %pil
2051
2052	wrpr	%g0, 2, %tl
2053	wrpr	%l1, 0, %tstate
2054	wrpr	%l2, 0, %tpc
2055	wrpr	%l3, 0, %tnpc
2056
2057#if KTR_COMPILE & KTR_CT1
2058	ldx	[%sp + SPOFF + CCFSZ + TF_TYPE], %l5
2059	andn	%l5, T_KERNEL, %l4
2060	mov	1, %l5
2061	sllx	%l5, %l4, %l4
2062	setx	trap_mask, %l4, %l3
2063	ldx	[%l3], %l5
2064	and	%l4, %l5, %l4
2065	brz	%l4, 9f
2066	 nop
2067	CATR(KTR_CT1, "tl1_trap: return td=%p pil=%#lx sp=%#lx pc=%#lx"
2068	    , %l3, %l4, %l5, 7, 8, 9)
2069	ldx	[PCPU(CURTHREAD)], %l4
2070	stx	%l4, [%l3 + KTR_PARM1]
2071	stx	%l0, [%l3 + KTR_PARM2]
2072	stx	%sp, [%l3 + KTR_PARM3]
2073	stx	%l2, [%l3 + KTR_PARM4]
20749:
2075#endif
2076
2077	restore
2078	retry
2079END(tl1_trap)
2080
2081/*
2082 * Freshly forked processes come here when switched to for the first time.
2083 * The arguments to fork_exit() have been setup in the locals, we must move
2084 * them to the outs.
2085 */
2086ENTRY(fork_trampoline)
2087#if KTR_COMPILE & KTR_CT1
2088	CATR(KTR_CT1, "fork_trampoline: td=%p (%s) cwp=%#lx"
2089	    , %g1, %g2, %g3, 7, 8, 9)
2090	ldx	[PCPU(CURTHREAD)], %g2
2091	stx	%g2, [%g1 + KTR_PARM1]
2092	add	%g2, P_COMM, %g2
2093	stx	%g2, [%g1 + KTR_PARM2]
2094	rdpr	%cwp, %g2
2095	stx	%g2, [%g1 + KTR_PARM3]
20969:
2097#endif
2098	mov	%l0, %o0
2099	mov	%l1, %o1
2100	mov	%l2, %o2
2101	call	fork_exit
2102	 nop
2103	b,a	%xcc, tl0_ret
2104END(fork_trampoline)
2105