/*-
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI: locore.s,v 1.36.2.15 1999/08/23 22:34:41 cp Exp
 */
/*-
 * Copyright (c) 2001 Jake Burkholder.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/sparc64/sparc64/exception.S 82906 2001-09-03 23:10:45Z jake $
 */

#include "opt_ddb.h"

#include <machine/asi.h>
#include <machine/asmacros.h>
#include <machine/ktr.h>
#include <machine/pstate.h>
#include <machine/trap.h>
#include <machine/tstate.h>
#include <machine/wstate.h>

#include "assym.s"

/*
 * Macros for spilling and filling live windows.
 *
 * NOTE: These macros use exactly 16 instructions, and it is assumed that the
 * handler will not use more than 24 instructions total, to leave room for
 * resume vectors which occupy the last 8 instructions.
 */

#define	SPILL(storer, base, size, asi) \
	storer	%l0, [base + (0 * size)] asi ; \
	storer	%l1, [base + (1 * size)] asi ; \
	storer	%l2, [base + (2 * size)] asi ; \
	storer	%l3, [base + (3 * size)] asi ; \
	storer	%l4, [base + (4 * size)] asi ; \
	storer	%l5, [base + (5 * size)] asi ; \
	storer	%l6, [base + (6 * size)] asi ; \
	storer	%l7, [base + (7 * size)] asi ; \
	storer	%i0, [base + (8 * size)] asi ; \
	storer	%i1, [base + (9 * size)] asi ; \
	storer	%i2, [base + (10 * size)] asi ; \
	storer	%i3, [base + (11 * size)] asi ; \
	storer	%i4, [base + (12 * size)] asi ; \
	storer	%i5, [base + (13 * size)] asi ; \
	storer	%i6, [base + (14 * size)] asi ; \
	storer	%i7, [base + (15 * size)] asi

#define	FILL(loader, base, size, asi) \
	loader	[base + (0 * size)] asi, %l0 ; \
	loader	[base + (1 * size)] asi, %l1 ; \
	loader	[base + (2 * size)] asi, %l2 ; \
	loader	[base + (3 * size)] asi, %l3 ; \
	loader	[base + (4 * size)] asi, %l4 ; \
	loader	[base + (5 * size)] asi, %l5 ; \
	loader	[base + (6 * size)] asi, %l6 ; \
	loader	[base + (7 * size)] asi, %l7 ; \
	loader	[base + (8 * size)] asi, %i0 ; \
	loader	[base + (9 * size)] asi, %i1 ; \
	loader	[base + (10 * size)] asi, %i2 ; \
	loader	[base + (11 * size)] asi, %i3 ; \
	loader	[base + (12 * size)] asi, %i4 ; \
	loader	[base + (13 * size)] asi, %i5 ; \
	loader	[base + (14 * size)] asi, %i6 ; \
	loader	[base + (15 * size)] asi, %i7
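
/*
 * For illustration, SPILL(stxa, %sp + SPOFF, 8, %asi) expands to 16 stores,
 * one per local and in register of the window being spilled:
 *
 *	stxa	%l0, [%sp + SPOFF + (0 * 8)] %asi
 *	stxa	%l1, [%sp + SPOFF + (1 * 8)] %asi
 *	...
 *	stxa	%i7, [%sp + SPOFF + (15 * 8)] %asi
 *
 * FILL is the mirror image, loading %l0-%l7 and %i0-%i7.
 */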

#define	ERRATUM50(reg)	mov reg, reg

/*
 * Magic to resume from a spill or fill trap.  If we get an alignment or an
 * mmu fault during a spill or a fill, this macro will detect the fault and
 * resume at a set instruction offset in the trap handler, which will try to
 * get help.
 *
 * To check if the previous trap was a spill/fill we convert the trapped
 * pc to a trap type and verify that it is in the range of spill/fill vectors.
 * The spill/fill vectors are types 0x80-0xff and 0x280-0x2ff, masking off the
 * tl bit allows us to detect both ranges with one test.
 *
 * This is:
 *	(((%tpc - %tba) >> 5) & ~0x200) >= 0x80 && <= 0xff
 *
 * Values outside of the trap table will produce negative or large positive
 * results.
 *
 * To calculate the new pc we take advantage of the xor feature of wrpr.
 * Forcing all the low bits of the trapped pc on we can produce any offset
 * into the spill/fill vector.  The size of a spill/fill trap vector is 0x80.
 *
 *	0x7f ^ 0x1f == 0x60
 *	0x1f == (0x80 - 0x60) - 1
 *
 * Which are the offset and xor value used to resume from mmu faults.
 */
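
/*
 * Expressed in C, the check and the pc rewrite are roughly the following
 * (a sketch for illustration only; the real work is done with rdpr/wrpr in
 * RESUME_SPILLFILL_MAGIC below):
 *
 *	tt = ((tpc - tba) >> 5) & ~0x200;
 *	if (tt >= 0x80 && tt <= 0xff)
 *		tnpc = (tpc | 0x7f) ^ xor;
 *
 * Since (tpc | 0x7f) has all of the low 7 bits set, xoring with 0x1f
 * clears exactly the bits needed to leave offset 0x7f ^ 0x1f == 0x60.
 */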

/*
 * If a spill/fill trap is not detected this macro will branch to the label l1.
 * Otherwise the caller should do any necessary cleanup and execute a done.
 */
#define	RESUME_SPILLFILL_MAGIC(r1, r2, xor, l1) \
	rdpr	%tpc, r1 ; \
	ERRATUM50(r1) ; \
	rdpr	%tba, r2 ; \
	sub	r1, r2, r2 ; \
	srlx	r2, 5, r2 ; \
	andn	r2, 0x200, r2 ; \
	sub	r2, 0x80, r2 ; \
	brlz	r2, l1 ; \
	 sub	r2, 0x7f, r2 ; \
	brgz	r2, l1 ; \
	 or	r1, 0x7f, r1 ; \
	wrpr	r1, xor, %tnpc ; \

#define	RSF_XOR(off)	((0x80 - off) - 1)

/*
 * Instruction offsets in spill and fill trap handlers for handling certain
 * nested traps, and corresponding xor constants for wrpr.
 */
#define	RSF_OFF_MMU	0x60
#define	RSF_OFF_ALIGN	0x70

#define	RSF_MMU		RSF_XOR(RSF_OFF_MMU)
#define	RSF_ALIGN	RSF_XOR(RSF_OFF_ALIGN)
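
/*
 * Plugging in the offsets: RSF_MMU == RSF_XOR(0x60) == 0x1f and
 * RSF_ALIGN == RSF_XOR(0x70) == 0x0f, so (%tpc | 0x7f) ^ RSF_MMU resumes
 * at vector offset 0x60 and (%tpc | 0x7f) ^ RSF_ALIGN at offset 0x70.
 */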

/*
 * Constant to add to %tnpc when taking a fill trap just before returning to
 * user mode.  The instruction sequence looks like restore, wrpr, retry; we
 * want to skip over the wrpr and retry and execute code to call back into the
 * kernel.  It is useful to add tracing between these instructions, which would
 * change the size of the sequence, so we demarcate with labels and subtract.
 */
#define	RSF_FILL_INC	tl0_ret_fill_end - tl0_ret_fill

/*
 * Retry a spill or fill with a different wstate due to an alignment fault.
 * We may just be using the wrong stack offset.
 */
#define	RSF_ALIGN_RETRY(ws) \
	wrpr	%g0, (ws), %wstate ; \
	retry ; \
	.align	16

/*
 * Generate a T_SPILL or T_FILL trap if the window operation fails.
 */
#define	RSF_TRAP(type) \
	b	%xcc, tl0_sftrap ; \
	 mov	type, %g2 ; \
	.align	16

/*
 * Game over if the window operation fails.
 */
#define	RSF_FATAL(type) \
	sir	type ; \
	.align	16

/*
 * Magic to resume from a failed fill a few instructions after the
 * corresponding restore.  This is used on return from the kernel to user
 * mode.
 */
#define	RSF_FILL_MAGIC \
	rdpr	%tnpc, %g1 ; \
	add	%g1, RSF_FILL_INC, %g1 ; \
	wrpr	%g1, 0, %tnpc ; \
	done ; \
	.align	16

/*
 * Spill to the pcb if a spill to the user stack in kernel mode fails.
 */
#define	RSF_SPILL_TOPCB \
	b,a	%xcc, tl1_spill_topcb ; \
	 nop ; \
	.align	16
224
225DATA(intrnames)
226	.asciz	"foo"
227DATA(eintrnames)
228
229DATA(intrcnt)
230	.long	0
231DATA(eintrcnt)

/*
 * Trap table and associated macros
 *
 * Due to its size a trap table is an inherently hard thing to represent in
 * code in a clean way.  There are approximately 1024 vectors, of 8 or 32
 * instructions each, many of which are identical.  The way that this is
 * laid out is that the instructions (8 or 32) for the actual trap vector
 * appear as an AS macro.  In general this code branches to tl0_trap or
 * tl1_trap, but if not, supporting code can be placed just after the
 * definition of the macro.  The macros are then instantiated in a different
 * section (.trap), which is set up to be placed by the linker at the
 * beginning of .text, and the code around the macros is moved to the end of
 * the trap table.  In this way the code that must be sequential in memory
 * can be split up, and located near its supporting code so that it is easier
 * to follow.
 */
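
/*
 * As a rough size check: 512 tl0 plus 512 tl1 vectors of 32 bytes each
 * (the 32 instruction spill/fill vectors use 4 slots, or 128 bytes) give
 * a 32KB table, which is why tl0_base below is aligned to 0x8000.
 */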

	/*
	 * Clean window traps occur when %cleanwin is zero to ensure that data
	 * is not leaked between address spaces in registers.
	 */
	.macro	clean_window
	clr	%o0
	clr	%o1
	clr	%o2
	clr	%o3
	clr	%o4
	clr	%o5
	clr	%o6
	clr	%o7
	clr	%l0
	clr	%l1
	clr	%l2
	clr	%l3
	clr	%l4
	clr	%l5
	clr	%l6
	rdpr	%cleanwin, %l7
	inc	%l7
	wrpr	%l7, 0, %cleanwin
	clr	%l7
	retry
	.align	128
	.endm

	/*
	 * Stack fixups for entry from user mode.  We are still running on the
	 * user stack, and with its live registers, so we must save soon.  We
	 * are on alternate globals so we do have some registers.  Set the
	 * transitional window state, save, and call a routine to get onto
	 * the kernel stack.  If the save traps we attempt to spill a window
	 * to the user stack.  If this fails, we spill the window to the pcb
	 * and continue.
	 *
	 * NOTE: Must be called with alternate globals and clobbers %g1.
	 */

	.macro	tl0_kstack
	rdpr	%wstate, %g1
	wrpr	%g1, WSTATE_TRANSITION, %wstate
	save
	call	tl0_kstack_fixup
	 rdpr	%canrestore, %o0
	.endm

	.macro	tl0_setup	type
	tl0_kstack
	rdpr	%pil, %o2
	b	%xcc, tl0_trap
	 mov	\type, %o0
	.endm

/*
 * Setup the kernel stack and split the register windows when faulting from
 * user space.
 * %canrestore is passed in %o0 and %wstate in (alternate) %g1.
 */
ENTRY(tl0_kstack_fixup)
	mov	%g1, %o3
	and	%o3, WSTATE_MASK, %o1
	sllx	%o1, WSTATE_USERSHIFT, %o1
	wrpr	%o1, 0, %wstate
	wrpr	%o0, 0, %otherwin
	wrpr	%g0, 0, %canrestore
	ldx	[PCPU(CURPCB)], %o0
	set	UPAGES * PAGE_SIZE - SPOFF - CCFSZ, %o1
	retl
	 add	%o0, %o1, %sp
END(tl0_kstack_fixup)
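
/*
 * In C-like terms the fixup above does approximately the following (a
 * sketch only; the names mirror the registers used in the assembly):
 *
 *	wstate = (wstate & WSTATE_MASK) << WSTATE_USERSHIFT;
 *	otherwin = canrestore;
 *	canrestore = 0;
 *	sp = curpcb + UPAGES * PAGE_SIZE - SPOFF - CCFSZ;
 *
 * That is, the user's windows are moved to %otherwin so that further
 * spills go through the user (other) spill handlers, and %sp is pointed
 * at the top of the kernel stack.
 */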

	/*
	 * Generic trap type.  Call trap() with the specified type.
	 */
	.macro	tl0_gen		type
	tl0_setup \type
	.align	32
	.endm

	.macro	tl0_wide	type
	tl0_setup \type
	.align	128
	.endm

	/*
	 * This is used to suck up the massive swaths of reserved trap types.
	 * Generates count "reserved" trap vectors.
	 */
	.macro	tl0_reserved	count
	.rept	\count
	tl0_gen	T_RESERVED
	.endr
	.endm

	/*
	 * NOTE: we cannot use mmu globals here because tl0_kstack may cause
	 * an mmu fault.
	 */
	.macro	tl0_data_excptn
	wrpr	%g0, PSTATE_ALT, %pstate
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g5
	b	%xcc, tl0_sfsr_trap
	 mov	T_DATA_EXCPTN, %g2
	.align	32
	.endm

	.macro	tl0_align
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	b	%xcc, tl0_sfsr_trap
	 mov	T_ALIGN, %g2
	.align	32
	.endm

ENTRY(tl0_sfsr_trap)
	/*
	 * Clear the sfsr.
	 */
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	/*
	 * Get onto the kernel stack, save the mmu registers, and call
	 * common code.
	 */
	tl0_kstack
	sub	%sp, MF_SIZEOF, %sp
	stx	%g3, [%sp + SPOFF + CCFSZ + MF_SFAR]
	stx	%g4, [%sp + SPOFF + CCFSZ + MF_SFSR]
	stx	%g5, [%sp + SPOFF + CCFSZ + MF_TAR]
	rdpr	%pil, %o2
	add	%sp, SPOFF + CCFSZ, %o1
	b	%xcc, tl0_trap
	 mov	%g2, %o0
END(tl0_sfsr_trap)

	.macro	tl0_intr level, mask
	tl0_kstack
	set	\mask, %o2
	b	%xcc, tl0_intr_call_trap
	 mov	\level, %o1
	.align	32
	.endm

/*
 * Actually call tl0_trap, and do some work that cannot be done in tl0_intr
 * because of space constraints.
 */
ENTRY(tl0_intr_call_trap)
	wr	%o2, 0, %asr21
	rdpr	%pil, %o2
	wrpr	%g0, %o1, %pil
	b	%xcc, tl0_trap
	 mov	T_INTR, %o0
END(tl0_intr_call_trap)

#define	INTR(level, traplvl)						\
	tl ## traplvl ## _intr	level, 1 << level

#define	TICK(traplvl) \
	tl ## traplvl ## _intr	PIL_TICK, 1

#define	INTR_LEVEL(tl)							\
	INTR(1, tl) ;							\
	INTR(2, tl) ;							\
	INTR(3, tl) ;							\
	INTR(4, tl) ;							\
	INTR(5, tl) ;							\
	INTR(6, tl) ;							\
	INTR(7, tl) ;							\
	INTR(8, tl) ;							\
	INTR(9, tl) ;							\
	INTR(10, tl) ;							\
	INTR(11, tl) ;							\
	INTR(12, tl) ;							\
	INTR(13, tl) ;							\
	TICK(tl) ;							\
	INTR(15, tl) ;
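
/*
 * For example, INTR(3, 0) token-pastes to "tl0_intr 3, 1 << 3", which
 * instantiates the tl0_intr macro for PIL 3 with softint mask 0x8;
 * TICK(0) instantiates it for PIL_TICK with mask 1.
 */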

	.macro	tl0_intr_level
	INTR_LEVEL(0)
	.endm

	.macro	tl0_intr_vector
	b,a	intr_enqueue
	.align	32
	.endm

	.macro	tl0_immu_miss
	/*
	 * Force kernel store order.
	 */
	wrpr	%g0, PSTATE_MMU, %pstate

	/*
	 * Extract the 8KB pointer and convert to an index.
	 */
	ldxa	[%g0] ASI_IMMU_TSB_8KB_PTR_REG, %g1
	srax	%g1, TTE_SHIFT, %g1

	/*
	 * Compute the stte address in the primary used tsb.
	 */
	and	%g1, (1 << TSB_PRIMARY_MASK_WIDTH) - 1, %g2
	sllx	%g2, TSB_PRIMARY_STTE_SHIFT, %g2
	setx	TSB_USER_MIN_ADDRESS, %g4, %g3
	add	%g2, %g3, %g2

	/*
	 * Preload the tte tag target.
	 */
	ldxa	[%g0] ASI_IMMU_TAG_TARGET_REG, %g3

	/*
	 * Preload tte data bits to check inside the bucket loop.
	 */
	and	%g1, TD_VA_LOW_MASK >> TD_VA_LOW_SHIFT, %g4
	sllx	%g4, TD_VA_LOW_SHIFT, %g4
	or	%g4, TD_EXEC, %g4

	/*
	 * Preload mask for tte data check.
	 */
	setx	TD_VA_LOW_MASK, %g5, %g1
	or	%g1, TD_EXEC, %g1

	/*
	 * Loop over the sttes in this bucket
	 */

	/*
	 * Load the tte.
	 */
1:	ldda	[%g2] ASI_NUCLEUS_QUAD_LDD, %g6

	/*
	 * Compare the tag.
	 */
	cmp	%g6, %g3
	bne,pn	%xcc, 2f

	/*
	 * Compare the data.
	 */
	 xor	%g7, %g4, %g5
	brgez,pn %g7, 2f
	 andcc	%g5, %g1, %g0
	bnz,pn	%xcc, 2f

	/*
	 * We matched a tte, load the tlb.
	 */

	/*
	 * Set the reference bit, if it's currently clear.
	 */
	 andcc	%g7, TD_REF, %g0
	bz,a,pn	%xcc, tl0_immu_miss_set_ref
	 nop

	/*
	 * Load the tte data into the tlb and retry the instruction.
	 */
	stxa	%g7, [%g0] ASI_ITLB_DATA_IN_REG
	retry

	/*
	 * Check the low bits to see if we've finished the bucket.
	 */
2:	add	%g2, STTE_SIZEOF, %g2
	andcc	%g2, TSB_PRIMARY_STTE_MASK, %g0
	bnz	%xcc, 1b
	 nop
	b,a	%xcc, tl0_immu_miss_trap
	.align	128
	.endm
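
/*
 * In rough C the inline lookup above is the following (a sketch for
 * illustration; stte/tte mirror the assembly, not an exact kernel API):
 *
 *	stte = tsb + (index & ((1 << TSB_PRIMARY_MASK_WIDTH) - 1));
 *	do {
 *		tag = stte->tte.tag;
 *		data = stte->tte.data;
 *		if (tag == tag_target && (int64_t)data < 0 &&	// TD_V set
 *		    ((data ^ expect) & (TD_VA_LOW_MASK | TD_EXEC)) == 0) {
 *			if ((data & TD_REF) == 0)
 *				goto set_ref;
 *			load_itlb(data);	// stxa to ASI_ITLB_DATA_IN_REG
 *			retry();
 *		}
 *	} while (++stte is still within the bucket);
 *	goto tl0_immu_miss_trap;
 */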

ENTRY(tl0_immu_miss_set_ref)
	/*
	 * Set the reference bit.
	 */
	add	%g2, TTE_DATA, %g2
1:	or	%g7, TD_REF, %g1
	casxa	[%g2] ASI_N, %g7, %g1
	cmp	%g1, %g7
	bne,a,pn %xcc, 1b
	 mov	%g1, %g7

#if KTR_COMPILE & KTR_CT1
	CATR(KTR_CT1, "tl0_immu_miss: set ref"
	    , %g2, %g3, %g4, 7, 8, 9)
9:
#endif

	/*
	 * May have become invalid, in which case start over.
	 */
	brgez,pn %g1, 2f
	 nop

	/*
	 * Load the tte data into the tlb and retry the instruction.
	 */
	stxa	%g1, [%g0] ASI_ITLB_DATA_IN_REG
2:	retry
END(tl0_immu_miss_set_ref)
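
/*
 * The reference bit update above is a standard compare-and-swap retry
 * loop; in C-ish pseudocode (illustrative only):
 *
 *	do {
 *		old = tte->data;
 *	} while (casxa(&tte->data, old, old | TD_REF) != old);
 *	if ((int64_t)(old | TD_REF) < 0)	// tte still valid?
 *		load_itlb(old | TD_REF);
 *	retry();
 *
 * dmmu_miss_user_set_ref below is the same loop for the data tlb.
 */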

ENTRY(tl0_immu_miss_trap)
	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Load the tar; the sfar and sfsr aren't valid.
	 */
	wr	%g0, ASI_IMMU, %asi
	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2

#if KTR_COMPILE & KTR_CT1
	CATR(KTR_CT1, "tl0_immu_miss: trap sp=%#lx tar=%#lx"
	    , %g3, %g4, %g5, 7, 8, 9)
	stx	%sp, [%g3 + KTR_PARM1]
	stx	%g2, [%g3 + KTR_PARM2]
9:
#endif

	/*
	 * Save the mmu registers on the stack, and call common trap code.
	 */
	tl0_kstack
	sub	%sp, MF_SIZEOF, %sp
	stx	%g2, [%sp + SPOFF + CCFSZ + MF_TAR]
	rdpr	%pil, %o2
	add	%sp, SPOFF + CCFSZ, %o1
	b	%xcc, tl0_trap
	 mov	T_IMMU_MISS, %o0
END(tl0_immu_miss_trap)

	.macro	dmmu_miss_user
	/*
	 * Extract the 8KB pointer and convert to an index.
	 */
	ldxa	[%g0] ASI_DMMU_TSB_8KB_PTR_REG, %g1
	srax	%g1, TTE_SHIFT, %g1

	/*
	 * Compute the stte address in the primary used tsb.
	 */
	and	%g1, (1 << TSB_PRIMARY_MASK_WIDTH) - 1, %g2
	sllx	%g2, TSB_PRIMARY_STTE_SHIFT, %g2
	setx	TSB_USER_MIN_ADDRESS, %g4, %g3
	add	%g2, %g3, %g2

	/*
	 * Preload the tte tag target.
	 */
	ldxa	[%g0] ASI_DMMU_TAG_TARGET_REG, %g3

	/*
	 * Preload tte data bits to check inside the bucket loop.
	 */
	and	%g1, TD_VA_LOW_MASK >> TD_VA_LOW_SHIFT, %g4
	sllx	%g4, TD_VA_LOW_SHIFT, %g4

	/*
	 * Preload mask for tte data check.
	 */
	setx	TD_VA_LOW_MASK, %g5, %g1

	/*
	 * Loop over the sttes in this bucket
	 */

	/*
	 * Load the tte.
	 */
1:	ldda	[%g2] ASI_NUCLEUS_QUAD_LDD, %g6

	/*
	 * Compare the tag.
	 */
	cmp	%g6, %g3
	bne,pn	%xcc, 2f

	/*
	 * Compare the data.
	 */
	 xor	%g7, %g4, %g5
	brgez,pn %g7, 2f
	 andcc	%g5, %g1, %g0
	bnz,pn	%xcc, 2f

	/*
	 * We matched a tte, load the tlb.
	 */

	/*
	 * Set the reference bit, if it's currently clear.
	 */
	 andcc	%g7, TD_REF, %g0
	bz,a,pn	%xcc, dmmu_miss_user_set_ref
	 nop

	/*
	 * Load the tte data into the tlb and retry the instruction.
	 */
	stxa	%g7, [%g0] ASI_DTLB_DATA_IN_REG
	retry

	/*
	 * Check the low bits to see if we've finished the bucket.
	 */
2:	add	%g2, STTE_SIZEOF, %g2
	andcc	%g2, TSB_PRIMARY_STTE_MASK, %g0
	bnz	%xcc, 1b
	 nop
	.endm

ENTRY(dmmu_miss_user_set_ref)
	/*
	 * Set the reference bit.
	 */
	add	%g2, TTE_DATA, %g2
1:	or	%g7, TD_REF, %g1
	casxa	[%g2] ASI_N, %g7, %g1
	cmp	%g1, %g7
	bne,a,pn %xcc, 1b
	 mov	%g1, %g7

#if KTR_COMPILE & KTR_CT1
	CATR(KTR_CT1, "tl0_dmmu_miss: set ref"
	    , %g2, %g3, %g4, 7, 8, 9)
9:
#endif

	/*
	 * May have become invalid, in which case start over.
	 */
	brgez,pn %g1, 2f
	 nop

	/*
	 * Load the tte data into the tlb and retry the instruction.
	 */
	stxa	%g1, [%g0] ASI_DTLB_DATA_IN_REG
2:	retry
END(dmmu_miss_user_set_ref)

	.macro	tl0_dmmu_miss
	/*
	 * Force kernel store order.
	 */
	wrpr	%g0, PSTATE_MMU, %pstate

	/*
	 * Try a fast inline lookup of the primary tsb.
	 */
	dmmu_miss_user

	/*
	 * Not in primary tsb, call c code.  Nothing else fits inline.
	 */
	b,a	tl0_dmmu_miss_trap
	.align	128
	.endm

ENTRY(tl0_dmmu_miss_trap)
	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Load the tar; the sfar and sfsr aren't valid.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2

#if KTR_COMPILE & KTR_CT1
	CATR(KTR_CT1, "tl0_dmmu_miss: trap sp=%#lx tar=%#lx"
	    , %g3, %g4, %g5, 7, 8, 9)
	stx	%sp, [%g3 + KTR_PARM1]
	stx	%g2, [%g3 + KTR_PARM2]
9:
#endif

	/*
	 * Save the mmu registers on the stack and call common trap code.
	 */
	tl0_kstack
	sub	%sp, MF_SIZEOF, %sp
	stx	%g2, [%sp + SPOFF + CCFSZ + MF_TAR]
	rdpr	%pil, %o2
	add	%sp, SPOFF + CCFSZ, %o1
	b	%xcc, tl0_trap
	 mov	T_DMMU_MISS, %o0
END(tl0_dmmu_miss_trap)

	.macro	tl0_dmmu_prot
	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Load the tar, sfar and sfsr, and clear the sfsr.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	/*
	 * Save the mmu registers on the stack and call common trap code.
	 */
	tl0_kstack
	sub	%sp, MF_SIZEOF, %sp
	stx	%g2, [%sp + SPOFF + CCFSZ + MF_TAR]
	stx	%g3, [%sp + SPOFF + CCFSZ + MF_SFAR]
	stx	%g4, [%sp + SPOFF + CCFSZ + MF_SFSR]
	rdpr	%pil, %o2
	add	%sp, SPOFF + CCFSZ, %o1
	b	%xcc, tl0_trap
	 mov	T_DMMU_PROT, %o0
	.align	128
	.endm

	.macro	tl0_spill_0_n
	andcc	%sp, 1, %g0
	bz,pn	%xcc, 2f
	 wr	%g0, ASI_AIUP, %asi
1:	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	wrpr	%g0, WSTATE_ASSUME64, %wstate
	retry
	.align	32
	RSF_TRAP(T_SPILL)
	RSF_TRAP(T_SPILL)
	.endm

	.macro	tl0_spill_1_n
	andcc	%sp, 1, %g0
	bnz	%xcc, 1b
	 wr	%g0, ASI_AIUP, %asi
2:	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	SPILL(stwa, %sp, 4, %asi)
	saved
	wrpr	%g0, WSTATE_ASSUME32, %wstate
	retry
	.align	32
	RSF_TRAP(T_SPILL)
	RSF_TRAP(T_SPILL)
	.endm

	.macro	tl0_spill_2_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_ALIGN_RETRY(WSTATE_TEST32)
	RSF_TRAP(T_SPILL)
	.endm

	.macro	tl0_spill_3_n
	wr	%g0, ASI_AIUP, %asi
	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_ALIGN_RETRY(WSTATE_TEST64)
	RSF_TRAP(T_SPILL)
	.endm

	.macro	tl0_fill_0_n
	andcc	%sp, 1, %g0
	bz,pn	%xcc, 2f
	 wr	%g0, ASI_AIUP, %asi
1:	FILL(ldxa, %sp + SPOFF, 8, %asi)
	restored
	wrpr	%g0, WSTATE_ASSUME64, %wstate
	retry
	.align	32
	RSF_TRAP(T_FILL)
	RSF_TRAP(T_FILL)
	.endm

	.macro	tl0_fill_1_n
	andcc	%sp, 1, %g0
	bnz	%xcc, 1b
	 wr	%g0, ASI_AIUP, %asi
2:	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	FILL(lduwa, %sp, 4, %asi)
	restored
	wrpr	%g0, WSTATE_ASSUME32, %wstate
	retry
	.align	32
	RSF_TRAP(T_FILL)
	RSF_TRAP(T_FILL)
	.endm

	.macro	tl0_fill_2_n
	wr	%g0, ASI_AIUP, %asi
	FILL(ldxa, %sp + SPOFF, 8, %asi)
	restored
	retry
	.align	32
	RSF_ALIGN_RETRY(WSTATE_TEST32)
	RSF_TRAP(T_FILL)
	.endm

	.macro	tl0_fill_3_n
	wr	%g0, ASI_AIUP, %asi
	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	FILL(lduwa, %sp, 4, %asi)
	restored
	retry
	.align	32
	RSF_ALIGN_RETRY(WSTATE_TEST64)
	RSF_TRAP(T_FILL)
	.endm

ENTRY(tl0_sftrap)
	rdpr	%tstate, %g1
	and	%g1, TSTATE_CWP_MASK, %g1
	wrpr	%g1, 0, %cwp
	tl0_kstack
	rdpr	%pil, %o2
	b	%xcc, tl0_trap
	 mov	%g2, %o0
END(tl0_sftrap)

	.macro	tl0_spill_bad	count
	.rept	\count
	tl0_wide T_SPILL
	.endr
	.endm

	.macro	tl0_fill_bad	count
	.rept	\count
	tl0_wide T_FILL
	.endr
	.endm

	.macro	tl0_soft	count
	.rept	\count
	tl0_gen	T_SOFT
	.endr
	.endm

	.macro	tl1_kstack
	save	%sp, -CCFSZ, %sp
	.endm

	.macro	tl1_setup	type
	tl1_kstack
	mov	\type | T_KERNEL, %o0
	b	%xcc, tl1_trap
	 rdpr	%pil, %o2
	.endm

	.macro	tl1_gen		type
	tl1_setup \type
	.align	32
	.endm

	.macro	tl1_wide	type
	tl1_setup \type
	.align	128
	.endm

	.macro	tl1_reserved	count
	.rept	\count
	tl1_gen	T_RESERVED
	.endr
	.endm

	.macro	tl1_insn_excptn
	tl1_kstack
	wrpr	%g0, PSTATE_ALT, %pstate
	rdpr	%pil, %o2
	b	%xcc, tl1_trap
	 mov	T_INSN_EXCPTN | T_KERNEL, %o0
	.align	32
	.endm

	.macro	tl1_data_excptn
	b,a	%xcc, tl1_data_exceptn_trap
	 nop
	.align	32
	.endm
ENTRY(tl1_data_exceptn_trap)
	wr	%g0, ASI_DMMU, %asi
	RESUME_SPILLFILL_MAGIC(%g1, %g2, RSF_MMU, 1f)
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	done

1:	wrpr	%g0, PSTATE_ALT, %pstate
	b	%xcc, tl1_sfsr_trap
	 mov	T_DATA_EXCPTN | T_KERNEL, %g1
END(tl1_data_exceptn_trap)

	/*
	 * NOTE: We switch to mmu globals here, to avoid needing to save
	 * alternates, which may be live.
	 */
	.macro	tl1_align
	b	%xcc, tl1_align_trap
	 wrpr	%g0, PSTATE_MMU, %pstate
	.align	32
	.endm

ENTRY(tl1_align_trap)
	wr	%g0, ASI_DMMU, %asi
	RESUME_SPILLFILL_MAGIC(%g1, %g2, RSF_ALIGN, 1f)
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	done

1:	wrpr	%g0, PSTATE_ALT, %pstate
	b	%xcc, tl1_sfsr_trap
	 mov	T_ALIGN | T_KERNEL, %g1
END(tl1_align_trap)
ENTRY(tl1_sfsr_trap)
!	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g2
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g3
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	tl1_kstack
	sub	%sp, MF_SIZEOF, %sp
	stx	%g2, [%sp + SPOFF + CCFSZ + MF_SFAR]
	stx	%g3, [%sp + SPOFF + CCFSZ + MF_SFSR]
	rdpr	%pil, %o2
	add	%sp, SPOFF + CCFSZ, %o1
	b	%xcc, tl1_trap
	 mov	%g1, %o0
END(tl1_sfsr_trap)

	.macro	tl1_intr level, mask, type
	tl1_kstack
	rdpr	%pil, %o2
	wrpr	%g0, \level, %pil
	set	\mask, %o3
	wr	%o3, 0, %asr21
	mov	T_INTR | T_KERNEL, %o0
	b	%xcc, tl1_trap
	 mov	\level, %o1
	.align	32
	.endm

	.macro	tl1_intr_level
	INTR_LEVEL(1)
	.endm

	.macro	tl1_intr_vector
	b,a	intr_enqueue
	.align	32
	.endm

ENTRY(intr_enqueue)
#if KTR_COMPILE & KTR_CT1
	CATR(KTR_CT1, "intr_enqueue: p=%p (%s) tl=%#lx pc=%#lx sp=%#lx"
	   , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURPROC)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	add	%g2, P_COMM, %g2
	stx	%g2, [%g1 + KTR_PARM2]
	rdpr	%tl, %g2
	stx	%g2, [%g1 + KTR_PARM3]
	rdpr	%tpc, %g2
	stx	%g2, [%g1 + KTR_PARM4]
	stx	%sp, [%g1 + KTR_PARM5]
9:
#endif

	/*
	 * Find the head of the queue and advance it.
	 */
	ldx	[PCPU(IQ)], %g1
	ldx	[%g1 + IQ_HEAD], %g2
	add	%g2, 1, %g3
	and	%g3, IQ_MASK, %g3
	stx	%g3, [%g1 + IQ_HEAD]

#if KTR_COMPILE & KTR_CT1
	CATR(KTR_CT1, "intr_enqueue: cpu=%d head=%d tail=%d iqe=%d"
	    , %g4, %g5, %g6, 7, 8, 9)
	lduw	[PCPU(CPUID)], %g5
	stx	%g5, [%g4 + KTR_PARM1]
	stx	%g3, [%g4 + KTR_PARM2]
	ldx	[%g1 + IQ_TAIL], %g5
	stx	%g5, [%g4 + KTR_PARM3]
	stx	%g2, [%g4 + KTR_PARM4]
9:
#endif

#ifdef INVARIANTS
	/*
	 * If the new head is the same as the tail, the next interrupt will
	 * overwrite unserviced packets.  This is bad.
	 */
	ldx	[%g1 + IQ_TAIL], %g4
	cmp	%g4, %g3
	be	%xcc, 3f
	 nop
#endif

	/*
	 * Load the interrupt packet from the hardware.
	 */
	wr	%g0, ASI_SDB_INTR_R, %asi
	ldxa	[%g0] ASI_INTR_RECEIVE, %g3
	ldxa	[%g0 + AA_SDB_INTR_D0] %asi, %g4
	ldxa	[%g0 + AA_SDB_INTR_D1] %asi, %g5
	ldxa	[%g0 + AA_SDB_INTR_D2] %asi, %g6
	stxa	%g0, [%g0] ASI_INTR_RECEIVE
	membar	#Sync

	/*
	 * Store the tag and first data word in the iqe.  These are always
	 * valid.
	 */
	sllx	%g2, IQE_SHIFT, %g2
	add	%g2, %g1, %g2
	stw	%g3, [%g2 + IQE_TAG]
	stx	%g4, [%g2 + IQE_VEC]

	/*
	 * Find the interrupt vector associated with this source.
	 */
	ldx	[PCPU(IVT)], %g3
	sllx	%g4, IV_SHIFT, %g4

	/*
	 * If the 2nd data word, the function, is zero the actual function
	 * and argument are in the interrupt vector table, so retrieve them.
	 * The function is used as a lock on the vector data.  If it can be
	 * read atomically as non-zero, the argument and priority are valid.
	 * Otherwise this is either a true stray interrupt, or someone is
	 * trying to deregister the source as we speak.  In either case,
	 * bail and log a stray.
	 */
	brnz,pn %g5, 1f
	 add	%g3, %g4, %g3
	casxa	[%g3] ASI_N, %g0, %g5
	brz,pn	%g5, 2f
	 ldx	[%g3 + IV_ARG], %g6

	/*
	 * Save the priority and the two remaining data words in the iqe.
	 */
1:	lduw	[%g3 + IV_PRI], %g4
	stw	%g4, [%g2 + IQE_PRI]
	stx	%g5, [%g2 + IQE_FUNC]
	stx	%g6, [%g2 + IQE_ARG]

	/*
	 * Trigger a softint at the level indicated by the priority.
	 */
	mov	1, %g3
	sllx	%g3, %g4, %g3
	wr	%g3, 0, %asr20

#if KTR_COMPILE & KTR_CT1
	CATR(KTR_CT1, "intr_enqueue: tag=%#lx vec=%#lx pri=%d func=%p arg=%p"
	    , %g1, %g3, %g4, 7, 8, 9)
	lduw	[%g2 + IQE_TAG], %g3
	stx	%g3, [%g1 + KTR_PARM1]
	ldx	[%g2 + IQE_VEC], %g3
	stx	%g3, [%g1 + KTR_PARM2]
	lduw	[%g2 + IQE_PRI], %g3
	stx	%g3, [%g1 + KTR_PARM3]
	stx	%g5, [%g1 + KTR_PARM4]
	stx	%g6, [%g1 + KTR_PARM5]
9:
#endif

	retry

	/*
	 * Either this is a true stray interrupt, or an interrupt occurred
	 * while the source was being deregistered.  In either case, just
	 * log the stray and return.  XXX
	 */
2:	DEBUGGER()

#ifdef INVARIANTS
	/*
	 * The interrupt queue is about to overflow.  We are in big trouble.
	 */
3:	DEBUGGER()
#endif
END(intr_enqueue)
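
/*
 * In outline, intr_enqueue implements a power-of-two ring buffer plus a
 * softint trigger; a C sketch of the fast path (illustrative only, using
 * the field names from the assembly):
 *
 *	iqe = &iq[head];
 *	head = (head + 1) & IQ_MASK;
 *	iqe->iqe_tag = receive; iqe->iqe_vec = data0;
 *	iv = &ivt[data0];
 *	if (data1 == 0) {
 *		data1 = casx(&iv->iv_func, 0, 0);	// atomic read
 *		if (data1 == 0)
 *			goto stray;
 *		data2 = iv->iv_arg;
 *	}
 *	iqe->iqe_pri = iv->iv_pri;
 *	iqe->iqe_func = data1; iqe->iqe_arg = data2;
 *	softint(1 << iqe->iqe_pri);			// wr %asr20
 */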

	.macro	tl1_immu_miss
	wrpr	%g0, PSTATE_ALT, %pstate
	tl1_kstack
	rdpr	%pil, %o2
	b	%xcc, tl1_trap
	 mov	T_IMMU_MISS | T_KERNEL, %o0
	.align	128
	.endm

	.macro	tl1_dmmu_miss
	/*
	 * Load the target tte tag, and extract the context.  If the context
	 * is non-zero handle as user space access.  In either case, load the
	 * tsb 8k pointer.
	 */
	ldxa	[%g0] ASI_DMMU_TAG_TARGET_REG, %g1
	srlx	%g1, TT_CTX_SHIFT, %g2
	brnz,pn	%g2, tl1_dmmu_miss_user
	 ldxa	[%g0] ASI_DMMU_TSB_8KB_PTR_REG, %g2

	/*
	 * Convert the tte pointer to an stte pointer, and add extra bits to
	 * accommodate a large tsb.
	 */
	sllx	%g2, STTE_SHIFT - TTE_SHIFT, %g2
#ifdef notyet
	mov	AA_DMMU_TAR, %g3
	ldxa	[%g3] ASI_DMMU, %g3
	srlx	%g3, TSB_1M_STTE_SHIFT, %g3
	and	%g3, TSB_KERNEL_MASK >> TSB_1M_STTE_SHIFT, %g3
	sllx	%g3, TSB_1M_STTE_SHIFT, %g3
	add	%g2, %g3, %g2
#endif

	/*
	 * Load the tte, check that it's valid and that the tags match.
	 */
	ldda	[%g2] ASI_NUCLEUS_QUAD_LDD, %g4 /*, %g5 */
	brgez,pn %g5, 2f
	 cmp	%g4, %g1
	bne	%xcc, 2f
	 EMPTY

	/*
	 * Set the reference bit, if it's currently clear.
	 */
	andcc	%g5, TD_REF, %g0
	bnz	%xcc, 1f
	 or	%g5, TD_REF, %g1
	stx	%g1, [%g2 + ST_TTE + TTE_DATA]

	/*
	 * Load the tte data into the TLB and retry the instruction.
	 */
1:	stxa	%g5, [%g0] ASI_DTLB_DATA_IN_REG
	retry

	/*
	 * Switch to alternate globals.
	 */
2:	wrpr	%g0, PSTATE_ALT, %pstate

	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1

	tl1_kstack
	sub	%sp, MF_SIZEOF, %sp
	stx	%g1, [%sp + SPOFF + CCFSZ + MF_TAR]
	wrpr	%g0, PSTATE_ALT, %pstate
	rdpr	%pil, %o2
	add	%sp, SPOFF + CCFSZ, %o1
	b	%xcc, tl1_trap
	 mov	T_DMMU_MISS | T_KERNEL, %o0
	.align	128
	.endm
1232
1233ENTRY(tl1_dmmu_miss_user)
1234	/*
1235	 * Try a fast inline lookup of the primary tsb.
1236	 */
1237	dmmu_miss_user
1238
1239	/* Handle faults during window spill/fill. */
1240	RESUME_SPILLFILL_MAGIC(%g1, %g2, RSF_MMU, 1f)
1241#if KTR_COMPILE & KTR_CT1
1242	CATR(KTR_CT1, "tl1_dmmu_miss_user: resume spillfill npc=%#lx"
1243	    , %g1, %g2, %g3, 7, 8, 9)
1244	rdpr	%tnpc, %g2
1245	stx	%g2, [%g1 + KTR_PARM1]
12469:
1247#endif
1248	done
12491:
1250
1251#if KTR_COMPILE & KTR_CT1
1252	CATR(KTR_CT1, "tl1_dmmu_miss_user: trap", %g1, %g2, %g3, 7, 8, 9)
12539:
1254#endif
1255
1256	/*
1257	 * Switch to alternate globals.
1258	 */
1259	wrpr	%g0, PSTATE_ALT, %pstate
1260
1261	wr	%g0, ASI_DMMU, %asi
1262	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1
1263#if KTR_COMPILE & KTR_CT1
1264	CATR(KTR_CT1, "tl1_dmmu_miss: trap sp=%#lx tar=%#lx"
1265	    , %g2, %g3, %g4, 7, 8, 9)
1266	stx	%sp, [%g2 + KTR_PARM1]
1267	stx	%g1, [%g2 + KTR_PARM2]
12689:
1269#endif
1270
1271	tl1_kstack
1272	sub	%sp, MF_SIZEOF, %sp
1273	stx	%g1, [%sp + SPOFF + CCFSZ + MF_TAR]
1274	rdpr	%pil, %o2
1275	add	%sp, SPOFF + CCFSZ, %o1
1276	b	%xcc, tl1_trap
1277	 mov	T_DMMU_MISS | T_KERNEL, %o0
1278END(tl1_dmmu_miss_user)
1279
1280	.macro	tl1_dmmu_prot
1281	wr	%g0, ASI_DMMU, %asi
1282	RESUME_SPILLFILL_MAGIC(%g1, %g2, RSF_MMU, 1f)
1283	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
1284	done
1285
1286	/*
1287	 * Switch to alternate globals.
1288	 */
12891:	wrpr	%g0, PSTATE_ALT, %pstate
1290
1291	/*
1292	 * Load the sfar, sfsr and tar.  Clear the sfsr.
1293	 */
1294	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1
1295	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g2
1296	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g3
1297	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
1298	membar	#Sync
1299
1300	tl1_kstack
1301	sub	%sp, MF_SIZEOF, %sp
1302	stx	%g1, [%sp + SPOFF + CCFSZ + MF_TAR]
1303	stx	%g2, [%sp + SPOFF + CCFSZ + MF_SFAR]
1304	stx	%g3, [%sp + SPOFF + CCFSZ + MF_SFSR]
1305	rdpr	%pil, %o2
1306	add	%sp, SPOFF + CCFSZ, %o1
1307	b	%xcc, tl1_trap
1308	 mov	T_DMMU_PROT | T_KERNEL, %o0
1309	.align	128
1310	.endm

	.macro	tl1_spill_0_n
	SPILL(stx, %sp + SPOFF, 8, EMPTY)
	saved
	retry
	.align	32
	RSF_FATAL(T_SPILL)
	RSF_FATAL(T_SPILL)
	.endm

	.macro	tl1_spill_4_n
	andcc	%sp, 1, %g0
	bz,pn	%xcc, 2f
	 wr	%g0, ASI_AIUP, %asi
1:	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_5_n
	andcc	%sp, 1, %g0
	bnz	%xcc, 1b
	 wr	%g0, ASI_AIUP, %asi
2:	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_6_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_ALIGN_RETRY(WSTATE_TRANSITION | WSTATE_TEST32)
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_7_n
	wr	%g0, ASI_AIUP, %asi
	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_ALIGN_RETRY(WSTATE_TRANSITION | WSTATE_TEST64)
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_0_o
	andcc	%sp, 1, %g0
	bz,pn	%xcc, 2f
	 wr	%g0, ASI_AIUP, %asi
1:	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	wrpr	%g0, WSTATE_ASSUME64 << WSTATE_USERSHIFT, %wstate
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_1_o
	andcc	%sp, 1, %g0
	bnz	%xcc, 1b
	 wr	%g0, ASI_AIUP, %asi
2:	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	SPILL(stwa, %sp, 4, %asi)
	saved
	wrpr	%g0, WSTATE_ASSUME32 << WSTATE_USERSHIFT, %wstate
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_2_o
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_ALIGN_RETRY(WSTATE_TEST32 << WSTATE_USERSHIFT)
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_spill_3_o
	wr	%g0, ASI_AIUP, %asi
	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_ALIGN_RETRY(WSTATE_TEST64 << WSTATE_USERSHIFT)
	RSF_SPILL_TOPCB
	.endm

	.macro	tl1_fill_0_n
	FILL(ldx, %sp + SPOFF, 8, EMPTY)
	restored
	retry
	.align	32
	RSF_FATAL(T_FILL)
	RSF_FATAL(T_FILL)
	.endm

	.macro	tl1_fill_4_n
	andcc	%sp, 1, %g0
	bz,pn	%xcc, 2f
	 wr	%g0, ASI_AIUP, %asi
1:	FILL(ldxa, %sp + SPOFF, 8, %asi)
	restored
	retry
	.align 32
	RSF_FILL_MAGIC
	RSF_FILL_MAGIC
	.endm

	.macro	tl1_fill_5_n
	andcc	%sp, 1, %g0
	bnz,pn	%xcc, 1b
	 wr	%g0, ASI_AIUP, %asi
2:	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	FILL(lduwa, %sp, 4, %asi)
	restored
	retry
	.align 32
	RSF_FILL_MAGIC
	RSF_FILL_MAGIC
	.endm

	.macro	tl1_fill_6_n
	wr	%g0, ASI_AIUP, %asi
	FILL(ldxa, %sp + SPOFF, 8, %asi)
	restored
	retry
	.align 32
	RSF_ALIGN_RETRY(WSTATE_TEST32 | WSTATE_TRANSITION)
	RSF_FILL_MAGIC
	.endm

	.macro	tl1_fill_7_n
	wr	%g0, ASI_AIUP, %asi
	wrpr	%g0, PSTATE_ALT | PSTATE_AM, %pstate
	FILL(lduwa, %sp, 4, %asi)
	restored
	retry
	.align 32
	RSF_ALIGN_RETRY(WSTATE_TEST64 | WSTATE_TRANSITION)
	RSF_FILL_MAGIC
	.endm

/*
 * This is used to spill windows that are still occupied with user
 * data on kernel entry to the pcb.
 */
ENTRY(tl1_spill_topcb)
	wrpr	%g0, PSTATE_ALT, %pstate

	/* Free some globals for our use. */
	sub	%g6, 24, %g6
	stx	%g1, [%g6]
	stx	%g2, [%g6 + 8]
	stx	%g3, [%g6 + 16]

	ldx	[PCPU(CURPCB)], %g1
	ldx	[%g1 + PCB_NSAVED], %g2

	sllx	%g2, 3, %g3
	add	%g3, %g1, %g3
	stx	%sp, [%g3 + PCB_RWSP]

	sllx	%g2, 7, %g3
	add	%g3, %g1, %g3
	SPILL(stx, %g3 + PCB_RW, 8, EMPTY)

	inc	%g2
	stx	%g2, [%g1 + PCB_NSAVED]

#if KTR_COMPILE & KTR_CT1
	CATR(KTR_CT1, "tl1_spill_topcb: pc=%lx sp=%#lx nsaved=%d"
	   , %g1, %g2, %g3, 7, 8, 9)
	rdpr	%tpc, %g2
	stx	%g2, [%g1 + KTR_PARM1]
	stx	%sp, [%g1 + KTR_PARM2]
	ldx	[PCPU(CURPCB)], %g2
	ldx	[%g2 + PCB_NSAVED], %g2
	stx	%g2, [%g1 + KTR_PARM3]
9:
#endif

	saved

	ldx	[%g6 + 16], %g3
	ldx	[%g6 + 8], %g2
	ldx	[%g6], %g1
	add	%g6, 24, %g6
	retry
END(tl1_spill_topcb)
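
/*
 * The nsaved indexing above works out to the following, in C (a sketch;
 * the actual offsets come from assym.s):
 *
 *	pcb->pcb_rwsp[nsaved] = sp;	// nsaved << 3: 8-byte stack slots
 *	pcb->pcb_rw[nsaved] = window;	// nsaved << 7: 128 bytes per window
 *	pcb->pcb_nsaved = nsaved + 1;
 *
 * Each saved window is 16 8-byte registers (128 bytes), and the matching
 * user %sp is recorded so that trap() can copy the window out later.
 */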

	.macro	tl1_spill_bad	count
	.rept	\count
	tl1_wide T_SPILL
	.endr
	.endm

	.macro	tl1_fill_bad	count
	.rept	\count
	tl1_wide T_FILL
	.endr
	.endm

	.macro	tl1_breakpoint
	b,a	%xcc, tl1_breakpoint_trap
	.align	32
	.endm

ENTRY(tl1_breakpoint_trap)
	tl1_kstack
	sub	%sp, KF_SIZEOF, %sp
	flushw
	stx	%fp, [%sp + SPOFF + CCFSZ + KF_FP]
	mov	T_BREAKPOINT | T_KERNEL, %o0
	add	%sp, SPOFF + CCFSZ, %o1
	b	%xcc, tl1_trap
	 rdpr	%pil, %o2
END(tl1_breakpoint_trap)

	.macro	tl1_soft	count
	.rept	\count
	tl1_gen	T_SOFT | T_KERNEL
	.endr
	.endm
	.sect	.trap
	.align	0x8000
	.globl	tl0_base

tl0_base:
	tl0_reserved	1		! 0x0 unused
tl0_power_on:
	tl0_gen		T_POWER_ON	! 0x1 power on reset
tl0_watchdog:
	tl0_gen		T_WATCHDOG	! 0x2 watchdog reset
tl0_reset_ext:
	tl0_gen		T_RESET_EXT	! 0x3 externally initiated reset
tl0_reset_soft:
	tl0_gen		T_RESET_SOFT	! 0x4 software initiated reset
tl0_red_state:
	tl0_gen		T_RED_STATE	! 0x5 red state exception
	tl0_reserved	2		! 0x6-0x7 reserved
tl0_insn_excptn:
	tl0_gen		T_INSN_EXCPTN	! 0x8 instruction access exception
	tl0_reserved	1		! 0x9 reserved
tl0_insn_error:
	tl0_gen		T_INSN_ERROR	! 0xa instruction access error
	tl0_reserved	5		! 0xb-0xf reserved
tl0_insn_illegal:
	tl0_gen		T_INSN_ILLEGAL	! 0x10 illegal instruction
tl0_priv_opcode:
	tl0_gen		T_PRIV_OPCODE	! 0x11 privileged opcode
	tl0_reserved	14		! 0x12-0x1f reserved
tl0_fp_disabled:
	tl0_gen		T_FP_DISABLED	! 0x20 floating point disabled
tl0_fp_ieee:
	tl0_gen		T_FP_IEEE	! 0x21 floating point exception ieee
tl0_fp_other:
	tl0_gen		T_FP_OTHER	! 0x22 floating point exception other
tl0_tag_ovflw:
	tl0_gen		T_TAG_OVFLW	! 0x23 tag overflow
tl0_clean_window:
	clean_window			! 0x24 clean window
tl0_divide:
	tl0_gen		T_DIVIDE	! 0x28 division by zero
	tl0_reserved	7		! 0x29-0x2f reserved
tl0_data_excptn:
	tl0_data_excptn			! 0x30 data access exception
	tl0_reserved	1		! 0x31 reserved
tl0_data_error:
	tl0_gen		T_DATA_ERROR	! 0x32 data access error
	tl0_reserved	1		! 0x33 reserved
tl0_align:
	tl0_align			! 0x34 memory address not aligned
tl0_align_lddf:
	tl0_gen		T_ALIGN_LDDF	! 0x35 lddf memory address not aligned
tl0_align_stdf:
	tl0_gen		T_ALIGN_STDF	! 0x36 stdf memory address not aligned
tl0_priv_action:
	tl0_gen		T_PRIV_ACTION	! 0x37 privileged action
	tl0_reserved	9		! 0x38-0x40 reserved
tl0_intr_level:
	tl0_intr_level			! 0x41-0x4f interrupt level 1 to 15
	tl0_reserved	16		! 0x50-0x5f reserved
tl0_intr_vector:
	tl0_intr_vector			! 0x60 interrupt vector
tl0_watch_phys:
	tl0_gen		T_WATCH_PHYS	! 0x61 physical address watchpoint
tl0_watch_virt:
	tl0_gen		T_WATCH_VIRT	! 0x62 virtual address watchpoint
tl0_ecc:
	tl0_gen		T_ECC		! 0x63 corrected ecc error
tl0_immu_miss:
	tl0_immu_miss			! 0x64 fast instruction access mmu miss
tl0_dmmu_miss:
	tl0_dmmu_miss			! 0x68 fast data access mmu miss
tl0_dmmu_prot:
	tl0_dmmu_prot			! 0x6c fast data access protection
	tl0_reserved	16		! 0x70-0x7f reserved
tl0_spill_0_n:
	tl0_spill_0_n			! 0x80 spill 0 normal
tl0_spill_1_n:
	tl0_spill_1_n			! 0x84 spill 1 normal
tl0_spill_2_n:
	tl0_spill_2_n			! 0x88 spill 2 normal
tl0_spill_3_n:
	tl0_spill_3_n			! 0x8c spill 3 normal
	tl0_spill_bad	12		! 0x90-0xbf spill normal, other
tl0_fill_0_n:
	tl0_fill_0_n			! 0xc0 fill 0 normal
tl0_fill_1_n:
	tl0_fill_1_n			! 0xc4 fill 1 normal
tl0_fill_2_n:
	tl0_fill_2_n			! 0xc8 fill 2 normal
tl0_fill_3_n:
	tl0_fill_3_n			! 0xcc fill 3 normal
	tl0_fill_bad	12		! 0xd0-0xff fill normal, other
tl0_sun_syscall:
	tl0_reserved	1		! 0x100 sun system call
tl0_breakpoint:
	tl0_gen		T_BREAKPOINT	! 0x101 breakpoint
	tl0_soft	6		! 0x102-0x107 trap instruction
	tl0_soft	1		! 0x108 SVr4 syscall
	tl0_gen		T_SYSCALL	! 0x109 BSD syscall
	tl0_soft	118		! 0x10a-0x17f trap instruction
	tl0_reserved	128		! 0x180-0x1ff reserved

tl1_base:
	tl1_reserved	1		! 0x200 unused
tl1_power_on:
	tl1_gen		T_POWER_ON	! 0x201 power on reset
tl1_watchdog:
	tl1_gen		T_WATCHDOG	! 0x202 watchdog reset
tl1_reset_ext:
	tl1_gen		T_RESET_EXT	! 0x203 externally initiated reset
tl1_reset_soft:
	tl1_gen		T_RESET_SOFT	! 0x204 software initiated reset
tl1_red_state:
	tl1_gen		T_RED_STATE	! 0x205 red state exception
	tl1_reserved	2		! 0x206-0x207 reserved
tl1_insn_excptn:
	tl1_insn_excptn			! 0x208 instruction access exception
	tl1_reserved	1		! 0x209 reserved
tl1_insn_error:
	tl1_gen		T_INSN_ERROR	! 0x20a instruction access error
	tl1_reserved	5		! 0x20b-0x20f reserved
tl1_insn_illegal:
	tl1_gen		T_INSN_ILLEGAL	! 0x210 illegal instruction
tl1_priv_opcode:
	tl1_gen		T_PRIV_OPCODE	! 0x211 privileged opcode
	tl1_reserved	14		! 0x212-0x21f reserved
tl1_fp_disabled:
	tl1_gen		T_FP_DISABLED	! 0x220 floating point disabled
tl1_fp_ieee:
	tl1_gen		T_FP_IEEE	! 0x221 floating point exception ieee
tl1_fp_other:
	tl1_gen		T_FP_OTHER	! 0x222 floating point exception other
tl1_tag_ovflw:
	tl1_gen		T_TAG_OVFLW	! 0x223 tag overflow
tl1_clean_window:
	clean_window			! 0x224 clean window
tl1_divide:
	tl1_gen		T_DIVIDE	! 0x228 division by zero
	tl1_reserved	7		! 0x229-0x22f reserved
tl1_data_excptn:
	tl1_data_excptn			! 0x230 data access exception
	tl1_reserved	1		! 0x231 reserved
tl1_data_error:
	tl1_gen		T_DATA_ERROR	! 0x232 data access error
	tl1_reserved	1		! 0x233 reserved
tl1_align:
	tl1_align			! 0x234 memory address not aligned
tl1_align_lddf:
	tl1_gen		T_ALIGN_LDDF	! 0x235 lddf memory address not aligned
tl1_align_stdf:
	tl1_gen		T_ALIGN_STDF	! 0x236 stdf memory address not aligned
tl1_priv_action:
	tl1_gen		T_PRIV_ACTION	! 0x237 privileged action
	tl1_reserved	9		! 0x238-0x240 reserved
tl1_intr_level:
	tl1_intr_level			! 0x241-0x24f interrupt level 1 to 15
	tl1_reserved	16		! 0x250-0x25f reserved
tl1_intr_vector:
	tl1_intr_vector			! 0x260 interrupt vector
tl1_watch_phys:
	tl1_gen		T_WATCH_PHYS	! 0x261 physical address watchpoint
tl1_watch_virt:
	tl1_gen		T_WATCH_VIRT	! 0x262 virtual address watchpoint
tl1_ecc:
	tl1_gen		T_ECC		! 0x263 corrected ecc error
tl1_immu_miss:
	tl1_immu_miss			! 0x264 fast instruction access mmu miss
tl1_dmmu_miss:
	tl1_dmmu_miss			! 0x268 fast data access mmu miss
tl1_dmmu_prot:
	tl1_dmmu_prot			! 0x26c fast data access protection
	tl1_reserved	16		! 0x270-0x27f reserved
tl1_spill_0_n:
	tl1_spill_0_n			! 0x280 spill 0 normal
	tl1_spill_bad	3		! 0x284-0x28f spill normal
tl1_spill_4_n:
	tl1_spill_4_n			! 0x290 spill 4 normal
tl1_spill_5_n:
	tl1_spill_5_n			! 0x294 spill 5 normal
tl1_spill_6_n:
	tl1_spill_6_n			! 0x298 spill 6 normal
tl1_spill_7_n:
	tl1_spill_7_n			! 0x29c spill 7 normal
tl1_spill_0_o:
	tl1_spill_0_o			! 0x2a0 spill 0 other
tl1_spill_1_o:
	tl1_spill_1_o			! 0x2a4 spill 1 other
tl1_spill_2_o:
	tl1_spill_2_o			! 0x2a8 spill 2 other
tl1_spill_3_o:
	tl1_spill_3_o			! 0x2ac spill 3 other
	tl1_spill_bad	4		! 0x2b0-0x2bf spill other
tl1_fill_0_n:
	tl1_fill_0_n			! 0x2c0 fill 0 normal
	tl1_fill_bad	3		! 0x2c4-0x2cf fill normal
tl1_fill_4_n:
	tl1_fill_4_n			! 0x2d0 fill 4 normal
tl1_fill_5_n:
	tl1_fill_5_n			! 0x2d4 fill 5 normal
tl1_fill_6_n:
	tl1_fill_6_n			! 0x2d8 fill 6 normal
tl1_fill_7_n:
	tl1_fill_7_n			! 0x2dc fill 7 normal
	tl1_fill_bad	8		! 0x2e0-0x2ff fill other
	tl1_reserved	1		! 0x300 trap instruction
tl1_breakpoint:
	tl1_breakpoint			! 0x301 breakpoint
	tl1_gen		T_RESTOREWP	! 0x302 restore watchpoint (debug)
	tl1_soft	125		! 0x303-0x37f trap instruction
	tl1_reserved	128		! 0x380-0x3ff reserved

/*
 * User trap entry point.
 *
 * void tl0_trap(u_long type, u_long arg, u_long pil, u_long wstate)
 *
 * The following setup has been performed:
 *	- the windows have been split and the active user window has been saved
 *	  (maybe just to the pcb)
 *	- we are on the current kernel stack and a frame has been set up, there
 *	  may be extra trap specific stuff below the frame
 *	- we are on alternate globals and interrupts are disabled
 *
 * We build a trapframe, switch to normal globals, enable interrupts and call
 * trap.
 *
 * NOTE: Due to a chip bug, we must save the trap state registers in memory
 * early.
 *
 * NOTE: We must be very careful setting up the per-cpu pointer.  We know that
 * it has been pre-set in alternate globals, so we read it from there and set
 * up the normal %g7 *before* enabling interrupts.  This avoids any possibility
 * of cpu migration and using the wrong globalp.
 */
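
/*
 * Schematically, the trapframe built below looks like this (the TF_*
 * offsets come from assym.s; this is a sketch of the layout, not the
 * authoritative definition):
 *
 *	struct trapframe {
 *		u_long tf_tstate, tf_tpc, tf_tnpc;	// saved trap state
 *		u_long tf_type, tf_arg, tf_pil, tf_wstate;
 *		u_long tf_g1, ..., tf_g7;		// globals
 *		u_long tf_o0, ..., tf_o7;		// user outs (our ins)
 *	};
 */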
ENTRY(tl0_trap)
	/*
	 * Force kernel store order.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	sub	%sp, TF_SIZEOF, %sp

	rdpr	%tstate, %l0
	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	rdpr	%tpc, %l1
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	rdpr	%tnpc, %l2
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]

	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
	stx	%o1, [%sp + SPOFF + CCFSZ + TF_ARG]
	stx	%o2, [%sp + SPOFF + CCFSZ + TF_PIL]
	stx	%o3, [%sp + SPOFF + CCFSZ + TF_WSTATE]

.Ltl0_trap_fill:
	mov	%g7, %l0
	wrpr	%g0, PSTATE_NORMAL, %pstate
	mov	%l0, %g7	/* set up the normal %g7 */
	wrpr	%g0, PSTATE_KERNEL, %pstate

	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]

#if KTR_COMPILE & KTR_CT1
	CATR(KTR_CT1, "tl0_trap: p=%p type=%#x arg=%#lx pil=%#lx ws=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURPROC)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	stx	%o0, [%g1 + KTR_PARM2]
	stx	%o1, [%g1 + KTR_PARM3]
	stx	%o2, [%g1 + KTR_PARM4]
	stx	%o3, [%g1 + KTR_PARM5]
9:
#endif

	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]

.Ltl0_trap_spill:
	call	trap
	 add	%sp, CCFSZ + SPOFF, %o0

	/* Fallthrough. */
END(tl0_trap)

/* Return to tl0 (user process). */
ENTRY(tl0_ret)
#if KTR_COMPILE & KTR_CT1
	CATR(KTR_CT1, "tl0_ret: p=%p (%s) pil=%#lx sflag=%#x"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURPROC)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	add	%g2, P_COMM, %g3
	stx	%g3, [%g1 + KTR_PARM2]
	rdpr	%pil, %g3
	stx	%g3, [%g1 + KTR_PARM3]
	lduw	[%g2 + P_SFLAG], %g3
	stx	%g3, [%g1 + KTR_PARM4]
9:
#endif

	wrpr	%g0, PIL_TICK, %pil
	ldx	[PCPU(CURPROC)], %o0
	lduw	[%o0 + P_SFLAG], %o1
	and	%o1, PS_ASTPENDING | PS_NEEDRESCHED, %o1
	brz,pt	%o1, 1f
	 nop
	call	ast
	 add	%sp, CCFSZ + SPOFF, %o0

1:	ldx	[PCPU(CURPCB)], %o0
	ldx	[%o0 + PCB_NSAVED], %o1
	mov	T_SPILL, %o0
	brnz,a,pn %o1, .Ltl0_trap_spill
	 stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]

	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6
	ldx	[%sp + SPOFF + CCFSZ + TF_G7], %g7

	ldx	[%sp + SPOFF + CCFSZ + TF_O0], %i0
	ldx	[%sp + SPOFF + CCFSZ + TF_O1], %i1
	ldx	[%sp + SPOFF + CCFSZ + TF_O2], %i2
	ldx	[%sp + SPOFF + CCFSZ + TF_O3], %i3
	ldx	[%sp + SPOFF + CCFSZ + TF_O4], %i4
	ldx	[%sp + SPOFF + CCFSZ + TF_O5], %i5
	ldx	[%sp + SPOFF + CCFSZ + TF_O6], %i6
	ldx	[%sp + SPOFF + CCFSZ + TF_O7], %i7

	ldx	[%sp + SPOFF + CCFSZ + TF_PIL], %l0
	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l1
	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l2
	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l3
	ldx	[%sp + SPOFF + CCFSZ + TF_WSTATE], %l4

	wrpr	%g0, PSTATE_ALT, %pstate

	wrpr	%l0, 0, %pil

	wrpr	%l1, 0, %tstate
	wrpr	%l2, 0, %tpc
	wrpr	%l3, 0, %tnpc

	/*
	 * Restore the user window state.
	 * NOTE: whenever we come here, it should be with %canrestore = 0.
	 */
	srlx	%l4, WSTATE_USERSHIFT, %g1
	wrpr	%g1, WSTATE_TRANSITION, %wstate
	rdpr	%otherwin, %g2
	wrpr	%g2, 0, %canrestore
	wrpr	%g0, 0, %otherwin
	wrpr	%g2, 0, %cleanwin

	/*
	 * If this instruction causes a fill trap which fails to fill a window
	 * from the user stack, we will resume at tl0_ret_fill_end and call
	 * back into the kernel.
	 */
	restore
tl0_ret_fill:

#if KTR_COMPILE & KTR_CT1
	CATR(KTR_CT1, "tl0_ret: return p=%#lx pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
	    , %g2, %g3, %g4, 7, 8, 9)
	ldx	[PCPU(CURPROC)], %g3
	stx	%g3, [%g2 + KTR_PARM1]
	rdpr	%tstate, %g3
	stx	%g3, [%g2 + KTR_PARM2]
	rdpr	%tpc, %g3
	stx	%g3, [%g2 + KTR_PARM3]
	stx	%sp, [%g2 + KTR_PARM4]
	stx	%g1, [%g2 + KTR_PARM5]
9:
#endif

	wrpr	%g1, 0, %wstate
	retry
tl0_ret_fill_end:

#if KTR_COMPILE & KTR_CT1
	CATR(KTR_CT1, "tl0_ret: fill magic wstate=%#lx sp=%#lx"
	    , %l0, %l1, %l2, 7, 8, 9)
	stx	%l4, [%l0 + KTR_PARM1]
	stx	%sp, [%l0 + KTR_PARM2]
9:
#endif

	/*
	 * The fill failed and magic has been performed.  Call trap again,
	 * which will copyin the window on the user's behalf.
	 */
	wrpr	%l4, 0, %wstate
	mov	T_FILL, %o0
	b	%xcc, .Ltl0_trap_fill
	 stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
END(tl0_ret)

/*
 * Kernel trap entry point
 *
 * void tl1_trap(u_long type, u_long arg, u_long pil)
 *
 * This is easy because the stack is already set up and the windows don't
 * need to be split.  We build a trapframe and call trap(), the same as above,
 * but the outs don't need to be saved.
 *
 * NOTE: See comments above tl0_trap for song and dance about chip bugs and
 * setting up globalp.
 */
ENTRY(tl1_trap)
	sub	%sp, TF_SIZEOF, %sp

	rdpr	%tstate, %l0
	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	rdpr	%tpc, %l1
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	rdpr	%tnpc, %l2
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]

#if KTR_COMPILE & KTR_CT1
	setx	trap_mask, %l4, %l3
	andn	%o1, T_KERNEL, %l4
	mov	1, %l5
	sllx	%l5, %l4, %l4
	ldx	[%l3], %l5
	and	%l4, %l5, %l4
	brz	%l4, 9f
	 nop
	CATR(KTR_CT1, "tl1_trap: p=%p pil=%#lx type=%#lx arg=%#lx pc=%#lx"
	    , %l3, %l4, %l5, 7, 8, 9)
	ldx	[PCPU(CURPROC)], %l4
	stx	%l4, [%l3 + KTR_PARM1]
#if 0
	add	%l4, P_COMM, %l4
	stx	%l4, [%l3 + KTR_PARM2]
#else
	stx	%o2, [%l3 + KTR_PARM2]
#endif
	andn	%o0, T_KERNEL, %l4
	stx	%l4, [%l3 + KTR_PARM3]
	stx	%o1, [%l3 + KTR_PARM4]
	stx	%l1, [%l3 + KTR_PARM5]
9:
#endif

	wrpr	%g0, 1, %tl
	/* We may have trapped before %g7 was set up correctly. */
	mov	%g7, %l0
	wrpr	%g0, PSTATE_NORMAL, %pstate
	mov	%l0, %g7
	wrpr	%g0, PSTATE_KERNEL, %pstate

	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
	stx	%o1, [%sp + SPOFF + CCFSZ + TF_ARG]
	stx	%o2, [%sp + SPOFF + CCFSZ + TF_PIL]

	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]

	call	trap
	 add	%sp, CCFSZ + SPOFF, %o0

	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6

	ldx	[%sp + SPOFF + CCFSZ + TF_PIL], %l0
	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l1
	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l2
	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l3

	wrpr	%g0, PSTATE_ALT, %pstate

	wrpr	%l0, 0, %pil

	wrpr	%g0, 2, %tl
	wrpr	%l1, 0, %tstate
	wrpr	%l2, 0, %tpc
	wrpr	%l3, 0, %tnpc

#if KTR_COMPILE & KTR_CT1
	setx	trap_mask, %l4, %l3
	ldx	[%sp + SPOFF + CCFSZ + TF_TYPE], %l5
	andn	%l5, T_KERNEL, %l4
	mov	1, %l5
	sllx	%l5, %l4, %l4
	ldx	[%l3], %l5
	and	%l4, %l5, %l4
	brz	%l4, 9f
	 nop
	CATR(KTR_CT1, "tl1_trap: return p=%p pil=%#lx sp=%#lx pc=%#lx"
	    , %l3, %l4, %l5, 7, 8, 9)
	ldx	[PCPU(CURPROC)], %l4
	stx	%l4, [%l3 + KTR_PARM1]
	stx	%l0, [%l3 + KTR_PARM2]
	stx	%sp, [%l3 + KTR_PARM3]
	stx	%l2, [%l3 + KTR_PARM4]
9:
#endif

	restore
	retry
END(tl1_trap)

/*
 * Freshly forked processes come here when switched to for the first time.
 * The arguments to fork_exit() have been set up in the locals; we must move
 * them to the outs.
 */
ENTRY(fork_trampoline)
#if KTR_COMPILE & KTR_CT1
	CATR(KTR_CT1, "fork_trampoline: p=%p (%s) cwp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURPROC)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	add	%g2, P_COMM, %g2
	stx	%g2, [%g1 + KTR_PARM2]
	rdpr	%cwp, %g2
	stx	%g2, [%g1 + KTR_PARM3]
9:
#endif
	mov	%l0, %o0
	mov	%l1, %o1
	mov	%l2, %o2
	call	fork_exit
	 nop
	b,a	%xcc, tl0_ret
END(fork_trampoline)