1/*	$NetBSD$	*/
2
3/*
4 * Copyright (c) 2006-2010 Matthew R. Green
5 * Copyright (c) 1996-2002 Eduardo Horvath
6 * Copyright (c) 1996 Paul Kranenburg
7 * Copyright (c) 1996
8 * 	The President and Fellows of Harvard College.
9 *	All rights reserved.
10 * Copyright (c) 1992, 1993
11 *	The Regents of the University of California.
12 *	All rights reserved.
13 *
14 * This software was developed by the Computer Systems Engineering group
15 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
16 * contributed to Berkeley.
17 *
18 * All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 *	This product includes software developed by the University of
21 *	California, Lawrence Berkeley Laboratory.
22 *	This product includes software developed by Harvard University.
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 * 1. Redistributions of source code must retain the above copyright
28 *    notice, this list of conditions and the following disclaimer.
29 * 2. Redistributions in binary form must reproduce the above copyright
30 *    notice, this list of conditions and the following disclaimer in the
31 *    documentation and/or other materials provided with the
32 *    distribution.
33 * 3. All advertising materials mentioning features or use of this
34 *    software must display the following acknowledgement:
35 *	This product includes software developed by the University of
36 *	California, Berkeley and its contributors.
37 *	This product includes software developed by Harvard University.
38 *	This product includes software developed by Paul Kranenburg.
39 * 4. Neither the name of the University nor the names of its
40 *    contributors may be used to endorse or promote products derived
41 *    from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS''
44 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
45 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
46 * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR
47 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
51 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
52 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
53 * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
54 * DAMAGE.
55 *
56 *	@(#)locore.s	8.4 (Berkeley) 12/10/93
57 */
58
/*
 * Compile-time feature switches for this locore.  #define enables an
 * option, #undef documents an available-but-disabled one.
 */
#undef	PARANOID		/* Extremely expensive consistency checks */
#undef	NO_VCACHE		/* Map w/D$ disabled */
#undef	TRAPSTATS		/* Count traps */
#undef	TRAPS_USE_IG		/* Use Interrupt Globals for all traps */
#define	HWREF			/* Track ref/mod bits in trap handlers */
#undef	DCACHE_BUG		/* Flush D$ around ASI_PHYS accesses */
#undef	NO_TSB			/* Don't use TSB */
#define	BB_ERRATA_1		/* writes to TICK_CMPR may fail */
#undef	TLB_FLUSH_LOWVA		/* also flush 32-bit entries from the MMU */
68
69#include "opt_ddb.h"
70#include "opt_kgdb.h"
71#include "opt_multiprocessor.h"
72#include "opt_compat_netbsd.h"
73#include "opt_compat_netbsd32.h"
74#include "opt_lockdebug.h"
75
76#include "assym.h"
77#include <machine/param.h>
78#include <machine/types.h>
79#include <sparc64/sparc64/intreg.h>
80#include <sparc64/sparc64/timerreg.h>
81#include <machine/ctlreg.h>
82#include <machine/psl.h>
83#include <machine/signal.h>
84#include <machine/trap.h>
85#include <machine/frame.h>
86#include <machine/pte.h>
87#include <machine/pmap.h>
88#include <machine/intr.h>
89#include <machine/asm.h>
90#include <machine/locore.h>
91#include <sys/syscall.h>
92
93#define BLOCK_SIZE SPARC64_BLOCK_SIZE
94#define BLOCK_ALIGN SPARC64_BLOCK_ALIGN
95
96#include "ksyms.h"
97
#if 1
/*
 * Try to issue an elf note to ask the Solaris
 * bootloader to align the kernel properly.
 *
 * ELF note layout: namesz (0x0d = 13 = strlen("SUNW Solaris")+1),
 * descsz (4), type (1), the NUL-terminated name, padding to a
 * 4-byte boundary, then the 4-byte descriptor.
 */
	.section	.note
	.word	0x0d			! namesz
	.word	4			! NOTE(review): presumably descsz;
					! original comment said "Dunno why" -- confirm
	.word	1			! note type
0:	.asciz	"SUNW Solaris"
1:
	.align	4
	.word	0x0400000		! descriptor: requested alignment -- TODO confirm

#endif

	/* Tell gas that %g2/%g3 are reserved as application scratch regs. */
	.register	%g2,#scratch
	.register	%g3,#scratch
115
116
	.data
	.globl	_C_LABEL(data_start)
_C_LABEL(data_start):					! Start of data segment

#ifdef KGDB
/*
 * Stack for the in-kernel gdb stub.
 * Another item that must be aligned, easiest to put it here.
 */
KGDB_STACK_SIZE = 2048
	.globl	_C_LABEL(kgdb_stack)
_C_LABEL(kgdb_stack):
	.space	KGDB_STACK_SIZE		! hope this is enough
#endif

#ifdef NOTDEF_DEBUG
/*
 * This stack is used when we detect kernel stack corruption.
 * (The label marks the TOP of the space, since stacks grow down.)
 */
	.space	USPACE
	.align	16
panicstack:
#endif

/*
 * romp is the prom entry pointer
 * romtba is the prom trap table base address
 * Both are pointer-sized cells, filled in at boot time.
 */
	.globl	romp
romp:	POINTER	0
	.globl	romtba
romtba:	POINTER	0

	_ALIGN
	.text
151
152/*
153 * The v9 trap frame is stored in the special trap registers.  The
154 * register window is only modified on window overflow, underflow,
155 * and clean window traps, where it points to the register window
156 * needing service.  Traps have space for 8 instructions, except for
157 * the window overflow, underflow, and clean window traps which are
158 * 32 instructions long, large enough to in-line.
159 *
160 * The spitfire CPU (Ultra I) has 4 different sets of global registers.
161 * (blah blah...)
162 *
163 * I used to generate these numbers by address arithmetic, but gas's
164 * expression evaluator has about as much sense as your average slug
165 * (oddly enough, the code looks about as slimy too).  Thus, all the
166 * trap numbers are given as arguments to the trap macros.  This means
167 * there is one line per trap.  Sigh.
168 *
169 * Hardware interrupt vectors can be `linked'---the linkage is to regular
170 * C code---or rewired to fast in-window handlers.  The latter are good
171 * for unbuffered hardware like the Zilog serial chip and the AMD audio
172 * chip, where many interrupts can be handled trivially with pseudo-DMA
173 * or similar.  Only one `fast' interrupt can be used per level, however,
174 * and direct and `fast' interrupts are incompatible.  Routines in intr.c
175 * handle setting these, with optional paranoia.
176 */
177
178/*
179 *	TA8 -- trap align for 8 instruction traps
180 *	TA32 -- trap align for 32 instruction traps
181 */
182#define TA8	.align 32
183#define TA32	.align 128
184
185/*
186 * v9 trap macros:
187 *
188 *	We have a problem with v9 traps; we have no registers to put the
189 *	trap type into.  But we do have a %tt register which already has
190 *	that information.  Trap types in these macros are all dummys.
191 */
192	/* regular vectored traps */
193
194#define	VTRAP(type, label) \
195	ba,a,pt	%icc,label; nop; NOTREACHED; TA8
196
197	/* hardware interrupts (can be linked or made `fast') */
198#define	HARDINT4U(lev) \
199	VTRAP(lev, _C_LABEL(sparc_interrupt))
200
201	/* software interrupts (may not be made direct, sorry---but you
202	   should not be using them trivially anyway) */
203#define	SOFTINT4U(lev, bit) \
204	HARDINT4U(lev)
205
206	/* traps that just call trap() */
207#define	TRAP(type)	VTRAP(type, slowtrap)
208
209	/* architecturally undefined traps (cause panic) */
210#ifndef DEBUG
211#define	UTRAP(type)	sir; VTRAP(type, slowtrap)
212#else
213#define	UTRAP(type)	VTRAP(type, slowtrap)
214#endif
215
216	/* software undefined traps (may be replaced) */
217#define	STRAP(type)	VTRAP(type, slowtrap)
218
219/* breakpoint acts differently under kgdb */
220#ifdef KGDB
221#define	BPT		VTRAP(T_BREAKPOINT, bpt)
222#define	BPT_KGDB_EXEC	VTRAP(T_KGDB_EXEC, bpt)
223#else
224#define	BPT		TRAP(T_BREAKPOINT)
225#define	BPT_KGDB_EXEC	TRAP(T_KGDB_EXEC)
226#endif
227
228#define	SYSCALL		VTRAP(0x100, syscall_setup)
229#ifdef notyet
230#define	ZS_INTERRUPT	ba,a,pt %icc, zshard; nop; TA8
231#else
232#define	ZS_INTERRUPT4U	HARDINT4U(12)
233#endif
234
235
236/*
237 * Macro to clear %tt so we don't get confused with old traps.
238 */
239#ifdef DEBUG
240#define CLRTT	wrpr	%g0,0x1ff,%tt
241#else
242#define CLRTT
243#endif
244
245/*
246 * Here are some oft repeated traps as macros.
247 */
248
249	/* spill a 64-bit register window */
250#define SPILL64(label,as) \
251label:	\
252	wr	%g0, as, %asi; \
253	stxa	%l0, [%sp+BIAS+0x00]%asi; \
254	stxa	%l1, [%sp+BIAS+0x08]%asi; \
255	stxa	%l2, [%sp+BIAS+0x10]%asi; \
256	stxa	%l3, [%sp+BIAS+0x18]%asi; \
257	stxa	%l4, [%sp+BIAS+0x20]%asi; \
258	stxa	%l5, [%sp+BIAS+0x28]%asi; \
259	stxa	%l6, [%sp+BIAS+0x30]%asi; \
260	\
261	stxa	%l7, [%sp+BIAS+0x38]%asi; \
262	stxa	%i0, [%sp+BIAS+0x40]%asi; \
263	stxa	%i1, [%sp+BIAS+0x48]%asi; \
264	stxa	%i2, [%sp+BIAS+0x50]%asi; \
265	stxa	%i3, [%sp+BIAS+0x58]%asi; \
266	stxa	%i4, [%sp+BIAS+0x60]%asi; \
267	stxa	%i5, [%sp+BIAS+0x68]%asi; \
268	stxa	%i6, [%sp+BIAS+0x70]%asi; \
269	\
270	stxa	%i7, [%sp+BIAS+0x78]%asi; \
271	saved; \
272	CLRTT; \
273	retry; \
274	NOTREACHED; \
275	TA32
276
277	/* spill a 32-bit register window */
278#define SPILL32(label,as) \
279label:	\
280	wr	%g0, as, %asi; \
281	srl	%sp, 0, %sp; /* fixup 32-bit pointers */ \
282	stwa	%l0, [%sp+0x00]%asi; \
283	stwa	%l1, [%sp+0x04]%asi; \
284	stwa	%l2, [%sp+0x08]%asi; \
285	stwa	%l3, [%sp+0x0c]%asi; \
286	stwa	%l4, [%sp+0x10]%asi; \
287	stwa	%l5, [%sp+0x14]%asi; \
288	\
289	stwa	%l6, [%sp+0x18]%asi; \
290	stwa	%l7, [%sp+0x1c]%asi; \
291	stwa	%i0, [%sp+0x20]%asi; \
292	stwa	%i1, [%sp+0x24]%asi; \
293	stwa	%i2, [%sp+0x28]%asi; \
294	stwa	%i3, [%sp+0x2c]%asi; \
295	stwa	%i4, [%sp+0x30]%asi; \
296	stwa	%i5, [%sp+0x34]%asi; \
297	\
298	stwa	%i6, [%sp+0x38]%asi; \
299	stwa	%i7, [%sp+0x3c]%asi; \
300	saved; \
301	CLRTT; \
302	retry; \
303	NOTREACHED; \
304	TA32
305
306	/* Spill either 32-bit or 64-bit register window. */
307#define SPILLBOTH(label64,label32,as) \
308	andcc	%sp, 1, %g0; \
309	bnz,pt	%xcc, label64+4;	/* Is it a v9 or v8 stack? */ \
310	 wr	%g0, as, %asi; \
311	ba,pt	%xcc, label32+8; \
312	 srl	%sp, 0, %sp; /* fixup 32-bit pointers */ \
313	NOTREACHED; \
314	TA32
315
316	/* fill a 64-bit register window */
317#define FILL64(label,as) \
318label: \
319	wr	%g0, as, %asi; \
320	ldxa	[%sp+BIAS+0x00]%asi, %l0; \
321	ldxa	[%sp+BIAS+0x08]%asi, %l1; \
322	ldxa	[%sp+BIAS+0x10]%asi, %l2; \
323	ldxa	[%sp+BIAS+0x18]%asi, %l3; \
324	ldxa	[%sp+BIAS+0x20]%asi, %l4; \
325	ldxa	[%sp+BIAS+0x28]%asi, %l5; \
326	ldxa	[%sp+BIAS+0x30]%asi, %l6; \
327	\
328	ldxa	[%sp+BIAS+0x38]%asi, %l7; \
329	ldxa	[%sp+BIAS+0x40]%asi, %i0; \
330	ldxa	[%sp+BIAS+0x48]%asi, %i1; \
331	ldxa	[%sp+BIAS+0x50]%asi, %i2; \
332	ldxa	[%sp+BIAS+0x58]%asi, %i3; \
333	ldxa	[%sp+BIAS+0x60]%asi, %i4; \
334	ldxa	[%sp+BIAS+0x68]%asi, %i5; \
335	ldxa	[%sp+BIAS+0x70]%asi, %i6; \
336	\
337	ldxa	[%sp+BIAS+0x78]%asi, %i7; \
338	restored; \
339	CLRTT; \
340	retry; \
341	NOTREACHED; \
342	TA32
343
344	/* fill a 32-bit register window */
345#define FILL32(label,as) \
346label:	\
347	wr	%g0, as, %asi; \
348	srl	%sp, 0, %sp; /* fixup 32-bit pointers */ \
349	lda	[%sp+0x00]%asi, %l0; \
350	lda	[%sp+0x04]%asi, %l1; \
351	lda	[%sp+0x08]%asi, %l2; \
352	lda	[%sp+0x0c]%asi, %l3; \
353	lda	[%sp+0x10]%asi, %l4; \
354	lda	[%sp+0x14]%asi, %l5; \
355	\
356	lda	[%sp+0x18]%asi, %l6; \
357	lda	[%sp+0x1c]%asi, %l7; \
358	lda	[%sp+0x20]%asi, %i0; \
359	lda	[%sp+0x24]%asi, %i1; \
360	lda	[%sp+0x28]%asi, %i2; \
361	lda	[%sp+0x2c]%asi, %i3; \
362	lda	[%sp+0x30]%asi, %i4; \
363	lda	[%sp+0x34]%asi, %i5; \
364	\
365	lda	[%sp+0x38]%asi, %i6; \
366	lda	[%sp+0x3c]%asi, %i7; \
367	restored; \
368	CLRTT; \
369	retry; \
370	NOTREACHED; \
371	TA32
372
373	/* fill either 32-bit or 64-bit register window. */
374#define FILLBOTH(label64,label32,as) \
375	andcc	%sp, 1, %i0; \
376	bnz	(label64)+4; /* See if it's a v9 stack or v8 */ \
377	 wr	%g0, as, %asi; \
378	ba	(label32)+8; \
379	 srl	%sp, 0, %sp; /* fixup 32-bit pointers */ \
380	NOTREACHED; \
381	TA32
382
	! NOTE(review): "start" is declared global here but the label defined
	! below is kernel_start -- confirm "start" is defined elsewhere or
	! whether this should read kernel_start.
	.globl	start, _C_LABEL(kernel_text)
	_C_LABEL(kernel_text) = kernel_start		! for kvm_mkdb(8)
kernel_start:
	/* Traps from TL=0 -- traps from user mode */
#ifdef __STDC__
#define TABLE(name)	user_ ## name
#else
#define	TABLE(name)	user_/**/name
#endif
	.globl	_C_LABEL(trapbase)
_C_LABEL(trapbase):
	b dostart; nop; TA8	! 000 = reserved -- Use it to boot
	/* We should not get the next 5 traps */
	UTRAP(0x001)		! 001 = POR Reset -- ROM should get this
	UTRAP(0x002)		! 002 = WDR -- ROM should get this
	UTRAP(0x003)		! 003 = XIR -- ROM should get this
	UTRAP(0x004)		! 004 = SIR -- ROM should get this
	UTRAP(0x005)		! 005 = RED state exception
	UTRAP(0x006); UTRAP(0x007)
	VTRAP(T_INST_EXCEPT, textfault)	! 008 = instr. access except
	VTRAP(T_TEXTFAULT, textfault)	! 009 = instr access MMU miss
	VTRAP(T_INST_ERROR, textfault)	! 00a = instr. access err
	UTRAP(0x00b); UTRAP(0x00c); UTRAP(0x00d); UTRAP(0x00e); UTRAP(0x00f)
	TRAP(T_ILLINST)			! 010 = illegal instruction
	TRAP(T_PRIVINST)		! 011 = privileged instruction
	UTRAP(0x012)			! 012 = unimplemented LDD
	UTRAP(0x013)			! 013 = unimplemented STD
	UTRAP(0x014); UTRAP(0x015); UTRAP(0x016); UTRAP(0x017); UTRAP(0x018)
	UTRAP(0x019); UTRAP(0x01a); UTRAP(0x01b); UTRAP(0x01c); UTRAP(0x01d)
	UTRAP(0x01e); UTRAP(0x01f)
	TRAP(T_FPDISABLED)		! 020 = fp instr, but EF bit off in psr
	TRAP(T_FP_IEEE_754)		! 021 = ieee 754 exception
	TRAP(T_FP_OTHER)		! 022 = other fp exception
	TRAP(T_TAGOF)			! 023 = tag overflow
	rdpr %cleanwin, %o7		! 024-027 = clean window trap
	inc %o7				!	This handler is in-lined and cannot fault
#ifdef DEBUG
	set	0xbadcafe, %l0		! DEBUG -- compiler should not rely on zero-ed registers.
#else
	clr	%l0
#endif
	wrpr %g0, %o7, %cleanwin	!       Nucleus (trap&IRQ) code does not need clean windows

	mov %l0,%l1; mov %l0,%l2	!	Clear out %l0-%l7 and %o0-%o7 and inc %cleanwin and done
	mov %l0,%l3; mov %l0,%l4
#if 0
#ifdef DIAGNOSTIC
	!!
	!! Check the sp redzone
	!!
	!! Since we can't spill the current window, we'll just keep
	!! track of the frame pointer.  Problems occur when the routine
	!! allocates and uses stack storage.
	!!
!	rdpr	%wstate, %l5	! User stack?
!	cmp	%l5, WSTATE_KERN
!	bne,pt	%icc, 7f
	 sethi	%hi(CPCB), %l5
	LDPTR	[%l5 + %lo(CPCB)], %l5	! If pcb < fp < pcb+sizeof(pcb)
	inc	PCB_SIZE, %l5		! then we have a stack overflow
	btst	%fp, 1			! 64-bit stack?
	sub	%fp, %l5, %l7
	bnz,a,pt	%icc, 1f
	 inc	BIAS, %l7		! Remove BIAS
1:
	cmp	%l7, PCB_SIZE
	blu	%xcc, cleanwin_overflow
#endif
#endif
	mov %l0, %l5
	mov %l0, %l6; mov %l0, %l7; mov %l0, %o0; mov %l0, %o1

	mov %l0, %o2; mov %l0, %o3; mov %l0, %o4; mov %l0, %o5;
	mov %l0, %o6; mov %l0, %o7
	CLRTT
	retry; nop; NOTREACHED; TA32
	TRAP(T_DIV0)			! 028 = divide by zero
	UTRAP(0x029)			! 029 = internal processor error
	UTRAP(0x02a); UTRAP(0x02b); UTRAP(0x02c); UTRAP(0x02d); UTRAP(0x02e); UTRAP(0x02f)
	VTRAP(T_DATAFAULT, winfault)	! 030 = data fetch fault
	UTRAP(0x031)			! 031 = data MMU miss -- no MMU
	VTRAP(T_DATA_ERROR, winfault)	! 032 = data access error
	VTRAP(T_DATA_PROT, winfault)	! 033 = data protection fault
	TRAP(T_ALIGN)			! 034 = address alignment error -- we could fix it inline...
	TRAP(T_LDDF_ALIGN)		! 035 = LDDF address alignment error -- we could fix it inline...
	TRAP(T_STDF_ALIGN)		! 036 = STDF address alignment error -- we could fix it inline...
	TRAP(T_PRIVACT)			! 037 = privileged action
	UTRAP(0x038); UTRAP(0x039); UTRAP(0x03a); UTRAP(0x03b); UTRAP(0x03c);
	UTRAP(0x03d); UTRAP(0x03e); UTRAP(0x03f);
	VTRAP(T_ASYNC_ERROR, winfault)	! 040 = data fetch fault
	SOFTINT4U(1, IE_L1)		! 041 = level 1 interrupt
	HARDINT4U(2)			! 042 = level 2 interrupt
	HARDINT4U(3)			! 043 = level 3 interrupt
	SOFTINT4U(4, IE_L4)		! 044 = level 4 interrupt
	HARDINT4U(5)			! 045 = level 5 interrupt
	SOFTINT4U(6, IE_L6)		! 046 = level 6 interrupt
	HARDINT4U(7)			! 047 = level 7 interrupt
	HARDINT4U(8)			! 048 = level 8 interrupt
	HARDINT4U(9)			! 049 = level 9 interrupt
	HARDINT4U(10)			! 04a = level 10 interrupt
	HARDINT4U(11)			! 04b = level 11 interrupt
	ZS_INTERRUPT4U			! 04c = level 12 (zs) interrupt
	HARDINT4U(13)			! 04d = level 13 interrupt
	HARDINT4U(14)			! 04e = level 14 interrupt
	HARDINT4U(15)			! 04f = nonmaskable interrupt
	UTRAP(0x050); UTRAP(0x051); UTRAP(0x052); UTRAP(0x053); UTRAP(0x054); UTRAP(0x055)
	UTRAP(0x056); UTRAP(0x057); UTRAP(0x058); UTRAP(0x059); UTRAP(0x05a); UTRAP(0x05b)
	UTRAP(0x05c); UTRAP(0x05d); UTRAP(0x05e); UTRAP(0x05f)
	VTRAP(0x060, interrupt_vector); ! 060 = interrupt vector
	TRAP(T_PA_WATCHPT)		! 061 = physical address data watchpoint
	TRAP(T_VA_WATCHPT)		! 062 = virtual address data watchpoint
	TRAP(T_ECCERR)			! 063 = corrected ECC error
ufast_IMMU_miss:			! 064 = fast instr access MMU miss
					! In-line TSB lookup; on hit, insert the
					! TTE and retry; on miss, punt to instr_miss.
	ldxa	[%g0] ASI_IMMU_8KPTR, %g2 ! Load IMMU 8K TSB pointer
#ifdef NO_TSB
	ba,a	%icc, instr_miss
#endif
	ldxa	[%g0] ASI_IMMU, %g1	! Load IMMU tag target register
	ldda	[%g2] ASI_NUCLEUS_QUAD_LDD, %g4	! Load TSB tag:data into %g4:%g5
	brgez,pn %g5, instr_miss	! Entry invalid?  Punt
	 cmp	%g1, %g4		! Compare TLB tags
	bne,pn %xcc, instr_miss		! Got right tag?
	 nop
	CLRTT
	stxa	%g5, [%g0] ASI_IMMU_DATA_IN ! Enter new mapping
	retry				! Try new mapping
1:
	sir				! unreachable padding after retry
	TA32
ufast_DMMU_miss:			! 068 = fast data access MMU miss
					! Same scheme as ufast_IMMU_miss, for the DMMU.
	ldxa	[%g0] ASI_DMMU_8KPTR, %g2! Load DMMU 8K TSB pointer
#ifdef NO_TSB
	ba,a	%icc, data_miss
#endif
	ldxa	[%g0] ASI_DMMU, %g1	! Load DMMU tag target register
	ldda	[%g2] ASI_NUCLEUS_QUAD_LDD, %g4	! Load TSB tag and data into %g4 and %g5
	brgez,pn %g5, data_miss		! Entry invalid?  Punt
	 cmp	%g1, %g4		! Compare TLB tags
	bnz,pn	%xcc, data_miss		! Got right tag?
	 nop
	CLRTT
#ifdef TRAPSTATS
	sethi	%hi(_C_LABEL(udhit)), %g1
	lduw	[%g1+%lo(_C_LABEL(udhit))], %g2
	inc	%g2
	stw	%g2, [%g1+%lo(_C_LABEL(udhit))]
#endif
	stxa	%g5, [%g0] ASI_DMMU_DATA_IN ! Enter new mapping
	retry				! Try new mapping
1:
	sir				! unreachable padding after retry
	TA32
ufast_DMMU_protection:			! 06c = fast data access MMU protection
#ifdef TRAPSTATS
	sethi	%hi(_C_LABEL(udprot)), %g1
	lduw	[%g1+%lo(_C_LABEL(udprot))], %g2
	inc	%g2
	stw	%g2, [%g1+%lo(_C_LABEL(udprot))]
#endif
#ifdef HWREF
	ba,a,pt	%xcc, dmmu_write_fault	! HWREF: let the handler set the mod bit
#else
	ba,a,pt	%xcc, winfault
#endif
	nop
	TA32
	UTRAP(0x070)			! Implementation dependent traps
	UTRAP(0x071); UTRAP(0x072); UTRAP(0x073); UTRAP(0x074); UTRAP(0x075); UTRAP(0x076)
	UTRAP(0x077); UTRAP(0x078); UTRAP(0x079); UTRAP(0x07a); UTRAP(0x07b); UTRAP(0x07c)
	UTRAP(0x07d); UTRAP(0x07e); UTRAP(0x07f)
/*
 * Spill/fill handler slots 0x080-0x0ff, then the software-trap
 * (syscall) slots 0x100-0x17f and the reserved range 0x180-0x1ff.
 * Each SPILL*/FILL* expansion occupies exactly one TA32 slot.
 */
TABLE(uspill):
	SPILL64(uspill8,ASI_AIUS)	! 0x080 spill_0_normal -- used to save user windows in user mode
	SPILL32(uspill4,ASI_AIUS)	! 0x084 spill_1_normal
	SPILLBOTH(uspill8,uspill4,ASI_AIUS)	 ! 0x088 spill_2_normal
	UTRAP(0x08c); TA32		! 0x08c spill_3_normal
TABLE(kspill):
	SPILL64(kspill8,ASI_N)		! 0x090 spill_4_normal -- used to save supervisor windows
	SPILL32(kspill4,ASI_N)		! 0x094 spill_5_normal
	SPILLBOTH(kspill8,kspill4,ASI_N) ! 0x098 spill_6_normal
	UTRAP(0x09c); TA32		! 0x09c spill_7_normal
TABLE(uspillk):
	SPILL64(uspillk8,ASI_AIUS)	! 0x0a0 spill_0_other -- used to save user windows in supervisor mode
	SPILL32(uspillk4,ASI_AIUS)	! 0x0a4 spill_1_other
	SPILLBOTH(uspillk8,uspillk4,ASI_AIUS) ! 0x0a8 spill_2_other
	UTRAP(0x0ac); TA32		! 0x0ac spill_3_other
	UTRAP(0x0b0); TA32		! 0x0b0 spill_4_other
	UTRAP(0x0b4); TA32		! 0x0b4 spill_5_other
	UTRAP(0x0b8); TA32		! 0x0b8 spill_6_other
	UTRAP(0x0bc); TA32		! 0x0bc spill_7_other
TABLE(ufill):
	FILL64(ufill8,ASI_AIUS)		! 0x0c0 fill_0_normal -- used to fill windows when running user mode
	FILL32(ufill4,ASI_AIUS)		! 0x0c4 fill_1_normal
	FILLBOTH(ufill8,ufill4,ASI_AIUS) ! 0x0c8 fill_2_normal
	UTRAP(0x0cc); TA32		! 0x0cc fill_3_normal
TABLE(kfill):
	FILL64(kfill8,ASI_N)		! 0x0d0 fill_4_normal -- used to fill windows when running supervisor mode
	FILL32(kfill4,ASI_N)		! 0x0d4 fill_5_normal
	FILLBOTH(kfill8,kfill4,ASI_N)	! 0x0d8 fill_6_normal
	UTRAP(0x0dc); TA32		! 0x0dc fill_7_normal
TABLE(ufillk):
	FILL64(ufillk8,ASI_AIUS)	! 0x0e0 fill_0_other
	FILL32(ufillk4,ASI_AIUS)	! 0x0e4 fill_1_other
	FILLBOTH(ufillk8,ufillk4,ASI_AIUS) ! 0x0e8 fill_2_other
	UTRAP(0x0ec); TA32		! 0x0ec fill_3_other
	UTRAP(0x0f0); TA32		! 0x0f0 fill_4_other
	UTRAP(0x0f4); TA32		! 0x0f4 fill_5_other
	UTRAP(0x0f8); TA32		! 0x0f8 fill_6_other
	UTRAP(0x0fc); TA32		! 0x0fc fill_7_other
TABLE(syscall):
	SYSCALL				! 0x100 = sun syscall
	BPT				! 0x101 = pseudo breakpoint instruction
	STRAP(0x102); STRAP(0x103); STRAP(0x104); STRAP(0x105); STRAP(0x106); STRAP(0x107)
	SYSCALL				! 0x108 = svr4 syscall
	SYSCALL				! 0x109 = bsd syscall
	BPT_KGDB_EXEC			! 0x10a = enter kernel gdb on kernel startup
	STRAP(0x10b); STRAP(0x10c); STRAP(0x10d); STRAP(0x10e); STRAP(0x10f);
	STRAP(0x110); STRAP(0x111); STRAP(0x112); STRAP(0x113); STRAP(0x114); STRAP(0x115); STRAP(0x116); STRAP(0x117)
	STRAP(0x118); STRAP(0x119); STRAP(0x11a); STRAP(0x11b); STRAP(0x11c); STRAP(0x11d); STRAP(0x11e); STRAP(0x11f)
	STRAP(0x120); STRAP(0x121); STRAP(0x122); STRAP(0x123); STRAP(0x124); STRAP(0x125); STRAP(0x126); STRAP(0x127)
	STRAP(0x128); STRAP(0x129); STRAP(0x12a); STRAP(0x12b); STRAP(0x12c); STRAP(0x12d); STRAP(0x12e); STRAP(0x12f)
	STRAP(0x130); STRAP(0x131); STRAP(0x132); STRAP(0x133); STRAP(0x134); STRAP(0x135); STRAP(0x136); STRAP(0x137)
	STRAP(0x138); STRAP(0x139); STRAP(0x13a); STRAP(0x13b); STRAP(0x13c); STRAP(0x13d); STRAP(0x13e); STRAP(0x13f)
	SYSCALL				! 0x140 SVID syscall (Solaris 2.7)
	SYSCALL				! 0x141 SPARC International syscall
	SYSCALL				! 0x142	OS Vendor syscall
	SYSCALL				! 0x143 HW OEM syscall
	STRAP(0x144); STRAP(0x145); STRAP(0x146); STRAP(0x147)
	STRAP(0x148); STRAP(0x149); STRAP(0x14a); STRAP(0x14b); STRAP(0x14c); STRAP(0x14d); STRAP(0x14e); STRAP(0x14f)
	STRAP(0x150); STRAP(0x151); STRAP(0x152); STRAP(0x153); STRAP(0x154); STRAP(0x155); STRAP(0x156); STRAP(0x157)
	STRAP(0x158); STRAP(0x159); STRAP(0x15a); STRAP(0x15b); STRAP(0x15c); STRAP(0x15d); STRAP(0x15e); STRAP(0x15f)
	STRAP(0x160); STRAP(0x161); STRAP(0x162); STRAP(0x163); STRAP(0x164); STRAP(0x165); STRAP(0x166); STRAP(0x167)
	STRAP(0x168); STRAP(0x169); STRAP(0x16a); STRAP(0x16b); STRAP(0x16c); STRAP(0x16d); STRAP(0x16e); STRAP(0x16f)
	STRAP(0x170); STRAP(0x171); STRAP(0x172); STRAP(0x173); STRAP(0x174); STRAP(0x175); STRAP(0x176); STRAP(0x177)
	STRAP(0x178); STRAP(0x179); STRAP(0x17a); STRAP(0x17b); STRAP(0x17c); STRAP(0x17d); STRAP(0x17e); STRAP(0x17f)
	! Traps beyond 0x17f are reserved
	UTRAP(0x180); UTRAP(0x181); UTRAP(0x182); UTRAP(0x183); UTRAP(0x184); UTRAP(0x185); UTRAP(0x186); UTRAP(0x187)
	UTRAP(0x188); UTRAP(0x189); UTRAP(0x18a); UTRAP(0x18b); UTRAP(0x18c); UTRAP(0x18d); UTRAP(0x18e); UTRAP(0x18f)
	UTRAP(0x190); UTRAP(0x191); UTRAP(0x192); UTRAP(0x193); UTRAP(0x194); UTRAP(0x195); UTRAP(0x196); UTRAP(0x197)
	UTRAP(0x198); UTRAP(0x199); UTRAP(0x19a); UTRAP(0x19b); UTRAP(0x19c); UTRAP(0x19d); UTRAP(0x19e); UTRAP(0x19f)
	UTRAP(0x1a0); UTRAP(0x1a1); UTRAP(0x1a2); UTRAP(0x1a3); UTRAP(0x1a4); UTRAP(0x1a5); UTRAP(0x1a6); UTRAP(0x1a7)
	UTRAP(0x1a8); UTRAP(0x1a9); UTRAP(0x1aa); UTRAP(0x1ab); UTRAP(0x1ac); UTRAP(0x1ad); UTRAP(0x1ae); UTRAP(0x1af)
	UTRAP(0x1b0); UTRAP(0x1b1); UTRAP(0x1b2); UTRAP(0x1b3); UTRAP(0x1b4); UTRAP(0x1b5); UTRAP(0x1b6); UTRAP(0x1b7)
	UTRAP(0x1b8); UTRAP(0x1b9); UTRAP(0x1ba); UTRAP(0x1bb); UTRAP(0x1bc); UTRAP(0x1bd); UTRAP(0x1be); UTRAP(0x1bf)
	UTRAP(0x1c0); UTRAP(0x1c1); UTRAP(0x1c2); UTRAP(0x1c3); UTRAP(0x1c4); UTRAP(0x1c5); UTRAP(0x1c6); UTRAP(0x1c7)
	UTRAP(0x1c8); UTRAP(0x1c9); UTRAP(0x1ca); UTRAP(0x1cb); UTRAP(0x1cc); UTRAP(0x1cd); UTRAP(0x1ce); UTRAP(0x1cf)
	UTRAP(0x1d0); UTRAP(0x1d1); UTRAP(0x1d2); UTRAP(0x1d3); UTRAP(0x1d4); UTRAP(0x1d5); UTRAP(0x1d6); UTRAP(0x1d7)
	UTRAP(0x1d8); UTRAP(0x1d9); UTRAP(0x1da); UTRAP(0x1db); UTRAP(0x1dc); UTRAP(0x1dd); UTRAP(0x1de); UTRAP(0x1df)
	UTRAP(0x1e0); UTRAP(0x1e1); UTRAP(0x1e2); UTRAP(0x1e3); UTRAP(0x1e4); UTRAP(0x1e5); UTRAP(0x1e6); UTRAP(0x1e7)
	UTRAP(0x1e8); UTRAP(0x1e9); UTRAP(0x1ea); UTRAP(0x1eb); UTRAP(0x1ec); UTRAP(0x1ed); UTRAP(0x1ee); UTRAP(0x1ef)
	UTRAP(0x1f0); UTRAP(0x1f1); UTRAP(0x1f2); UTRAP(0x1f3); UTRAP(0x1f4); UTRAP(0x1f5); UTRAP(0x1f6); UTRAP(0x1f7)
	UTRAP(0x1f8); UTRAP(0x1f9); UTRAP(0x1fa); UTRAP(0x1fb); UTRAP(0x1fc); UTRAP(0x1fd); UTRAP(0x1fe); UTRAP(0x1ff)
634
	/* Traps from TL>0 -- traps from supervisor mode */
#undef TABLE
#ifdef __STDC__
#define	TABLE(name)	nucleus_ ## name
#else
#define	TABLE(name)	nucleus_/**/name
#endif
trapbase_priv:
	UTRAP(0x000)			! 000 = reserved -- Use it to boot
	/* We should not get the next 5 traps */
	UTRAP(0x001)			! 001 = POR Reset -- ROM should get this
	UTRAP(0x002)			! 002 = WDR Watchdog -- ROM should get this
	UTRAP(0x003)			! 003 = XIR -- ROM should get this
	UTRAP(0x004)			! 004 = SIR -- ROM should get this
	UTRAP(0x005)			! 005 = RED state exception
	UTRAP(0x006); UTRAP(0x007)
ktextfault:
	VTRAP(T_INST_EXCEPT, textfault)	! 008 = instr. access except
	VTRAP(T_TEXTFAULT, textfault)	! 009 = instr access MMU miss -- no MMU
	VTRAP(T_INST_ERROR, textfault)	! 00a = instr. access err
	UTRAP(0x00b); UTRAP(0x00c); UTRAP(0x00d); UTRAP(0x00e); UTRAP(0x00f)
	TRAP(T_ILLINST)			! 010 = illegal instruction
	TRAP(T_PRIVINST)		! 011 = privileged instruction
	UTRAP(0x012)			! 012 = unimplemented LDD
	UTRAP(0x013)			! 013 = unimplemented STD
	UTRAP(0x014); UTRAP(0x015); UTRAP(0x016); UTRAP(0x017); UTRAP(0x018)
	UTRAP(0x019); UTRAP(0x01a); UTRAP(0x01b); UTRAP(0x01c); UTRAP(0x01d)
	UTRAP(0x01e); UTRAP(0x01f)
	TRAP(T_FPDISABLED)		! 020 = fp instr, but EF bit off in psr
	TRAP(T_FP_IEEE_754)		! 021 = ieee 754 exception
	TRAP(T_FP_OTHER)		! 022 = other fp exception
	TRAP(T_TAGOF)			! 023 = tag overflow
	clr	%l0
#ifdef DEBUG
	set	0xbadbeef, %l0		! DEBUG -- compiler should not rely on zero-ed registers.
#endif
	mov %l0, %l1; mov %l0, %l2	! 024-027 = clean window trap
	rdpr %cleanwin, %o7		!	This handler is in-lined and cannot fault
	inc %o7; mov %l0, %l3		!       Nucleus (trap&IRQ) code does not need clean windows
	wrpr %g0, %o7, %cleanwin	!	Clear out %l0-%l7 and %o0-%o7 and inc %cleanwin and done
#ifdef NOT_DEBUG
	!!
	!! Check the sp redzone
	!!
	rdpr	%wstate, t1
	cmp	t1, WSTATE_KERN
	bne,pt	icc, 7f
	 sethi	%hi(_C_LABEL(redzone)), t1
	ldx	[t1 + %lo(_C_LABEL(redzone))], t2
	cmp	%sp, t2			! if sp >= t2, not in red zone
	blu	panic_red		! and can continue normally
7:
#endif
	mov %l0, %l4; mov %l0, %l5; mov %l0, %l6; mov %l0, %l7
	mov %l0, %o0; mov %l0, %o1; mov %l0, %o2; mov %l0, %o3

	mov %l0, %o4; mov %l0, %o5; mov %l0, %o6; mov %l0, %o7
	CLRTT
	retry; nop; TA32
	TRAP(T_DIV0)			! 028 = divide by zero
	UTRAP(0x029)			! 029 = internal processor error
	UTRAP(0x02a); UTRAP(0x02b); UTRAP(0x02c); UTRAP(0x02d); UTRAP(0x02e); UTRAP(0x02f)
kdatafault:
	VTRAP(T_DATAFAULT, winfault)	! 030 = data fetch fault
	UTRAP(0x031)			! 031 = data MMU miss -- no MMU
	VTRAP(T_DATA_ERROR, winfault)	! 032 = data access error
	VTRAP(T_DATA_PROT, winfault)	! 033 = data protection fault
	VTRAP(T_ALIGN, checkalign)	! 034 = address alignment error -- we could fix it inline...
	TRAP(T_LDDF_ALIGN)		! 035 = LDDF address alignment error -- we could fix it inline...
	TRAP(T_STDF_ALIGN)		! 036 = STDF address alignment error -- we could fix it inline...
	TRAP(T_PRIVACT)			! 037 = privileged action
	UTRAP(0x038); UTRAP(0x039); UTRAP(0x03a); UTRAP(0x03b); UTRAP(0x03c);
	UTRAP(0x03d); UTRAP(0x03e); UTRAP(0x03f);
	VTRAP(T_ASYNC_ERROR, winfault)	! 040 = data fetch fault
	SOFTINT4U(1, IE_L1)		! 041 = level 1 interrupt
	HARDINT4U(2)			! 042 = level 2 interrupt
	HARDINT4U(3)			! 043 = level 3 interrupt
	SOFTINT4U(4, IE_L4)		! 044 = level 4 interrupt
	HARDINT4U(5)			! 045 = level 5 interrupt
	SOFTINT4U(6, IE_L6)		! 046 = level 6 interrupt
	HARDINT4U(7)			! 047 = level 7 interrupt
	HARDINT4U(8)			! 048 = level 8 interrupt
	HARDINT4U(9)			! 049 = level 9 interrupt
	HARDINT4U(10)			! 04a = level 10 interrupt
	HARDINT4U(11)			! 04b = level 11 interrupt
	ZS_INTERRUPT4U			! 04c = level 12 (zs) interrupt
	HARDINT4U(13)			! 04d = level 13 interrupt
	HARDINT4U(14)			! 04e = level 14 interrupt
	HARDINT4U(15)			! 04f = nonmaskable interrupt
	UTRAP(0x050); UTRAP(0x051); UTRAP(0x052); UTRAP(0x053); UTRAP(0x054); UTRAP(0x055)
	UTRAP(0x056); UTRAP(0x057); UTRAP(0x058); UTRAP(0x059); UTRAP(0x05a); UTRAP(0x05b)
	UTRAP(0x05c); UTRAP(0x05d); UTRAP(0x05e); UTRAP(0x05f)
	VTRAP(0x060, interrupt_vector); ! 060 = interrupt vector
	TRAP(T_PA_WATCHPT)		! 061 = physical address data watchpoint
	TRAP(T_VA_WATCHPT)		! 062 = virtual address data watchpoint
	TRAP(T_ECCERR)			! 063 = corrected ECC error
kfast_IMMU_miss:			! 064 = fast instr access MMU miss
					! In-line TSB lookup (kernel/nucleus flavor);
					! on hit, insert TTE and retry, else punt.
	ldxa	[%g0] ASI_IMMU_8KPTR, %g2 ! Load IMMU 8K TSB pointer
#ifdef NO_TSB
	ba,a	%icc, instr_miss
#endif
	ldxa	[%g0] ASI_IMMU, %g1	! Load IMMU tag target register
	ldda	[%g2] ASI_NUCLEUS_QUAD_LDD, %g4	! Load TSB tag:data into %g4:%g5
	brgez,pn %g5, instr_miss	! Entry invalid?  Punt
	 cmp	%g1, %g4		! Compare TLB tags
	bne,pn %xcc, instr_miss		! Got right tag?
	 nop
	CLRTT
	stxa	%g5, [%g0] ASI_IMMU_DATA_IN ! Enter new mapping
	retry				! Try new mapping
1:
	sir				! unreachable padding after retry
	TA32
kfast_DMMU_miss:			! 068 = fast data access MMU miss
					! Same scheme as kfast_IMMU_miss, for the DMMU.
	ldxa	[%g0] ASI_DMMU_8KPTR, %g2! Load DMMU 8K TSB pointer
#ifdef NO_TSB
	ba,a	%icc, data_miss
#endif
	ldxa	[%g0] ASI_DMMU, %g1	! Load DMMU tag target register
	ldda	[%g2] ASI_NUCLEUS_QUAD_LDD, %g4	! Load TSB tag and data into %g4 and %g5
	brgez,pn %g5, data_miss		! Entry invalid?  Punt
	 cmp	%g1, %g4		! Compare TLB tags
	bnz,pn	%xcc, data_miss		! Got right tag?
	 nop
	CLRTT
#ifdef TRAPSTATS
	sethi	%hi(_C_LABEL(kdhit)), %g1
	lduw	[%g1+%lo(_C_LABEL(kdhit))], %g2
	inc	%g2
	stw	%g2, [%g1+%lo(_C_LABEL(kdhit))]
#endif
	stxa	%g5, [%g0] ASI_DMMU_DATA_IN ! Enter new mapping
	retry				! Try new mapping
1:
	sir				! unreachable padding after retry
	TA32
kfast_DMMU_protection:			! 06c = fast data access MMU protection
#ifdef TRAPSTATS
	sethi	%hi(_C_LABEL(kdprot)), %g1
	lduw	[%g1+%lo(_C_LABEL(kdprot))], %g2
	inc	%g2
	stw	%g2, [%g1+%lo(_C_LABEL(kdprot))]
#endif
#ifdef HWREF
	ba,a,pt	%xcc, dmmu_write_fault	! HWREF: let the handler set the mod bit
#else
	ba,a,pt	%xcc, winfault
#endif
	nop
	TA32
	UTRAP(0x070)			! Implementation dependent traps
	UTRAP(0x071); UTRAP(0x072); UTRAP(0x073); UTRAP(0x074); UTRAP(0x075); UTRAP(0x076)
	UTRAP(0x077); UTRAP(0x078); UTRAP(0x079); UTRAP(0x07a); UTRAP(0x07b); UTRAP(0x07c)
	UTRAP(0x07d); UTRAP(0x07e); UTRAP(0x07f)
/*
 * Nucleus (TL>0) spill/fill slots 0x080-0x0ff.  Numeric local labels
 * (1:/2:) are used here since these expansions are only referenced by
 * the adjacent SPILLBOTH/FILLBOTH entries via 1b/2b.
 */
TABLE(uspill):
	SPILL64(1,ASI_AIUS)		! 0x080 spill_0_normal -- used to save user windows
	SPILL32(2,ASI_AIUS)		! 0x084 spill_1_normal
	SPILLBOTH(1b,2b,ASI_AIUS)	! 0x088 spill_2_normal
	UTRAP(0x08c); TA32		! 0x08c spill_3_normal
TABLE(kspill):
	SPILL64(1,ASI_N)		! 0x090 spill_4_normal -- used to save supervisor windows
	SPILL32(2,ASI_N)		! 0x094 spill_5_normal
	SPILLBOTH(1b,2b,ASI_N)		! 0x098 spill_6_normal
	UTRAP(0x09c); TA32		! 0x09c spill_7_normal
TABLE(uspillk):
	SPILL64(1,ASI_AIUS)		! 0x0a0 spill_0_other -- used to save user windows in nucleus mode
	SPILL32(2,ASI_AIUS)		! 0x0a4 spill_1_other
	SPILLBOTH(1b,2b,ASI_AIUS)	! 0x0a8 spill_2_other
	UTRAP(0x0ac); TA32		! 0x0ac spill_3_other
	UTRAP(0x0b0); TA32		! 0x0b0 spill_4_other
	UTRAP(0x0b4); TA32		! 0x0b4 spill_5_other
	UTRAP(0x0b8); TA32		! 0x0b8 spill_6_other
	UTRAP(0x0bc); TA32		! 0x0bc spill_7_other
TABLE(ufill):
	FILL64(nufill8,ASI_AIUS)	! 0x0c0 fill_0_normal -- used to fill windows when running nucleus mode from user
	FILL32(nufill4,ASI_AIUS)	! 0x0c4 fill_1_normal
	FILLBOTH(nufill8,nufill4,ASI_AIUS) ! 0x0c8 fill_2_normal
	UTRAP(0x0cc); TA32		! 0x0cc fill_3_normal
TABLE(sfill):
	FILL64(sfill8,ASI_N)		! 0x0d0 fill_4_normal -- used to fill windows when running nucleus mode from supervisor
	FILL32(sfill4,ASI_N)		! 0x0d4 fill_5_normal
	FILLBOTH(sfill8,sfill4,ASI_N)	! 0x0d8 fill_6_normal
	UTRAP(0x0dc); TA32		! 0x0dc fill_7_normal
TABLE(kfill):
	FILL64(nkfill8,ASI_AIUS)	! 0x0e0 fill_0_other -- used to fill user windows when running nucleus mode -- will we ever use this?
	FILL32(nkfill4,ASI_AIUS)	! 0x0e4 fill_1_other
	FILLBOTH(nkfill8,nkfill4,ASI_AIUS)! 0x0e8 fill_2_other
	UTRAP(0x0ec); TA32		! 0x0ec fill_3_other
	UTRAP(0x0f0); TA32		! 0x0f0 fill_4_other
	UTRAP(0x0f4); TA32		! 0x0f4 fill_5_other
	UTRAP(0x0f8); TA32		! 0x0f8 fill_6_other
	UTRAP(0x0fc); TA32		! 0x0fc fill_7_other
827TABLE(syscall):
828	SYSCALL				! 0x100 = sun syscall
829	BPT				! 0x101 = pseudo breakpoint instruction
830	STRAP(0x102); STRAP(0x103); STRAP(0x104); STRAP(0x105); STRAP(0x106); STRAP(0x107)
831	SYSCALL				! 0x108 = svr4 syscall
832	SYSCALL				! 0x109 = bsd syscall
833	BPT_KGDB_EXEC			! 0x10a = enter kernel gdb on kernel startup
834	STRAP(0x10b); STRAP(0x10c); STRAP(0x10d); STRAP(0x10e); STRAP(0x10f);
835	STRAP(0x110); STRAP(0x111); STRAP(0x112); STRAP(0x113); STRAP(0x114); STRAP(0x115); STRAP(0x116); STRAP(0x117)
836	STRAP(0x118); STRAP(0x119); STRAP(0x11a); STRAP(0x11b); STRAP(0x11c); STRAP(0x11d); STRAP(0x11e); STRAP(0x11f)
837	STRAP(0x120); STRAP(0x121); STRAP(0x122); STRAP(0x123); STRAP(0x124); STRAP(0x125); STRAP(0x126); STRAP(0x127)
838	STRAP(0x128); STRAP(0x129); STRAP(0x12a); STRAP(0x12b); STRAP(0x12c); STRAP(0x12d); STRAP(0x12e); STRAP(0x12f)
839	STRAP(0x130); STRAP(0x131); STRAP(0x132); STRAP(0x133); STRAP(0x134); STRAP(0x135); STRAP(0x136); STRAP(0x137)
840	STRAP(0x138); STRAP(0x139); STRAP(0x13a); STRAP(0x13b); STRAP(0x13c); STRAP(0x13d); STRAP(0x13e); STRAP(0x13f)
841	STRAP(0x140); STRAP(0x141); STRAP(0x142); STRAP(0x143); STRAP(0x144); STRAP(0x145); STRAP(0x146); STRAP(0x147)
842	STRAP(0x148); STRAP(0x149); STRAP(0x14a); STRAP(0x14b); STRAP(0x14c); STRAP(0x14d); STRAP(0x14e); STRAP(0x14f)
843	STRAP(0x150); STRAP(0x151); STRAP(0x152); STRAP(0x153); STRAP(0x154); STRAP(0x155); STRAP(0x156); STRAP(0x157)
844	STRAP(0x158); STRAP(0x159); STRAP(0x15a); STRAP(0x15b); STRAP(0x15c); STRAP(0x15d); STRAP(0x15e); STRAP(0x15f)
845	STRAP(0x160); STRAP(0x161); STRAP(0x162); STRAP(0x163); STRAP(0x164); STRAP(0x165); STRAP(0x166); STRAP(0x167)
846	STRAP(0x168); STRAP(0x169); STRAP(0x16a); STRAP(0x16b); STRAP(0x16c); STRAP(0x16d); STRAP(0x16e); STRAP(0x16f)
847	STRAP(0x170); STRAP(0x171); STRAP(0x172); STRAP(0x173); STRAP(0x174); STRAP(0x175); STRAP(0x176); STRAP(0x177)
848	STRAP(0x178); STRAP(0x179); STRAP(0x17a); STRAP(0x17b); STRAP(0x17c); STRAP(0x17d); STRAP(0x17e); STRAP(0x17f)
849	! Traps beyond 0x17f are reserved
850	UTRAP(0x180); UTRAP(0x181); UTRAP(0x182); UTRAP(0x183); UTRAP(0x184); UTRAP(0x185); UTRAP(0x186); UTRAP(0x187)
851	UTRAP(0x188); UTRAP(0x189); UTRAP(0x18a); UTRAP(0x18b); UTRAP(0x18c); UTRAP(0x18d); UTRAP(0x18e); UTRAP(0x18f)
852	UTRAP(0x190); UTRAP(0x191); UTRAP(0x192); UTRAP(0x193); UTRAP(0x194); UTRAP(0x195); UTRAP(0x196); UTRAP(0x197)
853	UTRAP(0x198); UTRAP(0x199); UTRAP(0x19a); UTRAP(0x19b); UTRAP(0x19c); UTRAP(0x19d); UTRAP(0x19e); UTRAP(0x19f)
854	UTRAP(0x1a0); UTRAP(0x1a1); UTRAP(0x1a2); UTRAP(0x1a3); UTRAP(0x1a4); UTRAP(0x1a5); UTRAP(0x1a6); UTRAP(0x1a7)
855	UTRAP(0x1a8); UTRAP(0x1a9); UTRAP(0x1aa); UTRAP(0x1ab); UTRAP(0x1ac); UTRAP(0x1ad); UTRAP(0x1ae); UTRAP(0x1af)
856	UTRAP(0x1b0); UTRAP(0x1b1); UTRAP(0x1b2); UTRAP(0x1b3); UTRAP(0x1b4); UTRAP(0x1b5); UTRAP(0x1b6); UTRAP(0x1b7)
857	UTRAP(0x1b8); UTRAP(0x1b9); UTRAP(0x1ba); UTRAP(0x1bb); UTRAP(0x1bc); UTRAP(0x1bd); UTRAP(0x1be); UTRAP(0x1bf)
858	UTRAP(0x1c0); UTRAP(0x1c1); UTRAP(0x1c2); UTRAP(0x1c3); UTRAP(0x1c4); UTRAP(0x1c5); UTRAP(0x1c6); UTRAP(0x1c7)
859	UTRAP(0x1c8); UTRAP(0x1c9); UTRAP(0x1ca); UTRAP(0x1cb); UTRAP(0x1cc); UTRAP(0x1cd); UTRAP(0x1ce); UTRAP(0x1cf)
860	UTRAP(0x1d0); UTRAP(0x1d1); UTRAP(0x1d2); UTRAP(0x1d3); UTRAP(0x1d4); UTRAP(0x1d5); UTRAP(0x1d6); UTRAP(0x1d7)
861	UTRAP(0x1d8); UTRAP(0x1d9); UTRAP(0x1da); UTRAP(0x1db); UTRAP(0x1dc); UTRAP(0x1dd); UTRAP(0x1de); UTRAP(0x1df)
862	UTRAP(0x1e0); UTRAP(0x1e1); UTRAP(0x1e2); UTRAP(0x1e3); UTRAP(0x1e4); UTRAP(0x1e5); UTRAP(0x1e6); UTRAP(0x1e7)
863	UTRAP(0x1e8); UTRAP(0x1e9); UTRAP(0x1ea); UTRAP(0x1eb); UTRAP(0x1ec); UTRAP(0x1ed); UTRAP(0x1ee); UTRAP(0x1ef)
864	UTRAP(0x1f0); UTRAP(0x1f1); UTRAP(0x1f2); UTRAP(0x1f3); UTRAP(0x1f4); UTRAP(0x1f5); UTRAP(0x1f6); UTRAP(0x1f7)
865	UTRAP(0x1f8); UTRAP(0x1f9); UTRAP(0x1fa); UTRAP(0x1fb); UTRAP(0x1fc); UTRAP(0x1fd); UTRAP(0x1fe); UTRAP(0x1ff)
866
#if 0
/*
 * If the cleanwin trap handler detects an overflow we come here.
 * We need to fix up the window registers, switch to the interrupt
 * stack, and then trap to the debugger.
 *
 * NOTE(review): dead code (#if 0) -- kept for reference only; the
 * panic message is emitted via the 1: label in .data below.
 */
cleanwin_overflow:
	!! We've already incremented %cleanwin
	!! So restore %cwp
	rdpr	%cwp, %l0
	dec	%l0
	wrpr	%l0, %g0, %cwp
	! Move onto the interrupt stack before calling into the debugger/panic
	set	EINTSTACK-STKB-CC64FSZ, %l0
	save	%l0, 0, %sp

	ta	1		! Enter debugger
	sethi	%hi(1f), %o0
	call	_C_LABEL(panic)
	 or	%o0, %lo(1f), %o0
	restore
	retry
	.data
1:
	.asciz	"Kernel stack overflow!"
	_ALIGN
	.text
#endif
894
#ifdef NOTDEF_DEBUG
/*
 * A hardware red zone is impossible.  We simulate one in software by
 * keeping a `red zone' pointer; if %sp becomes less than this, we panic.
 * This is expensive and is only enabled when debugging.
 *
 * NOTE(review): this whole region is compiled out (NOTDEF_DEBUG) and
 * contains placeholders: the `redzone' initializer references the
 * undefined symbol XXX, and panic_red below uses the macro parameter
 * names t1/t2 as if they were registers.  It will not assemble as-is.
 */
#define	REDSIZE	(PCB_SIZE)	/* Mark used portion of pcb structure out of bounds */
#define	REDSTACK 2048		/* size of `panic: stack overflow' region */
	.data
	_ALIGN
redzone:
	.xword	_C_LABEL(XXX) + REDSIZE
redstack:
	.space	REDSTACK
eredstack:
Lpanic_red:
	.asciz	"kernel stack overflow"
	_ALIGN
	.text

	/* set stack pointer redzone to base+minstack; alters base */
#define	SET_SP_REDZONE(base, tmp) \
	add	base, REDSIZE, base; \
	sethi	%hi(_C_LABEL(redzone)), tmp; \
	stx	base, [tmp + %lo(_C_LABEL(redzone))]

	/* variant with a constant */
#define	SET_SP_REDZONE_CONST(const, tmp1, tmp2) \
	set	(const) + REDSIZE, tmp1; \
	sethi	%hi(_C_LABEL(redzone)), tmp2; \
	stx	tmp1, [tmp2 + %lo(_C_LABEL(redzone))]

	/*
	 * check stack pointer against redzone (uses two temps);
	 * falls into panic_red below with t1 = %hi(redzone)
	 */
#define	CHECK_SP_REDZONE(t1, t2) \
	sethi	KERNBASE, t1;	\
	cmp	%sp, t1;	\
	blu,pt	%xcc, 7f;	\
	 sethi	%hi(_C_LABEL(redzone)), t1; \
	ldx	[t1 + %lo(_C_LABEL(redzone))], t2; \
	cmp	%sp, t2;	/* if sp >= t2, not in red zone */ \
	blu	panic_red; nop;	/* and can continue normally */ \
7:
937
/*
 * Reached from CHECK_SP_REDZONE when %sp has dipped into the red zone:
 * disarm the zone, move to the dedicated panic stack, set panicstr so
 * panic() won't lower the ipl, and panic with "kernel stack overflow".
 * (Still dead code under NOTDEF_DEBUG; t1/t2 are the CHECK_SP_REDZONE
 * macro arguments and only make sense textually in that context.)
 */
panic_red:
	/* move to panic stack */
	stx	%g0, [t1 + %lo(_C_LABEL(redzone))];	/* clear redzone so we can't recurse */
	set	eredstack - BIAS, %sp;
	/* prevent panic() from lowering ipl */
	sethi	%hi(_C_LABEL(panicstr)), t1;	/* was sethi into t2, then clobbered by the set below; the st needs %hi in t1 */
	set	Lpanic_red, t2;
	st	t2, [t1 + %lo(_C_LABEL(panicstr))];	/* NOTE(review): 32-bit store into a pointer -- _LP64 wants stx; confirm */
	wrpr	%g0, 15, %pil		/* splhigh(); was "g0" (missing %) */
	save	%sp, -CC64FSZ, %sp;	/* preserve current window; was misspelled CCF64SZ */
	sethi	%hi(Lpanic_red), %o0;
	call	_C_LABEL(panic);
	 or %o0, %lo(Lpanic_red), %o0;
951
952
#else

	/* Redzone checking disabled: the macros compile away to nothing. */
#define	SET_SP_REDZONE(base, tmp)
#define	SET_SP_REDZONE_CONST(const, t1, t2)
#define	CHECK_SP_REDZONE(t1, t2)
#endif
959
/*
 * Trap trace ring buffer.  trap_trace_dis starts nonzero (tracing
 * disabled); DDB flips it on.  trap_trace_ptr is the current write
 * position; the buffer proper runs from trap_trace to trap_trace_end,
 * with a small overrun margin after the end marker.
 */
#define TRACESIZ	0x01000
	.globl	_C_LABEL(trap_trace)
	.globl	_C_LABEL(trap_trace_ptr)
	.globl	_C_LABEL(trap_trace_end)
	.globl	_C_LABEL(trap_trace_dis)
	.data
_C_LABEL(trap_trace_dis):
	.word	1, 1		! Starts disabled.  DDB turns it on.
_C_LABEL(trap_trace_ptr):
	.word	0, 0, 0, 0
_C_LABEL(trap_trace):
	.space	TRACESIZ
_C_LABEL(trap_trace_end):
	.space	0x20		! safety margin
974
975
976/*
977 * v9 machines do not have a trap window.
978 *
979 * When we take a trap the trap state is pushed on to the stack of trap
980 * registers, interrupts are disabled, then we switch to an alternate set
981 * of global registers.
982 *
983 * The trap handling code needs to allocate a trap frame on the kernel, or
984 * for interrupts, the interrupt stack, save the out registers to the trap
985 * frame, then switch to the normal globals and save them to the trap frame
986 * too.
987 *
988 * XXX it would be good to save the interrupt stack frame to the kernel
 * stack so we wouldn't have to copy it later if we needed to handle an AST.
990 *
991 * Since kernel stacks are all on one page and the interrupt stack is entirely
992 * within the locked TLB, we can use physical addressing to save out our
993 * trap frame so we don't trap during the TRAP_SETUP() operation.  There
994 * is unfortunately no supportable method for issuing a non-trapping save.
995 *
996 * However, if we use physical addresses to save our trapframe, we will need
997 * to clear out the data cache before continuing much further.
998 *
999 * In short, what we need to do is:
1000 *
1001 *	all preliminary processing is done using the alternate globals
1002 *
1003 *	When we allocate our trap windows we must give up our globals because
1004 *	their state may have changed during the save operation
1005 *
1006 *	we need to save our normal globals as soon as we have a stack
1007 *
1008 * Finally, we may now call C code.
1009 *
1010 * This macro will destroy %g5-%g7.  %g0-%g4 remain unchanged.
1011 *
1012 * In order to properly handle nested traps without lossage, alternate
1013 * global %g6 is used as a kernel stack pointer.  It is set to the last
1014 * allocated stack pointer (trapframe) and the old value is stored in
1015 * tf_kstack.  It is restored when returning from a trap.  It is cleared
1016 * on entering user mode.
1017 */
1018
1019 /*
1020  * Other misc. design criteria:
1021  *
1022  * When taking an address fault, fault info is in the sfsr, sfar,
1023  * TLB_TAG_ACCESS registers.  If we take another address fault
1024  * while trying to handle the first fault then that information,
1025  * the only information that tells us what address we trapped on,
1026  * can potentially be lost.  This trap can be caused when allocating
1027  * a register window with which to handle the trap because the save
1028  * may try to store or restore a register window that corresponds
1029  * to part of the stack that is not mapped.  Preventing this trap,
1030  * while possible, is much too complicated to do in a trap handler,
1031  * and then we will need to do just as much work to restore the processor
1032  * window state.
1033  *
1034  * Possible solutions to the problem:
1035  *
1036  * Since we have separate AG, MG, and IG, we could have all traps
1037  * above level-1 preserve AG and use other registers.  This causes
1038  * a problem for the return from trap code which is coded to use
1039  * alternate globals only.
1040  *
1041  * We could store the trapframe and trap address info to the stack
1042  * using physical addresses.  Then we need to read it back using
1043  * physical addressing, or flush the D$.
1044  *
1045  * We could identify certain registers to hold address fault info.
1046  * this means that these registers need to be preserved across all
  * fault handling.  But since we only have 7 usable globals, that
1048  * really puts a cramp in our style.
1049  *
1050  * Finally, there is the issue of returning from kernel mode to user
1051  * mode.  If we need to issue a restore of a user window in kernel
1052  * mode, we need the window control registers in a user mode setup.
1053  * If the trap handlers notice the register windows are in user mode,
1054  * they will allocate a trapframe at the bottom of the kernel stack,
1055  * overwriting the frame we were trying to return to.  This means that
1056  * we must complete the restoration of all registers *before* switching
1057  * to a user-mode window configuration.
1058  *
1059  * Essentially we need to be able to write re-entrant code w/no stack.
1060  */
	/*
	 * Format strings for debug printing of the trap/interrupt setup
	 * paths (presumably consumed by debug variants of TRAP_SETUP /
	 * INTR_SETUP -- the consumers are not visible in this chunk).
	 */
	.data
trap_setup_msg:
	.asciz	"TRAP_SETUP: tt=%x osp=%x nsp=%x tl=%x tpc=%x\n"
	_ALIGN
intr_setup_msg:
	.asciz	"INTR_SETUP: tt=%x osp=%x nsp=%x tl=%x tpc=%x\n"
	_ALIGN
	.text
1069
#ifdef DEBUG
	/* Only save a snapshot of locals and ins in DEBUG kernels */
	/*
	 * Expects %g6 = trapframe base (kernel stack pointer); dumps
	 * %l0-%l7 into tf_l[], %i0-%i7 into tf_i[], and %g1 into
	 * tf_fault.  Non-DEBUG kernels define this away entirely.
	 */
#define	SAVE_LOCALS_INS	\
	/* Save local registers to trap frame */ \
	stx	%l0, [%g6 + CC64FSZ + STKB + TF_L + (0*8)]; \
	stx	%l1, [%g6 + CC64FSZ + STKB + TF_L + (1*8)]; \
	stx	%l2, [%g6 + CC64FSZ + STKB + TF_L + (2*8)]; \
	stx	%l3, [%g6 + CC64FSZ + STKB + TF_L + (3*8)]; \
	stx	%l4, [%g6 + CC64FSZ + STKB + TF_L + (4*8)]; \
	stx	%l5, [%g6 + CC64FSZ + STKB + TF_L + (5*8)]; \
	stx	%l6, [%g6 + CC64FSZ + STKB + TF_L + (6*8)]; \
	stx	%l7, [%g6 + CC64FSZ + STKB + TF_L + (7*8)]; \
\
	/* Save in registers to trap frame */ \
	stx	%i0, [%g6 + CC64FSZ + STKB + TF_I + (0*8)]; \
	stx	%i1, [%g6 + CC64FSZ + STKB + TF_I + (1*8)]; \
	stx	%i2, [%g6 + CC64FSZ + STKB + TF_I + (2*8)]; \
	stx	%i3, [%g6 + CC64FSZ + STKB + TF_I + (3*8)]; \
	stx	%i4, [%g6 + CC64FSZ + STKB + TF_I + (4*8)]; \
	stx	%i5, [%g6 + CC64FSZ + STKB + TF_I + (5*8)]; \
	stx	%i6, [%g6 + CC64FSZ + STKB + TF_I + (6*8)]; \
	stx	%i7, [%g6 + CC64FSZ + STKB + TF_I + (7*8)]; \
\
	stx	%g1, [%g6 + CC64FSZ + STKB + TF_FAULT];
#else
#define	SAVE_LOCALS_INS
#endif
1097
#ifdef _LP64
	/*
	 * Allocate the trapframe (%g6 += %g5, %g5 is negative) and make
	 * sure %g6 carries the V9 stack BIAS: an odd %sp is already
	 * biased, an even one gets -BIAS applied after the allocation.
	 */
#define	FIXUP_TRAP_STACK \
	btst	1, %g6;						/* Fixup 64-bit stack if necessary */ \
	bnz,pt	%icc, 1f; \
	 add	%g6, %g5, %g6;					/* Allocate a stack frame */ \
	inc	-BIAS, %g6; \
1:
#else
	/*
	 * 32-bit variant: truncate %sp to 32 bits first, then allocate;
	 * if the incoming stack was biased (odd), keep the bias via the
	 * conditional move (%icc still reflects the btst).
	 */
#define	FIXUP_TRAP_STACK \
	srl	%g6, 0, %g6;					/* truncate at 32-bits */ \
	btst	1, %g6;						/* Fixup 64-bit stack if necessary */ \
	add	%g6, %g5, %g6;					/* Allocate a stack frame */ \
	add	%g6, BIAS, %g5; \
	movne	%icc, %g5, %g6;
#endif
1113
#ifdef _LP64
/*
 * TRAP_SETUP (_LP64): allocate a trapframe of `stackspace' bytes
 * (negative offset) on the current kernel stack, or at the top of the
 * pcb's kernel stack if we trapped from user mode (%wstate !=
 * WSTATE_KERN), save %i0-%i7 into the frame's out-register slots, and
 * when coming from user mode fix up the window state registers and
 * switch the MMU to the kernel primary context.  Scratches %g5-%g7
 * (see the block comment above).
 */
#define	TRAP_SETUP(stackspace) \
	sethi	%hi(CPCB), %g6; \
	sethi	%hi((stackspace)), %g5; \
	LDPTR	[%g6 + %lo(CPCB)], %g6; \
	sethi	%hi(USPACE), %g7;				/* Always multiple of page size */ \
	or	%g5, %lo((stackspace)), %g5; \
	add	%g6, %g7, %g6; \
	rdpr	%wstate, %g7;					/* Find if we're from user mode */ \
	sra	%g5, 0, %g5;					/* Sign extend the damn thing */ \
	\
	sub	%g7, WSTATE_KERN, %g7;				/* Compare & leave in register */ \
	movrz	%g7, %sp, %g6;					/* Select old (kernel) stack or base of kernel stack */ \
	FIXUP_TRAP_STACK \
	SAVE_LOCALS_INS	\
	save	%g6, 0, %sp;					/* If we fault we should come right back here */ \
	stx	%i0, [%sp + CC64FSZ + BIAS + TF_O + (0*8)];		/* Save out registers to trap frame */ \
	stx	%i1, [%sp + CC64FSZ + BIAS + TF_O + (1*8)]; \
	stx	%i2, [%sp + CC64FSZ + BIAS + TF_O + (2*8)]; \
	stx	%i3, [%sp + CC64FSZ + BIAS + TF_O + (3*8)]; \
	stx	%i4, [%sp + CC64FSZ + BIAS + TF_O + (4*8)]; \
	stx	%i5, [%sp + CC64FSZ + BIAS + TF_O + (5*8)]; \
\
	stx	%i6, [%sp + CC64FSZ + BIAS + TF_O + (6*8)]; \
	brz,pt	%g7, 1f;					/* If we were in kernel mode start saving globals */ \
	 stx	%i7, [%sp + CC64FSZ + BIAS + TF_O + (7*8)]; \
	mov	CTX_PRIMARY, %g7; \
	/* came from user mode -- switch to kernel mode stack */ \
	rdpr	%canrestore, %g5;				/* Fixup register window state registers */ \
	wrpr	%g0, 0, %canrestore; \
	wrpr	%g0, %g5, %otherwin; \
	wrpr	%g0, WSTATE_KERN, %wstate;			/* Enable kernel mode window traps -- now we can trap again */ \
\
	stxa	%g0, [%g7] ASI_DMMU; 				/* Switch MMU to kernel primary context */ \
	sethi	%hi(KERNBASE), %g5; \
	flush	%g5;						/* Some convenient address that won't trap */ \
1:
1151
/*
 * Interrupt setup is almost exactly like trap setup, but we need to
 * go to the interrupt stack if (a) we came from user mode or (b) we
 * came from kernel mode on the kernel stack.
 *
 * We don't guarantee any registers are preserved during this operation.
 * So we can be more efficient.
 *
 * Uses %g1 and %g3-%g7 as scratch; honors a per-cpu saved interrupt
 * stack (CI_EINTSTACK) when one is recorded.
 */
#define	INTR_SETUP(stackspace) \
	rdpr	%wstate, %g7;					/* Find if we're from user mode */ \
	\
	sethi	%hi(EINTSTACK-BIAS), %g6; \
	sethi	%hi(EINTSTACK-INTSTACK), %g4; \
	\
	or	%g6, %lo(EINTSTACK-BIAS), %g6;			/* Base of interrupt stack */ \
	dec	%g4;						/* Make it into a mask */ \
	\
	sub	%g6, %sp, %g1;					/* Offset from interrupt stack */ \
	sethi	%hi((stackspace)), %g5; \
	\
	or	%g5, %lo((stackspace)), %g5; \
\
	andn	%g1, %g4, %g4;					/* Are we out of the interrupt stack range? */ \
	xor	%g7, WSTATE_KERN, %g3;				/* Are we on the user stack ? */ \
	\
	sra	%g5, 0, %g5;					/* Sign extend the damn thing */ \
	orcc	%g3, %g4, %g0;					/* Definitely not off the interrupt stack */ \
	\
	sethi	%hi(CPUINFO_VA + CI_EINTSTACK), %g4; \
	bz,a,pt	%xcc, 1f; \
	 mov	%sp, %g6; \
	\
	ldx	[%g4 + %lo(CPUINFO_VA + CI_EINTSTACK)], %g4; \
	movrnz	%g4, %g4, %g6;					/* Use saved intr stack if exists */ \
	\
1:	add	%g6, %g5, %g5;					/* Allocate a stack frame */ \
	btst	1, %g6; \
	bnz,pt	%icc, 1f; \
\
	 mov	%g5, %g6; \
	\
	add	%g5, -BIAS, %g6; \
	\
1:	SAVE_LOCALS_INS	\
	save	%g6, 0, %sp;					/* If we fault we should come right back here */ \
	stx	%i0, [%sp + CC64FSZ + BIAS + TF_O + (0*8)];		/* Save out registers to trap frame */ \
	stx	%i1, [%sp + CC64FSZ + BIAS + TF_O + (1*8)]; \
	stx	%i2, [%sp + CC64FSZ + BIAS + TF_O + (2*8)]; \
	stx	%i3, [%sp + CC64FSZ + BIAS + TF_O + (3*8)]; \
	stx	%i4, [%sp + CC64FSZ + BIAS + TF_O + (4*8)]; \
\
	stx	%i5, [%sp + CC64FSZ + BIAS + TF_O + (5*8)]; \
	stx	%i6, [%sp + CC64FSZ + BIAS + TF_O + (6*8)]; \
	stx	%i6, [%sp + CC64FSZ + BIAS + TF_G + (0*8)];		/* Save fp in clockframe->cf_fp */ \
	brz,pt	%g3, 1f;					/* If we were in kernel mode start saving globals */ \
	 stx	%i7, [%sp + CC64FSZ + BIAS + TF_O + (7*8)]; \
	/* came from user mode -- switch to kernel mode stack */ \
	 rdpr	%otherwin, %g5;					/* Has this already been done? */ \
	\
	brnz,pn	%g5, 1f;					/* Don't set this twice */ \
	\
	 rdpr	%canrestore, %g5;				/* Fixup register window state registers */ \
\
	wrpr	%g0, 0, %canrestore; \
	\
	wrpr	%g0, %g5, %otherwin; \
	\
	sethi	%hi(KERNBASE), %g5; \
	mov	CTX_PRIMARY, %g7; \
	\
	wrpr	%g0, WSTATE_KERN, %wstate;			/* Enable kernel mode window traps -- now we can trap again */ \
	\
	stxa	%g0, [%g7] ASI_DMMU; 				/* Switch MMU to kernel primary context */ \
	\
	flush	%g5;						/* Some convenient address that won't trap */ \
1:
1228
#else /* _LP64 */

/*
 * 32-bit (non-_LP64) TRAP_SETUP: same structure as the 64-bit variant
 * above, but frame offsets use STKB and the user/kernel test is done
 * with subcc/movz on %icc rather than movrz.
 */
#define	TRAP_SETUP(stackspace) \
	sethi	%hi(CPCB), %g6; \
	sethi	%hi((stackspace)), %g5; \
	LDPTR	[%g6 + %lo(CPCB)], %g6; \
	sethi	%hi(USPACE), %g7; \
	or	%g5, %lo((stackspace)), %g5; \
	add	%g6, %g7, %g6; \
	rdpr	%wstate, %g7;					/* Find if we're from user mode */ \
	\
	sra	%g5, 0, %g5;					/* Sign extend the damn thing */ \
	subcc	%g7, WSTATE_KERN, %g7;				/* Compare & leave in register */ \
	movz	%icc, %sp, %g6;					/* Select old (kernel) stack or base of kernel stack */ \
	FIXUP_TRAP_STACK \
	SAVE_LOCALS_INS \
	save	%g6, 0, %sp;					/* If we fault we should come right back here */ \
	stx	%i0, [%sp + CC64FSZ + STKB + TF_O + (0*8)];		/* Save out registers to trap frame */ \
	stx	%i1, [%sp + CC64FSZ + STKB + TF_O + (1*8)]; \
	stx	%i2, [%sp + CC64FSZ + STKB + TF_O + (2*8)]; \
	stx	%i3, [%sp + CC64FSZ + STKB + TF_O + (3*8)]; \
	stx	%i4, [%sp + CC64FSZ + STKB + TF_O + (4*8)]; \
	stx	%i5, [%sp + CC64FSZ + STKB + TF_O + (5*8)]; \
	\
	stx	%i6, [%sp + CC64FSZ + STKB + TF_O + (6*8)]; \
	brz,pn	%g7, 1f;					/* If we were in kernel mode start saving globals */ \
	 stx	%i7, [%sp + CC64FSZ + STKB + TF_O + (7*8)]; \
	mov	CTX_PRIMARY, %g7; \
	/* came from user mode -- switch to kernel mode stack */ \
	rdpr	%canrestore, %g5;				/* Fixup register window state registers */ \
	wrpr	%g0, 0, %canrestore; \
	wrpr	%g0, %g5, %otherwin; \
	wrpr	%g0, WSTATE_KERN, %wstate;			/* Enable kernel mode window traps -- now we can trap again */ \
	\
	stxa	%g0, [%g7] ASI_DMMU; 				/* Switch MMU to kernel primary context */ \
	sethi	%hi(KERNBASE), %g5; \
	flush	%g5;						/* Some convenient address that won't trap */ \
1:
1267
/*
 * Interrupt setup is almost exactly like trap setup, but we need to
 * go to the interrupt stack if (a) we came from user mode or (b) we
 * came from kernel mode on the kernel stack.
 *
 * We don't guarantee any registers are preserved during this operation.
 *
 * 32-bit variant; includes a DEBUG trap (tnz) that fires if %otherwin
 * is already nonzero when arriving from user mode.
 */
#define	INTR_SETUP(stackspace) \
	sethi	%hi(EINTSTACK), %g1; \
	sethi	%hi((stackspace)), %g5; \
	btst	1, %sp; \
	add	%sp, BIAS, %g6; \
	movz	%icc, %sp, %g6; \
	or	%g1, %lo(EINTSTACK), %g1; \
	srl	%g6, 0, %g6;					/* truncate at 32-bits */ \
	set	(EINTSTACK-INTSTACK), %g7; \
	or	%g5, %lo((stackspace)), %g5; \
	sub	%g1, %g6, %g2;					/* Determine if we need to switch to intr stack or not */ \
	dec	%g7;						/* Make it into a mask */ \
	sethi	%hi(CPUINFO_VA + CI_EINTSTACK), %g3; \
	andncc	%g2, %g7, %g0;					/* XXXXXXXXXX This assumes kernel addresses are unique from user addresses */ \
	LDPTR	[%g3 + %lo(CPUINFO_VA + CI_EINTSTACK)], %g3; \
	rdpr	%wstate, %g7;					/* Find if we're from user mode */ \
	movrnz	%g3, %g3, %g1;					/* Use saved intr stack if exists */ \
	sra	%g5, 0, %g5;					/* Sign extend the damn thing */ \
	movnz	%xcc, %g1, %g6;					/* Stay on interrupt stack? */ \
	cmp	%g7, WSTATE_KERN;				/* User or kernel sp? */ \
	movnz	%icc, %g1, %g6;					/* Stay on interrupt stack? */ \
	add	%g6, %g5, %g6;					/* Allocate a stack frame */ \
	\
	SAVE_LOCALS_INS \
	save	%g6, 0, %sp;					/* If we fault we should come right back here */ \
	stx	%i0, [%sp + CC64FSZ + STKB + TF_O + (0*8)];		/* Save out registers to trap frame */ \
	stx	%i1, [%sp + CC64FSZ + STKB + TF_O + (1*8)]; \
	stx	%i2, [%sp + CC64FSZ + STKB + TF_O + (2*8)]; \
	stx	%i3, [%sp + CC64FSZ + STKB + TF_O + (3*8)]; \
	stx	%i4, [%sp + CC64FSZ + STKB + TF_O + (4*8)]; \
	stx	%i5, [%sp + CC64FSZ + STKB + TF_O + (5*8)]; \
	stx	%i6, [%sp + CC64FSZ + STKB + TF_O + (6*8)]; \
	stx	%i6, [%sp + CC64FSZ + STKB + TF_G + (0*8)];		/* Save fp in clockframe->cf_fp */ \
	rdpr	%wstate, %g7;					/* Find if we're from user mode */ \
	stx	%i7, [%sp + CC64FSZ + STKB + TF_O + (7*8)]; \
	cmp	%g7, WSTATE_KERN;				/* Compare & leave in register */ \
	be,pn	%icc, 1f;					/* If we were in kernel mode start saving globals */ \
	/* came from user mode -- switch to kernel mode stack */ \
	 rdpr	%otherwin, %g5;					/* Has this already been done? */ \
	tst	%g5; tnz %xcc, 1; nop; /* DEBUG -- this should _NEVER_ happen */ \
	brnz,pn	%g5, 1f;					/* Don't set this twice */ \
	 rdpr	%canrestore, %g5;				/* Fixup register window state registers */ \
	wrpr	%g0, 0, %canrestore; \
	mov	CTX_PRIMARY, %g7; \
	wrpr	%g0, %g5, %otherwin; \
	sethi	%hi(KERNBASE), %g5; \
	wrpr	%g0, WSTATE_KERN, %wstate;			/* Enable kernel mode window traps -- now we can trap again */ \
	stxa	%g0, [%g7] ASI_DMMU; 				/* Switch MMU to kernel primary context */ \
	flush	%g5;						/* Some convenient address that won't trap */ \
1:
#endif /* _LP64 */
1326
#ifdef DEBUG

	/*
	 * Look up kpte to test algorithm.
	 * Walks the three-level page table the same way the miss
	 * handlers below do.
	 * In:  %o0 = pmap->pm_segs, %o1 = virtual address to look up
	 * Out: %o0:%o1 = the 64-bit pte split high:low, or 0:0 if the
	 *      address is outside the VA hole or any level is NULL/invalid.
	 */
	.globl	asmptechk
asmptechk:
	mov	%o0, %g4	! pmap->pm_segs
	mov	%o1, %g3	! Addr to lookup -- mind the context

	srax	%g3, HOLESHIFT, %g5			! Check for valid address
	brz,pt	%g5, 0f					! Should be zero or -1
	 inc	%g5					! Make -1 -> 0
	brnz,pn	%g5, 1f					! Error!
0:
	 srlx	%g3, STSHIFT, %g5
	and	%g5, STMASK, %g5
	sll	%g5, 3, %g5
	add	%g4, %g5, %g4
	DLFLUSH(%g4,%g5)
	ldxa	[%g4] ASI_PHYS_CACHED, %g4		! Remember -- UNSIGNED
	DLFLUSH2(%g5)
	brz,pn	%g4, 1f					! NULL entry? check somewhere else

	 srlx	%g3, PDSHIFT, %g5
	and	%g5, PDMASK, %g5
	sll	%g5, 3, %g5
	add	%g4, %g5, %g4
	DLFLUSH(%g4,%g5)
	ldxa	[%g4] ASI_PHYS_CACHED, %g4		! Remember -- UNSIGNED
	DLFLUSH2(%g5)
	brz,pn	%g4, 1f					! NULL entry? check somewhere else

	 srlx	%g3, PTSHIFT, %g5			! Convert to ptab offset
	and	%g5, PTMASK, %g5
	sll	%g5, 3, %g5
	add	%g4, %g5, %g4
	DLFLUSH(%g4,%g5)
	ldxa	[%g4] ASI_PHYS_CACHED, %g6
	DLFLUSH2(%g5)
	brgez,pn %g6, 1f				! Entry invalid?  Punt
	 srlx	%g6, 32, %o0
	retl
	 srl	%g6, 0, %o1
1:
	mov	%g0, %o1
	retl
	 mov	%g0, %o0

	.data
2:
	! NOTE(review): this format string appears unreferenced in this
	! chunk -- likely a leftover from removed debug printing.
	.asciz	"asmptechk: %x %x %x %x:%x\r\n"
	_ALIGN
	.text
#endif
1380
/*
 * This is the MMU protection handler.  It's too big to fit
 * in the trap table so I moved it here.  It's relatively simple.
 * It looks up the page mapping in the page table associated with
 * the trapping context.  It checks to see if the S/W writable bit
 * is set.  If so, it sets the H/W write bit, marks the tte modified,
 * and enters the mapping into the MMU.  Otherwise it does a regular
 * data fault.
 *
 * The pte update is performed with casxa and retried on contention;
 * the old TLB entry for the page is demapped (secondary or nucleus
 * context, picked by the context bits of the fault address) before
 * the new writable mapping is loaded.  Runs on the alternate globals.
 */
	ICACHE_ALIGN
dmmu_write_fault:
	mov	TLB_TAG_ACCESS, %g3
	sethi	%hi(0x1fff), %g6			! 8K context mask
	ldxa	[%g3] ASI_DMMU, %g3			! Get fault addr from Tag Target
	sethi	%hi(CPUINFO_VA+CI_CTXBUSY), %g4
	or	%g6, %lo(0x1fff), %g6
	LDPTR	[%g4 + %lo(CPUINFO_VA+CI_CTXBUSY)], %g4
	srax	%g3, HOLESHIFT, %g5			! Check for valid address
	and	%g3, %g6, %g6				! Isolate context

	inc	%g5					! (0 or -1) -> (1 or 0)
	sllx	%g6, 3, %g6				! Make it into an offset into ctxbusy
	ldx	[%g4+%g6], %g4				! Load up our page table.
	srlx	%g3, STSHIFT, %g6
	cmp	%g5, 1
	bgu,pn %xcc, winfix				! Error!
	 srlx	%g3, PDSHIFT, %g5
	and	%g6, STMASK, %g6
	sll	%g6, 3, %g6

	! Walk the segment and page directory levels
	and	%g5, PDMASK, %g5
	sll	%g5, 3, %g5
	add	%g6, %g4, %g4
	DLFLUSH(%g4,%g6)
	ldxa	[%g4] ASI_PHYS_CACHED, %g4
	DLFLUSH2(%g6)
	srlx	%g3, PTSHIFT, %g6			! Convert to ptab offset
	and	%g6, PTMASK, %g6
	add	%g5, %g4, %g5
	brz,pn	%g4, winfix				! NULL entry? check somewhere else
	 nop

	ldxa	[%g5] ASI_PHYS_CACHED, %g4
	sll	%g6, 3, %g6
	brz,pn	%g4, winfix				! NULL entry? check somewhere else
	 add	%g6, %g4, %g6
1:
	! %g6 = physical address of the pte; loop back here if casxa loses
	ldxa	[%g6] ASI_PHYS_CACHED, %g4
	brgez,pn %g4, winfix				! Entry invalid?  Punt
	 or	%g4, TTE_MODIFY|TTE_ACCESS|TTE_W, %g7	! Update the modified bit

	btst	TTE_REAL_W|TTE_W, %g4			! Is it a ref fault?
	bz,pn	%xcc, winfix				! No -- really fault
#ifdef DEBUG
	/* Make sure we don't try to replace a kernel translation */
	/* This should not be necessary */
	sllx	%g3, 64-13, %g2				! Isolate context bits
	sethi	%hi(KERNBASE), %g5			! Don't need %lo
	brnz,pt	%g2, 0f					! Ignore context != 0
	 set	0x0800000, %g2				! 8MB
	sub	%g3, %g5, %g5
	cmp	%g5, %g2
	tlu	%xcc, 1; nop
	blu,pn	%xcc, winfix				! Next insn in delay slot is unimportant
0:
#endif
	/* Need to check for and handle large pages. */
	 srlx	%g4, 61, %g5				! Isolate the size bits
	ldxa	[%g0] ASI_DMMU_8KPTR, %g2		! Load DMMU 8K TSB pointer
	andcc	%g5, 0x3, %g5				! 8K?
	bnz,pn	%icc, winfix				! We punt to the pmap code since we can't handle policy
	 ldxa	[%g0] ASI_DMMU, %g1			! Load DMMU tag target register
	casxa	[%g6] ASI_PHYS_CACHED, %g4, %g7		!  and write it out
	membar	#StoreLoad
	cmp	%g4, %g7
	bne,pn	%xcc, 1b
	 or	%g4, TTE_MODIFY|TTE_ACCESS|TTE_W, %g4	! Update the modified bit
	stx	%g1, [%g2]				! Update TSB entry tag
	mov	SFSR, %g7
	stx	%g4, [%g2+8]				! Update TSB entry data
	nop

#ifdef TRAPSTATS
	! Count protection fixups
	sethi	%hi(_C_LABEL(protfix)), %g1
	lduw	[%g1+%lo(_C_LABEL(protfix))], %g2
	inc	%g2
	stw	%g2, [%g1+%lo(_C_LABEL(protfix))]
#endif
	mov	DEMAP_PAGE_SECONDARY, %g1		! Secondary flush
	mov	DEMAP_PAGE_NUCLEUS, %g5			! Nucleus flush
	stxa	%g0, [%g7] ASI_DMMU			! clear out the fault
	sllx	%g3, (64-13), %g7			! Need to demap old entry first
	andn	%g3, 0xfff, %g6
	movrz	%g7, %g5, %g1				! Pick one
	or	%g6, %g1, %g6
	membar	#Sync
	stxa	%g6, [%g6] ASI_DMMU_DEMAP		! Do the demap
	membar	#Sync

	stxa	%g4, [%g0] ASI_DMMU_DATA_IN		! Enter new mapping
	membar	#Sync
	retry
1483
/*
 * Each memory data access fault from a fast access miss handler comes here.
 * We will quickly check if this is an original prom mapping before going
 * to the generic fault handler
 *
 * We will assume that %pil is not lost so we won't bother to save it
 * unless we're in an interrupt handler.
 *
 * On entry:
 *	We are on one of the alternate set of globals
 *	%g1 = MMU tag target
 *	%g2 = 8Kptr
 *	%g3 = TLB TAG ACCESS
 *
 * On return:
 *	(retry with the new TLB entry, or punt to data_nfo for a
 *	missing mapping)
 *
 * Walks the 3-level page table for the faulting context; if a valid
 * pte is found, sets its ACCESS bit (casxa, retried on contention),
 * refills the TSB entry and loads the pte into the DTLB.
 */
	ICACHE_ALIGN
data_miss:
#ifdef TRAPSTATS
	! Count kernel (tl>1) vs. user (tl=1) data misses separately
	set	_C_LABEL(kdmiss), %g3
	set	_C_LABEL(udmiss), %g4
	rdpr	%tl, %g6
	dec	%g6
	movrz	%g6, %g4, %g3
	lduw	[%g3], %g4
	inc	%g4
	stw	%g4, [%g3]
#endif
	mov	TLB_TAG_ACCESS, %g3			! Get real fault page
	sethi	%hi(0x1fff), %g6			! 8K context mask
	ldxa	[%g3] ASI_DMMU, %g3			! from tag access register
	sethi	%hi(CPUINFO_VA+CI_CTXBUSY), %g4
	or	%g6, %lo(0x1fff), %g6
	LDPTR	[%g4 + %lo(CPUINFO_VA+CI_CTXBUSY)], %g4
	srax	%g3, HOLESHIFT, %g5			! Check for valid address
	and	%g3, %g6, %g6				! Isolate context

	inc	%g5					! (0 or -1) -> (1 or 0)
	sllx	%g6, 3, %g6				! Make it into an offset into ctxbusy
	ldx	[%g4+%g6], %g4				! Load up our page table.
#ifdef DEBUG
	/* Make sure we don't try to replace a kernel translation */
	/* This should not be necessary */
	brnz,pt	%g6, 1f			! If user context continue miss
	sethi	%hi(KERNBASE), %g7			! Don't need %lo
	set	0x0800000, %g6				! 8MB
	sub	%g3, %g7, %g7
	cmp	%g7, %g6
	tlu	%xcc, 1; nop
1:
#endif
	srlx	%g3, STSHIFT, %g6
	cmp	%g5, 1
	bgu,pn %xcc, winfix				! Error!
	 srlx	%g3, PDSHIFT, %g5
	and	%g6, STMASK, %g6

	sll	%g6, 3, %g6
	and	%g5, PDMASK, %g5
	sll	%g5, 3, %g5
	add	%g6, %g4, %g4
	ldxa	[%g4] ASI_PHYS_CACHED, %g4
	srlx	%g3, PTSHIFT, %g6			! Convert to ptab offset
	and	%g6, PTMASK, %g6
	add	%g5, %g4, %g5
	brz,pn	%g4, data_nfo				! NULL entry? check somewhere else

	 nop
	ldxa	[%g5] ASI_PHYS_CACHED, %g4
	sll	%g6, 3, %g6
	brz,pn	%g4, data_nfo				! NULL entry? check somewhere else
	 add	%g6, %g4, %g6

1:
	! %g6 = physical address of the pte; loop back here if casxa loses
	ldxa	[%g6] ASI_PHYS_CACHED, %g4
	brgez,pn %g4, data_nfo				! Entry invalid?  Punt
	 or	%g4, TTE_ACCESS, %g7			! Update the access bit

	btst	TTE_ACCESS, %g4				! Need to update access bit?
	bne,pt	%xcc, 1f
	 nop
	casxa	[%g6] ASI_PHYS_CACHED, %g4, %g7		!  and write it out
	cmp	%g4, %g7
	bne,pn	%xcc, 1b
	 or	%g4, TTE_ACCESS, %g4			! Update the access bit

1:
	stx	%g1, [%g2]				! Update TSB entry tag
	stx	%g4, [%g2+8]				! Update TSB entry data
	stxa	%g4, [%g0] ASI_DMMU_DATA_IN		! Enter new mapping
	membar	#Sync
	CLRTT
	retry
	NOTREACHED
1578	NOTREACHED
1579/*
1580 * We had a data miss but did not find a mapping.  Insert
1581 * a NFO mapping to satisfy speculative loads and return.
1582 * If this had been a real load, it will re-execute and
1583 * result in a data fault or protection fault rather than
1584 * a TLB miss.  We insert an 8K TTE with the valid and NFO
1585 * bits set.  All others should zero.  The TTE looks like this:
1586 *
1587 *	0x9000000000000000
1588 *
1589 */
1590data_nfo:
1591	sethi	%hi(0x90000000), %g4			! V(0x8)|NFO(0x1)
1592	sllx	%g4, 32, %g4
1593	stxa	%g4, [%g0] ASI_DMMU_DATA_IN		! Enter new mapping
1594	membar	#Sync
1595	CLRTT
1596	retry
1597
1598/*
1599 * Handler for making the trap window shiny clean.
1600 *
1601 * If the store that trapped was to a kernel address, panic.
1602 *
1603 * If the store that trapped was to a user address, stick it in the PCB.
1604 * Since we don't want to force user code to use the standard register
1605 * convention if we don't have to, we will not assume that %fp points to
1606 * anything valid.
1607 *
1608 * On entry:
1609 *	We are on one of the alternate set of globals
1610 *	%g1 = %tl - 1, tstate[tl-1], scratch	- local
1611 *	%g2 = %tl				- local
1612 *	%g3 = MMU tag access			- in
1613 *	%g4 = %cwp				- local
1614 *	%g5 = scratch				- local
1615 *	%g6 = cpcb				- local
1616 *	%g7 = scratch				- local
1617 *
1618 * On return:
1619 *
1620 * NB:	 remove most of this from main codepath & cleanup I$
1621 */
1622winfault:
1623	mov	TLB_TAG_ACCESS, %g3	! Get real fault page from tag access register
1624	ldxa	[%g3] ASI_DMMU, %g3	! And put it into the non-MMU alternate regs
1625winfix:
1626	rdpr	%tl, %g2
1627	subcc	%g2, 1, %g1
1628	ble,pt	%icc, datafault		! Don't go below trap level 1
1629	 sethi	%hi(CPCB), %g6		! get current pcb
1630
1631
1632	wrpr	%g1, 0, %tl		! Pop a trap level
1633	rdpr	%tt, %g7		! Read type of prev. trap
1634	rdpr	%tstate, %g4		! Try to restore prev %cwp if we were executing a restore
1635	andn	%g7, 0x3f, %g5		!   window fill traps are all 0b 0000 11xx xxxx
1636
1637#if 1
1638	cmp	%g7, 0x30		! If we took a datafault just before this trap
1639	bne,pt	%icc, winfixfill	! our stack's probably bad so we need to switch somewhere else
1640	 nop
1641
1642	!!
1643	!! Double data fault -- bad stack?
1644	!!
1645	wrpr	%g2, %tl		! Restore trap level.
1646	sir				! Just issue a reset and don't try to recover.
1647	mov	%fp, %l6		! Save the frame pointer
1648	set	EINTSTACK+USPACE+CC64FSZ-STKB, %fp ! Set the frame pointer to the middle of the idle stack
1649	add	%fp, -CC64FSZ, %sp	! Create a stackframe
1650	wrpr	%g0, 15, %pil		! Disable interrupts, too
1651	wrpr	%g0, %g0, %canrestore	! Our stack is hozed and our PCB
1652	wrpr	%g0, 7, %cansave	!  probably is too, so blow away
1653	ba	slowtrap		!  all our register windows.
1654	 wrpr	%g0, 0x101, %tt
1655#endif
1656
winfixfill:
	cmp	%g5, 0x0c0		!   so we mask lower bits & compare to 0b 0000 1100 0000
	bne,pt	%icc, winfixspill	! Dump our trap frame -- we will retry the fill when the page is loaded
	 cmp	%g5, 0x080		!   window spill traps are all 0b 0000 10xx xxxx

	!!
	!! This was a fill
	!!
#ifdef TRAPSTATS
	set	_C_LABEL(wfill), %g1
	lduw	[%g1], %g5
	inc	%g5
	stw	%g5, [%g1]
#endif
	btst	TSTATE_PRIV, %g4	! User mode?
	and	%g4, CWP, %g5		! %g5 = %cwp at the time of the fill trap (from %tstate)
	wrpr	%g7, 0, %tt		! Restore the original trap type
	bz,a,pt	%icc, datafault		! We were in user mode -- normal fault
	 wrpr	%g5, %cwp		! Restore cwp from before fill trap -- regs should now be consistent

	/*
	 * We're in a pickle here.  We were trying to return to user mode
	 * and the restore of the user window failed, so now we have one valid
	 * kernel window and a user window state.  If we do a TRAP_SETUP() now,
	 * our kernel window will be considered a user window and cause a
	 * fault when we try to save it later due to an invalid user address.
	 * If we return to where we faulted, our window state will not be valid
	 * and we will fault trying to enter user with our primary context of zero.
	 *
	 * What we'll do is arrange to have us return to return_from_trap so we will
	 * start the whole business over again.  But first, switch to a kernel window
	 * setup.  Let's see, canrestore and otherwin are zero.  Set WSTATE_KERN and
	 * make sure we're in kernel context and we're done.
	 */

#ifdef TRAPSTATS
	set	_C_LABEL(kwfill), %g4
	lduw	[%g4], %g7
	inc	%g7
	stw	%g7, [%g4]
#endif
#if 0 /* Need to switch over to new stuff to fix WDR bug */
	wrpr	%g5, %cwp				! Restore cwp from before fill trap -- regs should now be consistent
	wrpr	%g2, %g0, %tl				! Restore trap level -- we need to reuse it
	set	return_from_trap, %g4
	set	CTX_PRIMARY, %g7
	wrpr	%g4, 0, %tpc
	stxa	%g0, [%g7] ASI_DMMU
	inc	4, %g4
	membar	#Sync
	flush	%g4					! Isn't this convenient?
	wrpr	%g0, WSTATE_KERN, %wstate
	wrpr	%g0, 0, %canrestore			! These should be zero but
	wrpr	%g0, 0, %otherwin			! clear them just in case
	rdpr	%ver, %g5
	and	%g5, CWP, %g5
	wrpr	%g0, 0, %cleanwin
	dec	1, %g5					! NWINDOWS-1-1
	wrpr	%g5, 0, %cansave			! Invalidate all windows
!	flushw						! DEBUG
	ba,pt	%icc, datafault
	 wrpr	%g4, 0, %tnpc
#else
	wrpr	%g2, %g0, %tl				! Restore trap level
	cmp	%g2, 3					! Sanity check: we expect to have come from TL==3
	tne	%icc, 1					! Trap to the debugger if not
	rdpr	%tt, %g5
	wrpr	%g0, 1, %tl				! Revert to TL==1 XXX what if this wasn't in rft_user? Oh well.
	wrpr	%g5, %g0, %tt				! Set trap type correctly
/*
 * Here we need to implement the beginning of datafault.
 * TRAP_SETUP expects to come from either kernel mode or
 * user mode with at least one valid register window.  It
 * will allocate a trap frame, save the out registers, and
 * fix the window registers to think we have one user
 * register window.
 *
 * However, under these circumstances we don't have any
 * valid register windows, so we need to clean up the window
 * registers to prevent garbage from being saved to either
 * the user stack or the PCB before calling the datafault
 * handler.
 *
 * We could simply jump to datafault if we could somehow
 * make the handler issue a `saved' instruction immediately
 * after creating the trapframe.
 *
 * The following is duplicated from datafault:
 */
	wrpr	%g0, PSTATE_KERN|PSTATE_AG, %pstate	! We need to save volatile stuff to AG regs
#ifdef TRAPS_USE_IG
	wrpr	%g0, PSTATE_KERN|PSTATE_IG, %pstate	! We need to save volatile stuff to AG regs
#endif
	wr	%g0, ASI_DMMU, %asi			! We need to re-load trap info
	ldxa	[%g0 + TLB_TAG_ACCESS] %asi, %g1	! Get fault address from tag access register
	ldxa	[SFAR] %asi, %g2			! sync virt addr; must be read first
	ldxa	[SFSR] %asi, %g3			! get sync fault status register
	stxa	%g0, [SFSR] %asi			! Clear out fault now

	TRAP_SETUP(-CC64FSZ-TF_SIZE)
	saved						! Blow away that one register window we didn't ever use.
	ba,a,pt	%icc, Ldatafault_internal		! Now we should return directly to user mode
	 nop
#endif
winfixspill:
	bne,a,pt	%xcc, datafault			! Was not a spill -- handle it normally
	 wrpr	%g2, 0, %tl				! Restore trap level for now XXXX

	!!
	!! This was a spill
	!!
#if 1
	btst	TSTATE_PRIV, %g4	! From user mode?
	wrpr	%g2, 0, %tl		! We need to load the fault type so we can
	rdpr	%tt, %g5		! overwrite the lower trap and get it to the fault handler
	wrpr	%g1, 0, %tl
	wrpr	%g5, 0, %tt		! Copy over trap type for the fault handler
	and	%g4, CWP, %g5		! find %cwp from trap
	be,a,pt	%xcc, datafault		! Let's do a regular datafault.  When we try a save in datafault we'll
	 wrpr	%g5, 0, %cwp		!  return here and write out all dirty windows.
#endif
	wrpr	%g2, 0, %tl				! Restore trap level for now XXXX
	LDPTR	[%g6 + %lo(CPCB)], %g6	! This is in the locked TLB and should not fault
#ifdef TRAPSTATS
	set	_C_LABEL(wspill), %g7
	lduw	[%g7], %g5
	inc	%g5
	stw	%g5, [%g7]
#endif

	/*
	 * Traverse the kernel map to find the paddr of cpcb and then use only
	 * ASI_PHYS_CACHED to prevent any faults while saving the windows.
	 * BTW if it isn't mapped, we will trap and hopefully panic.
	 */

!	ba	0f					! DEBUG -- don't use phys addresses
	 wr	%g0, ASI_NUCLEUS, %asi			! In case of problems finding PA
	sethi	%hi(CPUINFO_VA+CI_CTXBUSY), %g1
	LDPTR	[%g1 + %lo(CPUINFO_VA+CI_CTXBUSY)], %g1	! Load start of ctxbusy
#ifdef DEBUG
	srax	%g6, HOLESHIFT, %g7			! Check for valid address
	brz,pt	%g7, 1f					! Should be zero or -1
	 addcc	%g7, 1, %g7					! Make -1 -> 0
	tnz	%xcc, 1					! Invalid address??? How did this happen?
1:
#endif
	! Walk the three-level page table (segment, directory, page)
	! to translate the cpcb virtual address to a physical address.
	srlx	%g6, STSHIFT, %g7
	ldx	[%g1], %g1				! Load pointer to kernel_pmap
	and	%g7, STMASK, %g7
	sll	%g7, 3, %g7
	add	%g7, %g1, %g1
	DLFLUSH(%g1,%g7)
	ldxa	[%g1] ASI_PHYS_CACHED, %g1		! Load pointer to directory
	DLFLUSH2(%g7)

	srlx	%g6, PDSHIFT, %g7			! Do page directory
	and	%g7, PDMASK, %g7
	sll	%g7, 3, %g7
	brz,pn	%g1, 0f					! NULL directory -- fall back to virtual access
	 add	%g7, %g1, %g1
	DLFLUSH(%g1,%g7)
	ldxa	[%g1] ASI_PHYS_CACHED, %g1
	DLFLUSH2(%g7)

	srlx	%g6, PTSHIFT, %g7			! Convert to ptab offset
	and	%g7, PTMASK, %g7
	brz	%g1, 0f					! NULL ptab -- fall back to virtual access
	 sll	%g7, 3, %g7
	add	%g1, %g7, %g7
	DLFLUSH(%g7,%g1)
	ldxa	[%g7] ASI_PHYS_CACHED, %g7		! This one is not
	DLFLUSH2(%g1)
	brgez	%g7, 0f					! TTE invalid -- fall back to virtual access
	 srlx	%g7, PGSHIFT, %g7			! Isolate PA part
	sll	%g6, 32-PGSHIFT, %g6			! And offset
	sllx	%g7, PGSHIFT+23, %g7			! There are 23 bits to the left of the PA in the TTE
	srl	%g6, 32-PGSHIFT, %g6
	srax	%g7, 23, %g7
	or	%g7, %g6, %g6				! Then combine them to form PA

	wr	%g0, ASI_PHYS_CACHED, %asi		! Use ASI_PHYS_CACHED to prevent possible page faults
18390:
1840	/*
1841	 * Now save all user windows to cpcb.
1842	 */
1843#ifdef NOTDEF_DEBUG
1844	add	%g6, PCB_NSAVED, %g7
1845	DLFLUSH(%g7,%g5)
1846	lduba	[%g6 + PCB_NSAVED] %asi, %g7		! make sure that pcb_nsaved
1847	DLFLUSH2(%g5)
1848	brz,pt	%g7, 1f					! is zero, else
1849	 nop
1850	wrpr	%g0, 4, %tl
1851	sir						! Force a watchdog
18521:
1853#endif
1854	rdpr	%otherwin, %g7
1855	brnz,pt	%g7, 1f
1856	 rdpr	%canrestore, %g5
1857	rdpr	%cansave, %g1
1858	add	%g5, 1, %g7				! add the %cwp window to the list to save
1859!	movrnz	%g1, %g5, %g7				! If we're issuing a save
1860!	mov	%g5, %g7				! DEBUG
1861	wrpr	%g0, 0, %canrestore
1862	wrpr	%g7, 0, %otherwin			! Still in user mode -- need to switch to kernel mode
18631:
1864	mov	%g7, %g1
1865	add	%g6, PCB_NSAVED, %g7
1866	DLFLUSH(%g7,%g5)
1867	lduba	[%g6 + PCB_NSAVED] %asi, %g7		! Start incrementing pcb_nsaved
1868	DLFLUSH2(%g5)
1869
1870#ifdef DEBUG
1871	wrpr	%g0, 5, %tl
1872#endif
1873	mov	%g6, %g5
1874	brz,pt	%g7, winfixsave				! If it's in use, panic
1875	 saved						! frob window registers
1876
1877	/* PANIC */
1878!	sir						! Force a watchdog
1879#ifdef DEBUG
1880	wrpr	%g2, 0, %tl
1881#endif
1882	mov	%g7, %o2
1883	rdpr	%ver, %o1
1884	sethi	%hi(2f), %o0
1885	and	%o1, CWP, %o1
1886	wrpr	%g0, %o1, %cleanwin
1887	dec	1, %o1
1888	wrpr	%g0, %o1, %cansave			! kludge away any more window problems
1889	wrpr	%g0, 0, %canrestore
1890	wrpr	%g0, 0, %otherwin
1891	or	%lo(2f), %o0, %o0
1892	wrpr	%g0, WSTATE_KERN, %wstate
1893	sethi	%hi(PANICSTACK), %sp
1894	LDPTR	[%sp + %lo(PANICSTACK)], %sp
1895	add	%sp, -CC64FSZ-STKB, %sp
1896	ta	1; nop					! This helps out traptrace.
1897	call	_C_LABEL(panic)				! This needs to be fixed properly but we should panic here
1898	 mov	%g1, %o1
1899	NOTREACHED
1900	.data
19012:
1902	.asciz	"winfault: double invalid window at %p, nsaved=%d"
1903	_ALIGN
1904	.text
19053:
1906	saved
1907	save
winfixsave:
	! Store the 8 locals and 8 ins of the current window into pcb_rw.
	stxa	%l0, [%g5 + PCB_RW + ( 0*8)] %asi	! Save the window in the pcb, we can schedule other stuff in here
	stxa	%l1, [%g5 + PCB_RW + ( 1*8)] %asi
	stxa	%l2, [%g5 + PCB_RW + ( 2*8)] %asi
	stxa	%l3, [%g5 + PCB_RW + ( 3*8)] %asi
	stxa	%l4, [%g5 + PCB_RW + ( 4*8)] %asi
	stxa	%l5, [%g5 + PCB_RW + ( 5*8)] %asi
	stxa	%l6, [%g5 + PCB_RW + ( 6*8)] %asi
	stxa	%l7, [%g5 + PCB_RW + ( 7*8)] %asi

	stxa	%i0, [%g5 + PCB_RW + ( 8*8)] %asi
	stxa	%i1, [%g5 + PCB_RW + ( 9*8)] %asi
	stxa	%i2, [%g5 + PCB_RW + (10*8)] %asi
	stxa	%i3, [%g5 + PCB_RW + (11*8)] %asi
	stxa	%i4, [%g5 + PCB_RW + (12*8)] %asi
	stxa	%i5, [%g5 + PCB_RW + (13*8)] %asi
	stxa	%i6, [%g5 + PCB_RW + (14*8)] %asi
	stxa	%i7, [%g5 + PCB_RW + (15*8)] %asi

!	rdpr	%otherwin, %g1	! Check to see if we're done
	dec	%g1					! One fewer window left to save
	wrpr	%g0, 7, %cleanwin			! BUGBUG -- we should not hardcode this, but I have no spare globals
	inc	16*8, %g5				! Move to next window
	inc	%g7					! inc pcb_nsaved
	brnz,pt	%g1, 3b
	 stxa	%o6, [%g5 + PCB_RW + (14*8)] %asi	! Save %sp so we can write these all out

	/* fix up pcb fields */
	stba	%g7, [%g6 + PCB_NSAVED] %asi		! cpcb->pcb_nsaved = n
#if 0
	mov	%g7, %g5				! fixup window registers
5:
	dec	%g5
	brgz,a,pt	%g5, 5b
	 restore
#ifdef NOT_DEBUG
	rdpr	%wstate, %g5				! DEBUG
	wrpr	%g0, WSTATE_KERN, %wstate		! DEBUG
	wrpr	%g0, 4, %tl
	rdpr	%cansave, %g7
	rdpr	%canrestore, %g6
	flushw						! DEBUG
	wrpr	%g2, 0, %tl
	wrpr	%g5, 0, %wstate				! DEBUG
#endif
#else
	/*
	 * We just issued a bunch of saves, so %cansave is now 0,
	 * probably (if we were doing a flushw then we may have
	 * come in with only partially full register windows and
	 * it may not be 0).
	 *
	 * %g7 contains the count of the windows we just finished
	 * saving.
	 *
	 * What we need to do now is move some of the windows from
	 * %canrestore to %cansave.  What we should do is take
	 * min(%canrestore, %g7) and move that over to %cansave.
	 *
	 * %g7 is the number of windows we flushed, so we should
	 * use that as a base.  Clear out %otherwin, set %cansave
	 * to min(%g7, NWINDOWS - 2), set %cleanwin to %canrestore
	 * + %cansave and the rest follows:
	 *
	 * %otherwin = 0
	 * %cansave = NWINDOWS - 2 - %canrestore
	 */
	wrpr	%g0, 0, %otherwin
	rdpr	%canrestore, %g1
	sub	%g1, %g7, %g1				! Calculate %canrestore - %g7
	movrlz	%g1, %g0, %g1				! Clamp at zero
	wrpr	%g1, 0, %canrestore			! This is the new canrestore
	rdpr	%ver, %g5
	and	%g5, CWP, %g5				! NWINDOWS-1
	dec	%g5					! NWINDOWS-2
	wrpr	%g5, 0, %cleanwin			! Set cleanwin to max, since we're in-kernel
	sub	%g5, %g1, %g5				! NWINDOWS-2-%canrestore
	wrpr	%g5, 0, %cansave
#ifdef NOT_DEBUG
	rdpr	%wstate, %g5				! DEBUG
	wrpr	%g0, WSTATE_KERN, %wstate		! DEBUG
	wrpr	%g0, 4, %tl
	flushw						! DEBUG
	wrpr	%g2, 0, %tl
	wrpr	%g5, 0, %wstate				! DEBUG
#endif
#endif

#ifdef NOTDEF_DEBUG
	set	panicstack-CC64FSZ, %g1
	save	%g1, 0, %sp
	GLOBTOLOC
	rdpr	%wstate, %l0
	wrpr	%g0, WSTATE_KERN, %wstate
	set	8f, %o0
	mov	%g7, %o1
	call	printf
	 mov	%g5, %o2
	wrpr	%l0, 0, %wstate
	LOCTOGLOB
	restore
	.data
8:
	.asciz	"winfix: spill fixup\n"
	_ALIGN
	.text
#endif
!	rdpr	%tl, %g2				! DEBUG DEBUG -- did we trap somewhere?
	sub	%g2, 1, %g1
	rdpr	%tt, %g2
	wrpr	%g1, 0, %tl				! We will not attempt to re-execute the spill, so dump our trap frame permanently
	wrpr	%g2, 0, %tt				! Move trap type from fault frame here, overwriting spill

	/* Did we save a user or kernel window ? */
!	srax	%g3, 48, %g5				! User or kernel store? (TAG TARGET)
	sllx	%g3, (64-13), %g5			! User or kernel store? (TAG ACCESS)
	sethi	%hi(dcache_size), %g7
	ld	[%g7 + %lo(dcache_size)], %g7
	sethi	%hi(dcache_line_size), %g6
	ld	[%g6 + %lo(dcache_line_size)], %g6
	brnz,pt	%g5, 1f					! User fault -- save windows to pcb
	 sub	%g7, %g6, %g7

	and	%g4, CWP, %g4				! %g4 = %cwp of trap
	wrpr	%g4, 0, %cwp				! Kernel fault -- restore %cwp and force a trap to debugger
	!!
	!! Here we managed to fault trying to access a kernel window
	!! This is a bug.  Switch to the interrupt stack if we aren't
	!! there already and then trap into the debugger or panic.
	!!
	sethi	%hi(EINTSTACK-BIAS), %g6
	btst	1, %sp					! Stack already biased (64-bit frame)?
	bnz,pt	%icc, 0f
	 mov	%sp, %g1
	add	%sp, -BIAS, %g1				! No -- apply the stack bias ourselves
0:
	or	%g6, %lo(EINTSTACK-BIAS), %g6
	set	(EINTSTACK-INTSTACK), %g7	! XXXXXXXXXX This assumes kernel addresses are unique from user addresses
	sub	%g6, %g1, %g2				! Determine if we need to switch to intr stack or not
	dec	%g7					! Make it into a mask
	!! NOTE(review): the next comment line ends in a backslash; under cpp
	!! line splicing this likely absorbs the following movz into the
	!! comment, disabling it -- TODO confirm before "fixing".
	andncc	%g2, %g7, %g0				! XXXXXXXXXX This assumes kernel addresses are unique from user addresses */ \
	movz	%xcc, %g1, %g6				! Stay on interrupt stack?
	add	%g6, -CCFSZ, %g6			! Allocate a stack frame
	mov	%sp, %l6				! XXXXX Save old stack pointer
	mov	%g6, %sp
	ta	1; nop					! Enter debugger
	NOTREACHED
1:
#if 1
	/* Now we need to blast away the D$ to make sure we're in sync */
	stxa	%g0, [%g7] ASI_DCACHE_TAG
	brnz,pt	%g7, 1b					! Loop over every cache line
	 sub	%g7, %g6, %g7
#endif

#ifdef NOTDEF_DEBUG
	set	panicstack-CC64FSZ, %g5
	save	%g5, 0, %sp
	GLOBTOLOC
	rdpr	%wstate, %l0
	wrpr	%g0, WSTATE_KERN, %wstate
	set	8f, %o0
	call	printf
	 mov	%fp, %o1
	wrpr	%l0, 0, %wstate
	LOCTOGLOB
	restore
	.data
8:
	.asciz	"winfix: kernel spill retry\n"
	_ALIGN
	.text
#endif
#ifdef TRAPSTATS
	set	_C_LABEL(wspillskip), %g4
	lduw	[%g4], %g5
	inc	%g5
	stw	%g5, [%g4]
#endif
	/*
	 * If we had WSTATE_KERN then we had at least one valid kernel window.
	 * We should re-execute the trapping save.
	 */
	rdpr	%wstate, %g3
	mov	%g3, %g3				! no-op -- presumably leftover debug code
	cmp	%g3, WSTATE_KERN
	bne,pt	%icc, 1f
	 nop
	retry						! Now we can complete the save
1:
	/*
	 * Since we had a WSTATE_USER, we had no valid kernel windows.  This should
	 * only happen inside TRAP_SETUP or INTR_SETUP. Emulate
	 * the instruction, clean up the register windows, then done.
	 */
	rdpr	%cwp, %g1
	inc	%g1					! Advance past the window the save would have moved to
	rdpr	%tstate, %g2
	wrpr	%g1, %cwp
	andn	%g2, CWP, %g2
	wrpr	%g1, %g2, %tstate			! Record the new %cwp in the saved tstate
	wrpr	%g0, PSTATE_KERN|PSTATE_AG, %pstate
#ifdef TRAPS_USE_IG
	wrpr	%g0, PSTATE_KERN|PSTATE_IG, %pstate	! DEBUG
#endif
	mov	%g6, %sp
	done
2115
2116/*
2117 * Each memory data access fault, from user or kernel mode,
2118 * comes here.
2119 *
2120 * We will assume that %pil is not lost so we won't bother to save it
2121 * unless we're in an interrupt handler.
2122 *
2123 * On entry:
2124 *	We are on one of the alternate set of globals
2125 *	%g1 = MMU tag target
2126 *	%g2 = %tl
2127 *
2128 * On return:
2129 *
2130 */
2131datafault:
2132	wrpr	%g0, PSTATE_KERN|PSTATE_AG, %pstate	! We need to save volatile stuff to AG regs
2133#ifdef TRAPS_USE_IG
2134	wrpr	%g0, PSTATE_KERN|PSTATE_IG, %pstate	! We need to save volatile stuff to AG regs
2135#endif
2136	wr	%g0, ASI_DMMU, %asi			! We need to re-load trap info
2137	ldxa	[%g0 + TLB_TAG_ACCESS] %asi, %g1	! Get fault address from tag access register
2138	ldxa	[SFAR] %asi, %g2			! sync virt addr; must be read first
2139	ldxa	[SFSR] %asi, %g3			! get sync fault status register
2140	stxa	%g0, [SFSR] %asi			! Clear out fault now
2141
2142	TRAP_SETUP(-CC64FSZ-TF_SIZE)
2143Ldatafault_internal:
2144	INCR64(CPUINFO_VA+CI_NFAULT)			! cnt.v_faults++ (clobbers %o0,%o1)
2145!	ldx	[%sp + CC64FSZ + STKB + TF_FAULT], %g1	! DEBUG make sure this has not changed
2146	mov	%g1, %o0				! Move these to the out regs so we can save the globals
2147	mov	%g2, %o4
2148	mov	%g3, %o5
2149
2150	ldxa	[%g0] ASI_AFAR, %o2			! get async fault address
2151	ldxa	[%g0] ASI_AFSR, %o3			! get async fault status
2152	mov	-1, %g7
2153	stxa	%g7, [%g0] ASI_AFSR			! And clear this out, too
2154
2155	wrpr	%g0, PSTATE_KERN, %pstate		! Get back to normal globals
2156
2157	stx	%g1, [%sp + CC64FSZ + STKB + TF_G + (1*8)]	! save g1
2158	rdpr	%tt, %o1					! find out what trap brought us here
2159	stx	%g2, [%sp + CC64FSZ + STKB + TF_G + (2*8)]	! save g2
2160	rdpr	%tstate, %g1
2161	stx	%g3, [%sp + CC64FSZ + STKB + TF_G + (3*8)]	! (sneak g3 in here)
2162	rdpr	%tpc, %g2
2163	stx	%g4, [%sp + CC64FSZ + STKB + TF_G + (4*8)]	! sneak in g4
2164	rdpr	%tnpc, %g3
2165	stx	%g5, [%sp + CC64FSZ + STKB + TF_G + (5*8)]	! sneak in g5
2166	mov	%g2, %o7					! Make the fault address look like the return address
2167	stx	%g6, [%sp + CC64FSZ + STKB + TF_G + (6*8)]	! sneak in g6
2168	rd	%y, %g5						! save y
2169	stx	%g7, [%sp + CC64FSZ + STKB + TF_G + (7*8)]	! sneak in g7
2170
2171	sth	%o1, [%sp + CC64FSZ + STKB + TF_TT]
2172	stx	%g1, [%sp + CC64FSZ + STKB + TF_TSTATE]		! set tf.tf_psr, tf.tf_pc
2173	stx	%g2, [%sp + CC64FSZ + STKB + TF_PC]		! set tf.tf_npc
2174	stx	%g3, [%sp + CC64FSZ + STKB + TF_NPC]
2175
2176	rdpr	%pil, %g4
2177	stb	%g4, [%sp + CC64FSZ + STKB + TF_PIL]
2178	stb	%g4, [%sp + CC64FSZ + STKB + TF_OLDPIL]
2179
2180#if 1
2181	rdpr	%tl, %g7
2182	dec	%g7
2183	movrlz	%g7, %g0, %g7
2184	wrpr	%g0, %g7, %tl		! Revert to kernel mode
2185#else
2186	wrpr	%g0, 0, %tl		! Revert to kernel mode
2187#endif
	/* Finish stackframe, call C trap handler */
	flushw						! Get this clean so we won't take any more user faults
#ifdef NOTDEF_DEBUG
	set	CPCB, %o7
	LDPTR	[%o7], %o7
	ldub	[%o7 + PCB_NSAVED], %o7
	brz,pt	%o7, 2f
	 nop
	save	%sp, -CC64FSZ, %sp
	set	1f, %o0
	call printf
	 mov	%i7, %o1
	ta	1; nop
	 restore
	.data
1:	.asciz	"datafault: nsaved = %d\n"
	_ALIGN
	.text
2:
#endif
	!! In the EMBEDANY memory model %g4 points to the start of the data segment.
	!! In our case we need to clear it before calling any C-code
	clr	%g4

	/*
	 * Right now the registers have the following values:
	 *
	 *	%o0 -- MMU_TAG_ACCESS
	 *	%o1 -- TT
	 *	%o2 -- afar
	 *	%o3 -- afsr
	 *	%o4 -- sfar
	 *	%o5 -- sfsr
	 */

	cmp	%o1, T_DATA_ERROR
	st	%g5, [%sp + CC64FSZ + STKB + TF_Y]
	wr	%g0, ASI_PRIMARY_NOFAULT, %asi	! Restore default ASI
	be,pn	%icc, data_error
	 wrpr	%g0, PSTATE_INTR, %pstate	! reenable interrupts

	mov	%o0, %o3			! (argument: trap address)
	mov	%g2, %o2			! (argument: trap pc)
	call	_C_LABEL(data_access_fault)	! data_access_fault(&tf, type,
						!	pc, addr, sfva, sfsr)
	 add	%sp, CC64FSZ + STKB, %o0	! (argument: &tf)
	wrpr	%g0, PSTATE_KERN, %pstate		! disable interrupts

data_recover:
#ifdef TRAPSTATS
	set	_C_LABEL(uintrcnt), %g1
	stw	%g0, [%g1]
	set	_C_LABEL(iveccnt), %g1
	stw	%g0, [%g1]
#endif
	b	return_from_trap			! go return
	 ldx	[%sp + CC64FSZ + STKB + TF_TSTATE], %g1		! Load this for return_from_trap
	NOTREACHED

data_error:
	call	_C_LABEL(data_access_error)	! data_access_error(&tf, type,
						!	afva, afsr, sfva, sfsr)
	 add	%sp, CC64FSZ + STKB, %o0	! (argument: &tf)
	ba	data_recover
	 nop
	NOTREACHED
2254
2255/*
2256 * Each memory instruction access fault from a fast access handler comes here.
2257 * We will quickly check if this is an original prom mapping before going
2258 * to the generic fault handler
2259 *
2260 * We will assume that %pil is not lost so we won't bother to save it
2261 * unless we're in an interrupt handler.
2262 *
2263 * On entry:
2264 *	We are on one of the alternate set of globals
2265 *	%g1 = MMU tag target
2266 *	%g2 = TSB entry ptr
2267 *	%g3 = TLB Tag Access
2268 *
2269 * On return:
2270 *
2271 */
2272
2273	ICACHE_ALIGN
2274instr_miss:
2275#ifdef TRAPSTATS
2276	set	_C_LABEL(ktmiss), %g3
2277	set	_C_LABEL(utmiss), %g4
2278	rdpr	%tl, %g6
2279	dec	%g6
2280	movrz	%g6, %g4, %g3
2281	lduw	[%g3], %g4
2282	inc	%g4
2283	stw	%g4, [%g3]
2284#endif
2285	mov	TLB_TAG_ACCESS, %g3			! Get real fault page
2286	sethi	%hi(0x1fff), %g7			! 8K context mask
2287	ldxa	[%g3] ASI_IMMU, %g3			! from tag access register
2288	sethi	%hi(CPUINFO_VA+CI_CTXBUSY), %g4
2289	or	%g7, %lo(0x1fff), %g7
2290	LDPTR	[%g4 + %lo(CPUINFO_VA+CI_CTXBUSY)], %g4
2291	srax	%g3, HOLESHIFT, %g5			! Check for valid address
2292	and	%g3, %g7, %g6				! Isolate context
2293	sllx	%g6, 3, %g6				! Make it into an offset into ctxbusy
2294	inc	%g5					! (0 or -1) -> (1 or 0)
2295	ldx	[%g4+%g6], %g4				! Load up our page table.
2296#ifdef DEBUG
2297	/* Make sure we don't try to replace a kernel translation */
2298	/* This should not be necessary */
2299	brnz,pt	%g6, 1f					! If user context continue miss
2300	sethi	%hi(KERNBASE), %g7			! Don't need %lo
2301	set	0x0800000, %g6				! 8MB
2302	sub	%g3, %g7, %g7
2303	cmp	%g7, %g6
2304	tlu	%xcc, 1; nop
23051:
2306#endif
2307	srlx	%g3, STSHIFT, %g6
2308	cmp	%g5, 1
2309	bgu,pn %xcc, textfault				! Error!
2310	 srlx	%g3, PDSHIFT, %g5
2311	and	%g6, STMASK, %g6
2312	sll	%g6, 3, %g6
2313	and	%g5, PDMASK, %g5
2314	nop
2315
2316	sll	%g5, 3, %g5
2317	add	%g6, %g4, %g4
2318	ldxa	[%g4] ASI_PHYS_CACHED, %g4
2319	srlx	%g3, PTSHIFT, %g6			! Convert to ptab offset
2320	and	%g6, PTMASK, %g6
2321	add	%g5, %g4, %g5
2322	brz,pn	%g4, textfault				! NULL entry? check somewhere else
2323	 nop
2324
2325	ldxa	[%g5] ASI_PHYS_CACHED, %g4
2326	sll	%g6, 3, %g6
2327	brz,pn	%g4, textfault				! NULL entry? check somewhere else
2328	 add	%g6, %g4, %g6
23291:
2330	ldxa	[%g6] ASI_PHYS_CACHED, %g4
2331	brgez,pn %g4, textfault
2332	 nop
2333
2334	/* Check if it's an executable mapping. */
2335	andcc	%g4, TTE_EXEC, %g0
2336	bz,pn	%xcc, textfault
2337	 nop
2338
2339	or	%g4, TTE_ACCESS, %g7			! Update accessed bit
2340	btst	TTE_ACCESS, %g4				! Need to update access git?
2341	bne,pt	%xcc, 1f
2342	 nop
2343	casxa	[%g6] ASI_PHYS_CACHED, %g4, %g7		!  and store it
2344	cmp	%g4, %g7
2345	bne,pn	%xcc, 1b
2346	 or	%g4, TTE_ACCESS, %g4			! Update accessed bit
23471:
2348	stx	%g1, [%g2]				! Update TSB entry tag
2349	stx	%g4, [%g2+8]				! Update TSB entry data
2350	stxa	%g4, [%g0] ASI_IMMU_DATA_IN		! Enter new mapping
2351	membar	#Sync
2352	CLRTT
2353	retry
2354	NOTREACHED
2355	!!
2356	!!  Check our prom mappings -- temporary
2357	!!
2358
2359/*
2360 * Each memory text access fault, from user or kernel mode,
2361 * comes here.
2362 *
2363 * We will assume that %pil is not lost so we won't bother to save it
2364 * unless we're in an interrupt handler.
2365 *
2366 * On entry:
2367 *	We are on one of the alternate set of globals
2368 *	%g1 = MMU tag target
2369 *	%g2 = %tl
2370 *	%g3 = %tl - 1
2371 *
2372 * On return:
2373 *
2374 */
2375
2376textfault:
2377	wrpr	%g0, PSTATE_KERN|PSTATE_AG, %pstate	! We need to save volatile stuff to AG regs
2378#ifdef TRAPS_USE_IG
2379	wrpr	%g0, PSTATE_KERN|PSTATE_IG, %pstate	! We need to save volatile stuff to AG regs
2380#endif
2381	wr	%g0, ASI_IMMU, %asi
2382	ldxa	[%g0 + TLB_TAG_ACCESS] %asi, %g1	! Get fault address from tag access register
2383	ldxa	[SFSR] %asi, %g3			! get sync fault status register
2384	membar	#LoadStore
2385	stxa	%g0, [SFSR] %asi			! Clear out old info
2386
2387	TRAP_SETUP(-CC64FSZ-TF_SIZE)
2388	INCR64(CPUINFO_VA+CI_NFAULT)			! cnt.v_faults++ (clobbers %o0,%o1)
2389
2390	mov	%g3, %o3
2391
2392	wrpr	%g0, PSTATE_KERN, %pstate		! Switch to normal globals
2393	ldxa	[%g0] ASI_AFSR, %o4			! get async fault status
2394	ldxa	[%g0] ASI_AFAR, %o5			! get async fault address
2395	mov	-1, %o0
2396	stxa	%o0, [%g0] ASI_AFSR			! Clear this out
2397	stx	%g1, [%sp + CC64FSZ + STKB + TF_G + (1*8)]	! save g1
2398	stx	%g2, [%sp + CC64FSZ + STKB + TF_G + (2*8)]	! save g2
2399	stx	%g3, [%sp + CC64FSZ + STKB + TF_G + (3*8)]	! (sneak g3 in here)
2400	rdpr	%tt, %o1					! Find out what caused this trap
2401	stx	%g4, [%sp + CC64FSZ + STKB + TF_G + (4*8)]	! sneak in g4
2402	rdpr	%tstate, %g1
2403	stx	%g5, [%sp + CC64FSZ + STKB + TF_G + (5*8)]	! sneak in g5
2404	rdpr	%tpc, %o2					! sync virt addr; must be read first
2405	stx	%g6, [%sp + CC64FSZ + STKB + TF_G + (6*8)]	! sneak in g6
2406	rdpr	%tnpc, %g3
2407	stx	%g7, [%sp + CC64FSZ + STKB + TF_G + (7*8)]	! sneak in g7
2408	rd	%y, %g5						! save y
2409
2410	/* Finish stackframe, call C trap handler */
2411	stx	%g1, [%sp + CC64FSZ + STKB + TF_TSTATE]		! set tf.tf_psr, tf.tf_pc
2412	sth	%o1, [%sp + CC64FSZ + STKB + TF_TT]		! debug
2413
2414	stx	%o2, [%sp + CC64FSZ + STKB + TF_PC]
2415	stx	%g3, [%sp + CC64FSZ + STKB + TF_NPC]		! set tf.tf_npc
2416
2417	rdpr	%pil, %g4
2418	stb	%g4, [%sp + CC64FSZ + STKB + TF_PIL]
2419	stb	%g4, [%sp + CC64FSZ + STKB + TF_OLDPIL]
2420
2421	rdpr	%tl, %g7
2422	dec	%g7
2423	movrlz	%g7, %g0, %g7
2424	wrpr	%g0, %g7, %tl		! Revert to kernel mode
2425
2426	wr	%g0, ASI_PRIMARY_NOFAULT, %asi		! Restore default ASI
2427	flushw						! Get rid of any user windows so we don't deadlock
2428
2429	!! In the EMBEDANY memory model %g4 points to the start of the data segment.
2430	!! In our case we need to clear it before calling any C-code
2431	clr	%g4
2432
2433	/* Use trap type to see what handler to call */
2434	cmp	%o1, T_INST_ERROR
2435	be,pn	%xcc, text_error
2436	 st	%g5, [%sp + CC64FSZ + STKB + TF_Y]		! set tf.tf_y
2437
2438	wrpr	%g0, PSTATE_INTR, %pstate	! reenable interrupts
2439	call	_C_LABEL(text_access_fault)	! mem_access_fault(&tf, type, pc, sfsr)
2440	 add	%sp, CC64FSZ + STKB, %o0	! (argument: &tf)
2441text_recover:
2442	wrpr	%g0, PSTATE_KERN, %pstate	! disable interrupts
2443	b	return_from_trap		! go return
2444	 ldx	[%sp + CC64FSZ + STKB + TF_TSTATE], %g1	! Load this for return_from_trap
2445	NOTREACHED
2446
2447text_error:
2448	wrpr	%g0, PSTATE_INTR, %pstate	! reenable interrupts
2449	call	_C_LABEL(text_access_error)	! mem_access_fault(&tfm type, sfva [pc], sfsr,
2450						!		afva, afsr);
2451	 add	%sp, CC64FSZ + STKB, %o0	! (argument: &tf)
2452	ba	text_recover
2453	 nop
2454	NOTREACHED
2455
2456/*
2457 * We're here because we took an alignment fault in NUCLEUS context.
2458 * This could be a kernel bug or it could be due to saving a user
2459 * window to an invalid stack pointer.
2460 *
2461 * If the latter is the case, we could try to emulate unaligned accesses,
2462 * but we really don't know where to store the registers since we can't
2463 * determine if there's a stack bias.  Or we could store all the regs
2464 * into the PCB and punt, until the user program uses up all the CPU's
2465 * register windows and we run out of places to store them.  So for
2466 * simplicity we'll just blow them away and enter the trap code which
2467 * will generate a bus error.  Debugging the problem will be a bit
2468 * complicated since lots of register windows will be lost, but what
2469 * can we do?
2470 */
2471checkalign:
2472	rdpr	%tl, %g2
2473	subcc	%g2, 1, %g1
2474	bneg,pn	%icc, slowtrap		! Huh?
2475	 sethi	%hi(CPCB), %g6		! get current pcb
2476
2477	wrpr	%g1, 0, %tl
2478	rdpr	%tt, %g7
2479	rdpr	%tstate, %g4
2480	andn	%g7, 0x3f, %g5
2481	cmp	%g5, 0x080		!   window spill traps are all 0b 0000 10xx xxxx
2482	bne,a,pn	%icc, slowtrap
2483	 wrpr	%g1, 0, %tl		! Revert TL  XXX wrpr in a delay slot...
2484
2485#ifdef DEBUG
2486	cmp	%g7, 0x34		! If we took a datafault just before this trap
2487	bne,pt	%icc, checkalignspill	! our stack's probably bad so we need to switch somewhere else
2488	 nop
2489
2490	!!
2491	!! Double data fault -- bad stack?
2492	!!
2493	wrpr	%g2, %tl		! Restore trap level.
2494	sir				! Just issue a reset and don't try to recover.
2495	mov	%fp, %l6		! Save the frame pointer
2496	set	EINTSTACK+USPACE+CC64FSZ-STKB, %fp ! Set the frame pointer to the middle of the idle stack
2497	add	%fp, -CC64FSZ, %sp	! Create a stackframe
2498	wrpr	%g0, 15, %pil		! Disable interrupts, too
2499	wrpr	%g0, %g0, %canrestore	! Our stack is hozed and our PCB
2500	wrpr	%g0, 7, %cansave	!  probably is too, so blow away
2501	ba	slowtrap		!  all our register windows.
2502	 wrpr	%g0, 0x101, %tt
2503#endif
2504checkalignspill:
2505	/*
2506         * %g1 -- current tl
2507	 * %g2 -- original tl
2508	 * %g4 -- tstate
2509         * %g7 -- tt
2510	 */
2511
2512	and	%g4, CWP, %g5
2513	wrpr	%g5, %cwp		! Go back to the original register win
2514
2515	/*
2516	 * Remember:
2517	 *
2518	 * %otherwin = 0
2519	 * %cansave = NWINDOWS - 2 - %canrestore
2520	 */
2521
2522	rdpr	%otherwin, %g6
2523	rdpr	%canrestore, %g3
2524	rdpr	%ver, %g5
2525	sub	%g3, %g6, %g3		! Calculate %canrestore - %g7
2526	and	%g5, CWP, %g5		! NWINDOWS-1
2527	movrlz	%g3, %g0, %g3		! Clamp at zero
2528	wrpr	%g0, 0, %otherwin
2529	wrpr	%g3, 0, %canrestore	! This is the new canrestore
2530	dec	%g5			! NWINDOWS-2
2531	wrpr	%g5, 0, %cleanwin	! Set cleanwin to max, since we're in-kernel
2532	sub	%g5, %g3, %g5		! NWINDOWS-2-%canrestore
2533	wrpr	%g5, 0, %cansave
2534
2535	wrpr	%g0, T_ALIGN, %tt	! This was an alignment fault
2536	/*
2537	 * Now we need to determine if this was a userland store or not.
2538	 * Userland stores occur in anything other than the kernel spill
2539	 * handlers (trap type 09x).
2540	 */
2541	and	%g7, 0xff0, %g5
2542	cmp	%g5, 0x90
2543	bz,pn	%icc, slowtrap
2544	 nop
2545	bclr	TSTATE_PRIV, %g4
2546	wrpr	%g4, 0, %tstate
2547	ba,a,pt	%icc, slowtrap
2548	 nop
2549
/*
 * slowtrap() builds a trap frame and calls trap().
 * This is called `slowtrap' because it *is*....
 * We have to build a full frame for ptrace(), for instance.
 *
 * Registers:
 *	At Lslowtrap_reenter: %g1 = %tstate, %g2 = %tpc, %g3 = %tnpc,
 *	%g4 = %tt (entering at slowtrap proper reads these itself).
 */
slowtrap:
#ifdef TRAPS_USE_IG
	wrpr	%g0, PSTATE_KERN|PSTATE_IG, %pstate	! DEBUG
#endif
#ifdef DIAGNOSTIC
	/* Make sure kernel stack is aligned */
	btst	0x03, %sp		! 32-bit stack OK?
	 and	%sp, 0x07, %g4		! 64-bit stack OK?
	bz,pt	%icc, 1f
	cmp	%g4, 0x1		! Must end in 0b001
	be,pt	%icc, 1f
	 rdpr	%wstate, %g7
	cmp	%g7, WSTATE_KERN
	bnz,pt	%icc, 1f		! User stack -- we'll blow it away
	 nop
	sethi	%hi(PANICSTACK), %sp	! Misaligned kernel stack: switch to the panic stack
	LDPTR	[%sp + %lo(PANICSTACK)], %sp
	add	%sp, -CC64FSZ-STKB, %sp
1:
#endif
	rdpr	%tt, %g4		! Collect the trap state for the frame
	rdpr	%tstate, %g1
	rdpr	%tpc, %g2
	rdpr	%tnpc, %g3

	TRAP_SETUP(-CC64FSZ-TF_SIZE)
Lslowtrap_reenter:
	stx	%g1, [%sp + CC64FSZ + STKB + TF_TSTATE]
	mov	%g4, %o1		! (type)
	stx	%g2, [%sp + CC64FSZ + STKB + TF_PC]
	rd	%y, %g5
	stx	%g3, [%sp + CC64FSZ + STKB + TF_NPC]
	mov	%g1, %o3		! (pstate arg -- actually the full %tstate)
	st	%g5, [%sp + CC64FSZ + STKB + TF_Y]
	mov	%g2, %o2		! (pc)
	sth	%o1, [%sp + CC64FSZ + STKB + TF_TT]! debug

	wrpr	%g0, PSTATE_KERN, %pstate		! Get back to normal globals
	stx	%g1, [%sp + CC64FSZ + STKB + TF_G + (1*8)]
	stx	%g2, [%sp + CC64FSZ + STKB + TF_G + (2*8)]
	add	%sp, CC64FSZ + STKB, %o0		! (&tf)
	stx	%g3, [%sp + CC64FSZ + STKB + TF_G + (3*8)]
	stx	%g4, [%sp + CC64FSZ + STKB + TF_G + (4*8)]
	stx	%g5, [%sp + CC64FSZ + STKB + TF_G + (5*8)]
	rdpr	%pil, %g5
	stx	%g6, [%sp + CC64FSZ + STKB + TF_G + (6*8)]
	stx	%g7, [%sp + CC64FSZ + STKB + TF_G + (7*8)]
	stb	%g5, [%sp + CC64FSZ + STKB + TF_PIL]
	stb	%g5, [%sp + CC64FSZ + STKB + TF_OLDPIL]
	/*
	 * Phew, ready to enable traps and call C code.
	 */
	rdpr	%tl, %g1
	dec	%g1
	movrlz	%g1, %g0, %g1		! Don't let TL underflow below 0
	wrpr	%g0, %g1, %tl		! Revert to kernel mode
	!! In the EMBEDANY memory model %g4 points to the start of the data segment.
	!! In our case we need to clear it before calling any C-code
	clr	%g4

	wr	%g0, ASI_PRIMARY_NOFAULT, %asi		! Restore default ASI
	wrpr	%g0, PSTATE_INTR, %pstate	! traps on again
	call	_C_LABEL(trap)			! trap(tf, type, pc, pstate)
	 nop

	ba,a,pt	%icc, return_from_trap
	 nop
	NOTREACHED
#if 1
/*
 * This code is no longer needed.
 */
/*
 * Do a `software' trap by re-entering the trap code, possibly first
 * switching from interrupt stack to kernel stack.  This is used for
 * scheduling and signal ASTs (which generally occur from softclock or
 * tty or net interrupts).
 *
 * We enter with the trap type in %g1.  All we have to do is jump to
 * Lslowtrap_reenter above, but maybe after switching stacks....
 *
 * We should be running alternate globals.  The normal globals and
 * out registers were just loaded from the old trap frame.
 *
 *	Input Params:
 *	%g1 = tstate
 *	%g2 = tpc
 *	%g3 = tnpc
 *	%g4 = tt == T_AST
 */
softtrap:
	sethi	%hi(EINTSTACK-STKB), %g5
	sethi	%hi(EINTSTACK-INTSTACK), %g7
	or	%g5, %lo(EINTSTACK-STKB), %g5
	dec	%g7			! %g7 = mask covering the interrupt stack range
	sub	%g5, %sp, %g5		! %g5 = distance of %sp below intstack top
	sethi	%hi(CPCB), %g6
	andncc	%g5, %g7, %g0		! Zero iff %sp lies within the interrupt stack
	bnz,pt	%xcc, Lslowtrap_reenter	! Not on intstack: no stack switch needed
	 LDPTR	[%g6 + %lo(CPCB)], %g7
	set	USPACE-CC64FSZ-TF_SIZE-STKB, %g5
	add	%g7, %g5, %g6		! %g6 = new %sp near the top of the lwp's kernel stack
	SET_SP_REDZONE(%g7, %g5)
#ifdef DEBUG
	stx	%g1, [%g6 + CC64FSZ + STKB + TF_FAULT]		! Generate a new trapframe
#endif
	stx	%i0, [%g6 + CC64FSZ + STKB + TF_O + (0*8)]	!	but don't bother with
	stx	%i1, [%g6 + CC64FSZ + STKB + TF_O + (1*8)]	!	locals and ins
	stx	%i2, [%g6 + CC64FSZ + STKB + TF_O + (2*8)]
	stx	%i3, [%g6 + CC64FSZ + STKB + TF_O + (3*8)]
	stx	%i4, [%g6 + CC64FSZ + STKB + TF_O + (4*8)]
	stx	%i5, [%g6 + CC64FSZ + STKB + TF_O + (5*8)]
	stx	%i6, [%g6 + CC64FSZ + STKB + TF_O + (6*8)]
	stx	%i7, [%g6 + CC64FSZ + STKB + TF_O + (7*8)]
#ifdef DEBUG
	ldx	[%sp + CC64FSZ + STKB + TF_I + (0*8)], %l0	! Copy over the rest of the regs
	ldx	[%sp + CC64FSZ + STKB + TF_I + (1*8)], %l1	! But just dirty the locals
	ldx	[%sp + CC64FSZ + STKB + TF_I + (2*8)], %l2
	ldx	[%sp + CC64FSZ + STKB + TF_I + (3*8)], %l3
	ldx	[%sp + CC64FSZ + STKB + TF_I + (4*8)], %l4
	ldx	[%sp + CC64FSZ + STKB + TF_I + (5*8)], %l5
	ldx	[%sp + CC64FSZ + STKB + TF_I + (6*8)], %l6
	ldx	[%sp + CC64FSZ + STKB + TF_I + (7*8)], %l7
	stx	%l0, [%g6 + CC64FSZ + STKB + TF_I + (0*8)]
	stx	%l1, [%g6 + CC64FSZ + STKB + TF_I + (1*8)]
	stx	%l2, [%g6 + CC64FSZ + STKB + TF_I + (2*8)]
	stx	%l3, [%g6 + CC64FSZ + STKB + TF_I + (3*8)]
	stx	%l4, [%g6 + CC64FSZ + STKB + TF_I + (4*8)]
	stx	%l5, [%g6 + CC64FSZ + STKB + TF_I + (5*8)]
	stx	%l6, [%g6 + CC64FSZ + STKB + TF_I + (6*8)]
	stx	%l7, [%g6 + CC64FSZ + STKB + TF_I + (7*8)]
	ldx	[%sp + CC64FSZ + STKB + TF_L + (0*8)], %l0
	ldx	[%sp + CC64FSZ + STKB + TF_L + (1*8)], %l1
	ldx	[%sp + CC64FSZ + STKB + TF_L + (2*8)], %l2
	ldx	[%sp + CC64FSZ + STKB + TF_L + (3*8)], %l3
	ldx	[%sp + CC64FSZ + STKB + TF_L + (4*8)], %l4
	ldx	[%sp + CC64FSZ + STKB + TF_L + (5*8)], %l5
	ldx	[%sp + CC64FSZ + STKB + TF_L + (6*8)], %l6
	ldx	[%sp + CC64FSZ + STKB + TF_L + (7*8)], %l7
	stx	%l0, [%g6 + CC64FSZ + STKB + TF_L + (0*8)]
	stx	%l1, [%g6 + CC64FSZ + STKB + TF_L + (1*8)]
	stx	%l2, [%g6 + CC64FSZ + STKB + TF_L + (2*8)]
	stx	%l3, [%g6 + CC64FSZ + STKB + TF_L + (3*8)]
	stx	%l4, [%g6 + CC64FSZ + STKB + TF_L + (4*8)]
	stx	%l5, [%g6 + CC64FSZ + STKB + TF_L + (5*8)]
	stx	%l6, [%g6 + CC64FSZ + STKB + TF_L + (6*8)]
	stx	%l7, [%g6 + CC64FSZ + STKB + TF_L + (7*8)]
#endif
	ba,pt	%xcc, Lslowtrap_reenter	! Re-enter the slow trap path
	 mov	%g6, %sp		!  on the (possibly new) kernel stack
#endif
2709
#if 0
/*
 * breakpoint:	capture as much info as possible and then call DDB
 * or trap, as the case may be.
 *
 * First, we switch to interrupt globals, and blow away %g7.  Then
 * switch down one stackframe -- just fiddle w/cwp, don't save or
 * we'll trap.  Then slowly save all the globals into our static
 * register buffer.  etc. etc.
 */

!! NOTE(review): this whole block is compiled out (#if 0) and would not
!! assemble if enabled -- the bare "XXX ddb_regs ..." text line below is
!! not valid assembly, and ddb_regs itself no longer exists as a symbol.
breakpoint:
	wrpr	%g0, PSTATE_KERN|PSTATE_IG, %pstate	! Get IG to use
	rdpr	%cwp, %g7
	inc	1, %g7					! Equivalent of save
	wrpr	%g7, 0, %cwp				! Now we have some unused locals to fiddle with
XXX ddb_regs is now ddb-regp and is a pointer not a symbol.
	set	_C_LABEL(ddb_regs), %l0
	stx	%g1, [%l0+DBR_IG+(1*8)]			! Save IGs
	stx	%g2, [%l0+DBR_IG+(2*8)]
	stx	%g3, [%l0+DBR_IG+(3*8)]
	stx	%g4, [%l0+DBR_IG+(4*8)]
	stx	%g5, [%l0+DBR_IG+(5*8)]
	stx	%g6, [%l0+DBR_IG+(6*8)]
	stx	%g7, [%l0+DBR_IG+(7*8)]
	wrpr	%g0, PSTATE_KERN|PSTATE_MG, %pstate	! Get MG to use
	stx	%g1, [%l0+DBR_MG+(1*8)]			! Save MGs
	stx	%g2, [%l0+DBR_MG+(2*8)]
	stx	%g3, [%l0+DBR_MG+(3*8)]
	stx	%g4, [%l0+DBR_MG+(4*8)]
	stx	%g5, [%l0+DBR_MG+(5*8)]
	stx	%g6, [%l0+DBR_MG+(6*8)]
	stx	%g7, [%l0+DBR_MG+(7*8)]
	wrpr	%g0, PSTATE_KERN|PSTATE_AG, %pstate	! Get AG to use
	stx	%g1, [%l0+DBR_AG+(1*8)]			! Save AGs
	stx	%g2, [%l0+DBR_AG+(2*8)]
	stx	%g3, [%l0+DBR_AG+(3*8)]
	stx	%g4, [%l0+DBR_AG+(4*8)]
	stx	%g5, [%l0+DBR_AG+(5*8)]
	stx	%g6, [%l0+DBR_AG+(6*8)]
	stx	%g7, [%l0+DBR_AG+(7*8)]
	wrpr	%g0, PSTATE_KERN, %pstate	! Get G to use
	stx	%g1, [%l0+DBR_G+(1*8)]			! Save Gs
	stx	%g2, [%l0+DBR_G+(2*8)]
	stx	%g3, [%l0+DBR_G+(3*8)]
	stx	%g4, [%l0+DBR_G+(4*8)]
	stx	%g5, [%l0+DBR_G+(5*8)]
	stx	%g6, [%l0+DBR_G+(6*8)]
	stx	%g7, [%l0+DBR_G+(7*8)]
	rdpr	%canrestore, %l1
	stb	%l1, [%l0+DBR_CANRESTORE]
	rdpr	%cansave, %l2
	stb	%l2, [%l0+DBR_CANSAVE]
	rdpr	%cleanwin, %l3
	stb	%l3, [%l0+DBR_CLEANWIN]
	rdpr	%wstate, %l4
	stb	%l4, [%l0+DBR_WSTATE]
	rd	%y, %l5
	stw	%l5, [%l0+DBR_Y]
	rdpr	%tl, %l6
	stb	%l6, [%l0+DBR_TL]
	dec	1, %g7					! Undo the earlier inc (back to original window)
#endif
2773
2774/*
2775 * I will not touch any of the DDB or KGDB stuff until I know what's going
2776 * on with the symbol table.  This is all still v7/v8 code and needs to be fixed.
2777 */
#ifdef KGDB
/*
 * bpt is entered on all breakpoint traps.
 * If this is a kernel breakpoint, we do not want to call trap().
 * Among other reasons, this way we can set breakpoints in trap().
 *
 * NOTE(review): this is still 32-bit v7/v8-style code (%psr, %wim,
 * std/ldd trapframe layout) as stated above; it has not been converted
 * for sparc64.
 */
bpt:
	set	TSTATE_PRIV, %l4
	andcc	%l4, %l0, %g0		! breakpoint from kernel?
	bz	slowtrap		! no, go do regular trap
	 nop

	/*
	 * Build a trap frame for kgdb_trap_glue to copy.
	 * Enable traps but set ipl high so that we will not
	 * see interrupts from within breakpoints.
	 */
	save	%sp, -CCFSZ-TF_SIZE, %sp		! allocate a trap frame
	TRAP_SETUP(-CCFSZ-TF_SIZE)
	or	%l0, PSR_PIL, %l4	! splhigh()
	wr	%l4, 0, %psr		! the manual claims that this
	wr	%l4, PSR_ET, %psr	! song and dance is necessary
	std	%l0, [%sp + CCFSZ + 0]	! tf.tf_psr, tf.tf_pc
	mov	%l3, %o0		! trap type arg for kgdb_trap_glue
	rd	%y, %l3
	std	%l2, [%sp + CCFSZ + 8]	! tf.tf_npc, tf.tf_y
	rd	%wim, %l3
	st	%l3, [%sp + CCFSZ + 16]	! tf.tf_wim (a kgdb-only r/o field)
	st	%g1, [%sp + CCFSZ + 20]	! tf.tf_global[1]
	std	%g2, [%sp + CCFSZ + 24]	! etc
	std	%g4, [%sp + CCFSZ + 32]
	std	%g6, [%sp + CCFSZ + 40]
	std	%i0, [%sp + CCFSZ + 48]	! tf.tf_in[0..1]
	std	%i2, [%sp + CCFSZ + 56]	! etc
	std	%i4, [%sp + CCFSZ + 64]
	std	%i6, [%sp + CCFSZ + 72]

	/*
	 * Now call kgdb_trap_glue(); if it returns, call trap().
	 */
	mov	%o0, %l3		! gotta save trap type
	call	_C_LABEL(kgdb_trap_glue)		! kgdb_trap_glue(type, &trapframe)
	 add	%sp, CCFSZ, %o1		! (&trapframe)

	/*
	 * Use slowtrap to call trap---but first erase our tracks
	 * (put the registers back the way they were).
	 */
	mov	%l3, %o0		! slowtrap will need trap type
	ld	[%sp + CCFSZ + 12], %l3
	wr	%l3, 0, %y
	ld	[%sp + CCFSZ + 20], %g1
	ldd	[%sp + CCFSZ + 24], %g2
	ldd	[%sp + CCFSZ + 32], %g4
	b	Lslowtrap_reenter
	 ldd	[%sp + CCFSZ + 40], %g6
2834
/*
 * Enter kernel breakpoint.  Write all the windows (not including the
 * current window) into the stack, so that backtrace works.  Copy the
 * supplied trap frame to the kgdb stack and switch stacks.
 *
 * kgdb_trap_glue(type, tf0)
 *	int type;
 *	struct trapframe *tf0;
 */
ENTRY_NOPROFILE(kgdb_trap_glue)
	save	%sp, -CCFSZ, %sp

	flushw				! flush all windows
	mov	%sp, %l4		! %l4 = current %sp

	/* copy trapframe to top of kgdb stack */
	set	_C_LABEL(kgdb_stack) + KGDB_STACK_SIZE - 80, %l0
					! %l0 = tfcopy -> end_of_kgdb_stack
	mov	80, %l1			! %l1 = byte count remaining (80-byte frame)
1:	ldd	[%i1], %l2		! copy 8 bytes per iteration
	inc	8, %i1
	deccc	8, %l1
	std	%l2, [%l0]
	bg	1b
	 inc	8, %l0

#ifdef NOTDEF_DEBUG
	/* save old red zone and then turn it off */
	sethi	%hi(_C_LABEL(redzone)), %l7
	ld	[%l7 + %lo(_C_LABEL(redzone))], %l6
	st	%g0, [%l7 + %lo(_C_LABEL(redzone))]
#endif
	/* switch to kgdb stack */
	add	%l0, -CCFSZ-TF_SIZE, %sp

	/* if (kgdb_trap(type, tfcopy)) kgdb_rett(tfcopy); */
	mov	%i0, %o0
	call	_C_LABEL(kgdb_trap)
	add	%l0, -80, %o1
	tst	%o0
	bnz,a	kgdb_rett
	 add	%l0, -80, %g1

	/*
	 * kgdb_trap() did not handle the trap at all so the stack is
	 * still intact.  A simple `restore' will put everything back,
	 * after we reset the stack pointer.
	 */
	mov	%l4, %sp
#ifdef NOTDEF_DEBUG
	st	%l6, [%l7 + %lo(_C_LABEL(redzone))]	! restore red zone
#endif
	ret
	 restore
2889
/*
 * Return from kgdb trap.  This is sort of special.
 *
 * We know that kgdb_trap_glue wrote the window above it, so that we will
 * be able to (and are sure to have to) load it up.  We also know that we
 * came from kernel land and can assume that the %fp (%i6) we load here
 * is proper.  We must also be sure not to lower ipl (it is at splhigh())
 * until we have traps disabled, due to the SPARC taking traps at the
 * new ipl before noticing that PSR_ET has been turned off.  We are on
 * the kgdb stack, so this could be disastrous.
 *
 * Note that the trapframe argument in %g1 points into the current stack
 * frame (current window).  We abandon this window when we move %g1->tf_psr
 * into %psr, but we will not have loaded the new %sp yet, so again traps
 * must be disabled.
 */
kgdb_rett:
	rd	%psr, %g4		! turn off traps
	wr	%g4, PSR_ET, %psr
	/* use the three-instruction delay to do something useful */
	ld	[%g1], %g2		! pick up new %psr
	ld	[%g1 + 12], %g3		! set %y
	wr	%g3, 0, %y
#ifdef NOTDEF_DEBUG
	st	%l6, [%l7 + %lo(_C_LABEL(redzone))] ! and restore red zone
#endif
	wr	%g0, 0, %wim		! enable window changes
	nop; nop; nop
	/* now safe to set the new psr (changes CWP, leaves traps disabled) */
	wr	%g2, 0, %psr		! set rett psr (including cond codes)
	/* 3 instruction delay before we can use the new window */
/*1*/	ldd	[%g1 + 24], %g2		! set new %g2, %g3
/*2*/	ldd	[%g1 + 32], %g4		! set new %g4, %g5
/*3*/	ldd	[%g1 + 40], %g6		! set new %g6, %g7

	/* now we can use the new window */
	mov	%g1, %l4		! stash trapframe ptr before %g1 is reloaded
	ld	[%l4 + 4], %l1		! get new pc
	ld	[%l4 + 8], %l2		! get new npc
	ld	[%l4 + 20], %g1		! set new %g1

	/* set up returnee's out registers, including its %sp */
	ldd	[%l4 + 48], %i0
	ldd	[%l4 + 56], %i2
	ldd	[%l4 + 64], %i4
	ldd	[%l4 + 72], %i6

	/* load returnee's window, making the window above it be invalid */
	restore
	restore	%g0, 1, %l1		! move to inval window and set %l1 = 1
	rd	%psr, %l0
	srl	%l1, %l0, %l1
	wr	%l1, 0, %wim		! %wim = 1 << (%psr & 31)
	sethi	%hi(CPCB), %l1
	LDPTR	[%l1 + %lo(CPCB)], %l1
	and	%l0, 31, %l0		! CWP = %psr & 31;
!	st	%l0, [%l1 + PCB_WIM]	! cpcb->pcb_wim = CWP;
	save	%g0, %g0, %g0		! back to window to reload
!	LOADWIN(%sp)
	save	%g0, %g0, %g0		! back to trap window
	/* note, we have not altered condition codes; safe to just rett */
	RETT
2953
/*
 * syscall_setup() builds a trap frame and calls syscall().
 * sun_syscall is same but delivers sun system call number
 * XXX	should not have to save&reload ALL the registers just for
 *	ptrace...
 */
syscall_setup:
#ifdef TRAPS_USE_IG
	wrpr	%g0, PSTATE_KERN|PSTATE_IG, %pstate	! DEBUG
#endif
	TRAP_SETUP(-CC64FSZ-TF_SIZE)

#ifdef DEBUG
	rdpr	%tt, %o1	! debug
	sth	%o1, [%sp + CC64FSZ + STKB + TF_TT]! debug
#endif

	wrpr	%g0, PSTATE_KERN, %pstate	! Get back to normal globals
	stx	%g1, [%sp + CC64FSZ + STKB + TF_G + ( 1*8)]
	mov	%g1, %o1			! code
	rdpr	%tpc, %o2			! (pc)
	stx	%g2, [%sp + CC64FSZ + STKB + TF_G + ( 2*8)]
	rdpr	%tstate, %g1
	stx	%g3, [%sp + CC64FSZ + STKB + TF_G + ( 3*8)]
	rdpr	%tnpc, %o3
	stx	%g4, [%sp + CC64FSZ + STKB + TF_G + ( 4*8)]
	rd	%y, %o4
	stx	%g5, [%sp + CC64FSZ + STKB + TF_G + ( 5*8)]
	stx	%g6, [%sp + CC64FSZ + STKB + TF_G + ( 6*8)]
	wrpr	%g0, 0, %tl			! return to tl=0
	stx	%g7, [%sp + CC64FSZ + STKB + TF_G + ( 7*8)]
	add	%sp, CC64FSZ + STKB, %o0	! (&tf)

	stx	%g1, [%sp + CC64FSZ + STKB + TF_TSTATE]
	stx	%o2, [%sp + CC64FSZ + STKB + TF_PC]
	stx	%o3, [%sp + CC64FSZ + STKB + TF_NPC]
	st	%o4, [%sp + CC64FSZ + STKB + TF_Y]

	rdpr	%pil, %g5
	stb	%g5, [%sp + CC64FSZ + STKB + TF_PIL]
	stb	%g5, [%sp + CC64FSZ + STKB + TF_OLDPIL]

	!! In the EMBEDANY memory model %g4 points to the start of the data segment.
	!! In our case we need to clear it before calling any C-code
	clr	%g4
	wr	%g0, ASI_PRIMARY_NOFAULT, %asi	! Restore default ASI

	sethi	%hi(CURLWP), %l1
	LDPTR	[%l1 + %lo(CURLWP)], %l1
	LDPTR	[%l1 + L_PROC], %l1		! now %l1 points to p
	LDPTR	[%l1 + P_MD_SYSCALL], %l1	! indirect through the emulation's syscall entry
	call	%l1
	 wrpr	%g0, PSTATE_INTR, %pstate	! turn on interrupts

	/* see `lwp_trampoline' for the reason for this label */
return_from_syscall:
	wrpr	%g0, PSTATE_KERN, %pstate	! Disable interrupts
	wrpr	%g0, 0, %tl			! Return to tl==0
	ba,a,pt	%icc, return_from_trap
	 nop
	NOTREACHED
3015
3016/*
3017 * interrupt_vector:
3018 *
3019 * Spitfire chips never get level interrupts directly from H/W.
3020 * Instead, all interrupts come in as interrupt_vector traps.
3021 * The interrupt number or handler address is an 11 bit number
3022 * encoded in the first interrupt data word.  Additional words
3023 * are application specific and used primarily for cross-calls.
3024 *
3025 * The interrupt vector handler then needs to identify the
3026 * interrupt source from the interrupt number and arrange to
3027 * invoke the interrupt handler.  This can either be done directly
3028 * from here, or a softint at a particular level can be issued.
3029 *
3030 * To call an interrupt directly and not overflow the trap stack,
3031 * the trap registers should be saved on the stack, registers
3032 * cleaned, trap-level decremented, the handler called, and then
3033 * the process must be reversed.
3034 *
3035 * To simplify life all we do here is issue an appropriate softint.
3036 *
3037 * Note:	It is impossible to identify or change a device's
3038 *		interrupt number until it is probed.  That's the
3039 *		purpose for all the funny interrupt acknowledge
3040 *		code.
3041 *
3042 */
3043
3044/*
3045 * Vectored interrupts:
3046 *
3047 * When an interrupt comes in, interrupt_vector uses the interrupt
3048 * vector number to lookup the appropriate intrhand from the intrlev
3049 * array.  It then looks up the interrupt level from the intrhand
3050 * structure.  It uses the level to index the intrpending array,
3051 * which is 8 slots for each possible interrupt level (so we can
3052 * shift instead of multiply for address calculation).  It hunts for
3053 * any available slot at that level.  Available slots are NULL.
3054 *
3055 * Then interrupt_vector uses the interrupt level in the intrhand
3056 * to issue a softint of the appropriate level.  The softint handler
3057 * figures out what level interrupt it's handling and pulls the first
3058 * intrhand pointer out of the intrpending array for that interrupt
3059 * level, puts a NULL in its place, clears the interrupt generator,
3060 * and invokes the interrupt handler.
3061 */
3062
3063/* intrpending array is now in per-CPU structure. */
3064
#ifdef DEBUG
#define INTRDEBUG_VECTOR	0x1
#define INTRDEBUG_LEVEL		0x2
#define INTRDEBUG_FUNC		0x4
#define INTRDEBUG_SPUR		0x8
	.data
	.globl	_C_LABEL(intrdebug)
_C_LABEL(intrdebug):	.word 0x0	! run-time patchable mask of INTRDEBUG_* bits
/*
 * Note: we use the local label `97' to branch forward to, to skip
 * actual debugging code following a `intrdebug' bit test.
 */
#endif
	.text
interrupt_vector:
#ifdef TRAPSTATS
	set	_C_LABEL(kiveccnt), %g1
	set	_C_LABEL(iveccnt), %g2
	rdpr	%tl, %g3
	dec	%g3
	movrz	%g3, %g2, %g1		! TL==1: count in iveccnt, nested: kiveccnt
	lduw	[%g1], %g2
	inc	%g2
	stw	%g2, [%g1]
#endif
	ldxa	[%g0] ASI_IRSR, %g1	! Read interrupt receive status
	mov	IRDR_0H, %g7
	ldxa	[%g7] ASI_IRDR, %g7	! Get interrupt number
	membar	#Sync

	btst	IRSR_BUSY, %g1
	bz,pn	%icc, 3f		! spurious interrupt
#ifdef MULTIPROCESSOR
	 sethi	%hi(KERNBASE), %g1

	cmp	%g7, %g1
	bl,a,pt	%xcc, Lsoftint_regular	! < KERNBASE: ivec number; >= KERNBASE is a fast cross-call address
	 and	%g7, (MAXINTNUM-1), %g7	! XXX make sun4us work

	mov	IRDR_1H, %g2
	ldxa	[%g2] ASI_IRDR, %g2	! Get IPI handler argument 1
	mov	IRDR_2H, %g3
	ldxa	[%g3] ASI_IRDR, %g3	! Get IPI handler argument 2

	stxa	%g0, [%g0] ASI_IRSR	! Ack IRQ
	membar	#Sync			! Should not be needed due to retry

	jmpl	%g7, %g0		! Jump straight to the IPI handler
	 nop
#else
	and	%g7, (MAXINTNUM-1), %g7	! XXX make sun4us work
#endif

Lsoftint_regular:
	stxa	%g0, [%g0] ASI_IRSR	! Ack IRQ
	membar	#Sync			! Should not be needed due to retry
	sethi	%hi(_C_LABEL(intrlev)), %g3
	sllx	%g7, PTRSHFT, %g5	! Calculate entry number
	or	%g3, %lo(_C_LABEL(intrlev)), %g3
	LDPTR	[%g3 + %g5], %g5	! We have a pointer to the handler
	brz,pn	%g5, 3f			! NULL means it isn't registered yet.  Skip it.
	 nop

	! increment per-ivec counter
	ldx	[%g5 + IH_CNT], %g1
	inc	%g1
	stx	%g1, [%g5 + IH_CNT]

setup_sparcintr:
	LDPTR	[%g5+IH_PEND], %g6	! Read pending flag
	brnz,pn	%g6, ret_from_intr_vector ! Skip it if it's running
	 ldub	[%g5+IH_PIL], %g6	! Read interrupt level
	sethi	%hi(CPUINFO_VA+CI_INTRPENDING), %g1
	sll	%g6, PTRSHFT, %g3	! Find start of table for this IPL
	or	%g1, %lo(CPUINFO_VA+CI_INTRPENDING), %g1
	add	%g1, %g3, %g1
1:
	LDPTR	[%g1], %g3		! Load list head
	STPTR	%g3, [%g5+IH_PEND]	! Link our intrhand node in
	mov	%g5, %g7
	CASPTR	[%g1] ASI_N, %g3, %g7	! Atomically swap ourselves in as the head
	cmp	%g7, %g3		! Did it work?
	bne,pn	CCCR, 1b		! No, try again
	 .empty
2:
#ifdef NOT_DEBUG
	set	_C_LABEL(intrdebug), %g7
	ld	[%g7], %g7
	btst	INTRDEBUG_VECTOR, %g7
	bz,pt	%icc, 97f
	 nop

	cmp	%g6, 0xa		! ignore clock interrupts?
	bz,pt	%icc, 97f
	 nop

	STACKFRAME(-CC64FSZ)		! Get a clean register window
	LOAD_ASCIZ(%o0,\
	    "interrupt_vector: number %lx softint mask %lx pil %lu slot %p\r\n")
	mov	%g2, %o1
	rdpr	%pil, %o3
	mov	%g1, %o4
	GLOBTOLOC
	clr	%g4
	call	prom_printf
	 mov	%g6, %o2
	LOCTOGLOB
	restore
97:
#endif
	mov	1, %g7
	sll	%g7, %g6, %g6		! %g6 = 1 << ipl
	wr	%g6, 0, SET_SOFTINT	! Invoke a softint

	.global ret_from_intr_vector
ret_from_intr_vector:
	retry
	NOTREACHED

3:
#ifdef NOT_DEBUG	/* always do this */
	set	_C_LABEL(intrdebug), %g6
	ld	[%g6], %g6
	btst	INTRDEBUG_SPUR, %g6
	bz,pt	%icc, 97f
	 nop
#endif
#if 1
	STACKFRAME(-CC64FSZ)		! Get a clean register window
	LOAD_ASCIZ(%o0, "interrupt_vector: spurious vector %lx at pil %d\r\n")
	mov	%g7, %o1
	GLOBTOLOC
	clr	%g4
	call	prom_printf
	 rdpr	%pil, %o2
	LOCTOGLOB
	restore
97:
#endif
	ba,a	ret_from_intr_vector
	 nop				! XXX spitfire bug?
3206
3207/*
3208 * Ultra1 and Ultra2 CPUs use soft interrupts for everything.  What we do
3209 * on a soft interrupt, is we should check which bits in ASR_SOFTINT(0x16)
3210 * are set, handle those interrupts, then clear them by setting the
3211 * appropriate bits in ASR_CLEAR_SOFTINT(0x15).
3212 *
3213 * We have an array of 8 interrupt vector slots for each of 15 interrupt
3214 * levels.  If a vectored interrupt can be dispatched, the dispatch
3215 * routine will place a pointer to an intrhand structure in one of
3216 * the slots.  The interrupt handler will go through the list to look
3217 * for an interrupt to dispatch.  If it finds one it will pull it off
3218 * the list, free the entry, and call the handler.  The code is like
3219 * this:
3220 *
3221 *	for (i=0; i<8; i++)
3222 *		if (ih = intrpending[intlev][i]) {
3223 *			intrpending[intlev][i] = NULL;
3224 *			if ((*ih->ih_fun)(ih->ih_arg ? ih->ih_arg : &frame))
3225 *				return;
3226 *			strayintr(&frame);
3227 *			return;
3228 *		}
3229 *
3230 * Otherwise we go back to the old style of polled interrupts.
3231 *
3232 * After preliminary setup work, the interrupt is passed to each
3233 * registered handler in turn.  These are expected to return nonzero if
3234 * they took care of the interrupt.  If a handler claims the interrupt,
3235 * we exit (hardware interrupts are latched in the requestor so we'll
3236 * just take another interrupt in the unlikely event of simultaneous
3237 * interrupts from two different devices at the same level).  If we go
3238 * through all the registered handlers and no one claims it, we report a
3239 * stray interrupt.  This is more or less done as:
3240 *
3241 *	for (ih = intrhand[intlev]; ih; ih = ih->ih_next)
3242 *		if ((*ih->ih_fun)(ih->ih_arg ? ih->ih_arg : &frame))
3243 *			return;
3244 *	strayintr(&frame);
3245 *
3246 * Inputs:
3247 *	%l0 = %tstate
3248 *	%l1 = return pc
3249 *	%l2 = return npc
3250 *	%l3 = interrupt level
3251 *	(software interrupt only) %l4 = bits to clear in interrupt register
3252 *
3253 * Internal:
3254 *	%l4, %l5: local variables
3255 *	%l6 = %y
3256 *	%l7 = %g1
3257 *	%g2..%g7 go to stack
3258 *
3259 * An interrupt frame is built in the space for a full trapframe;
3260 * this contains the psr, pc, npc, and interrupt level.
3261 *
3262 * The level of this interrupt is determined by:
3263 *
3264 *       IRQ# = %tt - 0x40
3265 */
3266
ENTRY_NOPROFILE(sparc_interrupt)
#ifdef TRAPS_USE_IG
	! This is for interrupt debugging
	wrpr	%g0, PSTATE_KERN|PSTATE_IG, %pstate	! DEBUG
#endif
	/*
	 * If this is a %tick or %stick softint, clear it then call
	 * interrupt_vector. Only one of them should be enabled at any given
	 * time.
	 */
	rd	SOFTINT, %g1
	set	0x10001, %g5		! bit 0 (%tick) | bit 16 (%stick)
	andcc	%g5, %g1, %g5
	bz,pt	%icc, 0f		! neither pending: regular interrupt
	 sethi	%hi(CPUINFO_VA+CI_TICK_IH), %g3
	wr	%g0, %g5, CLEAR_SOFTINT
	ba,pt	%icc, setup_sparcintr	! dispatch the tick intrhand
	 LDPTR	[%g3 + %lo(CPUINFO_VA+CI_TICK_IH)], %g5
0:

#ifdef TRAPSTATS
	sethi	%hi(_C_LABEL(kintrcnt)), %g1
	sethi	%hi(_C_LABEL(uintrcnt)), %g2
	or	%g1, %lo(_C_LABEL(kintrcnt)), %g1
	or	%g2, %lo(_C_LABEL(uintrcnt)), %g2	! FIXED: base was %g1, which yielded a bogus uintrcnt address
	rdpr	%tl, %g3
	dec	%g3
	movrz	%g3, %g2, %g1		! TL was 1 (from user/kernel tl=0): count in uintrcnt
	lduw	[%g1], %g2
	inc	%g2
	stw	%g2, [%g1]
	/* See if we're on the interrupt stack already. */
	set	EINTSTACK, %g2
	set	(EINTSTACK-INTSTACK), %g1
	btst	1, %sp			! stack biased (64-bit)?
	add	%sp, BIAS, %g3
	movz	%icc, %sp, %g3		! %g3 = un-biased %sp
	srl	%g3, 0, %g3
	sub	%g2, %g3, %g3
	cmp	%g3, %g1
	bgu	1f			! not within the interrupt stack
	 set	_C_LABEL(intristk), %g1
	lduw	[%g1], %g2
	inc	%g2
	stw	%g2, [%g1]
1:
#endif
	INTR_SETUP(-CC64FSZ-TF_SIZE)
	! Switch to normal globals so we can save them
	wrpr	%g0, PSTATE_KERN, %pstate
	stx	%g1, [%sp + CC64FSZ + STKB + TF_G + ( 1*8)]
	stx	%g2, [%sp + CC64FSZ + STKB + TF_G + ( 2*8)]
	stx	%g3, [%sp + CC64FSZ + STKB + TF_G + ( 3*8)]
	stx	%g4, [%sp + CC64FSZ + STKB + TF_G + ( 4*8)]
	stx	%g5, [%sp + CC64FSZ + STKB + TF_G + ( 5*8)]
	stx	%g6, [%sp + CC64FSZ + STKB + TF_G + ( 6*8)]
	stx	%g7, [%sp + CC64FSZ + STKB + TF_G + ( 7*8)]

	/*
	 * In the EMBEDANY memory model %g4 points to the start of the
	 * data segment.  In our case we need to clear it before calling
	 * any C-code.
	 */
	clr	%g4

	flushw			! Do not remove this insn -- causes interrupt loss
	rd	%y, %l6
	INCR64(CPUINFO_VA+CI_NINTR)	! cnt.v_ints++ (clobbers %o0,%o1)
	rdpr	%tt, %l5		! Find out our current IPL
	rdpr	%tstate, %l0
	rdpr	%tpc, %l1
	rdpr	%tnpc, %l2
	rdpr	%tl, %l3		! Dump our trap frame now we have taken the IRQ
	stw	%l6, [%sp + CC64FSZ + STKB + TF_Y]	! Silly, but we need to save this for rft
	dec	%l3			! Pop back down a trap level
	wrpr	%g0, %l3, %tl
	sth	%l5, [%sp + CC64FSZ + STKB + TF_TT]! debug
	stx	%l0, [%sp + CC64FSZ + STKB + TF_TSTATE]	! set up intrframe/clockframe
	stx	%l1, [%sp + CC64FSZ + STKB + TF_PC]
	btst	TSTATE_PRIV, %l0		! User mode?
	stx	%l2, [%sp + CC64FSZ + STKB + TF_NPC]

	sub	%l5, 0x40, %l6			! Convert to interrupt level (IRQ# = %tt - 0x40)
	sethi	%hi(_C_LABEL(intr_evcnts)), %l4
	stb	%l6, [%sp + CC64FSZ + STKB + TF_PIL]	! set up intrframe/clockframe
	rdpr	%pil, %o1
	mulx	%l6, EVC_SIZE, %l3
	or	%l4, %lo(_C_LABEL(intr_evcnts)), %l4	! intrcnt[intlev]++;
	stb	%o1, [%sp + CC64FSZ + STKB + TF_OLDPIL]	! old %pil
	ldx	[%l4 + %l3], %o0
	add	%l4, %l3, %l4		! %l4 = &intr_evcnts[intlev]
	clr	%l5			! Zero handled count
#ifdef MULTIPROCESSOR
	mov	1, %l3			! Ack softint
1:	add	%o0, 1, %l7		! Atomically bump the event counter
	casxa	[%l4] ASI_N, %o0, %l7
	cmp	%o0, %l7
	bne,a,pn %xcc, 1b		! retry if changed
	 mov	%l7, %o0
#else
	inc	%o0
	mov	1, %l3			! Ack softint
	stx	%o0, [%l4]
#endif
	sll	%l3, %l6, %l3		! Generate IRQ mask

	wrpr	%l6, %pil		! Raise %pil to this interrupt's level

#define SOFTINT_INT \
	(1<<IPL_SOFTCLOCK|1<<IPL_SOFTBIO|1<<IPL_SOFTNET|1<<IPL_SOFTSERIAL)

	! Increment the per-cpu interrupt depth in case of hardintrs
	btst	SOFTINT_INT, %l3	! softints don't count toward intr depth
	bnz,pn	%icc, sparc_intr_retry
	 sethi	%hi(CPUINFO_VA+CI_IDEPTH), %l1
	ld	[%l1 + %lo(CPUINFO_VA+CI_IDEPTH)], %l2
	inc	%l2
	st	%l2, [%l1 + %lo(CPUINFO_VA+CI_IDEPTH)]
3385
sparc_intr_retry:
	wr	%l3, 0, CLEAR_SOFTINT	! (don't clear possible %tick IRQ)
	sethi	%hi(CPUINFO_VA+CI_INTRPENDING), %l4
	sll	%l6, PTRSHFT, %l2
	or	%l4, %lo(CPUINFO_VA+CI_INTRPENDING), %l4
	add	%l2, %l4, %l4		! %l4 = &intrpending[intlev]

1:
	membar	#StoreLoad		! Make sure any failed casxa insns complete
	LDPTR	[%l4], %l2		! Check a slot
	cmp	%l2, -1
	beq,pn	CCCR, intrcmplt		! Empty list?
	 mov	-1, %l7
	membar	#LoadStore
	CASPTR	[%l4] ASI_N, %l2, %l7	! Grab the entire list
	cmp	%l7, %l2
	bne,pn	CCCR, 1b
	 add	%sp, CC64FSZ+STKB, %o2	! tf = %sp + CC64FSZ + STKB
	LDPTR	[%l2 + IH_PEND], %l7
	cmp	%l7, -1			! Last slot?
	be,pt	CCCR, 3f
	 membar	#LoadStore

	/*
	 * Reverse a pending list since setup_sparcintr/send_softint
	 * makes it in a LIFO order.
	 */
	mov	-1, %o0			! prev = -1
1:	STPTR	%o0, [%l2 + IH_PEND]	! ih->ih_pending = prev
	mov	%l2, %o0		! prev = ih
	mov	%l7, %l2		! ih = ih->ih_pending
	LDPTR	[%l2 + IH_PEND], %l7
	cmp	%l7, -1			! Last slot?
	bne,pn	CCCR, 1b
	 membar	#LoadStore
	ba,pt	CCCR, 3f
	 mov	%o0, %l7		! save ih->ih_pending

2:
	add	%sp, CC64FSZ+STKB, %o2	! tf = %sp + CC64FSZ + STKB
	LDPTR	[%l2 + IH_PEND], %l7	! save ih->ih_pending
	membar	#LoadStore
3:
	STPTR	%g0, [%l2 + IH_PEND]	! Clear pending flag
	membar	#Sync
	LDPTR	[%l2 + IH_FUN], %o4	! ih->ih_fun
	LDPTR	[%l2 + IH_ARG], %o0	! ih->ih_arg

#ifdef NOT_DEBUG
	set	_C_LABEL(intrdebug), %o3
	ld	[%o2], %o3
	btst	INTRDEBUG_FUNC, %o3
	bz,a,pt	%icc, 97f
	 nop

	cmp	%l6, 0xa		! ignore clock interrupts?
	bz,pt	%icc, 97f
	 nop

	STACKFRAME(-CC64FSZ)		! Get a clean register window
	LOAD_ASCIZ(%o0, "sparc_interrupt: func %p arg %p\r\n")
	mov	%i0, %o2		! arg
	GLOBTOLOC
	call	prom_printf
	 mov	%i4, %o1		! func
	LOCTOGLOB
	restore
97:
	mov	%l4, %o1
#endif

	wrpr	%g0, PSTATE_INTR, %pstate	! Reenable interrupts
	jmpl	%o4, %o7		! handled = (*ih->ih_fun)(...)
	 movrz	%o0, %o2, %o0		! arg = (arg == 0) ? tf : arg
	wrpr	%g0, PSTATE_KERN, %pstate	! Disable interrupts
	LDPTR	[%l2 + IH_CLR], %l1
	membar	#Sync

	brz,pn	%l1, 0f			! No clear register for this handler?
	 add	%l5, %o0, %l5		! Accumulate the handler's return value
	stx	%g0, [%l1]		! Clear intr source
	membar	#Sync			! Should not be needed
0:
	cmp	%l7, -1
	bne,pn	CCCR, 2b		! 'Nother?
	 mov	%l7, %l2
3472
3473intrcmplt:
3474	/*
3475	 * Re-read SOFTINT to see if any new  pending interrupts
3476	 * at this level.
3477	 */
3478	mov	1, %l3			! Ack softint
3479	rd	SOFTINT, %l7		! %l5 contains #intr handled.
3480	sll	%l3, %l6, %l3		! Generate IRQ mask
3481	btst	%l3, %l7		! leave mask in %l3 for retry code
3482	bnz,pn	%icc, sparc_intr_retry
3483	 mov	1, %l5			! initialize intr count for next run
3484
3485	! Decrement this cpu's interrupt depth in case of hardintrs
3486	btst	SOFTINT_INT, %l3
3487	bnz,pn	%icc, 1f
3488	 sethi	%hi(CPUINFO_VA+CI_IDEPTH), %l4
3489	ld	[%l4 + %lo(CPUINFO_VA+CI_IDEPTH)], %l5
3490	dec	%l5
3491	st	%l5, [%l4 + %lo(CPUINFO_VA+CI_IDEPTH)]
34921:
3493
3494#ifdef NOT_DEBUG
3495	set	_C_LABEL(intrdebug), %o2
3496	ld	[%o2], %o2
3497	btst	INTRDEBUG_FUNC, %o2
3498	bz,a,pt	%icc, 97f
3499	 nop
3500
3501	cmp	%l6, 0xa		! ignore clock interrupts?
3502	bz,pt	%icc, 97f
3503	 nop
3504
3505	STACKFRAME(-CC64FSZ)		! Get a clean register window
3506	LOAD_ASCIZ(%o0, "sparc_interrupt:  done\r\n")
3507	GLOBTOLOC
3508	call	prom_printf
3509	 nop
3510	LOCTOGLOB
3511	restore
351297:
3513#endif
3514
3515	ldub	[%sp + CC64FSZ + STKB + TF_OLDPIL], %l3	! restore old %pil
3516	wrpr	%l3, 0, %pil
3517
3518	ba,a,pt	%icc, return_from_trap
3519	 nop
3520
3521#ifdef notyet
3522/*
3523 * Level 12 (ZS serial) interrupt.  Handle it quickly, schedule a
3524 * software interrupt, and get out.  Do the software interrupt directly
3525 * if we would just take it on the way out.
3526 *
3527 * Input:
3528 *	%l0 = %psr
3529 *	%l1 = return pc
3530 *	%l2 = return npc
3531 * Internal:
3532 *	%l3 = zs device
3533 *	%l4, %l5 = temporary
3534 *	%l6 = rr3 (or temporary data) + 0x100 => need soft int
3535 *	%l7 = zs soft status
3536 */
3537zshard:
3538#endif /* notyet */
3539
3540	.globl	return_from_trap, rft_kernel, rft_user
3541	.globl	softtrap, slowtrap
3542
3543/*
3544 * Various return-from-trap routines (see return_from_trap).
3545 */
3546
3547/*
3548 * Return from trap.
3549 * registers are:
3550 *
3551 *	[%sp + CC64FSZ + STKB] => trap frame
3552 *
3553 * We must load all global, out, and trap registers from the trap frame.
3554 *
3555 * If returning to kernel, we should be at the proper trap level because
3556 * we don't touch %tl.
3557 *
3558 * When returning to user mode, the trap level does not matter, as it
3559 * will be set explicitly.
3560 *
3561 * If we are returning to user code, we must:
3562 *  1.  Check for register windows in the pcb that belong on the stack.
3563 *	If there are any, reload them
3564 */
/*
 * Common trap-return entry.  Expects a trap frame at
 * %sp + CC64FSZ + STKB; TF_TSTATE's TSTATE_PRIV bit selects the
 * kernel (rft_kernel) or user (rft_user) return path.
 */
return_from_trap:
#ifdef DEBUG
	!! Make sure we don't have pc == npc == 0 or we suck.
	ldx	[%sp + CC64FSZ + STKB + TF_PC], %g2
	ldx	[%sp + CC64FSZ + STKB + TF_NPC], %g3
	orcc	%g2, %g3, %g0
	tz	%icc, 1
#endif

	!!
	!! We'll make sure we flush our pcb here, rather than later.
	!!
	ldx	[%sp + CC64FSZ + STKB + TF_TSTATE], %g1
	btst	TSTATE_PRIV, %g1			! returning to userland?

	!!
	!! Let all pending interrupts drain before returning to userland
	!!
	bnz,pn	%icc, 1f				! Returning to userland? (nz => kernel: skip the drain)
	 nop
	wrpr	%g0, PSTATE_INTR, %pstate
	wrpr	%g0, %g0, %pil				! Lower IPL
1:
	wrpr	%g0, PSTATE_KERN, %pstate		! Make sure we have normal globals & no IRQs

	/* Restore normal globals */
	ldx	[%sp + CC64FSZ + STKB + TF_G + (1*8)], %g1
	ldx	[%sp + CC64FSZ + STKB + TF_G + (2*8)], %g2
	ldx	[%sp + CC64FSZ + STKB + TF_G + (3*8)], %g3
	ldx	[%sp + CC64FSZ + STKB + TF_G + (4*8)], %g4
	ldx	[%sp + CC64FSZ + STKB + TF_G + (5*8)], %g5
	ldx	[%sp + CC64FSZ + STKB + TF_G + (6*8)], %g6
	ldx	[%sp + CC64FSZ + STKB + TF_G + (7*8)], %g7
	/* Switch to alternate globals and load outs */
	wrpr	%g0, PSTATE_KERN|PSTATE_AG, %pstate
#ifdef TRAPS_USE_IG
	wrpr	%g0, PSTATE_KERN|PSTATE_IG, %pstate	! DEBUG
#endif
	ldx	[%sp + CC64FSZ + STKB + TF_O + (0*8)], %i0
	ldx	[%sp + CC64FSZ + STKB + TF_O + (1*8)], %i1
	ldx	[%sp + CC64FSZ + STKB + TF_O + (2*8)], %i2
	ldx	[%sp + CC64FSZ + STKB + TF_O + (3*8)], %i3
	ldx	[%sp + CC64FSZ + STKB + TF_O + (4*8)], %i4
	ldx	[%sp + CC64FSZ + STKB + TF_O + (5*8)], %i5
	ldx	[%sp + CC64FSZ + STKB + TF_O + (6*8)], %i6
	ldx	[%sp + CC64FSZ + STKB + TF_O + (7*8)], %i7
	/* Now load trap registers into alternate globals */
	ld	[%sp + CC64FSZ + STKB + TF_Y], %g4
	ldx	[%sp + CC64FSZ + STKB + TF_TSTATE], %g1		! load new values
	wr	%g4, 0, %y
	ldx	[%sp + CC64FSZ + STKB + TF_PC], %g2
	ldx	[%sp + CC64FSZ + STKB + TF_NPC], %g3

#ifdef NOTDEF_DEBUG
	ldub	[%sp + CC64FSZ + STKB + TF_PIL], %g5		! restore %pil
	wrpr	%g5, %pil				! DEBUG
#endif

	/* Returning to user mode or kernel mode? */
	btst	TSTATE_PRIV, %g1		! returning to userland?
	bz,pt	%icc, rft_user
	 sethi	%hi(CPUINFO_VA+CI_WANT_AST), %g7	! first instr of rft_user
3627
3628/*
3629 * Return from trap, to kernel.
3630 *
3631 * We will assume, for the moment, that all kernel traps are properly stacked
3632 * in the trap registers, so all we have to do is insert the (possibly modified)
3633 * register values into the trap registers then do a retry.
3634 *
3635 */
rft_kernel:
	! On entry: %g1 = saved %tstate, %g2 = return %pc, %g3 = return %npc
	rdpr	%tl, %g4				! Grab a set of trap registers
	inc	%g4
	wrpr	%g4, %g0, %tl				! raise %tl so we own a fresh TL slot
	wrpr	%g3, 0, %tnpc
	wrpr	%g2, 0, %tpc
	wrpr	%g1, 0, %tstate
	restore
	rdpr	%tstate, %g1			! Since we may have trapped our regs may be toast
	rdpr	%cwp, %g2
	andn	%g1, CWP, %g1
	wrpr	%g1, %g2, %tstate		! Put %cwp in %tstate
	CLRTT
#ifdef TRAPSTATS
	rdpr	%tl, %g2
	set	_C_LABEL(rftkcnt), %g1
	sllx	%g2, 2, %g2			! rftkcnt is indexed by trap level
	add	%g1, %g2, %g1
	lduw	[%g1], %g2
	inc	%g2
	stw	%g2, [%g1]
#endif
#if	0
	wrpr	%g0, 0, %cleanwin	! DEBUG
#endif
#if defined(DDB) && defined(MULTIPROCESSOR)
	! If we are returning to the DDB pause trap point, use "done"
	! (skip the trapping instruction) instead of "retry".
	set	sparc64_ipi_pause_trap_point, %g1
	rdpr	%tpc, %g2
	cmp	%g1, %g2
	bne,pt	%icc, 0f
	 nop
	done
0:
#endif
	retry
	NOTREACHED
3672/*
3673 * Return from trap, to user.  Checks for scheduling trap (`ast') first;
3674 * will re-enter trap() if set.  Note that we may have to switch from
3675 * the interrupt stack to the kernel stack in this case.
3676 *	%g1 = %tstate
3677 *	%g2 = return %pc
3678 *	%g3 = return %npc
3679 * If returning to a valid window, just set psr and return.
3680 */
	.data
rft_wcnt:	.word 0		! DEBUG: count of windows restored by the last rft_user
	.text

/*
 * rft_user: return from trap to user mode.
 * On entry: %g1 = saved %tstate, %g2 = return %pc, %g3 = return %npc,
 * %g7 = %hi(CPUINFO_VA+CI_WANT_AST) (loaded in the branch delay slot).
 * Re-enters trap() with T_AST if an AST is wanted, otherwise restores
 * any user register windows saved in the pcb before returning.
 */
rft_user:
!	sethi	%hi(CPUINFO_VA+CI_WANT_AST), %g7	! (done above)
	lduw	[%g7 + %lo(CPUINFO_VA+CI_WANT_AST)], %g7! want AST trap?
	brnz,pn	%g7, softtrap			! yes, re-enter trap with type T_AST
	 mov	T_AST, %g4

#ifdef NOTDEF_DEBUG
	sethi	%hi(CPCB), %g4
	LDPTR	[%g4 + %lo(CPCB)], %g4
	ldub	[%g4 + PCB_NSAVED], %g4		! nsaved
	brz,pt	%g4, 2f		! Only print if nsaved <> 0
	 nop

	set	1f, %o0
	mov	%g4, %o1
	mov	%g2, %o2			! pc
	wr	%g0, ASI_DMMU, %asi		! restore the user context
	ldxa	[CTX_SECONDARY] %asi, %o3	! ctx
	GLOBTOLOC
	mov	%g3, %o5
	call	printf
	 mov	%i6, %o4			! sp
!	wrpr	%g0, PSTATE_INTR, %pstate		! Allow IRQ service
!	wrpr	%g0, PSTATE_KERN, %pstate		! DenyIRQ service
	LOCTOGLOB
1:
	.data
	.asciz	"rft_user: nsaved=%x pc=%d ctx=%x sp=%x npc=%p\n"
	_ALIGN
	.text
#endif

	/*
	 * NB: only need to do this after a cache miss
	 */
#ifdef TRAPSTATS
	set	_C_LABEL(rftucnt), %g6
	lduw	[%g6], %g7
	inc	%g7
	stw	%g7, [%g6]
#endif
	/*
	 * Now check to see if any regs are saved in the pcb and restore them.
	 *
	 * Here we need to undo the damage caused by switching to a kernel
	 * stack.
	 *
	 * We will use alternate globals %g4..%g7 because %g1..%g3 are used
	 * by the data fault trap handlers and we don't want possible conflict.
	 */

	sethi	%hi(CPCB), %g6
	rdpr	%otherwin, %g7			! restore register window controls
#ifdef DEBUG
	rdpr	%canrestore, %g5		! DEBUG
	tst	%g5				! DEBUG
	tnz	%icc, 1; nop			! DEBUG
!	mov	%g0, %g5			! There should be *NO* %canrestore
	add	%g7, %g5, %g7			! DEBUG
#endif
	wrpr	%g0, %g7, %canrestore		! user windows become restorable
	LDPTR	[%g6 + %lo(CPCB)], %g6
	wrpr	%g0, 0, %otherwin

	ldub	[%g6 + PCB_NSAVED], %g7		! Any saved reg windows?
	wrpr	%g0, WSTATE_USER, %wstate	! Need to know where our sp points

#ifdef DEBUG
	set	rft_wcnt, %g4	! Keep track of all the windows we restored
	stw	%g7, [%g4]
#endif

	brz,pt	%g7, 5f				! No saved reg wins
	 nop
	dec	%g7				! We can do this now or later.  Move to last entry

#ifdef DEBUG
	rdpr	%canrestore, %g4			! DEBUG Make sure we've restored everything
	brnz,a,pn	%g4, 0f				! DEBUG
	 sir						! DEBUG we should NOT have any usable windows here
0:							! DEBUG
	wrpr	%g0, 5, %tl
#endif
	rdpr	%otherwin, %g4
	sll	%g7, 7, %g5			! calculate ptr into rw64 array 8*16 == 128 or 7 bits
	brz,pt	%g4, 6f				! We should not have any user windows left
	 add	%g5, %g6, %g5

	! Inconsistent state: both pcb_nsaved and %otherwin nonzero -- panic.
	set	1f, %o0
	mov	%g7, %o1
	mov	%g4, %o2
	call	printf
	 wrpr	%g0, PSTATE_KERN, %pstate
	set	2f, %o0
	call	panic
	 nop
	NOTREACHED
	.data
1:	.asciz	"pcb_nsaved=%x and otherwin=%x\n"
2:	.asciz	"rft_user\n"
	_ALIGN
	.text
6:
3:
	! Walk the pcb_rw array from the last entry backwards, loading
	! one full register window per iteration.
	restored					! Load in the window
	restore						! This should not trap!
	ldx	[%g5 + PCB_RW + ( 0*8)], %l0		! Load the window from the pcb
	ldx	[%g5 + PCB_RW + ( 1*8)], %l1
	ldx	[%g5 + PCB_RW + ( 2*8)], %l2
	ldx	[%g5 + PCB_RW + ( 3*8)], %l3
	ldx	[%g5 + PCB_RW + ( 4*8)], %l4
	ldx	[%g5 + PCB_RW + ( 5*8)], %l5
	ldx	[%g5 + PCB_RW + ( 6*8)], %l6
	ldx	[%g5 + PCB_RW + ( 7*8)], %l7

	ldx	[%g5 + PCB_RW + ( 8*8)], %i0
	ldx	[%g5 + PCB_RW + ( 9*8)], %i1
	ldx	[%g5 + PCB_RW + (10*8)], %i2
	ldx	[%g5 + PCB_RW + (11*8)], %i3
	ldx	[%g5 + PCB_RW + (12*8)], %i4
	ldx	[%g5 + PCB_RW + (13*8)], %i5
	ldx	[%g5 + PCB_RW + (14*8)], %i6
	ldx	[%g5 + PCB_RW + (15*8)], %i7

#ifdef DEBUG
	stx	%g0, [%g5 + PCB_RW + (14*8)]		! DEBUG mark that we've saved this one
#endif

	cmp	%g5, %g6
	bgu,pt	%xcc, 3b				! Next one?
	 dec	8*16, %g5

	rdpr	%ver, %g5
	stb	%g0, [%g6 + PCB_NSAVED]			! Clear them out so we won't do this again
	and	%g5, CWP, %g5				! NWINDOWS-1 from %ver.maxwin
	add	%g5, %g7, %g4
	dec	1, %g5					! NWINDOWS-1-1
	wrpr	%g5, 0, %cansave
	wrpr	%g0, 0, %canrestore			! Make sure we have no freeloaders XXX
	wrpr	%g0, WSTATE_USER, %wstate		! Save things to user space
	mov	%g7, %g5				! We already did one restore
4:
	! Walk back up via save until we are in the starting window again.
	rdpr	%canrestore, %g4
	inc	%g4
	deccc	%g5
	wrpr	%g4, 0, %cleanwin			! Make *sure* we don't trap to cleanwin
	bge,a,pt	%xcc, 4b				! return to starting regwin
	 save	%g0, %g0, %g0				! This may force a datafault

#ifdef DEBUG
	wrpr	%g0, 0, %tl
#endif
#ifdef TRAPSTATS
	set	_C_LABEL(rftuld), %g5
	lduw	[%g5], %g4
	inc	%g4
	stw	%g4, [%g5]
#endif
	!!
	!! We can't take any save faults in here 'cause they will never be serviced
	!!

#ifdef DEBUG
	sethi	%hi(CPCB), %g5
	LDPTR	[%g5 + %lo(CPCB)], %g5
	ldub	[%g5 + PCB_NSAVED], %g5		! Any saved reg windows?
	tst	%g5
	tnz	%icc, 1; nop			! Debugger if we still have saved windows
	bne,a	rft_user			! Try starting over again
	 sethi	%hi(CPUINFO_VA+CI_WANT_AST), %g7
#endif
	/*
	 * Set up our return trapframe so we can recover if we trap from here
	 * on in.
	 */
	wrpr	%g0, 1, %tl			! Set up the trap state
	wrpr	%g2, 0, %tpc
	wrpr	%g3, 0, %tnpc
	ba,pt	%icc, 6f
	 wrpr	%g1, %g0, %tstate

5:
	/*
	 * Set up our return trapframe so we can recover if we trap from here
	 * on in.
	 */
	wrpr	%g0, 1, %tl			! Set up the trap state
	wrpr	%g2, 0, %tpc
	wrpr	%g3, 0, %tnpc
	wrpr	%g1, %g0, %tstate
	restore
6:
	rdpr	%canrestore, %g5
	wrpr	%g5, 0, %cleanwin			! Force cleanup of kernel windows

#ifdef NOTDEF_DEBUG
	ldx	[%g6 + CC64FSZ + STKB + TF_L + (0*8)], %g5! DEBUG -- get proper value for %l0
	cmp	%l0, %g5
	be,a,pt %icc, 1f
	 nop
!	sir			! WATCHDOG
	set	badregs, %g1	! Save the suspect regs
	stw	%l0, [%g1+(4*0)]
	stw	%l1, [%g1+(4*1)]
	stw	%l2, [%g1+(4*2)]
	stw	%l3, [%g1+(4*3)]
	stw	%l4, [%g1+(4*4)]
	stw	%l5, [%g1+(4*5)]
	stw	%l6, [%g1+(4*6)]
	stw	%l7, [%g1+(4*7)]
	stw	%i0, [%g1+(4*8)+(4*0)]
	stw	%i1, [%g1+(4*8)+(4*1)]
	stw	%i2, [%g1+(4*8)+(4*2)]
	stw	%i3, [%g1+(4*8)+(4*3)]
	stw	%i4, [%g1+(4*8)+(4*4)]
	stw	%i5, [%g1+(4*8)+(4*5)]
	stw	%i6, [%g1+(4*8)+(4*6)]
	stw	%i7, [%g1+(4*8)+(4*7)]
	save
	inc	%g7
	wrpr	%g7, 0, %otherwin
	wrpr	%g0, 0, %canrestore
	wrpr	%g0, WSTATE_KERN, %wstate	! Need to know where our sp points
	set	rft_wcnt, %g4	! Restore nsaved before trapping
	sethi	%hi(CPCB), %g6
	LDPTR	[%g6 + %lo(CPCB)], %g6
	lduw	[%g4], %g4
	stb	%g4, [%g6 + PCB_NSAVED]
	ta	1
	sir
	.data
badregs:
	.space	16*4
	.text
1:
#endif

	rdpr	%tstate, %g1
	rdpr	%cwp, %g7			! Find our cur window
	andn	%g1, CWP, %g1			! Clear it from %tstate
	wrpr	%g1, %g7, %tstate		! Set %tstate with %cwp

	wr	%g0, ASI_DMMU, %asi		! restore the user context
	ldxa	[CTX_SECONDARY] %asi, %g4
	sethi	%hi(KERNBASE), %g7		! Should not be needed due to retry
	stxa	%g4, [CTX_PRIMARY] %asi
	membar	#Sync				! Should not be needed due to retry
	flush	%g7				! Should not be needed due to retry
	CLRTT
#ifdef TRAPSTATS
	set	_C_LABEL(rftudone), %g1
	lduw	[%g1], %g2
	inc	%g2
	stw	%g2, [%g1]
#endif
#ifdef DEBUG
	sethi	%hi(CPCB), %g5
	LDPTR	[%g5 + %lo(CPCB)], %g5
	ldub	[%g5 + PCB_NSAVED], %g5		! Any saved reg windows?
	tst	%g5
	tnz	%icc, 1; nop			! Debugger if we still have saved windows!
#endif
	wrpr	%g0, 0, %pil			! Enable all interrupts
	retry
3949
3950! exported end marker for kernel gdb
3951	.globl	_C_LABEL(endtrapcode)
3952_C_LABEL(endtrapcode):
3953
3954/*
3955 * Kernel entry point.
3956 *
3957 * The contract between bootloader and kernel is:
3958 *
3959 * %o0		OpenFirmware entry point, to keep Sun's updaters happy
3960 * %o1		Address of boot information vector (see bootinfo.h)
3961 * %o2		Length of the vector, in bytes
3962 * %o3		OpenFirmware entry point, to mimic Sun bootloader behavior
3963 * %o4		OpenFirmware, to meet earlier NetBSD kernels expectations
3964 */
3965	.align	8
start:
dostart:
	mov	1, %g1
	sllx	%g1, 63, %g1		! set the INT_DIS bit (bit 63)
	wr	%g1, TICK_CMPR	! XXXXXXX clear and disable %tick_cmpr for now
	/*
	 * Startup.
	 *
	 * The Sun FCODE bootloader is nice and loads us where we want
	 * to be.  We have a full set of mappings already set up for us.
	 *
	 * I think we end up having an entire 16M allocated to us.
	 *
	 * We enter with the prom entry vector in %o0, dvec in %o1,
	 * and the bootops vector in %o2.
	 *
	 * All we need to do is:
	 *
	 *	1:	Save the prom vector
	 *
	 *	2:	Create a decent stack for ourselves
	 *
	 *	3:	Install the permanent 4MB kernel mapping
	 *
	 *	4:	Call the C language initialization code
	 *
	 */

	/*
	 * Set the processor state into a known state:
	 * supervisor mode, interrupt level >= 13, traps enabled
	 */
	wrpr	%g0, 13, %pil
	wrpr	%g0, PSTATE_INTR|PSTATE_PEF, %pstate
	wr	%g0, FPRS_FEF, %fprs		! Turn on FPU

	/*
	 * Step 2: Set up a v8-like stack if we need to
	 */

#ifdef _LP64
	! A 64-bit stack is marked by the BIAS'd (odd) %sp; add the bias
	! if the bootloader handed us an un-biased stack.
	btst	1, %sp
	bnz,pt	%icc, 0f
	 nop
	add	%sp, -BIAS, %sp
#else
	! A 32-bit kernel wants an un-biased stack; strip the bias if set.
	btst	1, %sp
	bz,pt	%icc, 0f
	 nop
	add	%sp, BIAS, %sp
#endif
0:

	call	_C_LABEL(bootstrap)
	 clr	%g4				! Clear data segment pointer
4021
/*
 * Initialize the boot CPU.  Basically:
 *
 *	Locate the cpu_info structure for this CPU.
 *	Establish a locked mapping for interrupt stack.
 *	Switch to the initial stack.
 *	Call the routine passed in via cpu_info->ci_spinup
 */
4030
4031#ifdef NO_VCACHE
4032#define	TTE_DATABITS	TTE_L|TTE_CP|TTE_P|TTE_W
4033#else
4034#define	TTE_DATABITS	TTE_L|TTE_CP|TTE_CV|TTE_P|TTE_W
4035#endif
4036
4037
ENTRY_NOPROFILE(cpu_initialize)	/* for cosmetic reasons - nicer backtrace */
	/*
	 * Step 5: is no more.
	 */

	/*
	 * Step 6: hunt through cpus list and find the one that
	 * matches our UPAID.
	 * NOTE(review): loops forever if no entry matches -- assumes the
	 * cpus list always contains this CPU.
	 */
	sethi	%hi(_C_LABEL(cpus)), %l1
	ldxa	[%g0] ASI_MID_REG, %l2
	LDPTR	[%l1 + %lo(_C_LABEL(cpus))], %l1
	srax	%l2, 17, %l2			! Isolate UPAID from CPU reg
	and	%l2, 0x1f, %l2
0:
	ld	[%l1 + CI_UPAID], %l3		! Load UPAID
	cmp	%l3, %l2			! Does it match?
	bne,a,pt	%icc, 0b		! no
	 LDPTR	[%l1 + CI_NEXT], %l1		! Load next cpu_info pointer


	/*
	 * Get pointer to our cpu_info struct
	 */
	mov	%l1, %l7			! save cpu_info pointer
	ldx	[%l1 + CI_PADDR], %l1		! Load the interrupt stack's PA

	sethi	%hi(0xa0000000), %l2		! V=1|SZ=01|NFO=0|IE=0
	sllx	%l2, 32, %l2			! Shift it into place

	mov	-1, %l3				! Create a nice mask
	sllx	%l3, 43, %l4			! Mask off high bits
	or	%l4, 0xfff, %l4			! We can just load this in 12 (of 13) bits

	andn	%l1, %l4, %l1			! Mask the phys page number

	or	%l2, %l1, %l1			! Now take care of the high bits
	or	%l1, TTE_DATABITS, %l2		! And low bits:	L=1|CP=1|CV=?|E=0|P=1|W=1|G=0

	!!
	!!  Now, map in the interrupt stack as context==0
	!!
	set	TLB_TAG_ACCESS, %l5
	set	INTSTACK, %l0
	stxa	%l0, [%l5] ASI_DMMU		! Make DMMU point to it
	stxa	%l2, [%g0] ASI_DMMU_DATA_IN	! Store it
	membar	#Sync

	!! Setup kernel stack (we rely on curlwp on this cpu
	!! being lwp0 here and its uarea is mapped special
	!! and already accessible here)
	flushw
	LDPTR	[%l7 + CI_CPCB], %l0		! load PCB/uarea pointer
	set	USPACE - TF_SIZE - CC64FSZ, %l1
 	add	%l1, %l0, %l0
#ifdef _LP64
	andn	%l0, 0x0f, %l0			! Needs to be 16-byte aligned
	sub	%l0, BIAS, %l0			! and biased
#endif
	mov	%l0, %sp
	flushw

#ifdef DEBUG
	! NOTE(review): debug-only and fragile -- the un-annulled 'bz'
	! executes the first insn of the following 'set' in its delay
	! slot, and the prom_printf call has no surrounding save/restore.
	set	_C_LABEL(pmapdebug), %o1
	ld	[%o1], %o1
	sethi	%hi(0x40000), %o2
	btst	%o2, %o1
	bz	0f

	set	1f, %o0		! Debug printf
	call	_C_LABEL(prom_printf)
	.data
1:
	.asciz	"Setting trap base...\r\n"
	_ALIGN
	.text
0:
#endif
	/*
	 * Step 7: change the trap base register, and install our TSB pointers
	 */

	/*
	 * install our TSB pointers
	 */
	sethi	%hi(_C_LABEL(tsbsize)), %l2
	sethi	%hi(0x1fff), %l3
	sethi	%hi(TSB), %l4
	LDPTR	[%l7 + CI_TSB_DMMU], %l0
	LDPTR	[%l7 + CI_TSB_IMMU], %l1
	ld	[%l2 + %lo(_C_LABEL(tsbsize))], %l2
	or	%l3, %lo(0x1fff), %l3
	or	%l4, %lo(TSB), %l4

	andn	%l0, %l3, %l0			! Mask off size and split bits
	or	%l0, %l2, %l0			! Make a TSB pointer
	stxa	%l0, [%l4] ASI_DMMU		! Install data TSB pointer

	andn	%l1, %l3, %l1			! Mask off size and split bits
	or	%l1, %l2, %l1			! Make a TSB pointer
	stxa	%l1, [%l4] ASI_IMMU		! Install instruction TSB pointer
	membar	#Sync
	set	1f, %l1
	flush	%l1
1:

	/* set trap table */
	set	_C_LABEL(trapbase), %l1
	call	_C_LABEL(prom_set_trap_table)	! Now we should be running 100% from our handlers
	 mov	%l1, %o0
	wrpr	%l1, 0, %tba			! Make sure the PROM didn't foul up.

	/*
	 * Switch to the kernel mode and run away.
	 */
	wrpr	%g0, WSTATE_KERN, %wstate

#ifdef DEBUG
	wrpr	%g0, 1, %tl			! Debug -- start at tl==3 so we'll watchdog
	wrpr	%g0, 0x1ff, %tt			! Debug -- clear out unused trap regs
	wrpr	%g0, 0, %tpc
	wrpr	%g0, 0, %tnpc
	wrpr	%g0, 0, %tstate
	wrpr	%g0, 0, %tl
#endif

#ifdef DEBUG
	set	_C_LABEL(pmapdebug), %o1
	ld	[%o1], %o1
	sethi	%hi(0x40000), %o2
	btst	%o2, %o1
	bz	0f

	LDPTR	[%l7 + CI_SPINUP], %o1
	set	1f, %o0		! Debug printf
	call	_C_LABEL(prom_printf)
	 mov	%sp, %o2

	.data
1:
	.asciz	"Calling startup routine %p with stack at %p...\r\n"
	_ALIGN
	.text
0:
#endif
	/*
	 * Call our startup routine.
	 */

	LDPTR	[%l7 + CI_SPINUP], %o1

	call	%o1				! Call routine
	 clr	%o0				! our frame arg is ignored

	set	1f, %o0				! Main should never come back here
	call	_C_LABEL(panic)
	 nop
	.data
1:
	.asciz	"main() returned\r\n"
	_ALIGN
	.text
4200
4201	.align 8
/*
 * u_long get_romtba(void)
 *
 * Return the current trap base address register (%tba) in %o0.
 * Reads no other state; clobbers nothing.
 */
ENTRY(get_romtba)
	rdpr	%tba, %o0		! %o0 = current %tba
	retl
	 nop
4205
4206#ifdef MULTIPROCESSOR
4207	/*
4208	 * cpu_mp_startup is called with:
4209	 *
4210	 *	%g2 = cpu_args
4211	 */
ENTRY(cpu_mp_startup)
	mov	1, %o0
	sllx	%o0, 63, %o0		! set the INT_DIS bit (bit 63)
	wr	%o0, TICK_CMPR	! XXXXXXX clear and disable %tick_cmpr for now
	wrpr    %g0, 0, %cleanwin
	wrpr	%g0, 0, %tl			! Make sure we're not in NUCLEUS mode
	wrpr	%g0, WSTATE_KERN, %wstate
	wrpr	%g0, PSTATE_KERN, %pstate
	flushw

	/*
	 * Get pointer to our cpu_info struct
	 */
	ldx	[%g2 + CBA_CPUINFO], %l1	! Load the interrupt stack's PA
	sethi	%hi(0xa0000000), %l2		! V=1|SZ=01|NFO=0|IE=0
	sllx	%l2, 32, %l2			! Shift it into place
	mov	-1, %l3				! Create a nice mask
	sllx	%l3, 43, %l4			! Mask off high bits
	or	%l4, 0xfff, %l4			! We can just load this in 12 (of 13) bits
	andn	%l1, %l4, %l1			! Mask the phys page number
	or	%l2, %l1, %l1			! Now take care of the high bits
	or	%l1, TTE_DATABITS, %l2		! And low bits:	L=1|CP=1|CV=?|E=0|P=1|W=1|G=0

	/*
	 *  Now, map in the interrupt stack & cpu_info as context==0
	 */
	set	TLB_TAG_ACCESS, %l5
	set	INTSTACK, %l0
	stxa	%l0, [%l5] ASI_DMMU		! Make DMMU point to it
	stxa	%l2, [%g0] ASI_DMMU_DATA_IN	! Store it

	/*
	 * Set 0 as primary context XXX
	 */
	mov	CTX_PRIMARY, %o0
	stxa	%g0, [%o0] ASI_DMMU
	membar	#Sync

	/*
	 * Temporarily use the interrupt stack
	 */
#ifdef _LP64
	! NB: in GAS expressions '&' binds tighter than binary '-', so this
	! parses as (addr & ~0x0f) - BIAS: align to 16 bytes, then bias.
	set	((EINTSTACK - CC64FSZ - TF_SIZE)) & ~0x0f - BIAS, %sp
#else
	set	EINTSTACK - CC64FSZ - TF_SIZE, %sp
#endif
	set	1, %fp
	clr	%i7

	/*
	 * install our TSB pointers
	 */
	sethi	%hi(CPUINFO_VA+CI_TSB_DMMU), %l0
	sethi	%hi(CPUINFO_VA+CI_TSB_IMMU), %l1
	sethi	%hi(_C_LABEL(tsbsize)), %l2
	sethi	%hi(0x1fff), %l3
	sethi	%hi(TSB), %l4
	LDPTR	[%l0 + %lo(CPUINFO_VA+CI_TSB_DMMU)], %l0
	LDPTR	[%l1 + %lo(CPUINFO_VA+CI_TSB_IMMU)], %l1
	ld	[%l2 + %lo(_C_LABEL(tsbsize))], %l2
	or	%l3, %lo(0x1fff), %l3
	or	%l4, %lo(TSB), %l4

	andn	%l0, %l3, %l0			! Mask off size and split bits
	or	%l0, %l2, %l0			! Make a TSB pointer
	stxa	%l0, [%l4] ASI_DMMU		! Install data TSB pointer
	membar	#Sync

	andn	%l1, %l3, %l1			! Mask off size and split bits
	or	%l1, %l2, %l1			! Make a TSB pointer
	stxa	%l1, [%l4] ASI_IMMU		! Install instruction TSB pointer
	membar	#Sync
	set	1f, %o0
	flush	%o0
1:

	/* set trap table */
	set	_C_LABEL(trapbase), %l1
	call	_C_LABEL(prom_set_trap_table)
	 mov	%l1, %o0
	wrpr	%l1, 0, %tba			! Make sure the PROM didn't
						! foul up.
	/*
	 * Use this CPUs idlelwp's uarea stack
	 */
	sethi	%hi(CPUINFO_VA+CI_IDLELWP), %l0
	LDPTR	[%l0 + %lo(CPUINFO_VA+CI_IDLELWP)], %l0
	set	USPACE - TF_SIZE - CC64FSZ, %l1
	LDPTR	[%l0 + L_PCB], %l0
	add	%l0, %l1, %l0
#ifdef _LP64
	andn	%l0, 0x0f, %l0			! Needs to be 16-byte aligned
	sub	%l0, BIAS, %l0			! and biased
#endif
	mov	%l0, %sp
	flushw

	/*
	 * Switch to the kernel mode and run away.
	 */
	wrpr	%g0, 13, %pil
	wrpr	%g0, PSTATE_INTR|PSTATE_PEF, %pstate
	wr	%g0, FPRS_FEF, %fprs			! Turn on FPU

	call	_C_LABEL(cpu_hatch)
	 clr %g4

	b	_C_LABEL(idle_loop)
	 clr	%o0

	NOTREACHED

	.globl cpu_mp_startup_end
cpu_mp_startup_end:
4326#endif
4327
4328/*
4329 * openfirmware(cell* param);
4330 *
4331 * OpenFirmware entry point
4332 *
4333 * If we're running in 32-bit mode we need to convert to a 64-bit stack
4334 * and 64-bit cells.  The cells we'll allocate off the stack for simplicity.
4335 */
4336	.align 8
ENTRY(openfirmware)
	! %o0/%i0 = cell array argument passed through to the PROM entry
	! point (romp).  Raises %pil to at least PIL_HIGH, saves/restores
	! the normal globals in locals across the PROM call, and returns
	! the PROM's %o0.
	sethi	%hi(romp), %o4
	andcc	%sp, 1, %g0			! biased (odd) %sp => v9 stack
	bz,pt	%icc, 1f
	 LDPTR	[%o4+%lo(romp)], %o4		! v9 stack, just load the addr and callit
	save	%sp, -CC64FSZ, %sp
	rdpr	%pil, %i2
	mov	PIL_HIGH, %i3
	cmp	%i3, %i2
	movle	%icc, %i2, %i3			! %i3 = max(%pil, PIL_HIGH)
	wrpr	%g0, %i3, %pil
	mov	%i0, %o0
	mov	%g1, %l1			! stash globals; PROM may clobber them
	mov	%g2, %l2
	mov	%g3, %l3
	mov	%g4, %l4
	mov	%g5, %l5
	mov	%g6, %l6
	mov	%g7, %l7
	rdpr	%pstate, %l0
	jmpl	%i4, %o7
	! NOTE(review): the IE choice here is the mirror image of the v8
	! path below (_LP64 gets IE here but not there) -- confirm intended.
#if !defined(_LP64)
	 wrpr	%g0, PSTATE_PROM, %pstate
#else
	 wrpr	%g0, PSTATE_PROM|PSTATE_IE, %pstate
#endif
	wrpr	%l0, %g0, %pstate
	mov	%l1, %g1
	mov	%l2, %g2
	mov	%l3, %g3
	mov	%l4, %g4
	mov	%l5, %g5
	mov	%l6, %g6
	mov	%l7, %g7
	wrpr	%i2, 0, %pil			! restore the caller's %pil
	ret
	 restore	%o0, %g0, %o0

1:	! v8 -- need to screw with stack & params
#ifdef NOTDEF_DEBUG
	mov	%o7, %o5
	call	globreg_check
	 nop
	mov	%o5, %o7
#endif
	save	%sp, -CC64FSZ, %sp		! Get a new 64-bit stack frame
	add	%sp, -BIAS, %sp
	rdpr	%pstate, %l0
	srl	%sp, 0, %sp			! zero-extend the v8 stack pointer
	rdpr	%pil, %i2	! s = splx(level)
	mov	%i0, %o0
	mov	PIL_HIGH, %i3
	mov	%g1, %l1
	mov	%g2, %l2
	cmp	%i3, %i2
	mov	%g3, %l3
	mov	%g4, %l4
	mov	%g5, %l5
	movle	%icc, %i2, %i3
	mov	%g6, %l6
	mov	%g7, %l7
	wrpr	%i3, %g0, %pil
	jmpl	%i4, %o7
	! Enable 64-bit addresses for the prom
#if defined(_LP64)
	 wrpr	%g0, PSTATE_PROM, %pstate
#else
	 wrpr	%g0, PSTATE_PROM|PSTATE_IE, %pstate
#endif
	wrpr	%l0, 0, %pstate
	wrpr	%i2, 0, %pil
	mov	%l1, %g1
	mov	%l2, %g2
	mov	%l3, %g3
	mov	%l4, %g4
	mov	%l5, %g5
	mov	%l6, %g6
	mov	%l7, %g7
	ret
	 restore	%o0, %g0, %o0
4417
4418/*
4419 * void ofw_exit(cell_t args[])
4420 */
ENTRY(openfirmware_exit)
	! %i0 = cell array argument handed to the PROM.  Restores the OFW
	! trap table and a context-0 environment, then jumps into the PROM
	! (romp) and never returns.
	STACKFRAME(-CC64FSZ)
	flushw					! Flush register windows

	wrpr	%g0, PIL_HIGH, %pil		! Disable interrupts
	sethi	%hi(romtba), %l5
	LDPTR	[%l5 + %lo(romtba)], %l5
	wrpr	%l5, 0, %tba			! restore the ofw trap table

	/* Arrange locked kernel stack as PROM stack */
	set	EINTSTACK  - CC64FSZ, %l5

	andn	%l5, 0x0f, %l5			! Needs to be 16-byte aligned
	sub	%l5, BIAS, %l5			! and biased
	mov	%l5, %sp
	flushw

	sethi	%hi(romp), %l6
	LDPTR	[%l6 + %lo(romp)], %l6

	mov     CTX_PRIMARY, %l3		! set context 0
	stxa    %g0, [%l3] ASI_DMMU
	membar	#Sync

	wrpr	%g0, PSTATE_PROM, %pstate	! Disable interrupts
						! and enable 64-bit addresses
	wrpr	%g0, 0, %tl			! force trap level 0
	call	%l6
	 mov	%i0, %o0
	NOTREACHED
4451
4452/*
4453 * sp_tlb_flush_pte_us(vaddr_t va, int ctx)
4454 * sp_tlb_flush_pte_usiii(vaddr_t va, int ctx)
4455 *
4456 * Flush tte from both IMMU and DMMU.
4457 *
4458 * This uses %o0-%o5
4459 */
4460	.align 8
ENTRY(sp_tlb_flush_pte_us)
	! %o0 = va, %o1 = ctx.  Demaps the page from both TLBs via the
	! secondary context register (UltraSPARC I/II flavor).
#ifdef DEBUG
	set	pmapdebug, %o3
	lduw	[%o3], %o3
!	movrz	%o1, -1, %o3				! Print on either pmapdebug & PDB_DEMAP or ctx == 0
	btst	0x0020, %o3
	bz,pt	%icc, 2f
	 nop
	save	%sp, -CC64FSZ, %sp
	set	1f, %o0
	mov	%i1, %o1
	andn	%i0, 0xfff, %o3
	or	%o3, 0x010, %o3
	call	_C_LABEL(printf)
	 mov	%i0, %o2
	restore
	.data
1:
	.asciz	"sp_tlb_flush_pte_us:	demap ctx=%x va=%08x res=%x\r\n"
	_ALIGN
	.text
2:
#endif
#ifdef MULTIPROCESSOR
	rdpr	%pstate, %o3
	andn	%o3, PSTATE_IE, %o4			! disable interrupts
	wrpr	%o4, 0, %pstate
#endif
	srlx	%o0, PG_SHIFT4U, %o0			! drop unused va bits
	mov	CTX_SECONDARY, %o2
	sllx	%o0, PG_SHIFT4U, %o0
	ldxa	[%o2] ASI_DMMU, %o5			! Save secondary context
	sethi	%hi(KERNBASE), %o4
	membar	#LoadStore
	stxa	%o1, [%o2] ASI_DMMU			! Insert context to demap
	membar	#Sync
	or	%o0, DEMAP_PAGE_SECONDARY, %o0		! Demap page from secondary context only
	stxa	%o0, [%o0] ASI_DMMU_DEMAP		! Do the demap
	stxa	%o0, [%o0] ASI_IMMU_DEMAP		! to both TLBs
#ifdef TLB_FLUSH_LOWVA
	srl	%o0, 0, %o0				! and make sure it's both 32- and 64-bit entries
	stxa	%o0, [%o0] ASI_DMMU_DEMAP		! Do the demap
	stxa	%o0, [%o0] ASI_IMMU_DEMAP		! Do the demap
#endif
	flush	%o4
	stxa	%o5, [%o2] ASI_DMMU			! Restore secondary context
	membar	#Sync
	retl
#ifdef MULTIPROCESSOR
	 wrpr	%o3, %pstate				! restore interrupts
#else
	 nop
#endif
4514
ENTRY(sp_tlb_flush_pte_usiii)
	! %o0 = va, %o1 = ctx.  UltraSPARC III flavor: demaps via the
	! primary context register from NUCLEUS mode (see note below).
#ifdef DEBUG
	set	pmapdebug, %o3
	lduw	[%o3], %o3
!	movrz	%o1, -1, %o3				! Print on either pmapdebug & PDB_DEMAP or ctx == 0
	btst	0x0020, %o3
	bz,pt	%icc, 2f
	 nop
	save	%sp, -CC64FSZ, %sp
	set	1f, %o0
	mov	%i1, %o1
	andn	%i0, 0xfff, %o3
	or	%o3, 0x010, %o3
	call	_C_LABEL(printf)
	 mov	%i0, %o2
	restore
	.data
1:
	.asciz	"sp_tlb_flush_pte_usiii:	demap ctx=%x va=%08x res=%x\r\n"
	_ALIGN
	.text
2:
#endif
	! %o0 = VA [in]
	! %o1 = ctx value [in] / KERNBASE
	! %o2 = CTX_PRIMARY
	! %o3 = saved %tl
	! %o4 = saved %pstate
	! %o5 = saved primary ctx

	! Need this for UP as well
	rdpr	%pstate, %o4
	andn	%o4, PSTATE_IE, %o3			! disable interrupts
	wrpr	%o3, 0, %pstate

	!!
	!! Cheetahs do not support flushing the IMMU from secondary context
	!!
	rdpr	%tl, %o3
	mov	CTX_PRIMARY, %o2
	brnz,pt	%o3, 1f					! already above TL0?  leave %tl alone
	 andn	%o0, 0xfff, %o0				! drop unused va bits
	wrpr	%g0, 1, %tl				! Make sure we're NUCLEUS
1:
	ldxa	[%o2] ASI_DMMU, %o5			! Save primary context
	membar	#LoadStore
	stxa	%o1, [%o2] ASI_DMMU			! Insert context to demap
	sethi	%hi(KERNBASE), %o1
	membar	#Sync
	or	%o0, DEMAP_PAGE_PRIMARY, %o0
	stxa	%o0, [%o0] ASI_DMMU_DEMAP		! Do the demap
	membar	#Sync
	stxa	%o0, [%o0] ASI_IMMU_DEMAP		! to both TLBs
	membar	#Sync
#ifdef TLB_FLUSH_LOWVA
	srl	%o0, 0, %o0				! and make sure it's both 32- and 64-bit entries
	stxa	%o0, [%o0] ASI_DMMU_DEMAP		! Do the demap
	membar	#Sync
	stxa	%o0, [%o0] ASI_IMMU_DEMAP		! Do the demap
	membar	#Sync
#endif
	flush	%o1
	stxa	%o5, [%o2] ASI_DMMU			! Restore primary context
	membar	#Sync
	brnz,pt	%o3, 1f					! %tl was untouched -- skip restore
	 flush	%o1
	wrpr	%g0, %o3, %tl				! Return to kernel mode.
1:
	retl
	 wrpr	%o4, %pstate				! restore interrupts
4585
4586
4587/*
4588 * sp_tlb_flush_all_us(void)
4589 * sp_tlb_flush_all_usiii(void)
4590 *
4591 * Flush all user TLB entries from both IMMU and DMMU.
4592 * We have both UltraSPARC I+II, and UltraSPARC >=III versions.
4593 */
	.align 8
ENTRY(sp_tlb_flush_all_us)
	! Spitfire (UltraSPARC I/II): walk every TLB entry by index,
	! demapping each non-nucleus (ctx != 0) entry via the secondary
	! context register.  Runs with interrupts disabled throughout.
	rdpr	%pstate, %o3
	andn	%o3, PSTATE_IE, %o4			! disable interrupts
	wrpr	%o4, 0, %pstate
	set	((TLB_SIZE_SPITFIRE-1) * 8), %o0	! byte index of last TLB entry
	set	CTX_SECONDARY, %o4
	ldxa	[%o4] ASI_DMMU, %o4			! save secondary context
	set	CTX_MASK, %o5
	membar	#Sync

	! %o0 = loop counter
	! %o1 = ctx value
	! %o2 = TLB tag value
	! %o3 = saved %pstate
	! %o4 = saved secondary ctx
	! %o5 = CTX_MASK

0:
	ldxa	[%o0] ASI_DMMU_TLB_TAG, %o2		! fetch the TLB tag
	andcc	%o2, %o5, %o1				! context 0?
	bz,pt	%xcc, 1f				! if so, skip
	 mov	CTX_SECONDARY, %o2

	stxa	%o1, [%o2] ASI_DMMU			! set the context
	set	DEMAP_CTX_SECONDARY, %o2
	membar	#Sync
	stxa	%o2, [%o2] ASI_DMMU_DEMAP		! do the demap
	membar	#Sync

1:
	dec	8, %o0
	brgz,pt %o0, 0b					! loop over all entries
	 nop

/*
 * now do the IMMU
 */

	set	((TLB_SIZE_SPITFIRE-1) * 8), %o0	! restart at last entry

0:
	ldxa	[%o0] ASI_IMMU_TLB_TAG, %o2		! fetch the TLB tag
	andcc	%o2, %o5, %o1				! context 0?
	bz,pt	%xcc, 1f				! if so, skip
	 mov	CTX_SECONDARY, %o2

	stxa	%o1, [%o2] ASI_DMMU			! set the context
	set	DEMAP_CTX_SECONDARY, %o2
	membar	#Sync
	stxa	%o2, [%o2] ASI_IMMU_DEMAP		! do the demap
	membar	#Sync

1:
	dec	8, %o0
	brgz,pt %o0, 0b					! loop over all entries
	 nop

	set	CTX_SECONDARY, %o2
	stxa	%o4, [%o2] ASI_DMMU			! restore secondary ctx
	sethi	%hi(KERNBASE), %o4
	membar	#Sync
	flush	%o4
	retl
	 wrpr	%o3, %pstate				! restore interrupts
4660
	.align 8
ENTRY(sp_tlb_flush_all_usiii)
	! UltraSPARC III version: a single DEMAP_ALL operation flushes
	! both TLBs.  Make sure %tl > 0 for the demap, restoring the
	! caller's trap level afterwards.
	rdpr	%tl, %o5
	brnz,pt	%o5, 1f					! already at %tl > 0?
	 set	DEMAP_ALL, %o2
	wrpr	1, %tl
1:
	rdpr	%pstate, %o3
	andn	%o3, PSTATE_IE, %o4			! disable interrupts
	wrpr	%o4, 0, %pstate

	stxa	%o2, [%o2] ASI_IMMU_DEMAP		! demap the ITLB
	membar	#Sync
	stxa	%o2, [%o2] ASI_DMMU_DEMAP		! and the DTLB

	sethi	%hi(KERNBASE), %o4
	membar	#Sync
	flush	%o4

	wrpr	%o5, %tl				! restore saved %tl
	retl
	 wrpr	%o3, %pstate				! restore interrupts
4683
4684/*
4685 * sp_blast_dcache(int dcache_size, int dcache_line_size)
4686 *
4687 * Clear out all of D$ regardless of contents
4688 */
	.align 8
ENTRY(sp_blast_dcache)
/*
 * We turn off interrupts for the duration to prevent RED exceptions.
 *
 * In:	%o0 = dcache_size, %o1 = dcache_line_size
 * Walks the D$ tag array from the last line down to offset 0,
 * zeroing each tag.
 */
#ifdef PROF
	save	%sp, -CC64FSZ, %sp
#endif

	rdpr	%pstate, %o3
	sub	%o0, %o1, %o0				! start at the last cache line
	andn	%o3, PSTATE_IE, %o4			! Turn off PSTATE_IE bit
	wrpr	%o4, 0, %pstate
1:
	stxa	%g0, [%o0] ASI_DCACHE_TAG		! clear this line's tag
	membar	#Sync
	brnz,pt	%o0, 1b					! loop down to offset 0
	 sub	%o0, %o1, %o0

	sethi	%hi(KERNBASE), %o2
	flush	%o2
	membar	#Sync
#ifdef PROF
	wrpr	%o3, %pstate				! restore interrupts
	ret
	 restore
#else
	retl
	 wrpr	%o3, %pstate				! restore interrupts
#endif
4719
4720#ifdef MULTIPROCESSOR
4721/*
4722 * void sparc64_ipi_blast_dcache(int dcache_size, int dcache_line_size)
4723 *
4724 * Clear out all of D$ regardless of contents
4725 *
4726 * On entry:
4727 *	%g2 = dcache_size
4728 *	%g3 = dcache_line_size
4729 */
	.align 8
ENTRY(sparc64_ipi_blast_dcache)
	! IPI handler variant of sp_blast_dcache: arguments arrive in
	! %g2/%g3 (see header comment) and we return through the
	! interrupt-vector return path instead of retl.
	sub	%g2, %g3, %g2				! start at the last cache line
1:
	stxa	%g0, [%g2] ASI_DCACHE_TAG		! clear this line's tag
	membar	#Sync
	brnz,pt	%g2, 1b					! loop down to offset 0
	 sub	%g2, %g3, %g2

	sethi	%hi(KERNBASE), %g5
	flush	%g5
	membar	#Sync

	ba,a	ret_from_intr_vector
	 nop
4745#endif /* MULTIPROCESSOR */
4746
4747/*
4748 * blast_icache_us()
4749 * blast_icache_usiii()
4750 *
4751 * Clear out all of I$ regardless of contents
4752 * Does not modify %o0
4753 *
4754 * We turn off interrupts for the duration to prevent RED exceptions.
 * For the Cheetah version, we also have to turn off the I$ during this as
4756 * ASI_ICACHE_TAG accesses interfere with coherency.
4757 */
	.align 8
ENTRY(blast_icache_us)
	! Clear every I$ tag, from the last line down to offset 0.
	! Sizes are taken from the icache_size/icache_line_size globals.
	rdpr	%pstate, %o3
	sethi	%hi(icache_size), %o1
	ld	[%o1 + %lo(icache_size)], %o1
	sethi	%hi(icache_line_size), %o2
	ld	[%o2 + %lo(icache_line_size)], %o2
	sub	%o1, %o2, %o1				! start at the last cache line
	andn	%o3, PSTATE_IE, %o4			! Turn off PSTATE_IE bit
	wrpr	%o4, 0, %pstate
1:
	stxa	%g0, [%o1] ASI_ICACHE_TAG		! clear this line's tag
	brnz,pt	%o1, 1b					! loop down to offset 0
	 sub	%o1, %o2, %o1
	sethi	%hi(KERNBASE), %o5
	flush	%o5
	membar	#Sync
	retl
	 wrpr	%o3, %pstate				! restore interrupts
4777
	.align 8
ENTRY(blast_icache_usiii)
	! USIII version: the I$ must be disabled (via MCCR) while its
	! tags are written, since ASI_ICACHE_TAG accesses interfere
	! with coherency (see header comment above).
	rdpr	%pstate, %o3
	sethi	%hi(icache_size), %o1
	ld	[%o1 + %lo(icache_size)], %o1
	sethi	%hi(icache_line_size), %o2
	ld	[%o2 + %lo(icache_line_size)], %o2
	sub	%o1, %o2, %o1				! start at the last cache line
	andn	%o3, PSTATE_IE, %o4			! Turn off PSTATE_IE bit
	wrpr	%o4, 0, %pstate
	ldxa    [%g0] ASI_MCCR, %o5
	andn	%o5, MCCR_ICACHE_EN, %o4		! Turn off the I$
	stxa	%o4, [%g0] ASI_MCCR
	flush 	%g0
1:
	stxa	%g0, [%o1] ASI_ICACHE_TAG		! clear this line's tag
	membar	#Sync
	brnz,pt	%o1, 1b					! loop down to offset 0
	 sub	%o1, %o2, %o1
	stxa	%o5, [%g0] ASI_MCCR			! Restore the I$
	flush 	%g0
	retl
	 wrpr	%o3, %pstate				! restore interrupts
4801
4802/*
4803 * dcache_flush_page_us(paddr_t pa)
4804 * dcache_flush_page_usiii(paddr_t pa)
4805 *
4806 * Clear one page from D$.
4807 *
4808 */
	.align 8
ENTRY(dcache_flush_page_us)
#ifndef _LP64
	COMBINE(%o0, %o1, %o0)			! reassemble 64-bit PA from two args
#endif
	! Scan all D$ tags and invalidate any line whose tag matches
	! the page at PA %o0.
	! %o0 = target tag bits (then reused as current line while scanning)
	! %o1 = tag compare mask, %o2 = target tag, %o4 = scan cursor,
	! %o5 = bytes of D$ remaining
	mov	-1, %o1		! Generate mask for tag: bits [29..2]
	srlx	%o0, 13-2, %o2	! Tag is PA bits <40:13> in bits <29:2>
	clr	%o4
	srl	%o1, 2, %o1	! Now we have bits <29:0> set
	set	(2*NBPG), %o5
	ba,pt	%icc, 1f
	 andn	%o1, 3, %o1	! Now we have bits <29:2> set

	.align 8
1:
	ldxa	[%o4] ASI_DCACHE_TAG, %o3
	mov	%o4, %o0	! remember this line in case it matches
	deccc	32, %o5
	bl,pn	%icc, 2f	! done scanning the whole cache?
	 inc	32, %o4

	xor	%o3, %o2, %o3	! compare line tag against the target tag
	andcc	%o3, %o1, %g0
	bne,pt	%xcc, 1b	! no match -- next line
	 membar	#LoadStore

	stxa	%g0, [%o0] ASI_DCACHE_TAG	! match: invalidate this line
	ba,pt	%icc, 1b
	 membar	#StoreLoad
2:

	sethi	%hi(KERNBASE), %o5
	flush	%o5
	retl
	 membar	#Sync
4844
	.align 8
ENTRY(dcache_flush_page_usiii)
#ifndef _LP64
	COMBINE(%o0, %o1, %o0)			! reassemble 64-bit PA from two args
#endif
	! USIII supports invalidation by physical address, so just walk
	! the page one cache line at a time -- no tag scan needed.
	set	NBPG, %o1
	sethi	%hi(dcache_line_size), %o2
	add	%o0, %o1, %o1	! end address
	ld	[%o2 + %lo(dcache_line_size)], %o2

1:
	stxa	%g0, [%o0] ASI_DCACHE_INVALIDATE	! invalidate this line by PA
	add	%o0, %o2, %o0				! next line
	cmp	%o0, %o1
	bl,pt	%xcc, 1b
	 nop

	sethi	%hi(KERNBASE), %o5
	flush	%o5
	retl
	 membar	#Sync
4866
4867/*
4868 *	cache_flush_phys_us(paddr_t, psize_t, int);
4869 *	cache_flush_phys_usiii(paddr_t, psize_t, int);
4870 *
4871 *	Clear a set of paddrs from the D$, I$ and if param3 is
4872 *	non-zero, E$.  (E$ is not supported yet).
4873 */
4874
	.align 8
ENTRY(cache_flush_phys_us)
#ifndef _LP64
	COMBINE(%o0, %o1, %o0)			! reassemble 64-bit PA range
	COMBINE(%o2, %o3, %o1)
	mov	%o4, %o2
#endif
#ifdef DEBUG
	tst	%o2		! Want to clear E$?
	tnz	1		! Error!  (E$ flush not supported -- trap)
#endif
	! %o0 = start PA, %o1 = end PA (inclusive), %o2 = tag mask
	add	%o0, %o1, %o1	! End PA
	dec	%o1

	!!
	!! Both D$ and I$ tags match pa bits 42-13, but
	!! they are shifted different amounts.  So we'll
	!! generate a mask for bits 40-13.
	!!

	mov	-1, %o2		! Generate mask for tag: bits [40..13]
	srl	%o2, 5, %o2	! 32-5 = [27..0]
	sllx	%o2, 13, %o2	! 27+13 = [40..13]

	and	%o2, %o0, %o0	! Mask away uninteresting bits
	and	%o2, %o1, %o1	! (probably not necessary)

	set	(2*NBPG), %o5	! %o5 = bytes of cache remaining
	clr	%o4		! %o4 = scan cursor
1:
	ldxa	[%o4] ASI_DCACHE_TAG, %o3
	sllx	%o3, 40-29, %o3	! Shift D$ tag into place
	and	%o3, %o2, %o3	! Mask out trash

	cmp	%o0, %o3
	blt,pt	%xcc, 2f	! Too low
	 cmp	%o1, %o3
	bgt,pt	%xcc, 2f	! Too high
	 nop

	membar	#LoadStore
	stxa	%g0, [%o4] ASI_DCACHE_TAG ! Just right
	membar	#Sync
2:
	ldda	[%o4] ASI_ICACHE_TAG, %g0	! Tag goes in %g1
	sllx	%g1, 40-35, %g1			! Shift I$ tag into place
	and	%g1, %o2, %g1			! Mask out trash
	cmp	%o0, %g1
	blt,pt	%xcc, 3f			! Too low
	 cmp	%o1, %g1
	bgt,pt	%xcc, 3f			! Too high
	 nop
	stxa	%g0, [%o4] ASI_ICACHE_TAG	! Just right: invalidate
3:
	membar	#StoreLoad
	dec	32, %o5
	brgz,pt	%o5, 1b				! loop over all cache lines
	 inc	32, %o4

	sethi	%hi(KERNBASE), %o5
	flush	%o5
	retl
	 membar	#Sync
4938
	.align 8
ENTRY(cache_flush_phys_usiii)
#ifndef _LP64
	COMBINE(%o0, %o1, %o0)			! reassemble 64-bit PA range
	COMBINE(%o2, %o3, %o1)
	mov	%o4, %o2
#endif
#ifdef DEBUG
	tst	%o2		! Want to clear E$?
	tnz	1		! Error!  (E$ flush not supported -- trap)
#endif
	! USIII: invalidate the D$ by PA, one cache line at a time.
	add	%o0, %o1, %o1	! End PA
	sethi	%hi(dcache_line_size), %o3
	ld	[%o3 + %lo(dcache_line_size)], %o3
	sethi	%hi(KERNBASE), %o5
1:
	stxa	%g0, [%o0] ASI_DCACHE_INVALIDATE	! invalidate this line by PA
	add	%o0, %o3, %o0				! next line
	cmp	%o0, %o1
	bl,pt	%xcc, 1b
	 nop

	/* don't need to flush the I$ on cheetah */

	flush	%o5
	retl
	 membar	#Sync
4966
4967#ifdef COMPAT_16
4968#ifdef _LP64
4969/*
4970 * XXXXX Still needs lotsa cleanup after sendsig is complete and offsets are known
4971 *
4972 * The following code is copied to the top of the user stack when each
4973 * process is exec'ed, and signals are `trampolined' off it.
4974 *
4975 * When this code is run, the stack looks like:
4976 *	[%sp]			128 bytes to which registers can be dumped
4977 *	[%sp + 128]		signal number (goes in %o0)
4978 *	[%sp + 128 + 4]		signal code (goes in %o1)
4979 *	[%sp + 128 + 8]		first word of saved state (sigcontext)
4980 *	    .
4981 *	    .
4982 *	    .
4983 *	[%sp + NNN]	last word of saved state
4984 * (followed by previous stack contents or top of signal stack).
4985 * The address of the function to call is in %g1; the old %g1 and %o0
4986 * have already been saved in the sigcontext.  We are running in a clean
4987 * window, all previous windows now being saved to the stack.
4988 *
4989 * Note that [%sp + 128 + 8] == %sp + 128 + 16.  The copy at %sp+128+8
4990 * will eventually be removed, with a hole left in its place, if things
4991 * work out.
4992 */
ENTRY_NOPROFILE(sigcode)
	/*
	 * XXX  the `save' and `restore' below are unnecessary: should
	 *	replace with simple arithmetic on %sp
	 *
	 * Make room on the stack for 64 %f registers + %fsr.  This comes
	 * out to 64*4+8 or 264 bytes, but this must be aligned to a multiple
	 * of 64, or 320 bytes.
	 */
	save	%sp, -CC64FSZ - 320, %sp
	mov	%g2, %l2		! save globals in %l registers
	mov	%g3, %l3
	mov	%g4, %l4
	mov	%g5, %l5
	mov	%g6, %l6
	mov	%g7, %l7
	/*
	 * Saving the fpu registers is expensive, so do it iff it is
	 * enabled and dirty.
	 */
	rd	%fprs, %l0
	btst	FPRS_DL|FPRS_DU, %l0	! All clean?
	bz,pt	%icc, 2f
	 btst	FPRS_DL, %l0		! test dl
	bz,pt	%icc, 1f
	 btst	FPRS_DU, %l0		! test du

	! fpu is enabled, oh well
	stx	%fsr, [%sp + CC64FSZ + BIAS + 0]
	add	%sp, BIAS+CC64FSZ+BLOCK_SIZE, %l0	! Generate a pointer so we can
	andn	%l0, BLOCK_ALIGN, %l0	! do a block store
	stda	%f0, [%l0] ASI_BLK_P
	inc	BLOCK_SIZE, %l0
	stda	%f16, [%l0] ASI_BLK_P
1:
	bz,pt	%icc, 2f		! %icc still holds the FPRS_DU test above
	 add	%sp, BIAS+CC64FSZ+BLOCK_SIZE, %l0	! Generate a pointer so we can
	andn	%l0, BLOCK_ALIGN, %l0	! do a block store
	add	%l0, 2*BLOCK_SIZE, %l0	! and skip what we already stored
	stda	%f32, [%l0] ASI_BLK_P
	inc	BLOCK_SIZE, %l0
	stda	%f48, [%l0] ASI_BLK_P
2:
	membar	#Sync
	rd	%fprs, %l0		! reload fprs copy, for checking after
	rd	%y, %l1			! in any case, save %y
	lduw	[%fp + BIAS + 128], %o0	! sig
	lduw	[%fp + BIAS + 128 + 4], %o1	! code
	call	%g1			! (*sa->sa_handler)(sig,code,scp)
	 add	%fp, BIAS + 128 + 8, %o2	! scp
	wr	%l1, %g0, %y		! in any case, restore %y

	/*
	 * Now that the handler has returned, re-establish all the state
	 * we just saved above, then do a sigreturn.
	 */
	btst	FPRS_DL|FPRS_DU, %l0	! All clean?
	bz,pt	%icc, 2f
	 btst	FPRS_DL, %l0		! test dl
	bz,pt	%icc, 1f
	 btst	FPRS_DU, %l0		! test du

	ldx	[%sp + CC64FSZ + BIAS + 0], %fsr
	add	%sp, BIAS+CC64FSZ+BLOCK_SIZE, %l0	! Generate a pointer so we can
	andn	%l0, BLOCK_ALIGN, %l0	! do a block load
	ldda	[%l0] ASI_BLK_P, %f0
	inc	BLOCK_SIZE, %l0
	ldda	[%l0] ASI_BLK_P, %f16
1:
	bz,pt	%icc, 2f		! %icc still holds the FPRS_DU test above
	 nop
	add	%sp, BIAS+CC64FSZ+BLOCK_SIZE, %l0	! Generate a pointer so we can
	andn	%l0, BLOCK_ALIGN, %l0	! do a block load
	inc	2*BLOCK_SIZE, %l0	! and skip what we already loaded
	ldda	[%l0] ASI_BLK_P, %f32
	inc	BLOCK_SIZE, %l0
	ldda	[%l0] ASI_BLK_P, %f48
2:
	mov	%l2, %g2		! restore the saved globals
	mov	%l3, %g3
	mov	%l4, %g4
	mov	%l5, %g5
	mov	%l6, %g6
	mov	%l7, %g7
	membar	#Sync

	restore	%g0, SYS_compat_16___sigreturn14, %g1 ! get registers back & set syscall #
	add	%sp, BIAS + 128 + 8, %o0! compute scp
!	andn	%o0, 0x0f, %o0
	t	ST_SYSCALL		! sigreturn(scp)
	! sigreturn does not return unless it fails
	mov	SYS_exit, %g1		! exit(errno)
	t	ST_SYSCALL
	/* NOTREACHED */

	.globl	_C_LABEL(esigcode)
_C_LABEL(esigcode):
5090#endif
5091
5092#if !defined(_LP64)
5093
5094#define SIGCODE_NAME		sigcode
5095#define ESIGCODE_NAME		esigcode
5096#define SIGRETURN_NAME		SYS_compat_16___sigreturn14
5097#define EXIT_NAME		SYS_exit
5098
5099#include "sigcode32.s"
5100
5101#endif
5102#endif
5103
5104/*
5105 * getfp() - get stack frame pointer
5106 */
ENTRY(getfp)
	mov	%fp, %o0	! return value = current frame pointer
	retl
	 nop
5110
5111/*
5112 * nothing MD to do in the idle loop
5113 */
ENTRY(cpu_idle)
	retl			! no machine-dependent idle work; just return
	 nop
5117
5118/*
5119 * cpu_switchto() switches to an lwp to run and runs it, saving the
5120 * current one away.
5121 *
 * struct lwp * cpu_switchto(struct lwp *current, struct lwp *next)
5123 * Switch to the specified next LWP
5124 * Arguments:
5125 *	i0	'struct lwp *' of the current LWP
5126 *	i1	'struct lwp *' of the LWP to switch to
5127 *	i2	'bool' of the flag returning to a softint LWP or not
5128 * Returns:
5129 *	the old lwp switched away from
5130 */
ENTRY(cpu_switchto)
	save	%sp, -CC64FSZ, %sp
	/*
	 * REGISTER USAGE AT THIS POINT:
	 *	%l1 = newpcb
	 *	%l3 = new trapframe
	 *	%l4 = new l->l_proc
	 *	%l5 = pcb of oldlwp
	 *	%l6 = %hi(CPCB)
	 *	%l7 = %hi(CURLWP)
	 *	%i0 = oldlwp
	 *	%i1 = lwp
	 *	%i2 = returning
	 *	%o0 = tmp 1
	 *	%o1 = tmp 2
	 *	%o2 = tmp 3
	 *	%o3 = tmp 4
	 */

	flushw				! save all register windows except this one
	wrpr	%g0, PSTATE_KERN, %pstate	! make sure we're on normal globals
						! with traps turned off

	brz,pn	%i0, 1f			! no old lwp? skip saving its state
	 sethi	%hi(CPCB), %l6

	rdpr	%pstate, %o1			! oldpstate = %pstate;
	LDPTR	[%i0 + L_PCB], %l5

	stx	%i7, [%l5 + PCB_PC]	! stash resume pc/sp in the old pcb
	stx	%i6, [%l5 + PCB_SP]
	sth	%o1, [%l5 + PCB_PSTATE]

	rdpr	%cwp, %o2		! Useless
	stb	%o2, [%l5 + PCB_CWP]

1:
	sethi	%hi(CURLWP), %l7

	LDPTR   [%i1 + L_PCB], %l1	! newpcb = l->l_pcb;

	/*
	 * Load the new lwp.  To load, we must change stacks and
	 * alter cpcb and the window control registers, hence we must
	 * keep interrupts disabled.
	 */

	STPTR	%i1, [%l7 + %lo(CURLWP)]	! curlwp = l;
	STPTR	%l1, [%l6 + %lo(CPCB)]		! cpcb = newpcb;

	ldx	[%l1 + PCB_SP], %i6	! pick up the new lwp's saved sp/pc
	ldx	[%l1 + PCB_PC], %i7

	wrpr	%g0, 0, %otherwin	! These two insns should be redundant
	wrpr	%g0, 0, %canrestore
	rdpr	%ver, %o3
	and	%o3, CWP, %o3		! %ver.maxwin, i.e. NWINDOWS-1
	wrpr	%g0, %o3, %cleanwin
	dec	1, %o3					! NWINDOWS-1-1
	/* Skip the rest if returning to a interrupted LWP. */
	brnz,pn	%i2, Lsw_noras
	 wrpr	%o3, %cansave

	/* finally, enable traps */
	wrpr	%g0, PSTATE_INTR, %pstate

	!flushw
	!membar #Sync

	/*
	 * Check for restartable atomic sequences (RAS)
	 */
	LDPTR	[%i1 + L_PROC], %l4		! now %l4 points to p
	mov	%l4, %o0		! p is first arg to ras_lookup
	LDPTR	[%o0 + P_RASLIST], %o1	! any RAS in p?
	brz,pt	%o1, Lsw_noras		! no, skip RAS check
	 LDPTR	[%i1 + L_TF], %l3	! pointer to trap frame
	call	_C_LABEL(ras_lookup)
	 LDPTR	[%l3 + TF_PC], %o1
	cmp	%o0, -1			! -1 => no RAS match
	be,pt	%xcc, Lsw_noras
	 add	%o0, 4, %o1
	STPTR	%o0, [%l3 + TF_PC]	! store rewound %pc
	STPTR	%o1, [%l3 + TF_NPC]	! and %npc

Lsw_noras:

	/*
	 * We are resuming the process that was running at the
	 * call to switch().  Just set psr ipl and return.
	 */
!	wrpr	%g0, 0, %cleanwin	! DEBUG
	clr	%g4		! This needs to point to the base of the data segment
	wr	%g0, ASI_PRIMARY_NOFAULT, %asi		! Restore default ASI
	!wrpr	%g0, PSTATE_INTR, %pstate
	ret
	 restore %i0, %g0, %o0				! return old curlwp
5228
5229#ifdef __HAVE_FAST_SOFTINTS
5230/*
5231 * Switch to the LWP assigned to handle interrupts from the given
5232 * source.  We borrow the VM context from the interrupted LWP.
5233 *
5234 * int softint_fastintr(void *l)
5235 *
5236 * Arguments:
5237 *	i0	softint lwp
5238 */
ENTRY(softint_fastintr)
	save	%sp, -CC64FSZ, %sp
	set	CPUINFO_VA, %l0			! l0 = curcpu()
	rdpr	%pil, %l7			! l7 = splhigh()
	wrpr	%g0, PIL_HIGH, %pil
	LDPTR	[%l0 + CI_EINTSTACK], %l6	! l6 = ci_eintstack
	add	%sp, -CC64FSZ, %l2		! ci_eintstack = sp - CC64FSZ
	STPTR	%l2, [%l0 + CI_EINTSTACK]	! save intstack for nested intr

	mov	%i0, %o0			! o0/i0 = softint lwp
	mov	%l7, %o1			! o1/i1 = ipl
	save	%sp, -CC64FSZ, %sp		! make one more register window
	flushw					! and save all

	sethi	%hi(CURLWP), %l7
	sethi	%hi(CPCB), %l6
	LDPTR	[%l7 + %lo(CURLWP)], %l0	! l0 = interrupted lwp (curlwp)

	/* save interrupted lwp/pcb info */
	sethi	%hi(softint_fastintr_ret - 8), %o0	! trampoline function
	LDPTR	[%l0 + L_PCB], %l5		! l5 = interrupted pcb
	or	%o0, %lo(softint_fastintr_ret - 8), %o0
	stx	%i6, [%l5 + PCB_SP]
	stx	%o0, [%l5 + PCB_PC]		! resume at the trampoline if we block
	rdpr	%pstate, %o1
	rdpr	%cwp, %o2
	sth	%o1, [%l5 + PCB_PSTATE]
	stb	%o2, [%l5 + PCB_CWP]

	/* switch to softint lwp */
	sethi	%hi(USPACE - TF_SIZE - CC64FSZ - STKB), %o3
	LDPTR	[%i0 + L_PCB], %l1		! l1 = softint pcb
	or	%o3, %lo(USPACE - TF_SIZE - CC64FSZ - STKB), %o3
	STPTR	%i0, [%l7 + %lo(CURLWP)]	! curlwp = softint lwp
	add	%l1, %o3, %i6
	STPTR	%l1, [%l6 + %lo(CPCB)]		! cpcb = softint pcb
	stx	%i6, [%l1 + PCB_SP]
	add	%i6, -CC64FSZ, %sp		! new stack

	/* now switched, then invoke MI dispatcher */
	mov	%i1, %o1
	call	_C_LABEL(softint_dispatch)
	 mov	%l0, %o0

	/* switch back to interrupted lwp */
	ldx	[%l5 + PCB_SP], %i6
	STPTR	%l0, [%l7 + %lo(CURLWP)]	! curlwp = interrupted lwp
	STPTR	%l5, [%l6 + %lo(CPCB)]		! cpcb = interrupted pcb

	restore					! rewind register window

	STPTR	%l6, [%l0 + CI_EINTSTACK]	! restore ci_eintstack
	wrpr	%g0, %l7, %pil			! restore ipl
	ret
	 restore	%g0, 1, %o0		! return 1
5294
5295/*
5296 * Trampoline function that gets returned to by cpu_switchto() when
5297 * an interrupt handler blocks.
5298 *
5299 * Arguments:
5300 *	o0	old lwp from cpu_switchto()
5301 *
5302 * from softint_fastintr():
5303 *	l0	CPUINFO_VA
5304 *	l6	saved ci_eintstack
5305 *	l7	saved ipl
5306 */
softint_fastintr_ret:
	/* re-adjust after mi_switch() */
	ld	[%l0 + CI_MTX_COUNT], %o1
	inc	%o1				! ci_mtx_count++
	st	%o1, [%l0 + CI_MTX_COUNT]
	st	%g0, [%o0 + L_CTXSWTCH]		! prev->l_ctxswtch = 0

	STPTR	%l6, [%l0 + CI_EINTSTACK]	! restore ci_eintstack
	wrpr	%g0, %l7, %pil			! restore ipl
	ret
	 restore	%g0, 1, %o0		! return 1, as softint_fastintr does
5318
5319#endif /* __HAVE_FAST_SOFTINTS */
5320
5321/*
5322 * Snapshot the current process so that stack frames are up to date.
5323 * Only used just before a crash dump.
5324 */
ENTRY(snapshot)
	rdpr	%pstate, %o1		! save psr
	stx	%o7, [%o0 + PCB_PC]	! save pc
	stx	%o6, [%o0 + PCB_SP]	! save sp
	rdpr	%pil, %o2
	sth	%o1, [%o0 + PCB_PSTATE]
	rdpr	%cwp, %o3
	stb	%o2, [%o0 + PCB_PIL]
	stb	%o3, [%o0 + PCB_CWP]

	flushw				! push all register windows to the stack
	save	%sp, -CC64FSZ, %sp
	flushw				! ...including the one we started in
	ret
	 restore
5340
5341/*
5342 * cpu_lwp_fork() arranges for lwp_trampoline() to run when the
5343 * nascent lwp is selected by switch().
5344 *
5345 * The switch frame will contain pointer to struct lwp of this lwp in
5346 * %l2, a pointer to the function to call in %l0, and an argument to
5347 * pass to it in %l1 (we abuse the callee-saved registers).
5348 *
5349 * We enter lwp_trampoline as if we are "returning" from
5350 * cpu_switchto(), so %o0 contains previous lwp (the one we are
5351 * switching from) that we pass to lwp_startup().
5352 *
5353 * If the function *(%l0) returns, we arrange for an immediate return
5354 * to user mode.  This happens in two known cases: after execve(2) of
5355 * init, and when returning a child to user mode after a fork(2).
5356 *
5357 * If were setting up a kernel thread, the function *(%l0) will not
5358 * return.
5359 */
ENTRY(lwp_trampoline)
	/*
	 * Note: cpu_lwp_fork() has set up a stack frame for us to run
	 * in, so we can call other functions from here without using
	 * `save ... restore'.
	 */

	! newlwp in %l2, oldlwp in %o0
	call    lwp_startup
	 mov    %l2, %o1

	call	%l0			! re-use current frame
	 mov	%l1, %o0		! %l1 is the function's argument

	/*
	 * Going to userland - set proper tstate in trap frame
	 */
	set	(ASI_PRIMARY_NO_FAULT<<TSTATE_ASI_SHIFT)|((PSTATE_USER)<<TSTATE_PSTATE_SHIFT), %g1
	stx	%g1, [%sp + CC64FSZ + STKB + TF_TSTATE]

	/*
	 * Here we finish up as in syscall, but simplified.
	 */
	ba,a,pt	%icc, return_from_trap
	 nop
5385
5386	/*
5387	 * Like lwp_trampoline, but for cpu_setfunc(), i.e. without newlwp
	 * argument and will not call lwp_startup.
5389	 */
ENTRY(setfunc_trampoline)
	call	%l0			! re-use current frame
	 mov	%l1, %o0		! %l1 is the function's argument
	ba,a,pt	%icc, return_from_trap	! if the function returns, go to userland
	 nop
5395
5396/*
5397 * pmap_zero_page_phys(pa)
5398 *
5399 * Zero one page physically addressed
5400 *
5401 * Block load/store ASIs do not exist for physical addresses,
5402 * so we won't use them.
5403 *
5404 * We will execute a flush at the end to sync the I$.
5405 *
5406 * This version expects to have the dcache_flush_page_all(pa)
5407 * to have been called before calling into here.
5408 */
ENTRY(pmap_zero_page_phys)
#ifndef _LP64
	COMBINE(%o0, %o1, %o0)			! reassemble 64-bit PA from two args
#endif
#ifdef DEBUG
	set	pmapdebug, %o4
	ld	[%o4], %o4
	btst	0x80, %o4	! PDB_COPY
	bz,pt	%icc, 3f
	 nop
	save	%sp, -CC64FSZ, %sp
	set	2f, %o0
	call	printf
	 mov	%i0, %o1
!	ta	1; nop
	restore
	.data
2:	.asciz	"pmap_zero_page(%p)\n"
	_ALIGN
	.text
3:
#endif
	set	NBPG, %o2		! Loop count
	wr	%g0, ASI_PHYS_CACHED, %asi	! stores go to physical addresses
1:
	/* Unroll the loop 8 times */
	stxa	%g0, [%o0 + 0x00] %asi
	deccc	0x40, %o2
	stxa	%g0, [%o0 + 0x08] %asi
	stxa	%g0, [%o0 + 0x10] %asi
	stxa	%g0, [%o0 + 0x18] %asi
	stxa	%g0, [%o0 + 0x20] %asi
	stxa	%g0, [%o0 + 0x28] %asi
	stxa	%g0, [%o0 + 0x30] %asi
	stxa	%g0, [%o0 + 0x38] %asi
	bg,pt	%icc, 1b		! more of the page left to zero?
	 inc	0x40, %o0

	sethi	%hi(KERNBASE), %o3
	flush	%o3
	retl
	 wr	%g0, ASI_PRIMARY_NOFAULT, %asi	! Make C code happy
5451
5452/*
5453 * pmap_copy_page_phys(paddr_t src, paddr_t dst)
5454 *
5455 * Copy one page physically addressed
5456 * We need to use a global reg for ldxa/stxa
5457 * so the top 32-bits cannot be lost if we take
5458 * a trap and need to save our stack frame to a
5459 * 32-bit stack.  We will unroll the loop by 4 to
5460 * improve performance.
5461 *
5462 * This version expects to have the dcache_flush_page_all(pa)
5463 * to have been called before calling into here.
5464 *
5465 */
ENTRY(pmap_copy_page_phys)
#ifndef _LP64
	COMBINE(%o0, %o1, %o0)			! reassemble 64-bit src/dst PAs
	COMBINE(%o2, %o3, %o1)
#endif
#ifdef DEBUG
	set	pmapdebug, %o4
	ld	[%o4], %o4
	btst	0x80, %o4	! PDB_COPY
	bz,pt	%icc, 3f
	 nop
	save	%sp, -CC64FSZ, %sp
	mov	%i0, %o1
	set	2f, %o0
	call	printf
	 mov	%i1, %o2
!	ta	1; nop
	restore
	.data
2:	.asciz	"pmap_copy_page(%p,%p)\n"
	_ALIGN
	.text
3:
#endif
#if 1
	! Unrolled x4: copy NBPG bytes from PA %o0 to PA %o1 via
	! ASI_PHYS_CACHED loads/stores.
	set	NBPG, %o2		! %o2 = bytes remaining
	wr	%g0, ASI_PHYS_CACHED, %asi
1:
	ldxa	[%o0 + 0x00] %asi, %g1
	ldxa	[%o0 + 0x08] %asi, %o3
	ldxa	[%o0 + 0x10] %asi, %o4
	ldxa	[%o0 + 0x18] %asi, %o5
	inc	0x20, %o0
	deccc	0x20, %o2
	stxa	%g1, [%o1 + 0x00] %asi
	stxa	%o3, [%o1 + 0x08] %asi
	stxa	%o4, [%o1 + 0x10] %asi
	stxa	%o5, [%o1 + 0x18] %asi
	bg,pt	%icc, 1b		! We don't care about pages >4GB
	 inc	0x20, %o1
	retl
	 wr	%g0, ASI_PRIMARY_NOFAULT, %asi	! restore default ASI
#else
	set	NBPG, %o3
	add	%o3, %o0, %o3		! %o3 = source end address
	mov	%g1, %o4		! Save g1
1:
	ldxa	[%o0] ASI_PHYS_CACHED, %g1
	inc	8, %o0
	cmp	%o0, %o3
	stxa	%g1, [%o1] ASI_PHYS_CACHED
	bl,pt	%icc, 1b		! We don't care about pages >4GB
	 inc	8, %o1
	retl
	 mov	%o4, %g1		! Restore g1
#endif
5522
5523/*
5524 * extern int64_t pseg_get_real(struct pmap *pm, vaddr_t addr);
5525 *
5526 * Return TTE at addr in pmap.  Uses physical addressing only.
 * pmap->pm_physaddr must be the physical address of pm_segs
5528 *
5529 */
ENTRY(pseg_get_real)
!	flushw			! Make sure we don't have stack probs & lose hibits of %o
#ifndef _LP64
	clruw	%o1					! Zero extend
#endif
	! Walk the 3-level page table (segs -> dir -> ptab) using
	! physical accesses; return the TTE, or 0 on any miss.
	ldx	[%o0 + PM_PHYS], %o2			! pmap->pm_segs

	srax	%o1, HOLESHIFT, %o3			! Check for valid address
	brz,pt	%o3, 0f					! Should be zero or -1
	 inc	%o3					! Make -1 -> 0
	brnz,pn	%o3, 1f					! Error! In hole!
0:
	srlx	%o1, STSHIFT, %o3
	and	%o3, STMASK, %o3			! Index into pm_segs
	sll	%o3, 3, %o3				! ...times sizeof(entry)
	add	%o2, %o3, %o2
	DLFLUSH(%o2,%o3)
	ldxa	[%o2] ASI_PHYS_CACHED, %o2		! Load page directory pointer
	DLFLUSH2(%o3)

	srlx	%o1, PDSHIFT, %o3
	and	%o3, PDMASK, %o3
	sll	%o3, 3, %o3
	brz,pn	%o2, 1f					! NULL entry? check somewhere else
	 add	%o2, %o3, %o2
	DLFLUSH(%o2,%o3)
	ldxa	[%o2] ASI_PHYS_CACHED, %o2		! Load page table pointer
	DLFLUSH2(%o3)

	srlx	%o1, PTSHIFT, %o3			! Convert to ptab offset
	and	%o3, PTMASK, %o3
	sll	%o3, 3, %o3
	brz,pn	%o2, 1f					! NULL entry? check somewhere else
	 add	%o2, %o3, %o2
	DLFLUSH(%o2,%o3)
	ldxa	[%o2] ASI_PHYS_CACHED, %o0
	DLFLUSH2(%o3)
	brgez,pn %o0, 1f				! Entry invalid?  Punt
	 btst	1, %sp					! 64-bit stack => 64-bit caller
	bz,pn	%icc, 0f				! 64-bit mode?
	 nop
	retl						! Yes, return full value
	 nop
0:
#if 1
	srl	%o0, 0, %o1
	retl						! No, generate a %o0:%o1 double
	 srlx	%o0, 32, %o0
#else
	DLFLUSH(%o2,%o3)
	ldda	[%o2] ASI_PHYS_CACHED, %o0
	DLFLUSH2(%o3)
	retl						! No, generate a %o0:%o1 double
	 nop
#endif
1:
	! Lookup failed: return 0 (both halves in 32-bit mode)
#ifndef _LP64
	clr	%o1
#endif
	retl
	 clr	%o0
5591
5592/*
5593 * In 32-bit mode:
5594 *
5595 * extern int pseg_set_real(struct pmap* %o0, vaddr_t addr %o1,
5596 *			    int64_t tte %o2:%o3, paddr_t spare %o4:%o5);
5597 *
5598 * In 64-bit mode:
5599 *
5600 * extern int pseg_set_real(struct pmap* %o0, vaddr_t addr %o1,
5601 *			    int64_t tte %o2, paddr_t spare %o3);
5602 *
5603 * Set a pseg entry to a particular TTE value.  Return values are:
5604 *
5605 *	-2	addr in hole
5606 *	0	success	(spare was not used if given)
5607 *	1	failure	(spare was not given, but one is needed)
5608 *	2	success	(spare was given, used for L2)
5609 *	3	failure	(spare was given, used for L2, another is needed for L3)
5610 *	4	success	(spare was given, used for L3)
5611 *
5612 *	rv == 0	success, spare not used if one was given
5613 *	rv & 4	spare was used for L3
5614 *	rv & 2	spare was used for L2
5615 *	rv & 1	failure, spare is needed
5616 *
5617 * (NB: nobody in pmap checks for the virtual hole, so the system will hang.)
5618 * The way to call this is:  first just call it without a spare page.
5619 * If that fails, allocate a page and try again, passing the paddr of the
5620 * new page as the spare.
5621 * If spare is non-zero it is assumed to be the address of a zeroed physical
5622 * page that can be used to generate a directory table or page table if needed.
5623 *
5624 * We keep track of valid (A_TLB_V bit set) and wired (A_TLB_TSB_LOCK bit set)
5625 * mappings that are set here. We check both bits on the new data entered
5626 * and increment counts, as well as decrementing counts if the bits are set
5627 * in the value replaced by this call.
5628 * The counters are 32 bit or 64 bit wide, depending on the kernel type we are
5629 * running!
5630 */
ENTRY(pseg_set_real)
#ifndef _LP64
	clruw	%o1					! Zero extend
	COMBINE(%o2, %o3, %o2)
	COMBINE(%o4, %o5, %o3)
#endif
	!!
	!! However we managed to get here we now have:
	!!
	!! %o0 = *pmap
	!! %o1 = addr
	!! %o2 = tte
	!! %o3 = paddr of spare page
	!!
	srax	%o1, HOLESHIFT, %o4			! Check for valid address
	brz,pt	%o4, 0f					! Should be zero or -1
	 inc	%o4					! Make -1 -> 0
	brz,pt	%o4, 0f
	 nop
#ifdef DEBUG
	ta	1					! Break into debugger
#endif
	retl
	 mov -2, %o0					! Error -- in hole!

0:
	! Walk (and if needed, build with the spare page) the 3-level
	! table, then store the new TTE.  %g1 accumulates the return
	! flags (0 / 2 / 4, see header comment).
	ldx	[%o0 + PM_PHYS], %o4			! pmap->pm_segs
	clr	%g1
	srlx	%o1, STSHIFT, %o5
	and	%o5, STMASK, %o5
	sll	%o5, 3, %o5
	add	%o4, %o5, %o4
0:
	DLFLUSH(%o4,%g5)
	ldxa	[%o4] ASI_PHYS_CACHED, %o5		! Load page directory pointer
	DLFLUSH2(%g5)

	brnz,a,pt %o5, 0f				! Null pointer?
	 mov	%o5, %o4
	brz,pn	%o3, 9f					! Have a spare?
	 mov	%o3, %o5
	casxa	[%o4] ASI_PHYS_CACHED, %g0, %o5		! install spare as L2 table
	brnz,pn	%o5, 0b					! Something changed?
	DLFLUSH(%o4, %o5)
	mov	%o3, %o4
	mov	2, %g1					! record spare used for L2
	clr	%o3					! and not available for L3
0:
	srlx	%o1, PDSHIFT, %o5
	and	%o5, PDMASK, %o5
	sll	%o5, 3, %o5
	add	%o4, %o5, %o4
0:
	DLFLUSH(%o4,%g5)
	ldxa	[%o4] ASI_PHYS_CACHED, %o5		! Load table directory pointer
	DLFLUSH2(%g5)

	brnz,a,pt %o5, 0f				! Null pointer?
	 mov	%o5, %o4
	brz,pn	%o3, 9f					! Have a spare?
	 mov	%o3, %o5
	casxa	[%o4] ASI_PHYS_CACHED, %g0, %o5		! install spare as L3 table
	brnz,pn	%o5, 0b					! Something changed?
	DLFLUSH(%o4, %o4)
	mov	%o3, %o4
	mov	4, %g1					! record spare used for L3
0:
	srlx	%o1, PTSHIFT, %o5			! Convert to ptab offset
	and	%o5, PTMASK, %o5
	sll	%o5, 3, %o5
	add	%o5, %o4, %o4

	DLFLUSH(%o4,%g5)
	ldxa	[%o4] ASI_PHYS_CACHED, %o5		! save old value in %o5
	stxa	%o2, [%o4] ASI_PHYS_CACHED		! Easier than shift+or
	DLFLUSH2(%g5)

	!! at this point we have:
	!!  %g1 = return value
	!!  %o0 = struct pmap * (where the counts are)
	!!  %o2 = new TTE
	!!  %o5 = old TTE

	!! see if stats needs an update
	set	A_TLB_TSB_LOCK, %g5
	xor	%o2, %o5, %o3			! %o3 - what changed

	brgez,pn %o3, 5f			! has resident changed? (we predict it has)
	 btst	%g5, %o3			! has wired changed?

	LDPTR	[%o0 + PM_RESIDENT], %o1	! gonna update resident count
	brlz	%o2, 0f				! new TTE valid (A_TLB_V, sign bit)?
	 mov	1, %o4
	neg	%o4				! new is not resident -> decrement
0:	add	%o1, %o4, %o1
	STPTR	%o1, [%o0 + PM_RESIDENT]
	btst	%g5, %o3			! has wired changed?
5:	bz,pt	%xcc, 8f			! we predict it's not
	 btst	%g5, %o2			! don't waste delay slot, check if new one is wired
	LDPTR	[%o0 + PM_WIRED], %o1		! gonna update wired count
	bnz,pt	%xcc, 0f			! if wired changes, we predict it increments
	 mov	1, %o4
	neg	%o4				! new is not wired -> decrement
0:	add	%o1, %o4, %o1
	STPTR	%o1, [%o0 + PM_WIRED]
8:	retl
	 mov	%g1, %o0			! return %g1

9:	retl
	 or	%g1, 1, %o0			! spare needed, return flags + 1
5741
5742
5743/*
5744 * clearfpstate()
5745 *
5746 * Drops the current fpu state, without saving it.
5747 */
ENTRY(clearfpstate)
	rdpr	%pstate, %o1		! enable FPU
	wr	%g0, FPRS_FEF, %fprs	! FEF only: DL/DU (dirty) bits cleared
	or	%o1, PSTATE_PEF, %o1
	retl
	 wrpr	%o1, 0, %pstate
5754
5755/*
5756 * savefpstate(f) struct fpstate *f;
5757 *
5758 * Store the current FPU state.
5759 *
5760 * Since the kernel may need to use the FPU and we have problems atomically
5761 * testing and enabling the FPU, we leave here with the FPRS_FEF bit set.
5762 * Normally this should be turned on in loadfpstate().
5763 */
5764 /* XXXXXXXXXX  Assume caller created a proper stack frame */
ENTRY(savefpstate)
!	flushw			! Make sure we don't have stack probs & lose hibits of %o
	rdpr	%pstate, %o1		! enable FP before we begin
	rd	%fprs, %o5		! %o5 = FPRS dirty bits; they decide which halves to save
	wr	%g0, FPRS_FEF, %fprs
	or	%o1, PSTATE_PEF, %o1
	wrpr	%o1, 0, %pstate

	stx	%fsr, [%o0 + FS_FSR]	! f->fs_fsr = getfsr();
	rd	%gsr, %o4		! Save %gsr
	st	%o4, [%o0 + FS_GSR]

	add	%o0, FS_REGS, %o2	! %o2 = &f->fs_regs; block stores need 64-byte alignment
#ifdef DIAGNOSTIC
	btst	BLOCK_ALIGN, %o2	! Needs to be re-executed
	bnz,pn	%icc, 6f		! Check alignment
#endif
	 st	%g0, [%o0 + FS_QSIZE]	! f->fs_qsize = 0;
	btst	FPRS_DL|FPRS_DU, %o5	! Both FPU halves clean?
	bz,pt	%icc, 5f		! Then skip it

	 btst	FPRS_DL, %o5		! (delay slot) Lower FPU clean?
	membar	#Sync
	bz,a,pt	%icc, 1f		! Then skip it, but upper FPU not clean
	 add	%o2, 2*BLOCK_SIZE, %o2	! (annulled unless taken) Skip lower half %f0-%f31

	stda	%f0, [%o2] ASI_BLK_P	! f->fs_f0 = etc;
	inc	BLOCK_SIZE, %o2
	stda	%f16, [%o2] ASI_BLK_P

	btst	FPRS_DU, %o5		! Upper FPU clean?
	bz,pt	%icc, 2f		! Then skip it
	 inc	BLOCK_SIZE, %o2		! (delay slot) advance to the %f32 block
1:
	stda	%f32, [%o2] ASI_BLK_P
	inc	BLOCK_SIZE, %o2
	stda	%f48, [%o2] ASI_BLK_P
2:
	membar	#Sync			! Finish block stores before declaring the FPU clean
5:
	retl
	 wr	%g0, FPRS_FEF, %fprs	! Mark FPU clean

#ifdef DIAGNOSTIC
	!!
	!! Damn thing is *NOT* aligned on a 64-byte boundary
	!!
6:
	wr	%g0, FPRS_FEF, %fprs
	! XXX -- we should panic instead of silently entering debugger
	ta	1
	retl
	 nop
#endif
5819
5820/*
5821 * Load FPU state.
5822 */
5823 /* XXXXXXXXXX  Should test to see if we only need to do a partial restore */
ENTRY(loadfpstate)
	flushw			! Make sure we don't have stack probs & lose hibits of %o
	rdpr	%pstate, %o1		! enable FP before we begin
	ld	[%o0 + FS_GSR], %o4	! Restore %gsr
	set	PSTATE_PEF, %o2
	wr	%g0, FPRS_FEF, %fprs
	or	%o1, %o2, %o1
	wrpr	%o1, 0, %pstate
	ldx	[%o0 + FS_FSR], %fsr	! setfsr(f->fs_fsr);
	add	%o0, FS_REGS, %o3	! %o3 = &f->fs_regs; must be 64-byte aligned for block loads
#ifdef DIAGNOSTIC
	btst	BLOCK_ALIGN, %o3
	bne,pn	%icc, 1f	! Only use block loads on aligned blocks
#endif
	 wr	%o4, %g0, %gsr		! (delay slot) restore %gsr saved above
	membar	#Sync
	ldda	[%o3] ASI_BLK_P, %f0	! reload all four 16-register blocks
	inc	BLOCK_SIZE, %o3
	ldda	[%o3] ASI_BLK_P, %f16
	inc	BLOCK_SIZE, %o3
	ldda	[%o3] ASI_BLK_P, %f32
	inc	BLOCK_SIZE, %o3
	ldda	[%o3] ASI_BLK_P, %f48
	membar	#Sync			! Make sure loads are complete
	retl
	 wr	%g0, FPRS_FEF, %fprs	! Clear dirty bits

#ifdef DIAGNOSTIC
	!!
	!! Damn thing is *NOT* aligned on a 64-byte boundary
	!!
1:
	wr	%g0, FPRS_FEF, %fprs	! Clear dirty bits
	! XXX -- we should panic instead of silently entering debugger
	ta	1
	retl
	 nop
#endif
5862
5863/*
5864 * ienab_bis(bis) int bis;
5865 * ienab_bic(bic) int bic;
5866 *
5867 * Set and clear bits in the interrupt register.
5868 */
5869
5870/*
5871 * sun4u has separate asr's for clearing/setting the interrupt mask.
5872 */
ENTRY(ienab_bis)
	retl
	 wr	%o0, 0, SET_SOFTINT	! (delay slot) post soft-interrupt bits in %o0
5876
ENTRY(ienab_bic)
	retl
	 wr	%o0, 0, CLEAR_SOFTINT	! (delay slot) clear soft-interrupt bits in %o0
5880
5881/*
5882 * send_softint(cpu, level, intrhand)
5883 *
5884 * Send a softint with an intrhand pointer so we can cause a vectored
5885 * interrupt instead of a polled interrupt.  This does pretty much the same
5886 * as interrupt_vector.  If cpu is -1 then send it to this CPU, if it's -2
5887 * send it to any CPU, otherwise send it to a particular CPU.
5888 *
5889 * XXXX Dispatching to different CPUs is not implemented yet.
5890 */
ENTRY(send_softint)
	rdpr	%pstate, %g1
	andn	%g1, PSTATE_IE, %g2	! clear PSTATE.IE
	wrpr	%g2, 0, %pstate		! block interrupts while we enqueue the handler

	sethi	%hi(CPUINFO_VA+CI_INTRPENDING), %o3
	LDPTR	[%o2 + IH_PEND], %o5	! handler already pending?
	or	%o3, %lo(CPUINFO_VA+CI_INTRPENDING), %o3
	brnz	%o5, 1f			! yes: nothing to do, just return
	 sll	%o1, PTRSHFT, %o5	! Find start of table for this IPL
	add	%o3, %o5, %o3		! %o3 = &ci_intrpending[level]
2:
	LDPTR	[%o3], %o5		! Load list head
	STPTR	%o5, [%o2+IH_PEND]	! Link our intrhand node in
	mov	%o2, %o4
	CASPTR	[%o3] ASI_N, %o5, %o4	! atomically swing list head to our node
	cmp	%o4, %o5		! Did it work?
	bne,pn	CCCR, 2b		! No, try again
	 .empty

	mov	1, %o4			! Change from level to bitmask
	sllx	%o4, %o1, %o4
	wr	%o4, 0, SET_SOFTINT	! SET_SOFTINT: trigger the softint at that level
1:
	retl
	 wrpr	%g1, 0, %pstate		! restore PSTATE.IE
5917
5918
5919#define MICROPERSEC	(1000000)
5920
5921/*
5922 * delay function
5923 *
5924 * void delay(N)  -- delay N microseconds
5925 *
5926 * Register usage: %o0 = "N" number of usecs to go (counts down to zero)
5927 *		   %o1 = "timerblurb" (stays constant)
5928 *		   %o2 = counter for 1 usec (counts down from %o1 to zero)
5929 *
5930 *
5931 *	ci_cpu_clockrate should be tuned during CPU probe to the CPU
5932 *	clockrate in Hz
5933 *
5934 */
ENTRY(delay)			! %o0 = n
#if 1
	rdpr	%tick, %o1					! Take timer snapshot
	sethi	%hi(CPUINFO_VA + CI_CLOCKRATE), %o2
	sethi	%hi(MICROPERSEC), %o3
	ldx	[%o2 + %lo(CPUINFO_VA + CI_CLOCKRATE + 8)], %o4	! Get scale factor (cached ticks/usec)
	brnz,pt	%o4, 0f						! already computed?
	 or	%o3, %lo(MICROPERSEC), %o3

	!! Calculate ticks/usec
	ldx	[%o2 + %lo(CPUINFO_VA + CI_CLOCKRATE)], %o4	! No, we need to calculate it
	udivx	%o4, %o3, %o4					! ticks/usec = clockrate / 10^6
	stx	%o4, [%o2 + %lo(CPUINFO_VA + CI_CLOCKRATE + 8)]	! Save it so we don't need to divide again
0:

	mulx	%o0, %o4, %o0					! Convert usec -> ticks
	rdpr	%tick, %o2					! Top of next itr
1:
	sub	%o2, %o1, %o3					! How many ticks have gone by?
	sub	%o0, %o3, %o4					! Decrement count by that much
	movrgz	%o3, %o4, %o0					! But only if we're decrementing
	mov	%o2, %o1					! Remember last tick
	brgz,pt	%o0, 1b						! Done?
	 rdpr	%tick, %o2					! (delay slot) Get new tick

	retl
	 nop
#else
/* This code only works if %tick does not wrap */
	rdpr	%tick, %g1					! Take timer snapshot
	sethi	%hi(CPUINFO_VA + CI_CLOCKRATE), %g2
	sethi	%hi(MICROPERSEC), %o2
	ldx	[%g2 + %lo(CPUINFO_VA + CI_CLOCKRATE)], %g2	! Get scale factor
	or	%o2, %lo(MICROPERSEC), %o2
!	sethi	%hi(_C_LABEL(timerblurb), %o5			! This is if we plan to tune the clock
!	ld	[%o5 + %lo(_C_LABEL(timerblurb))], %o5		!  with respect to the counter/timer
	mulx	%o0, %g2, %g2					! Scale it: (usec * Hz) / 1 x 10^6 = ticks
	udivx	%g2, %o2, %g2
	add	%g1, %g2, %g2
!	add	%o5, %g2, %g2					! But this gets complicated
	rdpr	%tick, %g1					! Top of next itr
	mov	%g1, %g1	! Erratum 50
1:
	cmp	%g1, %g2
	bl,a,pn %xcc, 1b					! Done?
	 rdpr	%tick, %g1
*
	retl
	 nop
#endif
	/*
	 * If something's wrong with the standard setup do this stupid loop
	 * calibrated for a 143MHz processor.
	 */
Lstupid_delay:
	set	142857143/MICROPERSEC, %o1	! ~ticks per usec on a 143MHz CPU
Lstupid_loop:
	brnz,pt	%o1, Lstupid_loop
	 dec	%o1
	brnz,pt	%o0, Lstupid_delay
	 dec	%o0
	retl
	 nop
5998
5999/*
6000 * next_tick(long increment)
6001 *
6002 * Sets the %tick_cmpr register to fire off in `increment' machine
6003 * cycles in the future.  Also handles %tick wraparound.  In 32-bit
6004 * mode we're limited to a 32-bit increment.
6005 */
ENTRY(next_tick)
	rd	TICK_CMPR, %o2
	rdpr	%tick, %o1

	mov	1, %o3		! Mask off high bits of these registers
	sllx	%o3, 63, %o3	! %o3 = INT_DIS/interrupt-disable bit mask (bit 63)
	andn	%o1, %o3, %o1
	andn	%o2, %o3, %o2
	cmp	%o1, %o2	! Did we wrap?  (tick < tick_cmpr)
	bgt,pt	%icc, 1f
	 add	%o1, 1000, %o1	! Need some slack so we don't lose intrs.

	/*
	 * Handle the unlikely case of %tick wrapping.
	 *
	 * This should only happen every 10 years or more.
	 *
	 * We need to increment the time base by the size of %tick in
	 * microseconds.  This will require some divides and multiplies
	 * which can take time.  So we re-read %tick.
	 *
	 */

	/* XXXXX NOT IMPLEMENTED */



1:
	add	%o2, %o0, %o2	! advance compare register by the increment
	andn	%o2, %o3, %o4	! %o4 = candidate value with bit 63 masked off
	brlz,pn	%o4, Ltick_ovflw
	 cmp	%o2, %o1	! Has this tick passed?
	blt,pn	%xcc, 1b	! Yes
	 nop

#ifdef BB_ERRATA_1
	ba,a	2f		! BB_ERRATA_1: wr must be in its own aligned group, below
	 nop
#else
	retl
	 wr	%o2, TICK_CMPR
#endif

Ltick_ovflw:
/*
 * When we get here tick_cmpr has wrapped, but we don't know if %tick
 * has wrapped.  If bit 62 is set then we have not wrapped and we can
 * use the current value of %o4 as %tick.  Otherwise we need to return
 * to our loop with %o4 as %tick_cmpr (%o2).
 */
	srlx	%o3, 1, %o5	! %o5 = bit 62 mask
	btst	%o5, %o1
	bz,pn	%xcc, 1b
	 mov	%o4, %o2
#ifdef BB_ERRATA_1
	ba,a	2f
	 nop
	.align	64		! keep wr/rd pair together in one cache line
2:	wr	%o2, TICK_CMPR
	rd	TICK_CMPR, %g0	! read back to work around the erratum
	retl
	 nop
#else
	retl
	 wr	%o2, TICK_CMPR
#endif
6072
6073/*
6074 * setstick(long)
6075 */
ENTRY(setstick)
	retl
	 wr %o0, STICK		! (delay slot) %stick = %o0
6079
6080/*
6081 * long getstick(void)
6082 */
ENTRY(getstick)
	retl
	 rd STICK, %o0		! (delay slot) return current %stick
6086
6087/*
6088 * next_stick(long increment)
6089 *
6090 * Sets the %stick_cmpr register to fire off in `increment' machine
6091 * cycles in the future.  Also handles %stick wraparound.  In 32-bit
6092 * mode we're limited to a 32-bit increment.
6093 */
ENTRY(next_stick)
	rd	STICK_CMPR, %o2
	rd	STICK, %o1

	mov	1, %o3		! Mask off high bits of these registers
	sllx	%o3, 63, %o3	! %o3 = interrupt-disable bit mask (bit 63)
	andn	%o1, %o3, %o1
	andn	%o2, %o3, %o2
	cmp	%o1, %o2	! Did we wrap?  (stick < stick_cmpr)
	bgt,pt	%icc, 1f
	 add	%o1, 1000, %o1	! Need some slack so we don't lose intrs.

	/*
	 * Handle the unlikely case of %stick wrapping.
	 *
	 * This should only happen every 10 years or more.
	 *
	 * We need to increment the time base by the size of %stick in
	 * microseconds.  This will require some divides and multiplies
	 * which can take time.  So we re-read %stick.
	 *
	 */

	/* XXXXX NOT IMPLEMENTED */



1:
	add	%o2, %o0, %o2	! advance compare register by the increment
	andn	%o2, %o3, %o4	! %o4 = candidate value with bit 63 masked off
	brlz,pn	%o4, Lstick_ovflw
	 cmp	%o2, %o1	! Has this stick passed?
	blt,pn	%xcc, 1b	! Yes
	 nop
	retl
	 wr	%o2, STICK_CMPR

Lstick_ovflw:
/*
 * When we get here %stick_cmpr has wrapped, but we don't know if %stick
 * has wrapped.  If bit 62 is set then we have not wrapped and we can
 * use the current value of %o4 as %stick.  Otherwise we need to return
 * to our loop with %o4 as %stick_cmpr (%o2).
 */
	srlx	%o3, 1, %o5	! %o5 = bit 62 mask
	btst	%o5, %o1
	bz,pn	%xcc, 1b
	 mov	%o4, %o2
	retl
	 wr	%o2, STICK_CMPR
6144
ENTRY(setjmp)
	save	%sp, -CC64FSZ, %sp	! Need a frame to return to.
	flushw				! spill register windows so %fp is in memory
	stx	%fp, [%i0+0]	! 64-bit stack pointer
	stx	%i7, [%i0+8]	! 64-bit return pc
	ret
	 restore	%g0, 0, %o0	! (delay slot) return 0 on direct call
6152
	.data
Lpanic_ljmp:
	.asciz	"longjmp botch"		! panic message (presumably for a longjmp sanity check; not referenced in this chunk)
	_ALIGN
	.text
6158
ENTRY(longjmp)
	! longjmp(env, v): restore the frame saved by setjmp(env) and make
	! that setjmp return v, or 1 if v == 0 (setjmp must never appear to
	! return 0 from a longjmp).
	save	%sp, -CC64FSZ, %sp	! prepare to restore to (old) frame
	flushw
	mov	1, %i2		! default return value (used when v == 0)
	ldx	[%i0+0], %fp	! get return stack
	movrnz	%i1, %i1, %i2	! compute v ? v : 1 (take v only when non-zero)
	ldx	[%i0+8], %i7	! get rpc
	ret
	 restore	%i2, 0, %o0	! (delay slot) return value -> caller's %o0
6168
6169#if defined(DDB) || defined(KGDB)
6170	/*
6171	 * Debug stuff.  Dump the trap registers into buffer & set tl=0.
6172	 *
6173	 *  %o0 = *ts
6174	 */
ENTRY(savetstate)
	mov	%o0, %o1	! %o1 = buffer cursor
	rdpr	%tl, %o0	! return value: number of trap levels saved
	brz	%o0, 2f		! nothing to do at tl=0
	 mov	%o0, %o2	! %o2 = loop counter (current tl down to 1)
1:
	rdpr	%tstate, %o3	! dump {tstate, tpc, tnpc, tt} for this level
	stx	%o3, [%o1]
	deccc	%o2		! sets cc for the bnz below
	inc	8, %o1
	rdpr	%tpc, %o4
	stx	%o4, [%o1]
	inc	8, %o1
	rdpr	%tnpc, %o5
	stx	%o5, [%o1]
	inc	8, %o1
	rdpr	%tt, %o4
	stx	%o4, [%o1]
	inc	8, %o1
	bnz	1b		! more levels to save?
	 wrpr	%o2, 0, %tl	! (delay slot) drop to the next lower trap level
2:
	retl
	 nop
6199
6200	/*
6201	 * Debug stuff.  Resore trap registers from buffer.
6202	 *
6203	 *  %o0 = %tl
6204	 *  %o1 = *ts
6205	 *
6206	 * Maybe this should be re-written to increment tl instead of decrementing.
6207	 */
ENTRY(restoretstate)
	flushw			! Make sure we don't have stack probs & lose hibits of %o
	brz,pn	%o0, 2f		! nothing to restore for tl=0
	 mov	%o0, %o2	! %o2 = loop counter (tl down to 1)
	wrpr	%o0, 0, %tl	! start at the highest saved trap level
1:
	ldx	[%o1], %o3	! reload {tstate, tpc, tnpc, tt} for this level
	deccc	%o2		! sets cc for the bnz below
	inc	8, %o1
	wrpr	%o3, 0, %tstate
	ldx	[%o1], %o4
	inc	8, %o1
	wrpr	%o4, 0, %tpc
	ldx	[%o1], %o5
	inc	8, %o1
	wrpr	%o5, 0, %tnpc
	ldx	[%o1], %o4
	inc	8, %o1
	wrpr	%o4, 0, %tt
	bnz	1b		! more levels to restore?
	 wrpr	%o2, 0, %tl	! (delay slot) drop to the next lower trap level
2:
	retl
	 wrpr	%o0, 0, %tl	! (delay slot) leave %tl at the requested level
6232
6233	/*
6234	 * Switch to context in abs(%o0)
6235	 */
ENTRY(switchtoctx_us)
	set	DEMAP_CTX_SECONDARY, %o3	! demap all TLB entries for the secondary context
	stxa	%o3, [%o3] ASI_DMMU_DEMAP	! ... from the D-TLB
	mov	CTX_SECONDARY, %o4
	stxa	%o3, [%o3] ASI_IMMU_DEMAP	! ... and from the I-TLB
	membar	#Sync
	stxa	%o0, [%o4] ASI_DMMU		! Maybe we should invalid
	sethi	%hi(KERNBASE), %o2
	membar	#Sync
	flush	%o2				! flush instruction pipe after MMU change
	retl
	 nop
6248
ENTRY(switchtoctx_usiii)
	mov	CTX_SECONDARY, %o4
	ldxa	[%o4] ASI_DMMU, %o2		! Load secondary context
	mov	CTX_PRIMARY, %o5
	ldxa	[%o5] ASI_DMMU, %o1		! Save primary context
	membar	#LoadStore
	stxa	%o2, [%o5] ASI_DMMU		! Insert secondary for demap
	membar	#Sync
	set	DEMAP_CTX_PRIMARY, %o3		! demap old context via the primary register
	stxa	%o3, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	stxa	%o0, [%o4] ASI_DMMU		! Maybe we should invalid
	membar	#Sync
	stxa	%o1, [%o5] ASI_DMMU		! Restore primary context
	sethi	%hi(KERNBASE), %o2
	membar	#Sync
	flush	%o2				! flush instruction pipe after MMU change
	retl
	 nop
6268
6269#ifndef _LP64
6270	/*
6271	 * Convert to 32-bit stack then call OF_sym2val()
6272	 */
ENTRY(OF_sym2val32)
	save	%sp, -CC64FSZ, %sp
	btst	7, %i0			! argument must be 8-byte aligned
	bnz,pn	%icc, 1f		! otherwise skip the call entirely
	 add	%sp, BIAS, %o1		! (delay slot) candidate un-biased stack pointer
	btst	1, %sp			! is the stack 64-bit biased?
	movnz	%icc, %o1, %sp		! if so, switch to the 32-bit (un-biased) stack
	call	_C_LABEL(OF_sym2val)
	 mov	%i0, %o0
1:
	ret
	 restore	%o0, 0, %o0	! propagate OF_sym2val()'s return value
6285
6286	/*
6287	 * Convert to 32-bit stack then call OF_val2sym()
6288	 */
ENTRY(OF_val2sym32)
	save	%sp, -CC64FSZ, %sp
	btst	7, %i0			! argument must be 8-byte aligned
	bnz,pn	%icc, 1f		! otherwise skip the call entirely
	 add	%sp, BIAS, %o1		! (delay slot) candidate un-biased stack pointer
	btst	1, %sp			! is the stack 64-bit biased?
	movnz	%icc, %o1, %sp		! if so, switch to the 32-bit (un-biased) stack
	call	_C_LABEL(OF_val2sym)
	 mov	%i0, %o0
1:
	ret
	 restore	%o0, 0, %o0	! propagate OF_val2sym()'s return value
6301#endif /* _LP64 */
6302#endif /* DDB */
6303
6304
6305#if defined(MULTIPROCESSOR)
6306/*
6307 * IPI target function to setup a C compatible environment and call a MI function.
6308 *
6309 * On entry:
6310 *	We are on one of the alternate set of globals
6311 *	%g2 = function to call
6312 *	%g3 = single argument to called function
6313 */
ENTRY(sparc64_ipi_ccall)
#ifdef TRAPS_USE_IG
	wrpr	%g0, PSTATE_KERN|PSTATE_IG, %pstate	! DEBUG
#endif
	TRAP_SETUP(-CC64FSZ-TF_SIZE)		! build a trapframe on the kernel stack

#ifdef DEBUG
	rdpr	%tt, %o1	! debug
	sth	%o1, [%sp + CC64FSZ + STKB + TF_TT]	! debug: record trap type
#endif
	mov	%g3, %o0			! save argument of function to call
	mov	%g2, %o5			! save function pointer

	wrpr	%g0, PSTATE_KERN, %pstate	! Get back to normal globals
	stx	%g1, [%sp + CC64FSZ + STKB + TF_G + ( 1*8)]	! save %g1-%g7 into the trapframe
	mov	%g1, %o1			! code
	rdpr	%tpc, %o2			! (pc)
	stx	%g2, [%sp + CC64FSZ + STKB + TF_G + ( 2*8)]
	rdpr	%tstate, %g1
	stx	%g3, [%sp + CC64FSZ + STKB + TF_G + ( 3*8)]
	rdpr	%tnpc, %o3
	stx	%g4, [%sp + CC64FSZ + STKB + TF_G + ( 4*8)]
	rd	%y, %o4
	stx	%g5, [%sp + CC64FSZ + STKB + TF_G + ( 5*8)]
	stx	%g6, [%sp + CC64FSZ + STKB + TF_G + ( 6*8)]
	wrpr	%g0, 0, %tl			! return to tl=0
	stx	%g7, [%sp + CC64FSZ + STKB + TF_G + ( 7*8)]

	stx	%g1, [%sp + CC64FSZ + STKB + TF_TSTATE]	! save trap state for return_from_trap
	stx	%o2, [%sp + CC64FSZ + STKB + TF_PC]
	stx	%o3, [%sp + CC64FSZ + STKB + TF_NPC]
	st	%o4, [%sp + CC64FSZ + STKB + TF_Y]

	rdpr	%pil, %g5
	stb	%g5, [%sp + CC64FSZ + STKB + TF_PIL]
	stb	%g5, [%sp + CC64FSZ + STKB + TF_OLDPIL]

	!! In the EMBEDANY memory model %g4 points to the start of the data segment.
	!! In our case we need to clear it before calling any C-code
	clr	%g4
	wr	%g0, ASI_NUCLEUS, %asi			! default kernel ASI

	call %o5					! call function
	 nop						! (delay slot) %o0 still holds the argument

	ba,a	return_from_trap			! and return from IPI
	 nop
6361
6362#endif
6363
6364
	.data
	_ALIGN
#if NKSYMS || defined(DDB) || defined(LKM)
	.globl	_C_LABEL(esym)
_C_LABEL(esym):			! end of kernel symbol table (presumably set at bootstrap -- confirm in machdep)
	POINTER	0
	.globl	_C_LABEL(ssym)
_C_LABEL(ssym):			! start of kernel symbol table (presumably set at bootstrap -- confirm in machdep)
	POINTER	0
#endif
	.comm	_C_LABEL(promvec), PTRSZ	! pointer to the PROM vector

#ifdef DEBUG
	.comm	_C_LABEL(trapdebug), 4		! trap debug flag word
	.comm	_C_LABEL(pmapdebug), 4		! pmap debug flag word
#endif
6381