/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#if defined(lint)
#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/promif.h>
#include <sys/prom_isa.h>
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/intreg.h>
#include <sys/ivintr.h>
#include <sys/mmu.h>
#include <sys/machpcb.h>
#include <sys/machtrap.h>
#include <sys/machlock.h>
#include <sys/fdreg.h>
#include <sys/vis.h>
#include <sys/traptrace.h>
#include <sys/panic.h>
#include <sys/machasi.h>
#include <sys/privregs.h>
#include <sys/hypervisor_api.h>
#include <sys/clock.h>

#if defined(lint)

#include <sys/thread.h>
#include <sys/time.h>

#else	/* lint */

#include "assym.h"


!
! REGOFF must add up to allow double word access to r_tstate.
! PCB_WBUF must also be aligned.
!
#if (REGOFF & 7) != 0
#error "struct regs not aligned"
#endif

/*
 * Absolute external symbols.
 * On the sun4u we put the panic buffer in the third and fourth pages.
 * We set things up so that the first 2 pages of KERNELBASE are illegal,
 * so they act as a redzone during copyin/copyout type operations. One of
 * the reasons the panic buffer is allocated in low memory is to prevent
 * it from being overwritten during booting operations (besides
 * the fact that it is small enough to share pages with others).
 */

	.seg	".data"
	.global	panicbuf

PROM	= 0xFFE00000			! address of prom virtual area
panicbuf = SYSBASE32 + PAGESIZE		! address of panic buffer

	.type	panicbuf, #object
	.size	panicbuf, PANICBUFSIZE

/*
 * Absolute external symbol - intr_vec_table.
 *
 * With new bus structures supporting a larger number of interrupt
 * numbers, the interrupt vector table, intr_vec_table[], has been
 * moved out of the kernel nucleus and is allocated after panicbuf.
 */
	.global intr_vec_table

intr_vec_table = SYSBASE32 + PAGESIZE + PANICBUFSIZE ! address of interrupt table

	.type	intr_vec_table, #object
	.size	intr_vec_table, MAXIVNUM * CPTRSIZE + MAX_RSVD_IV * IV_SIZE + MAX_RSVD_IVX * (IV_SIZE + CPTRSIZE * (NCPU - 1))
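/*
 * The size expression above covers, in order: a pointer per interrupt
 * number (MAXIVNUM * CPTRSIZE), the reserved intr_vec structures
 * (MAX_RSVD_IV * IV_SIZE), and the reserved intr_vecx structures, each of
 * which also carries a per-CPU pointer list (IV_SIZE + CPTRSIZE * (NCPU - 1)).
 * This simply restates the terms of the expression; see <sys/ivintr.h> for
 * the authoritative layout.
 */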

/*
 * The thread 0 stack. This must be the first thing in the data
 * segment (other than an sccs string) so that we don't stomp
 * on anything important if the stack overflows. We get a
 * red zone below this stack for free when the kernel text is
 * write protected.
 */

	.global	t0stack
	.align	16
	.type	t0stack, #object
t0stack:
	.skip	T0STKSZ			! thread 0 stack
t0stacktop:
	.size	t0stack, T0STKSZ

/*
 * cpu0 and its ptl1_panic stack.  The cpu structure must be allocated
 * on a single page for ptl1_panic's physical address accesses.
 */
	.global	cpu0
	.align	MMU_PAGESIZE
cpu0:
	.type	cpu0, #object
	.skip	CPU_ALLOC_SIZE
	.size	cpu0, CPU_ALLOC_SIZE

	.global t0
	.align	PTR24_ALIGN		! alignment for mutex.
	.type	t0, #object
t0:
	.skip	THREAD_SIZE		! thread 0
	.size	t0, THREAD_SIZE

	.global	trap_trace_ctl
	.global	htrap_tr0
	.global htrap_trace_bufsize

	.align	64
trap_trace_ctl:
	.skip	NCPU * TRAPTR_SIZE	! NCPU control headers
htrap_tr0:
	.skip	HTRAP_TSIZE		! one buffer for the boot cpu
	.align	4
htrap_trace_bufsize:
	.word	HTRAP_TSIZE		! default hv trap buffer size

#ifdef	TRAPTRACE
	.global	trap_tr0
	.global trap_trace_bufsize
	.global	trap_freeze
	.global	trap_freeze_pc

	.align	4
trap_trace_bufsize:
	.word	TRAP_TSIZE		! default trap buffer size
trap_freeze:
	.word	0

	.align	16
trap_tr0:
	.skip	TRAP_TSIZE		! one buffer for the boot cpu

/*
 * When an assertion in TRACE_PTR fails, the %pc is saved in trap_freeze_pc to
 * show in which TRACE_PTR the assertion failure happened.
 */
	.align	8
trap_freeze_pc:
	.nword	0
#endif	/* TRAPTRACE */

	.align 4
	.seg	".text"

#ifdef	NOPROM
	.global availmem
availmem:
	.word	0
#endif	/* NOPROM */

	.align	8
_local_p1275cis:
	.nword	0

#endif	/* lint */

#if defined(lint)

void
_start(void)
{}

#else /* lint */

	.seg	".data"

	.global nwindows, nwin_minus_one, winmask
nwindows:
	.word   8
nwin_minus_one:
	.word   7
winmask:
	.word	8

	.global	afsrbuf
afsrbuf:
	.word	0,0,0,0

/*
 * System initialization
 *
 * Our contract with the boot prom specifies that the MMU is on and the
 * first 16 meg of memory is mapped with a level-1 pte.  We are called
 * with the p1275cis ptr in %o0 and kdi_dvec in %o1; we start execution
 * directly from physical memory, so we need to get up into our proper
 * addresses quickly: all code before we do this must be position
 * independent.
 *
 * NB: The above is not true for the boot/stick kernel, where the only thing
 * mapped is the text+data+bss. The kernel is loaded directly into KERNELBASE.
 *
 * 	on entry, the romvec pointer (romp) is the first argument;
 * 	  i.e., %o0.
 * 	the bootops vector is in the second argument (%o1)
 *
 * Our tasks are:
 * 	save parameters
 * 	construct mappings for KERNELBASE (not needed for boot/stick kernel)
 * 	hop up into high memory           (not needed for boot/stick kernel)
 * 	initialize stack pointer
 * 	initialize trap base register
 * 	initialize window invalid mask
 * 	initialize psr (with traps enabled)
 * 	figure out all the module type stuff
 * 	tear down the 1-1 mappings
 * 	dive into main()
 */
	ENTRY_NP(_start)
	!
	! Stash away our arguments in memory.
	!
	sethi	%hi(_local_p1275cis), %g1
	stn	%o4, [%g1 + %lo(_local_p1275cis)]

	!
	! Initialize CPU state registers
	!
	wrpr	%g0, PSTATE_KERN, %pstate
	wr	%g0, %g0, %fprs

	!
	! call krtld to link the world together
	!
	call	kobj_start
	mov	%o4, %o0

	! Write 0x1f (MAX_REG_WINDOWS) to %cwp and read back to get
	! the actual implemented nwin - 1 value
	rdpr	%cwp, %g2		! save current %cwp
	wrpr	%g0, 0x1f, %cwp
	rdpr	%cwp, %g1		! %g1 = nwin - 1
	wrpr	%g0, %g2, %cwp		! restore current %cwp

	!
	! Stuff some memory cells related to numbers of windows.
	!
	sethi	%hi(nwin_minus_one), %g2
	st	%g1, [%g2 + %lo(nwin_minus_one)]
	inc	%g1
	sethi	%hi(nwindows), %g2
	st	%g1, [%g2 + %lo(nwindows)]
	dec	%g1
	mov	-2, %g2
	sll	%g2, %g1, %g2
	sethi	%hi(winmask), %g4
	st	%g2, [%g4 + %lo(winmask)]
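	! (Illustrative arithmetic: with 8 implemented windows %g1 == 7 here,
	! so -2 << 7 == 0xffffff00, i.e. a mask with a zero bit for each
	! valid %cwp value.)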

	!
	! save a pointer to obp's tba for later use by kmdb
	!
	rdpr	%tba, %g1
	set	boot_tba, %g2
	stx	%g1, [%g2]

	!
	! copy obp's breakpoint trap entry to obp_bpt
	!
	rdpr	%tba, %g1
	set	T_SOFTWARE_TRAP | ST_MON_BREAKPOINT, %g2
	sll	%g2, 5, %g2
	or	%g1, %g2, %g1
	set	obp_bpt, %g2
	ldx	[%g1], %g3
	stx	%g3, [%g2]
	flush	%g2
	ldx	[%g1 + 8], %g3
	stx	%g3, [%g2 + 8]
	flush	%g2 + 8
	ldx	[%g1 + 16], %g3
	stx	%g3, [%g2 + 16]
	flush	%g2 + 16
	ldx	[%g1 + 24], %g3
	stx	%g3, [%g2 + 24]
	flush	%g2 + 24

	!
	! Initialize thread 0's stack.
	!
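	! (Reading of the code below: carve the kfpu/gsr save area off the top
	! of t0stack, round down to a 64-byte boundary, then allocate the
	! machpcb beneath that and bias the resulting %sp.)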
	set	t0stacktop, %g1		! setup kernel stack pointer
	sub	%g1, SA(KFPUSIZE+GSR_SIZE), %g2
	and	%g2, 0x3f, %g3
	sub	%g2, %g3, %o1
	sub	%o1, SA(MPCBSIZE) + STACK_BIAS, %sp

	!
	! Initialize global thread register.
	!
	set	t0, THREAD_REG

	!
	! Fill in enough of the cpu structure so that
	! the wbuf management code works. Make sure the
	! boot cpu is inserted in cpu[] based on cpuid.
	!
	CPU_INDEX(%g2, %g1)
	sll	%g2, CPTRSHIFT, %g2		! convert cpuid to cpu[] offset
	set	cpu0, %o0			! &cpu0
	set	cpu, %g1			! &cpu[]
	stn	%o0, [%g1 + %g2]		! cpu[cpuid] = &cpu0

	stn	%o0, [THREAD_REG + T_CPU]	! threadp()->t_cpu = cpu[cpuid]
	stn	THREAD_REG, [%o0 + CPU_THREAD]	! cpu[cpuid]->cpu_thread = threadp()


	!  We do NOT need to bzero our BSS...boot has already done it for us.
	!  Just need to reference edata so that we don't break /dev/ksyms
	set	edata, %g0

	!
	! Call mlsetup with address of prototype user registers.
	!
	call	mlsetup
	add	%sp, REGOFF + STACK_BIAS, %o0

#if (REGOFF != MPCB_REGS)
#error "hole in struct machpcb between frame and regs?"
#endif

	!
	! Now call main.  We will return as process 1 (init).
	!
	call	main
	nop

	!
	! Main should never return.
	!
	set	.mainretmsg, %o0
	call	panic
	nop
	SET_SIZE(_start)

.mainretmsg:
	.asciz	"main returned"
	.align	4

#endif	/* lint */


/*
 * Generic system trap handler.
 *
 * Some kernel trap handlers save themselves from buying a window by
 * borrowing some of sys_trap's unused locals. %l0 thru %l3 may be used
 * for this purpose, as user_rtt and priv_rtt do not depend on them.
 * %l4 thru %l7 should NOT be used this way.
 *
 * Entry Conditions:
 * 	%pstate		am:0 priv:1 ie:0
 * 	%gl		global level  1
 *
 * Register Inputs:
 * 	%g1		pc of trap handler
 * 	%g2, %g3	args for handler
 * 	%g4		desired %pil (-1 means current %pil)
 * 	%g5, %g6	destroyed
 * 	%g7		saved
 *
 * Register Usage:
 * 	%l0, %l1	temps
 * 	%l3		saved %g1
 * 	%l6		curthread for user traps, %pil for priv traps
 * 	%l7		regs
 *
 * Called function prototype variants:
 *
 *	func(struct regs *rp);
 * 	func(struct regs *rp, uintptr_t arg1 [%g2], uintptr_t arg2 [%g3])
 *	func(struct regs *rp, uintptr_t arg1 [%g2],
 *	    uint32_t arg2 [%g3.l], uint32_t arg3 [%g3.h])
 *	func(struct regs *rp, uint32_t arg1 [%g2.l],
 *	    uint32_t arg2 [%g3.l], uint32_t arg3 [%g3.h], uint32_t [%g2.h])
 */

#if defined(lint)

void
sys_trap(void)
{}

#else	/* lint */

	ENTRY_NP(sys_trap)
#ifdef DEBUG
	! Assert gl == 1
	rdpr	%gl, %g5
	cmp	%g5, 1
	bne,a,pn %xcc, ptl1_panic
	  mov	PTL1_BAD_GL, %g1
#endif

	!
	! force tl=1, update %cwp, branch to correct handler
	!

	wrpr	%g0, 1, %tl
	rdpr	%tstate, %g5
	btst	TSTATE_PRIV, %g5
	and	%g5, TSTATE_CWP, %g6
	bnz,pn	%xcc, priv_trap
	wrpr	%g0, %g6, %cwp

	ALTENTRY(user_trap)
	!
	! user trap
	!
	! make all windows clean for kernel
	! buy a window using the current thread's stack
	!
#ifdef DEBUG
	! Assert gl == 1
	rdpr	%gl, %g5
	cmp	%g5, 1
	bne,a,pn %xcc, ptl1_panic
	  mov	PTL1_BAD_GL, %g1
#endif
	sethi	%hi(nwin_minus_one), %g5
	ld	[%g5 + %lo(nwin_minus_one)], %g5
	wrpr	%g0, %g5, %cleanwin
	CPU_ADDR(%g5, %g6)
	ldn	[%g5 + CPU_THREAD], %g5
	ldn	[%g5 + T_STACK], %g6
	sub	%g6, STACK_BIAS, %g6
	save	%g6, 0, %sp
	!
	! set window registers so that current windows are "other" windows
	!
	rdpr	%canrestore, %l0
	rdpr	%wstate, %l1
	wrpr	%g0, 0, %canrestore
	sllx	%l1, WSTATE_SHIFT, %l1
	wrpr	%l1, WSTATE_K64, %wstate
	wrpr	%g0, %l0, %otherwin
	!
	! set pcontext to run kernel
	!
	mov	KCONTEXT, %l0
	mov	MMU_PCONTEXT, %l1
	stxa	%l0, [%l1]ASI_MMU_CTX
	! Ensure new ctx takes effect by the time the "done" (below) completes
	membar	#Sync

	set	utl0, %g6		! bounce to utl0
have_win:
#ifdef DEBUG
	CPU_ADDR(%o1, %o2)
	add	%o1, CPU_MCPU, %o1
	ld	[%o1 + MCPU_KWBUF_FULL], %o2
	tst	%o2
	bnz,a,pn %icc, ptl1_panic
	  mov	PTL1_BAD_WTRAP, %g1
#endif /* DEBUG */
	SYSTRAP_TRACE(%o1, %o2, %o3)


	!
	! at this point we have a new window we can play in,
	! and %g6 is the label we want done to bounce to
	!
	! save needed current globals
	!
	mov	%g1, %l3	! pc
	mov	%g2, %o1	! arg #1
	mov	%g3, %o2	! arg #2
	srlx	%g3, 32, %o3	! pseudo arg #3
	srlx	%g2, 32, %o4	! pseudo arg #4
	mov	%g5, %l6	! curthread if user trap, %pil if priv trap
	!
	! save trap state on stack
	!
	add	%sp, REGOFF + STACK_BIAS, %l7
	rdpr	%tpc, %l0
	rdpr	%tnpc, %l1
	rdpr	%tstate, %l2
	stn	%l0, [%l7 + PC_OFF]
	stn	%l1, [%l7 + nPC_OFF]
	stx	%l2, [%l7 + TSTATE_OFF]
	!
	! setup pil
	!
	brlz,pt		%g4, 1f
	nop
#ifdef DEBUG
	!
	! ASSERT(%g4 >= %pil).
	!
	rdpr	%pil, %l0
	cmp	%g4, %l0
	bge,pt	%xcc, 0f
	nop				! yes, nop; to avoid anull
	set	bad_g4_called, %l3
	mov	1, %o1
	st	%o1, [%l3]
	set	bad_g4, %l3		! pc
	set	sys_trap_wrong_pil, %o1	! arg #1
	mov	%g4, %o2		! arg #2
	ba	1f			! stay at the current %pil
	mov	%l0, %o3		! arg #3
0:
#endif /* DEBUG */
	wrpr		%g0, %g4, %pil
1:
	!
	! set trap regs to execute in kernel at %g6
	! done resumes execution there
	!
	wrpr	%g0, %g6, %tnpc
	rdpr	%cwp, %l0
	set	TSTATE_KERN, %l1
	wrpr	%l1, %l0, %tstate
	done
	/* NOTREACHED */
	SET_SIZE(user_trap)
	SET_SIZE(sys_trap)

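/*
 * The two KWBUF*_TO_STACK macros below copy one saved register window
 * (8 locals and 8 ins) from the per-CPU kernel window-save buffer at SBP
 * out to the stack frame at SPP, in 64-bit (V9 biased) or 32-bit layout
 * respectively.
 */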
#define	KWBUF64_TO_STACK(SBP,SPP,TMP) \
	ldx	[SBP + (0*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 0]; \
	ldx	[SBP + (1*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 8]; \
	ldx	[SBP + (2*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 16]; \
	ldx	[SBP + (3*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 24]; \
	ldx	[SBP + (4*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 32]; \
	ldx	[SBP + (5*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 40]; \
	ldx	[SBP + (6*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 48]; \
	ldx	[SBP + (7*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 56]; \
	ldx	[SBP + (8*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 64]; \
	ldx	[SBP + (9*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 72]; \
	ldx	[SBP + (10*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 80]; \
	ldx	[SBP + (11*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 88]; \
	ldx	[SBP + (12*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 96]; \
	ldx	[SBP + (13*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 104]; \
	ldx	[SBP + (14*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 112]; \
	ldx	[SBP + (15*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 120];

#define	KWBUF32_TO_STACK(SBP,SPP,TMP) \
	lduw	[SBP + (0 * 4)], TMP; \
	stw	TMP, [SPP + 0]; \
	lduw	[SBP + (1 * 4)], TMP; \
	stw	TMP, [SPP + (1 * 4)]; \
	lduw	[SBP + (2 * 4)], TMP; \
	stw	TMP, [SPP + (2 * 4)]; \
	lduw	[SBP + (3 * 4)], TMP; \
	stw	TMP, [SPP + (3 * 4)]; \
	lduw	[SBP + (4 * 4)], TMP; \
	stw	TMP, [SPP + (4 * 4)]; \
	lduw	[SBP + (5 * 4)], TMP; \
	stw	TMP, [SPP + (5 * 4)]; \
	lduw	[SBP + (6 * 4)], TMP; \
	stw	TMP, [SPP + (6 * 4)]; \
	lduw	[SBP + (7 * 4)], TMP; \
	stw	TMP, [SPP + (7 * 4)]; \
	lduw	[SBP + (8 * 4)], TMP; \
	stw	TMP, [SPP + (8 * 4)]; \
	lduw	[SBP + (9 * 4)], TMP; \
	stw	TMP, [SPP + (9 * 4)]; \
	lduw	[SBP + (10 * 4)], TMP; \
	stw	TMP, [SPP + (10 * 4)]; \
	lduw	[SBP + (11 * 4)], TMP; \
	stw	TMP, [SPP + (11 * 4)]; \
	lduw	[SBP + (12 * 4)], TMP; \
	stw	TMP, [SPP + (12 * 4)]; \
	lduw	[SBP + (13 * 4)], TMP; \
	stw	TMP, [SPP + (13 * 4)]; \
	lduw	[SBP + (14 * 4)], TMP; \
	stw	TMP, [SPP + (14 * 4)]; \
	lduw	[SBP + (15 * 4)], TMP; \
	stw	TMP, [SPP + (15 * 4)];

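/*
 * COPY_KWBUF_TO_STACK: if the per-CPU kernel window buffer is flagged full
 * (MCPU_KWBUF_FULL), clear the flag and copy the buffered window out to the
 * stack pointer saved in MCPU_KWBUF_SP, using the 64-bit layout when that
 * saved %sp has its low bit set (a V9 biased stack pointer) and the 32-bit
 * layout otherwise.
 */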
#define	COPY_KWBUF_TO_STACK(TMP1,TMP2,TMP3)		\
	CPU_ADDR(TMP2, TMP3)				;\
	add	TMP2, CPU_MCPU, TMP2			;\
	ld	[TMP2 + MCPU_KWBUF_FULL], TMP3		;\
	brz,pt	TMP3, 2f				;\
	nop						;\
	st	%g0, [TMP2 + MCPU_KWBUF_FULL]		;\
	set	MCPU_KWBUF_SP, TMP3			;\
	ldn	[TMP2 + TMP3], TMP3			;\
	set	MCPU_KWBUF, TMP1			;\
	btst	1, TMP3					;\
	bz,pn	%xcc, 3f				;\
	add	TMP2, TMP1, TMP2			;\
	KWBUF64_TO_STACK(TMP2, TMP3, TMP1)		;\
	ba,a	2f					;\
3:							;\
	KWBUF32_TO_STACK(TMP2, TMP3, TMP1)		;\
2:

	ENTRY_NP(prom_trap)
	!
	! prom trap switches the stack to 32-bit
	! if we took a trap from a 64-bit window
	! Then buys a window on the current stack.
	!
	save	%sp, -SA64(REGOFF + REGSIZE), %sp
					/* 32 bit frame, 64 bit sized */
	COPY_KWBUF_TO_STACK(%o1, %o2, %o3)
	set	ptl0, %g6
	ba,a,pt	%xcc, have_win
	SET_SIZE(prom_trap)

	ENTRY_NP(priv_trap)
	!
	! kernel trap
	! buy a window on the current stack
	!
	! is the trap PC in the range allocated to Open Firmware?
	rdpr	%tpc, %g5
	set	OFW_END_ADDR, %g6
	cmp	%g5, %g6
	bgu,a,pn %xcc, 1f
	  rdpr	%pil, %g5
	set	OFW_START_ADDR, %g6
	cmp	%g5, %g6
	bgeu,pn	%xcc, prom_trap
	  rdpr	%pil, %g5
1:
	set	ktl0, %g6
	save	%sp, -SA(REGOFF + REGSIZE), %sp
	COPY_KWBUF_TO_STACK(%o1, %o2, %o3)
	ba,a,pt	%xcc, have_win
	SET_SIZE(priv_trap)

/*
 * FILL_32bit_rtt/FILL_64bit_rtt fill a 32/64-bit-wide register window
 * from a 32/64-bit-wide address space via the designated asi.
 * They are used to fill windows in user_rtt to avoid going above TL 2.
 */
/* TODO: Use the faster FILL based on FILL_32bit_asi/FILL_64bit_asi */
#define	FILL_32bit_rtt(asi_num)				\
	mov	asi_num, %asi					;\
	rdpr	%cwp, %g1					;\
	dec	%g1						;\
	wrpr	%g1, %cwp					;\
	srl	%sp, 0, %sp					;\
	lda	[%sp + 0]%asi, %l0				;\
	lda	[%sp + 4]%asi, %l1				;\
	lda	[%sp + 8]%asi, %l2				;\
	lda	[%sp + 12]%asi, %l3				;\
	lda	[%sp + 16]%asi, %l4				;\
	lda	[%sp + 20]%asi, %l5				;\
	lda	[%sp + 24]%asi, %l6				;\
	lda	[%sp + 28]%asi, %l7				;\
	lda	[%sp + 32]%asi, %i0				;\
	lda	[%sp + 36]%asi, %i1				;\
	lda	[%sp + 40]%asi, %i2				;\
	lda	[%sp + 44]%asi, %i3				;\
	lda	[%sp + 48]%asi, %i4				;\
	lda	[%sp + 52]%asi, %i5				;\
	lda	[%sp + 56]%asi, %i6				;\
	lda	[%sp + 60]%asi, %i7				;\
	restored						;\
	add	%g1, 1, %g1					;\
	wrpr	%g1, %cwp

#define	FILL_64bit_rtt(asi_num)				\
	mov	asi_num, %asi					;\
	rdpr	%cwp, %g1					;\
	sub	%g1, 1, %g1					;\
	wrpr	%g1, %cwp					;\
	ldxa	[%sp + V9BIAS64 + 0]%asi, %l0			;\
	ldxa	[%sp + V9BIAS64 + 8]%asi, %l1			;\
	ldxa	[%sp + V9BIAS64 + 16]%asi, %l2			;\
	ldxa	[%sp + V9BIAS64 + 24]%asi, %l3			;\
	ldxa	[%sp + V9BIAS64 + 32]%asi, %l4			;\
	ldxa	[%sp + V9BIAS64 + 40]%asi, %l5			;\
	ldxa	[%sp + V9BIAS64 + 48]%asi, %l6			;\
	ldxa	[%sp + V9BIAS64 + 56]%asi, %l7			;\
	ldxa	[%sp + V9BIAS64 + 64]%asi, %i0			;\
	ldxa	[%sp + V9BIAS64 + 72]%asi, %i1			;\
	ldxa	[%sp + V9BIAS64 + 80]%asi, %i2			;\
	ldxa	[%sp + V9BIAS64 + 88]%asi, %i3			;\
	ldxa	[%sp + V9BIAS64 + 96]%asi, %i4			;\
	ldxa	[%sp + V9BIAS64 + 104]%asi, %i5			;\
	ldxa	[%sp + V9BIAS64 + 112]%asi, %i6			;\
	ldxa	[%sp + V9BIAS64 + 120]%asi, %i7			;\
	restored						;\
	add	%g1, 1, %g1					;\
	wrpr	%g1, %cwp

	ENTRY_NP(utl0)
	SAVE_GLOBALS(%l7)
	SAVE_OUTS(%l7)
	mov	%l6, THREAD_REG
	wrpr	%g0, PSTATE_KERN, %pstate	! enable ints
	jmpl	%l3, %o7			! call trap handler
	mov	%l7, %o0
	!
	ALTENTRY(user_rtt)
	!
	! Register inputs
	!	%l7 - regs
	!
	! disable interrupts and check for ASTs and wbuf restores
	! keep cpu_base_spl in %l4
	!
	wrpr	%g0, PIL_MAX, %pil
	ldn	[THREAD_REG + T_CPU], %l0
	ld	[%l0 + CPU_BASE_SPL], %l4

	ldub	[THREAD_REG + T_ASTFLAG], %l2
	brz,pt	%l2, 1f
	ld	[%sp + STACK_BIAS + MPCB_WBCNT], %l3
	!
	! call trap to do ast processing
	!
	wrpr	%g0, %l4, %pil			! pil = cpu_base_spl
	mov	%l7, %o0
	call	trap
	  mov	T_AST, %o2
	ba,a,pt	%xcc, user_rtt
1:
	brz,pt	%l3, 2f
	mov	THREAD_REG, %l6
	!
	! call restore_wbuf to push wbuf windows to stack
	!
	wrpr	%g0, %l4, %pil			! pil = cpu_base_spl
	mov	%l7, %o0
	call	trap
	  mov	T_FLUSH_PCB, %o2
	ba,a,pt	%xcc, user_rtt
2:
#ifdef TRAPTRACE
	TRACE_RTT(TT_SYS_RTT_USER, %l0, %l1, %l2, %l3)
#endif /* TRAPTRACE */
	ld	[%sp + STACK_BIAS + MPCB_WSTATE], %l3	! get wstate

	!
	! restore user globals and outs
	!
	rdpr	%pstate, %l1
	wrpr	%l1, PSTATE_IE, %pstate
	RESTORE_GLOBALS(%l7)
	! switch to global set 1, saving THREAD_REG in %l6
	wrpr	%g0, 1, %gl
	mov	%sp, %g6	! remember the mpcb pointer in %g6
	RESTORE_OUTS(%l7)
	!
	! set %pil from cpu_base_spl
	!
	wrpr	%g0, %l4, %pil
	!
	! raise tl (now using nucleus context)
	! set pcontext to scontext for user execution
	!
	wrpr	%g0, 1, %tl

	mov	MMU_SCONTEXT, %g1
	ldxa	[%g1]ASI_MMU_CTX, %g2
	mov	MMU_PCONTEXT, %g1
	stxa	%g2, [%g1]ASI_MMU_CTX
	!
	! If shared context support is not enabled, then the next six
	! instructions will be patched with nop instructions.
	!
	.global sfmmu_shctx_user_rtt_patch
sfmmu_shctx_user_rtt_patch:
	!
	! On processors which support multiple contexts, writing to
	! pcontext0 automatically updates pcontext1 for backwards
	! compatibility. So, if scontext0 & scontext1 are the same
	! a write to pcontext0 is sufficient.
	!
	mov	MMU_SCONTEXT1, %g1
	ldxa	[%g1]ASI_MMU_CTX, %g3
	cmp	%g2, %g3
	beq,pt	%xcc, no_pctx1_update
	  mov	MMU_PCONTEXT1, %g1
	stxa	%g3, [%g1]ASI_MMU_CTX

no_pctx1_update:
	! Ensure new ctxs take effect by the time the "retry" (below) completes
	membar	#Sync

	!
	! setup trap regs
	!
	ldn	[%l7 + PC_OFF], %g1
	ldn	[%l7 + nPC_OFF], %g2
	ldx	[%l7 + TSTATE_OFF], %l0
	andn	%l0, TSTATE_CWP, %g7
	wrpr	%g1, %tpc
	wrpr	%g2, %tnpc
	!
	! switch "other" windows back to "normal" windows and
	! restore to window we originally trapped in
	!
	rdpr	%otherwin, %g1
	wrpr	%g0, 0, %otherwin
	add	%l3, WSTATE_CLEAN_OFFSET, %l3	! convert to "clean" wstate
	wrpr	%g0, %l3, %wstate
	wrpr	%g0, %g1, %canrestore
	!
	! First attempt to restore from the watchpoint saved register window
	tst	%g1
	bne,a	1f
	  clrn	[%g6 + STACK_BIAS + MPCB_RSP0]
	tst	%fp
	be,a	1f
	  clrn	[%g6 + STACK_BIAS + MPCB_RSP0]
	! test for user return window in pcb
	ldn	[%g6 + STACK_BIAS + MPCB_RSP0], %g1
	cmp	%fp, %g1
	bne	1f
	  clrn	[%g6 + STACK_BIAS + MPCB_RSP0]
	restored
	restore
	! restore from user return window
	RESTORE_V9WINDOW(%g6 + STACK_BIAS + MPCB_RWIN0)
	!
	! Attempt to restore from the second watchpoint saved register window
	tst	%fp
	be,a	2f
	  clrn	[%g6 + STACK_BIAS + MPCB_RSP1]
	ldn	[%g6 + STACK_BIAS + MPCB_RSP1], %g1
	cmp	%fp, %g1
	bne	2f
	  clrn	[%g6 + STACK_BIAS + MPCB_RSP1]
	restored
	restore
	RESTORE_V9WINDOW(%g6 + STACK_BIAS + MPCB_RWIN1)
	save
	b,a	2f
1:
	rdpr	%canrestore, %g1
	brnz	%g1, 3f
	nop			! no trap, use restore directly
	rdpr	%cwp, %g1
	wrpr	%g1, %g7, %tstate	! needed by wbuf recovery code
	! hand craft the restore to avoid getting to TL > 2
	rdpr	%wstate, %g1
	btst	1, %g1
	beq	4f
	nop
	.global	rtt_fill_start
rtt_fill_start:
	FILL_32bit_rtt(ASI_AIUP)
	ba,a	3f
4:
	FILL_64bit_rtt(ASI_AIUP)
	.global	rtt_fill_end
rtt_fill_end:
3:
	restore				! should not trap
2:
	!
	! set %cleanwin to %canrestore
	! set %tstate to the correct %cwp
	! retry resumes user execution
	!
	rdpr	%canrestore, %g1
	wrpr	%g0, %g1, %cleanwin
	rdpr	%cwp, %g1
	wrpr	%g1, %g7, %tstate
	retry
	/* NOTREACHED */
	SET_SIZE(user_rtt)
	SET_SIZE(utl0)

	ENTRY_NP(ptl0)
	SAVE_GLOBALS(%l7)
	SAVE_OUTS(%l7)
	CPU_ADDR(%g5, %g6)
	ldn	[%g5 + CPU_THREAD], THREAD_REG
	wrpr	%g0, PSTATE_KERN, %pstate	! enable ints
	jmpl	%l3, %o7			! call trap handler
	mov	%l7, %o0
	!
	ALTENTRY(prom_rtt)
#ifdef TRAPTRACE
	TRACE_RTT(TT_SYS_RTT_PROM, %l0, %l1, %l2, %l3)
#endif /* TRAPTRACE */
	ba,pt	%xcc, common_rtt
	mov	THREAD_REG, %l0
	SET_SIZE(prom_rtt)
	SET_SIZE(ptl0)

	ENTRY_NP(ktl0)
	/*
	 * THREAD_REG cannot be restored in fault_32bit_fn1 since
	 * sun4v cannot safely lower %gl then raise it again.
	 */
	CPU_ADDR(%l0, %l1)
	ldn	[%l0 + CPU_THREAD], THREAD_REG
	SAVE_GLOBALS(%l7)
	SAVE_OUTS(%l7)				! for the call bug workaround
	wrpr	%g0, PSTATE_KERN, %pstate	! enable ints
	jmpl	%l3, %o7			! call trap handler
	mov	%l7, %o0
	!
	ALTENTRY(priv_rtt)
#ifdef TRAPTRACE
	TRACE_RTT(TT_SYS_RTT_PRIV, %l0, %l1, %l2, %l3)
#endif /* TRAPTRACE */
	!
	! Register inputs
	!	%l7 - regs
	!	%l6 - trap %pil
	!
	! Check for a kernel preemption request
	!
	ldn	[THREAD_REG + T_CPU], %l0
	ldub	[%l0 + CPU_KPRUNRUN], %l0
	brz,pt	%l0, 1f
	nop

	!
	! Attempt to preempt
	!
	ldstub	[THREAD_REG + T_PREEMPT_LK], %l0	! load preempt lock
	brnz,pn	%l0, 1f			! can't call kpreempt if this thread is
	nop				!   already in it...

	call	kpreempt
	mov	%l6, %o0		! pass original interrupt level

	stub	%g0, [THREAD_REG + T_PREEMPT_LK]	! nuke the lock

	rdpr	%pil, %o0		! compare old pil level
	cmp	%l6, %o0		!   with current pil level
	movg	%xcc, %o0, %l6		! if current is lower, drop old pil
1:
	!
	! If we interrupted the mutex_owner_running() critical region we
	! must reset the PC and nPC back to the beginning to prevent missed
	! wakeups.  See the comments in mutex_exit() for details.
	!
	ldn	[%l7 + PC_OFF], %l0
	set	mutex_owner_running_critical_start, %l1
	sub	%l0, %l1, %l0
	cmp	%l0, mutex_owner_running_critical_size
	bgeu,pt	%xcc, 2f
	mov	THREAD_REG, %l0
	stn	%l1, [%l7 + PC_OFF]	! restart mutex_owner_running()
	add	%l1, 4, %l1
	ba,pt	%xcc, common_rtt
	stn	%l1, [%l7 + nPC_OFF]

2:
	!
	! If we interrupted the mutex_exit() critical region we must reset
	! the PC and nPC back to the beginning to prevent missed wakeups.
	! See the comments in mutex_exit() for details.
	!
	ldn	[%l7 + PC_OFF], %l0
	set	mutex_exit_critical_start, %l1
	sub	%l0, %l1, %l0
	cmp	%l0, mutex_exit_critical_size
	bgeu,pt	%xcc, common_rtt
	mov	THREAD_REG, %l0
	stn	%l1, [%l7 + PC_OFF]	! restart mutex_exit()
	add	%l1, 4, %l1
	stn	%l1, [%l7 + nPC_OFF]

common_rtt:
	!
	! restore globals and outs
	!
	rdpr	%pstate, %l1
	wrpr	%l1, PSTATE_IE, %pstate
	RESTORE_GLOBALS(%l7)
	! switch to global set 1
	wrpr	%g0, 1, %gl
	RESTORE_OUTS(%l7)
	!
	! set %pil from max(old pil, cpu_base_spl)
	!
	ldn	[%l0 + T_CPU], %l0
	ld	[%l0 + CPU_BASE_SPL], %l0
	cmp	%l6, %l0
	movg	%xcc, %l6, %l0
	wrpr	%g0, %l0, %pil
	!
	! raise tl
	! setup trap regs
	! restore to window we originally trapped in
	!
	wrpr	%g0, 1, %tl
	ldn	[%l7 + PC_OFF], %g1
	ldn	[%l7 + nPC_OFF], %g2
	ldx	[%l7 + TSTATE_OFF], %l0
	andn	%l0, TSTATE_CWP, %g7
	wrpr	%g1, %tpc
	wrpr	%g2, %tnpc
	rdpr	%canrestore, %g1
	brnz	%g1, 3f
	nop			! no trap, use restore directly
	rdpr	%cwp, %g1
	wrpr	%g1, %g7, %tstate	! needed by wbuf recovery code
	! hand craft the restore to avoid getting to TL > 2
	FILL_64bit_rtt(ASI_N)
3:
	restore
	!
	! set %tstate to the correct %cwp
	! retry resumes prom execution
	!
	rdpr	%cwp, %g1
	wrpr	%g1, %g7, %tstate
	retry
	/* NOTREACHED */
	SET_SIZE(priv_rtt)
	SET_SIZE(ktl0)

#endif	/* lint */

#ifndef lint

#ifdef DEBUG
	.seg	".data"
	.align	4

	.global bad_g4_called
bad_g4_called:
	.word	0

sys_trap_wrong_pil:
	.asciz	"sys_trap: %g4(%d) is lower than %pil(%d)"
	.align	4
	.seg	".text"

	ENTRY_NP(bad_g4)
	mov	%o1, %o0
	mov	%o2, %o1
	call	panic
	mov	%o3, %o2
	SET_SIZE(bad_g4)
#endif /* DEBUG */
#endif /* lint */

/*
 * sys_tl1_panic can be called by traps at tl1 which
 * really want to panic, but need the rearrangement of
 * the args as provided by this wrapper routine.
 */
#if defined(lint)

void
sys_tl1_panic(void)
{}

#else	/* lint */
	ENTRY_NP(sys_tl1_panic)
	mov	%o1, %o0
	mov	%o2, %o1
	call	panic
	mov	%o3, %o2
	SET_SIZE(sys_tl1_panic)
#endif /* lint */


/*
 * Flush all windows to memory, except for the one we entered in.
 * We do this by doing NWINDOW-2 saves then the same number of restores.
 * This leaves the WIM immediately before window entered in.
 * This is used for context switching.
 */

#if defined(lint)

void
flush_windows(void)
{}

#else	/* lint */

	ENTRY_NP(flush_windows)
	retl
	flushw
	SET_SIZE(flush_windows)

#endif	/* lint */

#if defined(lint)

void
debug_flush_windows(void)
{}

#else	/* lint */

	ENTRY_NP(debug_flush_windows)
	set	nwindows, %g1
	ld	[%g1], %g1
	mov	%g1, %g2

1:
	save	%sp, -WINDOWSIZE, %sp
	brnz	%g2, 1b
	dec	%g2

	mov	%g1, %g2
2:
	restore
	brnz	%g2, 2b
	dec	%g2

	retl
	nop

	SET_SIZE(debug_flush_windows)

#endif	/* lint */

/*
 * flush user windows to memory.
 */

#if defined(lint)

void
flush_user_windows(void)
{}

#else	/* lint */

	ENTRY_NP(flush_user_windows)
	rdpr	%otherwin, %g1
	brz	%g1, 3f
	clr	%g2
1:
	save	%sp, -WINDOWSIZE, %sp
	rdpr	%otherwin, %g1
	brnz	%g1, 1b
	add	%g2, 1, %g2
2:
	sub	%g2, 1, %g2		! restore back to orig window
	brnz	%g2, 2b
	restore
3:
	retl
	nop
	SET_SIZE(flush_user_windows)

#endif	/* lint */

/*
 * Throw out any user windows in the register file.
 * Used by setregs (exec) to clean out old user.
 * Used by sigcleanup to remove extraneous windows when returning from a
 * signal.
 */

#if defined(lint)

void
trash_user_windows(void)
{}

#else	/* lint */

	ENTRY_NP(trash_user_windows)
	rdpr	%otherwin, %g1
	brz	%g1, 3f			! no user windows?
	ldn	[THREAD_REG + T_STACK], %g5

	!
	! There are old user windows in the register file. We disable ints
	! and increment cansave so that we don't overflow on these windows.
	! Also, this sets up a nice underflow when first returning to the
	! new user.
	!
	rdpr	%pstate, %g2
	wrpr	%g2, PSTATE_IE, %pstate
	rdpr	%cansave, %g3
	rdpr	%otherwin, %g1		! re-read in case of interrupt
	add	%g3, %g1, %g3
	wrpr	%g0, 0, %otherwin
	wrpr	%g0, %g3, %cansave
	wrpr	%g0, %g2, %pstate
3:
	retl
	clr     [%g5 + MPCB_WBCNT]       ! zero window buffer cnt
	SET_SIZE(trash_user_windows)


#endif	/* lint */

/*
 * Setup g7 via the CPU data structure.
 */
#if defined(lint)

struct scb *
set_tbr(struct scb *s)
{ return (s); }

#else	/* lint */

	ENTRY_NP(set_tbr)
	retl
	ta	72		! no tbr, stop simulation
	SET_SIZE(set_tbr)

#endif	/* lint */


#if defined(lint)
/*
 * These need to be defined somewhere to lint and there is no "hicore.s"...
 */
char etext[1], end[1];
#endif	/* lint*/

#if defined (lint)

/* ARGSUSED */
void
ptl1_panic(u_int reason)
{}

#else /* lint */

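/*
 * PTL1_SAVE_WINDOW stores the current window's locals and ins into the
 * ptl1_regs save area addressed by RP, using the pre-loaded %asi
 * (ASI_MEM, i.e. physical addresses) so that no MMU traps are taken.
 */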
#define	PTL1_SAVE_WINDOW(RP)						\
	stxa	%l0, [RP + RW64_LOCAL + (0 * RW64_LOCAL_INCR)] %asi;	\
	stxa	%l1, [RP + RW64_LOCAL + (1 * RW64_LOCAL_INCR)] %asi;	\
	stxa	%l2, [RP + RW64_LOCAL + (2 * RW64_LOCAL_INCR)] %asi;	\
	stxa	%l3, [RP + RW64_LOCAL + (3 * RW64_LOCAL_INCR)] %asi;	\
	stxa	%l4, [RP + RW64_LOCAL + (4 * RW64_LOCAL_INCR)] %asi;	\
	stxa	%l5, [RP + RW64_LOCAL + (5 * RW64_LOCAL_INCR)] %asi;	\
	stxa	%l6, [RP + RW64_LOCAL + (6 * RW64_LOCAL_INCR)] %asi;	\
	stxa	%l7, [RP + RW64_LOCAL + (7 * RW64_LOCAL_INCR)] %asi;	\
	stxa	%i0, [RP + RW64_IN + (0 * RW64_IN_INCR)] %asi;		\
	stxa	%i1, [RP + RW64_IN + (1 * RW64_IN_INCR)] %asi;		\
	stxa	%i2, [RP + RW64_IN + (2 * RW64_IN_INCR)] %asi;		\
	stxa	%i3, [RP + RW64_IN + (3 * RW64_IN_INCR)] %asi;		\
	stxa	%i4, [RP + RW64_IN + (4 * RW64_IN_INCR)] %asi;		\
	stxa	%i5, [RP + RW64_IN + (5 * RW64_IN_INCR)] %asi;		\
	stxa	%i6, [RP + RW64_IN + (6 * RW64_IN_INCR)] %asi;		\
	stxa	%i7, [RP + RW64_IN + (7 * RW64_IN_INCR)] %asi
#define	PTL1_NEXT_WINDOW(scr)	\
	add	scr, RWIN64SIZE, scr

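/*
 * PTL1_RESET_RWINDOWS makes every implemented window available again:
 * %cleanwin = NWINDOWS - 1, %cansave = NWINDOWS - 2, and both
 * %canrestore and %otherwin are cleared.
 */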
#define	PTL1_RESET_RWINDOWS(scr)			\
	sethi	%hi(nwin_minus_one), scr;		\
	ld	[scr + %lo(nwin_minus_one)], scr;	\
	wrpr	scr, %cleanwin;				\
	dec	scr;					\
	wrpr	scr, %cansave;				\
	wrpr	%g0, %canrestore;			\
	wrpr	%g0, %otherwin

#define	PTL1_DCACHE_LINE_SIZE	4	/* small enough for all CPUs */

/*
 * ptl1_panic is called when the kernel detects that it is in an invalid state
 * and the trap level is greater than 0.  ptl1_panic is responsible for saving
 * the current CPU state, restoring the CPU state to normal, and calling panic.
 * The CPU state must be saved reliably without causing traps.  ptl1_panic saves
 * it in the ptl1_state structure, which is a member of the machcpu structure.
 * In order to access the ptl1_state structure without causing traps, physical
 * addresses are used so that we can avoid MMU miss traps.  The restriction on
 * physical memory accesses is that the ptl1_state structure must be on a single
 * physical page.  This is because (1) a single physical address for each
 * ptl1_state structure is needed and (2) it simplifies physical address
 * calculation for each member of the structure.
 * ptl1_panic is a likely spot for stack overflows to wind up; thus, the current
 * stack may not be usable.  In order to call panic reliably in such a state,
 * each CPU needs a dedicated ptl1 panic stack.
 * CPU_ALLOC_SIZE, which is defined to be MMU_PAGESIZE, is used to allocate the
 * cpu structure and a ptl1 panic stack.  They are put together on the same page
 * for memory space efficiency.  The low address part is used for the cpu
 * structure, and the high address part is for a ptl1 panic stack.
 * The cpu_pa array holds the physical addresses of the allocated cpu structures,
 * just as the cpu array holds their virtual addresses.
 *
 * %g1 reason to be called
 * %g2 broken
 * %g3 broken
 */
	ENTRY_NP(ptl1_panic)
	!
	! increment the entry counter.
	! save CPU state if this is the first entry.
	!
	CPU_PADDR(%g2, %g3);
	add	%g2, CPU_PTL1, %g2		! pstate = &CPU->mcpu.ptl1_state
	wr	%g0, ASI_MEM, %asi		! physical address access
	!
	! pstate->ptl1_entry_count++
	!
	lduwa	[%g2 + PTL1_ENTRY_COUNT] %asi, %g3
	add	%g3, 1, %g3
	stuwa	%g3, [%g2 + PTL1_ENTRY_COUNT] %asi
	!
	! CPU state saving is skipped from the 2nd entry to ptl1_panic since we
	! do not want to clobber the state from the original failure.  panic()
	! is responsible for handling multiple or recursive panics.
	!
	cmp	%g3, 2				! if (ptl1_entry_count >= 2)
	bge,pn	%icc, state_saved		!	goto state_saved
	  add	%g2, PTL1_REGS, %g3		! %g3 = &pstate->ptl1_regs[0]
	!
	! save CPU state
	!
save_cpu_state:
	! save current global registers
	! so that all of them become available for use
	!
	stxa	%o1, [%g3 + PTL1_RWINDOW] %asi		! save %o1
	stxa	%o2, [%g3 + PTL1_RWINDOW + 8] %asi	! save %o2
	stxa	%o3, [%g3 + PTL1_RWINDOW + 16] %asi	! save %o3
	rdpr	%gl, %o1
	add	%g3, PTL1_GREGS, %o2		! %o2 = &ptl1_gregs[0]
	mov	%g3, %o3
6:
	stxa	%o1, [%o2 + PTL1_GL] %asi
	stxa	%g1, [%o2 + PTL1_G1] %asi
	stxa	%g2, [%o2 + PTL1_G2] %asi
	stxa	%g3, [%o2 + PTL1_G3] %asi
	stxa	%g4, [%o2 + PTL1_G4] %asi
	stxa	%g5, [%o2 + PTL1_G5] %asi
	stxa	%g6, [%o2 + PTL1_G6] %asi
	stxa	%g7, [%o2 + PTL1_G7] %asi
	add	%o2, PTL1_GREGS_INCR, %o2
	deccc	%o1
	brgez,a,pt %o1, 6b
	wrpr	%o1, %gl
	!
	! restore %g3, %o1, %o2 and %o3
	!
	mov	%o3, %g3
	ldxa	[%g3 + PTL1_RWINDOW] %asi, %o1
	ldxa	[%g3 + PTL1_RWINDOW + 8] %asi, %o2
	ldxa	[%g3 + PTL1_RWINDOW + 16] %asi, %o3
	!
	! %tl, %tt, %tstate, %tpc, %tnpc for each TL
	!
	rdpr	%tl, %g1
	brz	%g1, 1f				! if(trap_level == 0) -------+
	add	%g3, PTL1_TRAP_REGS, %g4	! %g4 = &ptl1_trap_regs[0];  !
0:						! -----------<----------+    !
	stwa	%g1, [%g4 + PTL1_TL] %asi				!    !
	rdpr	%tt, %g5						!    !
	stwa	%g5, [%g4 + PTL1_TT] %asi				!    !
	rdpr	%tstate, %g5						!    !
	stxa	%g5, [%g4 + PTL1_TSTATE] %asi				!    !
	rdpr	%tpc, %g5						!    !
	stxa	%g5, [%g4 + PTL1_TPC] %asi				!    !
	rdpr	%tnpc, %g5						!    !
	stxa	%g5, [%g4 + PTL1_TNPC] %asi				!    !
	add	%g4, PTL1_TRAP_REGS_INCR, %g4				!    !
	deccc	%g1							!    !
	bnz,a,pt %icc, 0b			! if(trap_level != 0) --+    !
	  wrpr	%g1, %tl						     !
1:						! ----------<----------------+
	!
	! %pstate, %pil, SOFTINT, (S)TICK
	! Pending interrupts are also cleared in order to avoid a recursive call
	! to ptl1_panic in case the interrupt handler causes a panic.
	!
	rdpr	%pil, %g1
	stba	%g1, [%g3 + PTL1_PIL] %asi
	rdpr	%pstate, %g1
	stha	%g1, [%g3 + PTL1_PSTATE] %asi
	rd	SOFTINT, %g1
	sta	%g1, [%g3 + PTL1_SOFTINT] %asi
	wr	%g1, CLEAR_SOFTINT
	RD_TICKSTICK_FLAG(%g1, %g4, traptrace_use_stick)
	stxa	%g1, [%g3 + PTL1_TICK] %asi

	MMU_FAULT_STATUS_AREA(%g1)
	ldx	[%g1 + MMFSA_D_TYPE], %g4
	stxa	%g4, [%g3 + PTL1_DMMU_TYPE] %asi
	ldx	[%g1 + MMFSA_D_ADDR], %g4
	stxa	%g4, [%g3 + PTL1_DMMU_ADDR] %asi
	ldx	[%g1 + MMFSA_D_CTX], %g4
	stxa	%g4, [%g3 + PTL1_DMMU_CTX] %asi
	ldx	[%g1 + MMFSA_I_TYPE], %g4
	stxa	%g4, [%g3 + PTL1_IMMU_TYPE] %asi
	ldx	[%g1 + MMFSA_I_ADDR], %g4
	stxa	%g4, [%g3 + PTL1_IMMU_ADDR] %asi
	ldx	[%g1 + MMFSA_I_CTX], %g4
	stxa	%g4, [%g3 + PTL1_IMMU_CTX] %asi

	!
	! Save register window state and register windows.
	!
	rdpr	%cwp, %g1
	stba	%g1, [%g3 + PTL1_CWP] %asi
	rdpr	%wstate, %g1
	stba	%g1, [%g3 + PTL1_WSTATE] %asi
	rdpr	%otherwin, %g1
	stba	%g1, [%g3 + PTL1_OTHERWIN] %asi
	rdpr	%cleanwin, %g1
	stba	%g1, [%g3 + PTL1_CLEANWIN] %asi
	rdpr	%cansave, %g1
	stba	%g1, [%g3 + PTL1_CANSAVE] %asi
	rdpr	%canrestore, %g1
	stba	%g1, [%g3 + PTL1_CANRESTORE] %asi

	PTL1_RESET_RWINDOWS(%g1)
	clr	%g1
	wrpr	%g1, %cwp
	add	%g3, PTL1_RWINDOW, %g4		! %g4 = &ptl1_rwindow[0];

3:	PTL1_SAVE_WINDOW(%g4)	! <-------------+
	inc	%g1				!
	cmp	%g1, MAXWIN			!
	bgeu,pn	%icc, 5f			!
	wrpr	%g1, %cwp			!
	rdpr	%cwp, %g2			!
	cmp	%g1, %g2			! saturation check
	be,pt	%icc, 3b			!
	  PTL1_NEXT_WINDOW(%g4)		! ------+
5:
	!
	! most crucial CPU state was saved.
	! Proceed to go back to TL = 0.
	!
state_saved:
	wrpr	%g0, 1, %tl
	wrpr	%g0, 1, %gl
	wrpr	%g0, PIL_MAX, %pil
	!
	PTL1_RESET_RWINDOWS(%g1)
	wrpr	%g0, %cwp
	wrpr	%g0, %cleanwin
	wrpr	%g0, WSTATE_KERN, %wstate
	!
	! Set pcontext to run kernel.
	!
	set	MMU_PCONTEXT, %g1
	stxa	%g0, [%g1]ASI_MMU_CTX
	membar	#Sync

	rdpr	%cwp, %g1
	set	TSTATE_KERN, %g3
	wrpr	%g3, %g1, %tstate
	set	ptl1_panic_tl0, %g3
	wrpr	%g0, %g3, %tnpc
	done					! go to -->-+	TL:1
							    !
ptl1_panic_tl0:					! ----<-----+	TL:0
	CPU_ADDR(%l0, %l1)			! %l0 = cpu[cpuid]
	add	%l0, CPU_PTL1, %l1		! %l1 = &CPU->mcpu.ptl1_state
	!
	! prepare to call panic()
	!
	ldn	[%l0 + CPU_THREAD], THREAD_REG	! restore %g7
	ldn	[%l1 + PTL1_STKTOP], %l2	! %sp = ptl1_stktop
	sub	%l2, SA(MINFRAME) + STACK_BIAS, %sp
	clr	%fp				! no frame below this window
	clr	%i7
	!
	! enable limited interrupts
	!
	wrpr	%g0, CLOCK_LEVEL, %pil
	wrpr	%g0, PSTATE_KERN, %pstate
	!
	ba,pt	%xcc, ptl1_panic_handler
	  mov	%l1, %o0
	/*NOTREACHED*/
	SET_SIZE(ptl1_panic)
#endif /* lint */

#ifdef	PTL1_PANIC_DEBUG
#if defined (lint)
/*
 * ptl1_recurse() calls itself a number of times to either set up a known
 * stack or to cause a kernel stack overflow. It decrements the arguments
 * on each recursion.
 * It's called by #ifdef PTL1_PANIC_DEBUG code in startup.c to set the
 * registers to a known state to facilitate debugging.
 */

/* ARGSUSED */
void
ptl1_recurse(int count_threshold, int trap_threshold)
{}

#else /* lint */

	ENTRY_NP(ptl1_recurse)
	save    %sp, -SA(MINFRAME), %sp

	set 	ptl1_recurse_call, %o7
	cmp	%o7, %i7			! if ptl1_recurse is called
	be,pt  %icc, 0f				! by itself, then skip
	  nop					! register initialization

	/*
	 * Initialize Out Registers to Known Values
	 */
	set	0x01000, %l0			! %i0 is the ...
						! recursion_depth_count
	sub	%i0, 1, %o0;
	sub 	%i1, 1, %o1;
	add	%l0, %o0, %o2;
	add	%l0, %o2, %o3;
	add	%l0, %o3, %o4;
	add	%l0, %o4, %o5;
	ba,a	1f
	  nop

0:	/* Outs = Ins - 1 */
	sub	%i0, 1, %o0;
	sub	%i1, 1, %o1;
	sub	%i2, 1, %o2;
	sub	%i3, 1, %o3;
	sub	%i4, 1, %o4;
	sub	%i5, 1, %o5;

	/* Locals = Ins + 1 */
1:	add	%i0, 1, %l0;
	add	%i1, 1, %l1;
	add	%i2, 1, %l2;
	add	%i3, 1, %l3;
	add	%i4, 1, %l4;
	add	%i5, 1, %l5;

	set     0x0100000, %g5
	add	%g5, %g0, %g1
	add	%g5, %g1, %g2
	add	%g5, %g2, %g3
	add	%g5, %g3, %g4
	add	%g5, %g4, %g5

	brz,pn %i1, ptl1_recurse_trap		! if trpp_count == 0) {
	  nop					!    trap to ptl1_panic
						!
	brz,pn %i0, ptl1_recure_exit		! if(depth_count == 0) {
	  nop					!    skip recursive call
						! }
ptl1_recurse_call:
	call	ptl1_recurse
	  nop

ptl1_recure_exit:
	ret
	restore

ptl1_recurse_trap:
	ta	PTL1_DEBUG_TRAP; 		! Trap Always to ptl1_panic()
	  nop 					! NOTREACHED
        SET_SIZE(ptl1_recurse)

#endif /* lint */

#if defined (lint)

/* ARGSUSED */
void
ptl1_panic_xt(int arg1, int arg2)
{}

#else /* lint */
	/*
	 * Asm function to handle a cross trap to call ptl1_panic()
	 */
	ENTRY_NP(ptl1_panic_xt)
	ba	ptl1_panic
	  mov	PTL1_BAD_DEBUG, %g1
        SET_SIZE(ptl1_panic_xt)

#endif /* lint */

#endif	/* PTL1_PANIC_DEBUG */

#ifdef	TRAPTRACE
#if	defined (lint)

void
trace_ptr_panic(void)
{
}

#else	/* lint */

	ENTRY_NP(trace_ptr_panic)
	!
	! freeze the trap trace to disable the assertions.  Otherwise,
	! ptl1_panic is likely to be repeatedly called from there.
	! %g2 and %g3 are used as scratch registers in ptl1_panic.
	!
	mov	1, %g3
	sethi	%hi(trap_freeze), %g2
	st	%g3, [%g2 + %lo(trap_freeze)]
	!
	! %g1 contains the %pc address where an assertion was failed.
	! save it in trap_freeze_pc for a debugging hint if there is
	! no value saved in it.
	!
	set	trap_freeze_pc, %g2
	casn	[%g2], %g0, %g1

	ba	ptl1_panic
	mov	PTL1_BAD_TRACE_PTR, %g1
	SET_SIZE(trace_ptr_panic)

#endif	/* lint */
#endif	/* TRAPTRACE */

/*
 * The interface for a 32-bit client program that takes over the TBA
 * calling the 64-bit romvec OBP.
 */

#if defined(lint)

/* ARGSUSED */
int
client_handler(void *cif_handler, void *arg_array)
{ return 0; }

#else	/* lint */

	ENTRY(client_handler)
	save	%sp, -SA64(MINFRAME64), %sp	! 32 bit frame, 64 bit sized
	sethi	%hi(tba_taken_over), %l2
	ld	[%l2+%lo(tba_taken_over)], %l3
	brz	%l3, 1f				! is the tba_taken_over = 1 ?
	rdpr	%wstate, %l5			! save %wstate
	andn	%l5, WSTATE_MASK, %l6
	wrpr	%l6, WSTATE_KMIX, %wstate
1:	mov	%i1, %o0
1:	rdpr	%pstate, %l4			! Get the present pstate value
	andn	%l4, PSTATE_AM, %l6
	wrpr	%l6, 0, %pstate			! Set PSTATE_AM = 0
	jmpl	%i0, %o7			! Call cif handler
	nop
	wrpr	%l4, 0, %pstate			! restore pstate
	brz	%l3, 1f				! is the tba_taken_over = 1
	  nop
	wrpr	%g0, %l5, %wstate		! restore wstate
1:	ret					! Return result ...
	restore	%o0, %g0, %o0			! delay; result in %o0
	SET_SIZE(client_handler)

#endif	/* lint */

#if defined(lint)

/*ARGSUSED*/
void
panic_bad_hcall(uint64_t err, uint64_t hcall)
{}

#else   /* lint */

	.seg	".text"
bad_hcall_error:
	.asciz	"hypervisor call 0x%x returned an unexpected error %d"

	/*
	 * panic_bad_hcall is called when a hypervisor call returns an
	 * unexpected error.
	 * %o0 error number
	 * %o1 hcall number
	 */

	ENTRY(panic_bad_hcall)
	mov	%o0, %o2
	sethi	%hi(bad_hcall_error), %o0
	or	%o0, %lo(bad_hcall_error), %o0
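	! Preserve the caller's return address around the call so that panic()
	! appears to have been invoked directly from our caller (a common
	! SPARC tail-call idiom; the delay-slot mov below restores %o7).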
	mov	%o7, %o3
	call	panic
	mov	%o3, %o7
	SET_SIZE(panic_bad_hcall)

#endif  /* lint */
