/*	$NetBSD: locore.s,v 1.43 2000/11/03 05:28:28 tsutsui Exp $	*/

/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1980, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah $Hdr: locore.s 1.66 92/12/22$
 *	@(#)locore.s	8.6 (Berkeley) 5/27/94
 */

#include "opt_compat_netbsd.h"
#include "opt_compat_svr4.h"
#include "opt_compat_sunos.h"
#include "opt_lockdebug.h"

#include "assym.h"
#include <machine/asm.h>
#include <machine/trap.h>

| Remember this is a fun project!

	.data
GLOBAL(mon_crp)
	.long	0,0

| This is for kvm_mkdb, and should be the address of the beginning
| of the kernel text segment (not necessarily the same as kernbase).
	.text
GLOBAL(kernel_text)

| This is the entry point, as well as the end of the temporary stack
| used during process switch (one 8K page ending at start)
ASGLOBAL(tmpstk)
ASGLOBAL(start)

| The first step, after disabling interrupts, is to map enough of the kernel
| into high virtual address space so that we can use position dependent code.
| This is a tricky task on the sun3x because the MMU is already enabled and
| the ROM monitor provides no indication of where the root MMU table is mapped.
| Therefore we must use one of the 68030's 'transparent translation' registers
| to define a range in the address space where the MMU translation is
| turned off.  Once this is complete we can modify the MMU table directly
| without the need for it to be mapped into virtual memory.
| All code must be position independent until otherwise noted, as the
| boot loader has loaded us into low memory but all the symbols in this
| code have been linked high.
	movw	#PSL_HIGHIPL, sr	| no interrupts
	movl	#KERNBASE, a5		| for vtop conversion
	lea	_C_LABEL(mon_crp), a0	| where to store the CRP
	subl	a5, a0
	| Note: borrowing mon_crp for tt0 setup...
	movl	#0x3F8107, a0@		| map the low 1GB v=p with the
	.long	0xf0100800		| transparent translation reg0
					| [ pmove a0@, tt0 ]
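
| Decoding the magic TT0 value above (an informal sketch, per the 68030
| TTx register layout):
|   0x003F8107 = logical base 0x00, logical mask 0x3F, which matches
|   VA 0x00000000-0x3FFFFFFF, i.e. the low 1GB; bit 15 (E) enables
|   the register, bit 8 (RWM) matches both reads and writes, and
|   FC base 0 with FC mask 7 matches all function codes.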
| In order to map the kernel into high memory we will copy the root table
| entry which maps the 16 megabytes of memory starting at 0x0 into the
| entry which maps the 16 megabytes starting at KERNBASE.
	pmove	crp, a0@		| Get monitor CPU root pointer
	movl	a0@(4), a1		| 2nd word is PA of level A table

	movl	a1, a0			| compute the descriptor address
	addl	#0x3e0, a1		| for VA starting at KERNBASE
	movl	a0@, a1@		| copy descriptor type
	movl	a0@(4), a1@(4)		| copy physical address
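
| Where the 0x3e0 above comes from (a sketch, assuming the monitor's
| level A table holds 8-byte long-format descriptors, each covering
| one slice of the 4GB space):
|   KERNBASE = 0xF8000000, so index = 0xF8000000 >> 25 = 124,
|   and 124 * 8 = 992 = 0x3e0 bytes into the table.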

| Kernel is now double mapped at zero and KERNBASE.
| Force a long jump to the relocated code (high VA).
	movl	#IC_CLEAR, d0		| Flush the I-cache
	movc	d0, cacr
	jmp L_high_code:l		| long jump

L_high_code:
| We are now running in the correctly relocated kernel, so
| we are no longer restricted to position-independent code.
| It is handy to leave transparent translation enabled for the
| low 1GB while _bootstrap() is doing its thing.

| Do bootstrap stuff needed before main() gets called.
| Our boot loader leaves a copy of the kernel's exec header
| just before the start of the kernel text segment, so the
| kernel can sanity-check the DDB symbols at [end...esym].
| Pass the struct exec at tmpstk-32 to _bootstrap().
| Also, make sure the initial frame pointer is zero so that
| the backtrace algorithm used by KGDB terminates nicely.
	lea	_ASM_LABEL(tmpstk)-32, sp
	movl	#0,a6
	jsr	_C_LABEL(_bootstrap)	| See locore2.c

| Now turn off the transparent translation of the low 1GB.
| (this also flushes the ATC)
	clrl	sp@-
	.long	0xf0170800		| pmove	sp@,tt0
	addql	#4,sp

| Now that _bootstrap() is done using the PROM functions,
| we can safely set the sfc/dfc to something != FC_CONTROL
	moveq	#FC_USERD, d0		| make movs access "user data"
	movc	d0, sfc			| space for copyin/copyout
	movc	d0, dfc
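
| For example (illustrative only), with sfc/dfc = FC_USERD the moves
| instruction reads and writes user address space even though we run
| in supervisor mode:
|	movsl	a0@,d0		| fetch a long from the user VA in a0
|	movsl	d0,a0@		| store a long to the user VA in a0
| which is what the copyin()/copyout() primitives build on.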

| Setup process zero user/kernel stacks.
	movl	_C_LABEL(proc0paddr),a1	| get proc0 pcb addr
	lea	a1@(USPACE-4),sp	| set SSP to last word
	movl	#USRSTACK-4,a2
	movl	a2,usp			| init user SP

| Note curpcb was already set in _bootstrap().
| Will do fpu initialization during autoconfig (see fpu.c)
| The interrupt vector table and stack are now ready.
| Interrupts will be enabled later, AFTER autoconfiguration
| is finished, to avoid spurious interrupts.

/*
 * Final preparation for calling main.
 *
 * Create a fake exception frame that returns to user mode,
 * and save its address in p->p_md.md_regs for cpu_fork().
 * The new frames for process 1 and 2 will be adjusted by
 * cpu_set_kpc() to arrange for a call to a kernel function
 * before the new process does its rte out to user mode.
 */
	clrw	sp@-			| tf_format,tf_vector
	clrl	sp@-			| tf_pc (filled in later)
	movw	#PSL_USER,sp@-		| tf_sr for user mode
	clrl	sp@-			| tf_stackadj
	lea	sp@(-64),sp		| tf_regs[16]
	movl	sp,a1			| a1=trapframe
	lea	_C_LABEL(proc0),a0	| proc0.p_md.md_regs =
	movl	a1,a0@(P_MDREGS)	|   trapframe
	movl	a2,a1@(FR_SP)		| a2 == usp (from above)
	pea	a1@			| push &trapframe
	jbsr	_C_LABEL(main)		| main(&trapframe)
	addql	#4,sp			| help DDB backtrace
	trap	#15			| should not get here

| This is used by cpu_fork() to return to user mode.
| It is called with SP pointing to a struct trapframe.
GLOBAL(proc_do_uret)
	movl	sp@(FR_SP),a0		| grab and load
	movl	a0,usp			|   user SP
	moveml	sp@+,#0x7FFF		| load most registers (all but SSP)
	addql	#8,sp			| pop SSP and stack adjust count
	rte

/*
 * proc_trampoline:
 * This is used by cpu_set_kpc() to "push" a function call onto the
 * kernel stack of some process, very much like a signal delivery.
 * When we get here, the stack has:
 *
 * SP+8:	switchframe from before cpu_set_kpc
 * SP+4:	void *arg;
 * SP:  	u_long func;
 *
 * On entry, the switchframe pushed by cpu_set_kpc has already been
 * popped off the stack, so all this needs to do is pop the function
 * pointer into a register, call it, then pop the arg, and finally
 * return using the switchframe that remains on the stack.
 */
GLOBAL(proc_trampoline)
	movl	sp@+,a0			| function pointer
	jbsr	a0@			| (*func)(arg)
	addql	#4,sp			| toss the arg
	rts				| as cpu_switch would do
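
| In rough C terms (an illustrative sketch, not actual kernel code):
|	func = *sp++;		/* left on the stack by cpu_set_kpc() */
|	(*func)(arg);		/* e.g. a fork child's first action */
|	return;			/* rts unwinds via the old switchframe */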

| That is all the assembly startup code we need on the sun3x!
| The rest of this is like the hp300/locore.s where possible.

/*
 * Trap/interrupt vector routines
 */
#include <m68k/m68k/trap_subr.s>

GLOBAL(buserr)
	tstl	_C_LABEL(nofault)	| device probe?
	jeq	_C_LABEL(addrerr)	| no, handle as usual
	movl	_C_LABEL(nofault),sp@-	| yes,
	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
GLOBAL(addrerr)
	clrl	sp@-			| stack adjust count
	moveml	#0xFFFF,sp@-		| save user registers
	movl	usp,a0			| save the user SP
	movl	a0,sp@(FR_SP)		|   in the savearea
	lea	sp@(FR_HW),a1		| grab base of HW berr frame
	moveq	#0,d0
	movw	a1@(10),d0		| grab SSW for fault processing
	btst	#12,d0			| RB set?
	jeq	LbeX0			| no, test RC
	bset	#14,d0			| yes, must set FB
	movw	d0,a1@(10)		| for hardware too
LbeX0:
	btst	#13,d0			| RC set?
	jeq	LbeX1			| no, skip
	bset	#15,d0			| yes, must set FC
	movw	d0,a1@(10)		| for hardware too
LbeX1:
	btst	#8,d0			| data fault?
	jeq	Lbe0			| no, check for hard cases
	movl	a1@(16),d1		| fault address is as given in frame
	jra	Lbe10			| that's it
Lbe0:
	btst	#4,a1@(6)		| long (type B) stack frame?
	jne	Lbe4			| yes, go handle
	movl	a1@(2),d1		| no, can use saved PC
	btst	#14,d0			| FB set?
	jeq	Lbe3			| no, try FC
	addql	#4,d1			| yes, adjust address
	jra	Lbe10			| done
Lbe3:
	btst	#15,d0			| FC set?
	jeq	Lbe10			| no, done
	addql	#2,d1			| yes, adjust address
	jra	Lbe10			| done
Lbe4:
	movl	a1@(36),d1		| long format, use stage B address
	btst	#15,d0			| FC set?
	jeq	Lbe10			| no, all done
	subql	#2,d1			| yes, adjust address
Lbe10:
	movl	d1,sp@-			| push fault VA
	movl	d0,sp@-			| and padded SSW
	movw	a1@(6),d0		| get frame format/vector offset
	andw	#0x0FFF,d0		| clear out frame format
	cmpw	#12,d0			| address error vector?
	jeq	Lisaerr			| yes, go to it

/* MMU-specific code to determine reason for bus error. */
	movl	d1,a0			| fault address
	movl	sp@,d0			| function code from ssw
	btst	#8,d0			| data fault?
	jne	Lbe10a
	movql	#1,d0			| user program access FC
					| (we don't separate data/program)
	btst	#5,a1@			| supervisor mode?
	jeq	Lbe10a			| if no, done
	movql	#5,d0			| else supervisor program access
Lbe10a:
	ptestr	d0,a0@,#7		| do a table search
	pmove	psr,sp@			| save result
	movb	sp@,d1
	btst	#2,d1			| invalid? (incl. limit viol and berr)
	jeq	Lmightnotbemerr		| no -> wp check
	btst	#7,d1			| is it MMU table berr?
	jeq	Lismerr			| no, must be fast
	jra	Lisberr1		| real bus err needs not be fast
Lmightnotbemerr:
	btst	#3,d1			| write protect bit set?
	jeq	Lisberr1		| no, must be bus error
	movl	sp@,d0			| ssw into low word of d0
	andw	#0xc0,d0		| write protect is set on page:
	cmpw	#0x40,d0		| was it read cycle?
	jeq	Lisberr1		| yes, was not WPE, must be bus err
/* End of MMU-specific bus error code. */
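
| For reference, an informal decode of the PSR bits tested above: the
| movb grabs the high byte of the 16-bit MMU status register, so in d1
|   bit 2 = I (invalid descriptor, incl. limit violations and berr),
|   bit 3 = W (write protected somewhere along the table walk),
|   bit 7 = B (bus error during the table search itself).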

Lismerr:
	movl	#T_MMUFLT,sp@-		| show that we are an MMU fault
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisaerr:
	movl	#T_ADDRERR,sp@-		| mark address error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisberr1:
	clrw	sp@			| re-clear pad word
Lisberr:
	movl	#T_BUSERR,sp@-		| mark bus error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it

/*
 * FP exceptions.
 */
GLOBAL(fpfline)
	clrl	sp@-			| stack adjust count
	moveml	#0xFFFF,sp@-		| save registers
	moveq	#T_FPEMULI,d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

GLOBAL(fpunsupp)
	clrl	sp@-			| stack adjust count
	moveml	#0xFFFF,sp@-		| save registers
	moveq	#T_FPEMULD,d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

/*
 * Handles all other FP coprocessor exceptions.
 * Note that since some FP exceptions generate mid-instruction frames
 * and may cause signal delivery, we need to test for stack adjustment
 * after the trap call.
 */
GLOBAL(fpfault)
	clrl	sp@-		| stack adjust count
	moveml	#0xFFFF,sp@-	| save user registers
	movl	usp,a0		| and save
	movl	a0,sp@(FR_SP)	|   the user stack pointer
	clrl	sp@-		| no VA arg
	movl	_C_LABEL(curpcb),a0	| current pcb
	lea	a0@(PCB_FPCTX),a0 | address of FP savearea
	fsave	a0@		| save state
	tstb	a0@		| null state frame?
	jeq	Lfptnull	| yes, safe
	clrw	d0		| no, need to tweak BIU
	movb	a0@(1),d0	| get frame size
	bset	#3,a0@(0,d0:w)	| set exc_pend bit of BIU
Lfptnull:
	fmovem	fpsr,sp@-	| push fpsr as code argument
	frestore a0@		| restore state
	movl	#T_FPERR,sp@-	| push type arg
	jra	_ASM_LABEL(faultstkadj) | call trap and deal with stack cleanup
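
| A note on the fsave frame poked at above (informal, per the MC68881/2
| documentation): byte 0 is the frame version, where 0x00 means a null
| frame with no state to fix up, and byte 1 is the frame size, so
| a0@(0,d0:w) lands on the BIU flag word at the end of the frame.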

/*
 * Other exceptions only cause four and six word stack frames and require
 * no post-trap stack adjustment.
 */
GLOBAL(badtrap)
	clrl	sp@-			| stack adjust count
	moveml	#0xFFFF,sp@-		| save std frame regs
	jbsr	_C_LABEL(straytrap)	| report
	moveml	sp@+,#0xFFFF		| restore regs
	addql	#4, sp			| stack adjust count
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trap 0 is for system calls
 */
GLOBAL(trap0)
	clrl	sp@-			| stack adjust count
	moveml	#0xFFFF,sp@-		| save user registers
	movl	usp,a0			| save the user SP
	movl	a0,sp@(FR_SP)		|   in the savearea
	movl	d0,sp@-			| push syscall number
	jbsr	_C_LABEL(syscall)	| handle it
	addql	#4,sp			| pop syscall arg
	movl	sp@(FR_SP),a0		| grab and restore
	movl	a0,usp			|   user SP
	moveml	sp@+,#0x7FFF		| restore most registers
	addql	#8,sp			| pop SP and stack adjust
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trap 12 is the entry point for the cachectl "syscall"
 *	cachectl(command, addr, length)
 * command in d0, addr in a1, length in d1
 */
GLOBAL(trap12)
	movl	_C_LABEL(curproc),sp@-	| push curproc pointer
	movl	d1,sp@-			| push length
	movl	a1,sp@-			| push addr
	movl	d0,sp@-			| push command
	jbsr	_C_LABEL(cachectl1)	| do it
	lea	sp@(16),sp		| pop args
	jra	_ASM_LABEL(rei)		| all done
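
| A hypothetical user-side stub for this entry point might look like
| (illustrative only; the real stub lives in userland support code):
|	movl	#cmd,d0		| command
|	movl	#buf,a1		| addr
|	movl	#len,d1		| length
|	trap	#12		| enter trap12 above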

/*
 * Trace (single-step) trap.  Kernel-mode is special.
 * User mode traps are simply passed on to trap().
 */
GLOBAL(trace)
	clrl	sp@-			| stack adjust count
	moveml	#0xFFFF,sp@-
	moveq	#T_TRACE,d0

	| Check PSW and see what happened.
	|   T=0 S=0	(should not happen)
	|   T=1 S=0	trace trap from user mode
	|   T=0 S=1	trace trap on a trap instruction
	|   T=1 S=1	trace trap from system mode (kernel breakpoint)

	movw	sp@(FR_HW),d1		| get PSW
	notw	d1			| XXX no support for T0 on 680[234]0
	andw	#PSL_TS,d1		| from system mode (T=1, S=1)?
	jeq	_ASM_LABEL(kbrkpt)	|  yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault

/*
 * Trap 15 is used for:
 *	- GDB breakpoints (in user programs)
 *	- KGDB breakpoints (in the kernel)
 *	- trace traps for SUN binaries (not fully supported yet)
 * User mode traps are simply passed to trap().
 */
GLOBAL(trap15)
	clrl	sp@-			| stack adjust count
	moveml	#0xFFFF,sp@-
	moveq	#T_TRAP15,d0
	btst	#5,sp@(FR_HW)		| was supervisor mode?
	jne	_ASM_LABEL(kbrkpt)	|  yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault

ASLOCAL(kbrkpt)
	| Kernel-mode breakpoint or trace trap. (d0=trap_type)
	| Save the system sp rather than the user sp.
	movw	#PSL_HIGHIPL,sr		| lock out interrupts
	lea	sp@(FR_SIZE),a6		| Save stack pointer
	movl	a6,sp@(FR_SP)		|  from before trap

	| If we are not on tmpstk switch to it.
	| (so debugger can change the stack pointer)
	movl	a6,d1
	cmpl	#_ASM_LABEL(tmpstk),d1
	jls	Lbrkpt2			| already on tmpstk
	| Copy frame to the temporary stack
	movl	sp,a0			| a0=src
	lea	_ASM_LABEL(tmpstk)-96,a1	| a1=dst
	movl	a1,sp			| sp=new frame
	moveq	#FR_SIZE,d1
Lbrkpt1:
	movl	a0@+,a1@+
	subql	#4,d1
	bgt	Lbrkpt1

Lbrkpt2:
	| Call the trap handler for the kernel debugger.
	| Do not call trap() to handle it, so that we can
	| set breakpoints in trap() if we want.  We know
	| the trap type is either T_TRACE or T_BREAKPOINT.
	movl	d0,sp@-			| push trap type
	jbsr	_C_LABEL(trap_kdebug)
	addql	#4,sp			| pop args

	| The stack pointer may have been modified, or
	| data below it modified (by kgdb push call),
	| so push the hardware frame at the current sp
	| before restoring registers and returning.
	movl	sp@(FR_SP),a0		| modified sp
	lea	sp@(FR_SIZE),a1		| end of our frame
	movl	a1@-,a0@-		| copy 2 longs with
	movl	a1@-,a0@-		| ... predecrement
	movl	a0,sp@(FR_SP)		| sp = h/w frame
	moveml	sp@+,#0x7FFF		| restore all but sp
	movl	sp@,sp			| ... and sp
	rte				| all done

/* Use common m68k sigreturn */
#include <m68k/m68k/sigreturn.s>

/*
 * Interrupt handlers.  Most are auto-vectored,
 * and hard-wired the same way on all sun3 models.
 * Format in the stack is:
 *   d0,d1,a0,a1, sr, pc, vo
 */

#define INTERRUPT_SAVEREG \
	moveml	#0xC0C0,sp@-

#define INTERRUPT_RESTORE \
	moveml	sp@+,#0x0303
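
| Why the two masks differ (informal note): moveml's register mask is
| bit-reversed between addressing modes.  For sp@- bit 15 is d0 and
| bit 0 is a7, so d0/d1/a0/a1 = 0xC0C0; for sp@+ bit 0 is d0 and
| bit 15 is a7, so the same four registers = 0x0303.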

/*
 * This is the common auto-vector interrupt handler,
 * for which the CPU provides the vector=0x18+level.
 * These are installed in the interrupt vector table.
 */
	.align	2
GLOBAL(_isr_autovec)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_autovec)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

/* clock: see clock.c */
	.align	2
GLOBAL(_isr_clock)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(clock_intr)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

| Handler for all vectored interrupts (i.e. VME interrupts)
	.align	2
GLOBAL(_isr_vectored)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_vectored)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

#undef	INTERRUPT_SAVEREG
#undef	INTERRUPT_RESTORE

/* interrupt counters (needed by vmstat) */
GLOBAL(intrnames)
	.asciz	"spur"	| 0
	.asciz	"lev1"	| 1
	.asciz	"lev2"	| 2
	.asciz	"lev3"	| 3
	.asciz	"lev4"	| 4
	.asciz	"clock"	| 5
	.asciz	"lev6"	| 6
	.asciz	"nmi"	| 7
GLOBAL(eintrnames)

	.data
	.even
GLOBAL(intrcnt)
	.long	0,0,0,0,0,0,0,0,0,0
GLOBAL(eintrcnt)
	.text

/*
 * Emulation of VAX REI instruction.
 *
 * This code is (mostly) un-altered from the hp300 code,
 * except that sun machines do not need a simulated SIR
 * because they have a real software interrupt register.
 *
 * This code deals with checking for and servicing ASTs
 * (profiling, scheduling) and software interrupts (network, softclock).
 * We check for ASTs first, just like the VAX.  To avoid excess overhead
 * the T_ASTFLT handling code will also check for software interrupts so we
 * do not have to do it here.  After identifying that we need an AST we
 * drop the IPL to allow device interrupts.
 *
 * This code is complicated by the fact that sendsig may have been called
 * necessitating a stack cleanup.
 */

ASGLOBAL(rei)
#ifdef	DIAGNOSTIC
	tstl	_C_LABEL(panicstr)	| have we panicked?
	jne	Ldorte			| yes, do not make matters worse
#endif
	tstl	_C_LABEL(astpending)	| AST pending?
	jeq	Ldorte			| no, done
Lrei1:
	btst	#5,sp@			| yes, are we returning to user mode?
	jne	Ldorte			| no, done
	movw	#PSL_LOWIPL,sr		| lower SPL
	clrl	sp@-			| stack adjust
	moveml	#0xFFFF,sp@-		| save all registers
	movl	usp,a1			| including
	movl	a1,sp@(FR_SP)		|    the user's SP
	clrl	sp@-			| VA == none
	clrl	sp@-			| code == none
	movl	#T_ASTFLT,sp@-		| type == async system trap
	jbsr	_C_LABEL(trap)		| go handle it
	lea	sp@(12),sp		| pop value args
	movl	sp@(FR_SP),a0		| restore user SP
	movl	a0,usp			|   from save area
	movw	sp@(FR_ADJ),d0		| need to adjust stack?
	jne	Laststkadj		| yes, go to it
	moveml	sp@+,#0x7FFF		| no, restore most user regs
	addql	#8,sp			| toss SP and stack adjust
	rte				| and do real RTE
Laststkadj:
	lea	sp@(FR_HW),a1		| pointer to HW frame
	addql	#8,a1			| source pointer
	movl	a1,a0			| source
	addw	d0,a0			|  + hole size = dest pointer
	movl	a1@-,a0@-		| copy
	movl	a1@-,a0@-		|  8 bytes
	movl	a0,sp@(FR_SP)		| new SSP
	moveml	sp@+,#0x7FFF		| restore user registers
	movl	sp@,sp			| and our SP
Ldorte:
	rte				| real return

/*
 * Initialization is at the beginning of this file, because the
 * kernel entry point needs to be at zero for compatibility with
 * the Sun boot loader.  This works on Sun machines because the
 * interrupt vector table for reset is NOT at address zero.
 * (The MMU has a "boot" bit that forces access to the PROM)
 */

/*
 * Use common m68k sigcode.
 */
#include <m68k/m68k/sigcode.s>

	.text

/*
 * Primitives
 */

/*
 * Use common m68k support routines.
 */
#include <m68k/m68k/support.s>

BSS(want_resched,4)

/*
 * Use common m68k process manipulation routines.
 */
#include <m68k/m68k/proc_subr.s>

| Message for Lbadsw panic
Lsw0:
	.asciz	"cpu_switch"
	.even

	.data
GLOBAL(masterpaddr)		| XXX compatibility (debuggers)
GLOBAL(curpcb)
	.long	0
ASBSS(nullpcb,SIZEOF_PCB)
	.text

/*
 * At exit of a process, do a cpu_switch for the last time.
 * Switch to a safe stack and PCB, and select a new process to run.  The
 * old stack and u-area will be freed by the reaper.
 *
 * MUST BE CALLED AT SPLHIGH!
 */
ENTRY(switch_exit)
	movl	sp@(4),a0		| struct proc *p
					| save state into garbage pcb
	movl	#_ASM_LABEL(nullpcb),_C_LABEL(curpcb)
	lea	_ASM_LABEL(tmpstk),sp	| goto a tmp stack

	/* Schedule the vmspace and stack to be freed. */
	movl	a0,sp@-			| exit2(p)
	jbsr	_C_LABEL(exit2)
	lea	sp@(4),sp

#if defined(LOCKDEBUG)
	/* Acquire sched_lock */
	jbsr	_C_LABEL(sched_lock_idle)
#endif

	jra	_C_LABEL(cpu_switch)

/*
 * When no processes are on the runq, cpu_switch() branches to idle
 * to wait for something to come ready.
 */
Lidle:
#if defined(LOCKDEBUG)
	/* Release sched_lock */
	jbsr	_C_LABEL(sched_unlock_idle)
#endif
	stop	#PSL_LOWIPL
GLOBAL(_Idle)				| See clock.c
	movw	#PSL_HIGHIPL,sr
#if defined(LOCKDEBUG)
	/* Acquire sched_lock */
	jbsr	_C_LABEL(sched_lock_idle)
#endif
	movl	_C_LABEL(sched_whichqs),%d0
	jeq	Lidle
	jra	Lsw1

Lbadsw:
	movl	#Lsw0,sp@-
	jbsr	_C_LABEL(panic)
	/*NOTREACHED*/

/*
 * cpu_switch()
 * Hacked for sun3
 */
ENTRY(cpu_switch)
	movl	_C_LABEL(curpcb),a1	| current pcb
	movw	sr,a1@(PCB_PS)		| save sr before changing ipl
#ifdef notyet
	movl	_C_LABEL(curproc),sp@-	| remember last proc running
#endif
	clrl	_C_LABEL(curproc)

	/*
	 * Find the highest-priority queue that isn't empty,
	 * then take the first proc from that queue.
	 */
	movl	_C_LABEL(sched_whichqs),%d0
	jeq	Lidle
Lsw1:
	/*
	 * Interrupts are blocked, sched_lock is held.  If
	 * we come here via Idle, %d0 contains the contents
	 * of a non-zero sched_whichqs.
	 */
	movl	%d0,%d1
	negl	%d0
	andl	%d1,%d0
	bfffo	%d0{#0:#32},%d1
	eorib	#31,%d1
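
	| An informal walk-through of the find-lowest-set-bit idiom
	| above: d0 & -d0 isolates the lowest set bit, bfffo returns
	| its offset counted from the MSB, and eor #31 converts that
	| to a conventional bit number.  E.g. whichqs = 0x28 (queues
	| 3 and 5): d0 & -d0 = 0x08, bfffo yields 28, 28 ^ 31 = 3,
	| so we take queue 3.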

	movl	%d1,%d0
	lslb	#3,%d1			| convert queue number to index
	addl	#_C_LABEL(sched_qs),%d1	| locate queue (q)
	movl	%d1,%a1
	movl	%a1@(P_FORW),%a0	| p = q->p_forw
	cmpal	%d1,%a0			| anyone on queue?
	jeq	Lbadsw			| no, panic
#ifdef DIAGNOSTIC
	tstl	a0@(P_WCHAN)
	jne	Lbadsw
	cmpb	#SRUN,a0@(P_STAT)
	jne	Lbadsw
#endif
	movl	%a0@(P_FORW),%a1@(P_FORW)	| q->p_forw = p->p_forw
	movl	%a0@(P_FORW),%a1		| n = p->p_forw
	movl	%a0@(P_BACK),%a1@(P_BACK)	| n->p_back = q
	cmpal	%d1,%a1			| anyone left on queue?
	jne	Lsw2			| yes, skip
	movl	_C_LABEL(sched_whichqs),%d1
	bclr	%d0,%d1			| no, clear bit
	movl	%d1,_C_LABEL(sched_whichqs)
Lsw2:
	/* p->p_cpu initialized in fork1() for single-processor */
	movb	#SONPROC,a0@(P_STAT)	| p->p_stat = SONPROC
	movl	a0,_C_LABEL(curproc)
	clrl	_C_LABEL(want_resched)
#ifdef notyet
	movl	sp@+,a1			| XXX - Make this work!
	cmpl	a0,a1			| switching to same proc?
	jeq	Lswdone			| yes, skip save and restore
#endif
	/*
	 * Save state of previous process in its pcb.
	 */
	movl	_C_LABEL(curpcb),a1
	moveml	#0xFCFC,a1@(PCB_REGS)	| save non-scratch registers
	movl	usp,a2			| grab USP (a2 has been saved)
	movl	a2,a1@(PCB_USP)		| and save it

	tstl	_C_LABEL(fputype)	| Do we have an fpu?
	jeq	Lswnofpsave		| No?  Then don't try to save.
	lea	a1@(PCB_FPCTX),a2	| pointer to FP save area
	fsave	a2@			| save FP state
	tstb	a2@			| null state frame?
	jeq	Lswnofpsave		| yes, all done
	fmovem	fp0-fp7,a2@(FPF_REGS)		| save FP general regs
	fmovem	fpcr/fpsr/fpi,a2@(FPF_FPCR)	| save FP control regs
Lswnofpsave:

	/*
	 * Now that we have saved all the registers that must be
	 * preserved, we are free to use those registers until
	 * we load the registers for the switched-to process.
	 * In this section, keep:  a0=curproc, a1=curpcb
	 */

	clrl	a0@(P_BACK)		| clear back link
	movl	a0@(P_ADDR),a1		| get p_addr
	movl	a1,_C_LABEL(curpcb)

#if defined(LOCKDEBUG)
	/*
	 * Done mucking with the run queues, release the
	 * scheduler lock, but keep interrupts out.
	 */
	movl	%a0,sp@-		| not args...
	movl	%a1,sp@-		| ...just saving
	jbsr	_C_LABEL(sched_unlock_idle)
	movl	sp@+,%a1
	movl	sp@+,%a0
#endif

	/*
	 * Load the new VM context (new MMU root pointer)
	 */
	movl	a0@(P_VMSPACE),a2	| vm = p->p_vmspace
#ifdef DIAGNOSTIC
	tstl	a2			| vm == VM_MAP_NULL?
	jeq	Lbadsw			| panic
#endif
#ifdef PMAP_DEBUG
	/* When debugging just call _pmap_switch(). */
	movl	a2@(VM_PMAP),a2		| pmap = vm->vm_map.pmap
	pea	a2@			| push pmap
	jbsr	_C_LABEL(_pmap_switch)	| _pmap_switch(pmap)
	addql	#4,sp
	movl	_C_LABEL(curpcb),a1	| restore p_addr
#else
	/* Otherwise, use this inline version. */
	lea	_C_LABEL(kernel_crp), a3 | our CPU Root Ptr. (CRP)
	movl	a2@(VM_PMAP),a2		| pmap = vm->vm_map.pmap
	movl	a2@(PM_A_PHYS),d0	| phys = pmap->pm_a_phys
	cmpl	a3@(4),d0		|  == kernel_crp.rp_addr ?
	jeq	Lsame_mmuctx		| skip loadcrp/flush
	/* OK, it is a new MMU context.  Load it up. */
	movl	d0,a3@(4)
	movl	#CACHE_CLR,d0
	movc	d0,cacr			| invalidate cache(s)
	pflusha				| flush entire TLB
	pmove	a3@,crp			| load new user root pointer
Lsame_mmuctx:
#endif

	/*
	 * Reload the registers for the new process.
	 * After this point we can only use d0,d1,a0,a1
	 */
	moveml	a1@(PCB_REGS),#0xFCFC	| reload registers
	movl	a1@(PCB_USP),a0
	movl	a0,usp			| and USP

	tstl	_C_LABEL(fputype)	| If we don't have an fpu,
	jeq	Lres_skip		|  don't try to restore it.
	lea	a1@(PCB_FPCTX),a0	| pointer to FP save area
	tstb	a0@			| null state frame?
	jeq	Lresfprest		| yes, easy
	fmovem	a0@(FPF_FPCR),fpcr/fpsr/fpi	| restore FP control regs
	fmovem	a0@(FPF_REGS),fp0-fp7		| restore FP general regs
Lresfprest:
	frestore a0@			| restore state
Lres_skip:
	movw	a1@(PCB_PS),d0		| no, restore PS
#ifdef DIAGNOSTIC
	btst	#13,d0			| supervisor mode?
	jeq	Lbadsw			| no? panic!
#endif
	movw	d0,sr			| OK, restore PS
	moveq	#1,d0			| return 1 (for alternate returns)
	rts

/*
 * savectx(pcb)
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
	movl	sp@(4),a1
	movw	sr,a1@(PCB_PS)
	movl	usp,a0			| grab USP
	movl	a0,a1@(PCB_USP)		| and save it
	moveml	#0xFCFC,a1@(PCB_REGS)	| save non-scratch registers

	tstl	_C_LABEL(fputype)	| Do we have FPU?
	jeq	Lsavedone		| No?  Then don't save state.
	lea	a1@(PCB_FPCTX),a0	| pointer to FP save area
	fsave	a0@			| save FP state
	tstb	a0@			| null state frame?
	jeq	Lsavedone		| yes, all done
	fmovem	fp0-fp7,a0@(FPF_REGS)		| save FP general regs
	fmovem	fpcr/fpsr/fpi,a0@(FPF_FPCR)	| save FP control regs
Lsavedone:
	moveq	#0,d0			| return 0
	rts

/* suline() */

#ifdef DEBUG
	.data
ASGLOBAL(fulltflush)
	.long	0
ASGLOBAL(fullcflush)
	.long	0
	.text
#endif

/*
 * Invalidate entire TLB.
 */
ENTRY(TBIA)
_C_LABEL(_TBIA):
	pflusha
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip d-cache
	rts

/*
 * Invalidate any TLB entry for given VA (TB Invalidate Single)
 */
ENTRY(TBIS)
#ifdef DEBUG
	tstl	_ASM_LABEL(fulltflush)	| being conservative?
	jne	_C_LABEL(_TBIA)		| yes, flush entire TLB
#endif
	movl	sp@(4),a0
	pflush	#0,#0,a0@		| flush address from both sides
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip data cache
	rts

/*
 * Invalidate supervisor side of TLB
 */
ENTRY(TBIAS)
#ifdef DEBUG
	tstl	_ASM_LABEL(fulltflush)	| being conservative?
	jne	_C_LABEL(_TBIA)		| yes, flush everything
#endif
	pflush	#4,#4			| flush supervisor TLB entries
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip d-cache
	rts

/*
 * Invalidate user side of TLB
 */
ENTRY(TBIAU)
#ifdef DEBUG
	tstl	_ASM_LABEL(fulltflush)	| being conservative?
	jne	_C_LABEL(_TBIA)		| yes, flush everything
#endif
	pflush	#0,#4			| flush user TLB entries
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip d-cache
	rts

/*
 * Invalidate instruction cache
 */
ENTRY(ICIA)
	movl	#IC_CLEAR,d0
	movc	d0,cacr			| invalidate i-cache
	rts

/*
 * Invalidate data cache.
 * NOTE: we do not flush 68030 on-chip cache as there are no aliasing
 * problems with DC_WA.  The only cases we have to worry about are context
 * switch and TLB changes, both of which are handled "in-line" in resume
 * and TBI*.
 */
ENTRY(DCIA)
__DCIA:
	rts

ENTRY(DCIS)
__DCIS:
	rts

/*
 * Invalidate data cache.
 */
ENTRY(DCIU)
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip d-cache
	rts

/* ICPL, ICPP, DCPL, DCPP, DCPA, DCFL, DCFP */

ENTRY(PCIA)
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip d-cache
	rts

ENTRY(ecacheon)
	rts

ENTRY(ecacheoff)
	rts

/*
 * Get caller's current SP value.
 * Note that simply taking the address of a local variable in a C function
 * doesn't work because callee saved registers may be outside the stack frame
 * defined by A6 (e.g. GCC generated code).
 *
 * [I don't think the ENTRY() macro will do the right thing with this -- glass]
 */
GLOBAL(getsp)
	movl	sp,d0			| get current SP
	addql	#4,d0			| compensate for return address
	rts

ENTRY(getsfc)
	movc	sfc,d0
	rts

ENTRY(getdfc)
	movc	dfc,d0
	rts

ENTRY(getvbr)
	movc	vbr, d0
	rts

ENTRY(setvbr)
	movl	sp@(4), d0
	movc	d0, vbr
	rts

/*
 * Load a new CPU Root Pointer (CRP) into the MMU.
 *	void	loadcrp(struct mmu_rootptr *);
 */
ENTRY(loadcrp)
	movl	sp@(4),a0		| arg1: &CRP
	movl	#CACHE_CLR,d0
	movc	d0,cacr			| invalidate cache(s)
	pflusha				| flush entire TLB
	pmove	a0@,crp			| load new user root pointer
	rts

/*
 * Get the physical address of the PTE for a given VA.
 */
ENTRY(ptest_addr)
	movl	sp@(4),a0		| VA
	ptestr	#5,a0@,#7,a1		| a1 = addr of PTE
	movl	a1,d0
	rts
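
| An informal note on the ptestr above: #5 is the function code
| (supervisor data space), #7 requests a search through all table
| levels, and a1 receives the physical address of the last descriptor
| fetched, i.e. the PTE that maps the given VA.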

/*
 * Set processor priority level calls.  Most are implemented with
 * inline asm expansions.  However, we need one instantiation here
 * in case some non-optimized code makes external references.
 * Most places will use the inlined functions param.h supplies.
 */

ENTRY(_getsr)
	clrl	d0
	movw	sr,d0
	rts

ENTRY(_spl)
	clrl	d0
	movw	sr,d0
	movl	sp@(4),d1
	movw	d1,sr
	rts
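
| Typical use from C (an illustrative sketch; real callers normally
| use the inline spl macros that param.h supplies):
|	s = _spl(PSL_S|PSL_IPL7);	/* raise IPL, save old sr */
|	... critical section ...
|	_spl(s);			/* restore previous level */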

ENTRY(_splraise)
	clrl	d0
	movw	sr,d0
	movl	d0,d1
	andl	#PSL_HIGHIPL,d1		| old &= PSL_HIGHIPL
	cmpl	sp@(4),d1		| (old - new)
	bge	Lsplr
	movl	sp@(4),d1
	movw	d1,sr
Lsplr:
	rts

/*
 * Save and restore 68881 state.
 */
ENTRY(m68881_save)
	movl	sp@(4),a0		| save area pointer
	fsave	a0@			| save state
	tstb	a0@			| null state frame?
	jeq	Lm68881sdone		| yes, all done
	fmovem	fp0-fp7,a0@(FPF_REGS)		| save FP general regs
	fmovem	fpcr/fpsr/fpi,a0@(FPF_FPCR)	| save FP control regs
Lm68881sdone:
	rts

ENTRY(m68881_restore)
	movl	sp@(4),a0		| save area pointer
	tstb	a0@			| null state frame?
	jeq	Lm68881rdone		| yes, easy
	fmovem	a0@(FPF_FPCR),fpcr/fpsr/fpi	| restore FP control regs
	fmovem	a0@(FPF_REGS),fp0-fp7		| restore FP general regs
Lm68881rdone:
	frestore a0@			| restore state
	rts

/*
 * _delay(unsigned N)
 * Delay for at least (N/256) microseconds.
 * This routine depends on the variable:  delay_divisor
 * which should be set based on the CPU clock rate.
 * XXX: Currently this is set based on the CPU model,
 * XXX: but this should be determined at run time...
 */
GLOBAL(_delay)
	| d0 = arg = (usecs << 8)
	movl	sp@(4),d0
	| d1 = delay_divisor;
	movl	_C_LABEL(delay_divisor),d1
	jra	L_delay			/* Jump into the loop! */

	/*
	 * Align the branch target of the loop to a half-line (8-byte)
	 * boundary to minimize cache effects.  This guarantees both
	 * that there will be no prefetch stalls due to cache line burst
	 * operations and that the loop will run from a single cache
	 * half-line.
	 */
	.align	8
L_delay:
	subl	d1,d0
	jgt	L_delay
	rts
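
| A worked example of the fixed-point delay (illustrative): callers
| pass N = usecs << 8, so a 100us delay gives d0 = 25600.  If
| delay_divisor were, say, 82 (one loop pass costing 82/256 us on
| that CPU model), the loop would run 25600/82 = about 312 times,
| i.e. roughly 100 us.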


| Define some addresses, mostly so DDB can print useful info.
| Not using _C_LABEL() here because these symbols are never
| referenced by any C code, and if the leading underscore
| ever goes away, these lines turn into syntax errors...
	.set	_KERNBASE,KERNBASE
	.set	_MONSTART,SUN3X_MONSTART
	.set	_PROM_BASE,SUN3X_PROM_BASE
	.set	_MONEND,SUN3X_MONEND

|The end!
