| locore.s revision 1.32
1/*	$NetBSD: locore.s,v 1.32 1999/02/26 22:03:29 is Exp $	*/
2
3/*
4 * Copyright (c) 1988 University of Utah.
5 * Copyright (c) 1980, 1990, 1993
6 *	The Regents of the University of California.  All rights reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * the Systems Programming Group of the University of Utah Computer
10 * Science Department.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 *    notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 *    notice, this list of conditions and the following disclaimer in the
19 *    documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 *    must display the following acknowledgement:
22 *	This product includes software developed by the University of
23 *	California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
25 *    may be used to endorse or promote products derived from this software
26 *    without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 *	from: Utah $Hdr: locore.s 1.66 92/12/22$
41 *	@(#)locore.s	8.6 (Berkeley) 5/27/94
42 */
43
44#include "opt_compat_netbsd.h"
45#include "opt_uvm.h"
46
47#include "assym.h"
48#include <machine/asm.h>
49#include <machine/trap.h>
50
| Low-level startup, trap, and context-switch code for the NetBSD/sun3x kernel.
52
	.data
| Storage for the PROM monitor's MMU CPU Root Pointer (CRP).
| The startup code below also borrows this slot briefly as
| scratch space while setting up transparent translation.
GLOBAL(mon_crp)
	.long	0,0

| This is for kvm_mkdb, and should be the address of the beginning
| of the kernel text segment (not necessarily the same as kernbase).
	.text
GLOBAL(kernel_text)

| This is the entry point, as well as the end of the temporary stack
| used during process switch (one 8K page ending at start)
ASGLOBAL(tmpstk)
ASGLOBAL(start)
66
| The first step, after disabling interrupts, is to map enough of the kernel
| into high virtual address space so that we can use position dependent code.
| This is a tricky task on the sun3x because the MMU is already enabled and
| the ROM monitor provides no indication of where the root MMU table is mapped.
| Therefore we must use one of the 68030's 'transparent translation' registers
| to define a range in the address space where the MMU translation is
| turned off.  Once this is complete we can modify the MMU table directly
| without the need for it to be mapped into virtual memory.
| All code must be position independent until otherwise noted, as the
| boot loader has loaded us into low memory but all the symbols in this
| code have been linked high.
	movw	#PSL_HIGHIPL, sr	| no interrupts
	movl	#KERNBASE, a5		| for vtop conversion
	lea	_C_LABEL(mon_crp), a0	| where to store the CRP
	subl	a5, a0			| convert linked (high) VA to PA
	| Note: borrowing mon_crp for tt0 setup...
	movl	#0x3F8107, a0@		| map the low 1GB v=p with the
	.long	0xf0100800		| transparent translation reg0
					| [ pmove a0@, tt0 ]
					| (hand-assembled; see 68030 manual)
| In order to map the kernel into high memory we will copy the root table
| entry which maps the 16 megabytes of memory starting at 0x0 into the
| entry which maps the 16 megabytes starting at KERNBASE.
	pmove	crp, a0@		| Get monitor CPU root pointer
	movl	a0@(4), a1		| 2nd word is PA of level A table

	movl	a1, a0			| compute the descriptor address
	addl	#0x3e0, a1		| for VA starting at KERNBASE
	movl	a0@, a1@		| copy descriptor type
	movl	a0@(4), a1@(4)		| copy physical address

| Kernel is now double mapped at zero and KERNBASE.
| Force a long jump to the relocated code (high VA).
	movl	#IC_CLEAR, d0		| Flush the I-cache
	movc	d0, cacr
	jmp L_high_code:l		| long jump

L_high_code:
| We are now running in the correctly relocated kernel, so
| we are no longer restricted to position-independent code.
| It is handy to leave transparent translation enabled
| for the low 1GB while _bootstrap() is doing its thing.

| Do bootstrap stuff needed before main() gets called.
| Our boot loader leaves a copy of the kernel's exec header
| just before the start of the kernel text segment, so the
| kernel can sanity-check the DDB symbols at [end...esym].
| Pass the struct exec at tmpstk-32 to _bootstrap().
| Also, make sure the initial frame pointer is zero so that
| the backtrace algorithm used by KGDB terminates nicely.
	lea	_ASM_LABEL(tmpstk)-32, sp
	movl	#0,a6
	jsr	_C_LABEL(_bootstrap)	| See locore2.c

| Now turn off the transparent translation of the low 1GB.
| (this also flushes the ATC)
	clrl	sp@-
	.long	0xf0170800		| pmove	sp@,tt0
	addql	#4,sp

| Now that _bootstrap() is done using the PROM functions,
| we can safely set the sfc/dfc to something != FC_CONTROL
	moveq	#FC_USERD, d0		| make movs access "user data"
	movc	d0, sfc			| space for copyin/copyout
	movc	d0, dfc

| Setup process zero user/kernel stacks.
	movl	_C_LABEL(proc0paddr),a1	| get proc0 pcb addr
	lea	a1@(USPACE-4),sp	| set SSP to last word
	movl	#USRSTACK-4,a2
	movl	a2,usp			| init user SP

| Note curpcb was already set in _bootstrap().
| Will do fpu initialization during autoconfig (see fpu.c)
| The interrupt vector table and stack are now ready.
| Interrupts will be enabled later, AFTER autoconfiguration
| is finished, to avoid spurious interrupts.

/*
 * Final preparation for calling main.
 *
 * Create a fake exception frame that returns to user mode,
 * and save its address in p->p_md.md_regs for cpu_fork().
 * The new frames for process 1 and 2 will be adjusted by
 * cpu_set_kpc() to arrange for a call to a kernel function
 * before the new process does its rte out to user mode.
 */
	clrw	sp@-			| tf_format,tf_vector
	clrl	sp@-			| tf_pc (filled in later)
	movw	#PSL_USER,sp@-		| tf_sr for user mode
	clrl	sp@-			| tf_stackadj
	lea	sp@(-64),sp		| tf_regs[16]
	movl	sp,a1			| a1=trapframe
	lea	_C_LABEL(proc0),a0	| proc0.p_md.md_regs =
	movl	a1,a0@(P_MDREGS)	|   trapframe
	movl	a2,a1@(FR_SP)		| a2 == usp (from above)
	pea	a1@			| push &trapframe
	jbsr	_C_LABEL(main)		| main(&trapframe)
	addql	#4,sp			| help DDB backtrace
	trap	#15			| should not get here
166
| This is used by cpu_fork() to return to user mode.
| It is called with SP pointing to a struct trapframe.
| (FR_SP is the trapframe slot holding the saved user SP.)
GLOBAL(proc_do_uret)
	movl	sp@(FR_SP),a0		| grab and load
	movl	a0,usp			|   user SP
	moveml	sp@+,#0x7FFF		| load most registers (all but SSP)
	addql	#8,sp			| pop SSP and stack adjust count
	rte

/*
 * proc_trampoline:
 * This is used by cpu_set_kpc() to "push" a function call onto the
 * kernel stack of some process, very much like a signal delivery.
 * When we get here, the stack has:
 *
 * SP+8:	switchframe from before cpu_set_kpc
 * SP+4:	void *arg;
 * SP:  	u_long func;
 *
 * On entry, the switchframe pushed by cpu_set_kpc has already been
 * popped off the stack, so all this needs to do is pop the function
 * pointer into a register, call it, then pop the arg, and finally
 * return using the switchframe that remains on the stack.
 */
GLOBAL(proc_trampoline)
	movl	sp@+,a0			| function pointer
	jbsr	a0@			| (*func)(arg)
	addql	#4,sp			| toss the arg
	rts				| as cpu_switch would do
196
197| That is all the assembly startup code we need on the sun3x!
198| The rest of this is like the hp300/locore.s where possible.
199
200/*
201 * Trap/interrupt vector routines
202 */
203#include <m68k/m68k/trap_subr.s>
204
| Bus error and address error handlers.
| Decode the 68030 special status word (SSW) and fault frame to find
| the fault address, classify the fault (MMU fault, address error, or
| real bus error), then hand off to the common fault code.
| Fix: use the standard `moveq` mnemonic instead of the non-standard
| `movql` spelling (same opcode; consistent with the rest of this file).
GLOBAL(buserr)
	tstl	_C_LABEL(nofault)	| device probe?
	jeq	_C_LABEL(addrerr)	| no, handle as usual
	movl	_C_LABEL(nofault),sp@-	| yes,
	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
GLOBAL(addrerr)
	clrl	sp@-			| stack adjust count
	moveml	#0xFFFF,sp@-		| save user registers
	movl	usp,a0			| save the user SP
	movl	a0,sp@(FR_SP)		|   in the savearea
	lea	sp@(FR_HW),a1		| grab base of HW berr frame
	moveq	#0,d0
	movw	a1@(10),d0		| grab SSW for fault processing
	btst	#12,d0			| RB set?
	jeq	LbeX0			| no, test RC
	bset	#14,d0			| yes, must set FB
	movw	d0,a1@(10)		| for hardware too
LbeX0:
	btst	#13,d0			| RC set?
	jeq	LbeX1			| no, skip
	bset	#15,d0			| yes, must set FC
	movw	d0,a1@(10)		| for hardware too
LbeX1:
	btst	#8,d0			| data fault?
	jeq	Lbe0			| no, check for hard cases
	movl	a1@(16),d1		| fault address is as given in frame
	jra	Lbe10			| that's it
Lbe0:
	btst	#4,a1@(6)		| long (type B) stack frame?
	jne	Lbe4			| yes, go handle
	movl	a1@(2),d1		| no, can use save PC
	btst	#14,d0			| FB set?
	jeq	Lbe3			| no, try FC
	addql	#4,d1			| yes, adjust address
	jra	Lbe10			| done
Lbe3:
	btst	#15,d0			| FC set?
	jeq	Lbe10			| no, done
	addql	#2,d1			| yes, adjust address
	jra	Lbe10			| done
Lbe4:
	movl	a1@(36),d1		| long format, use stage B address
	btst	#15,d0			| FC set?
	jeq	Lbe10			| no, all done
	subql	#2,d1			| yes, adjust address
Lbe10:
	movl	d1,sp@-			| push fault VA
	movl	d0,sp@-			| and padded SSW
	movw	a1@(6),d0		| get frame format/vector offset
	andw	#0x0FFF,d0		| clear out frame format
	cmpw	#12,d0			| address error vector?
	jeq	Lisaerr			| yes, go to it

/* MMU-specific code to determine reason for bus error. */
	movl	d1,a0			| fault address
	movl	sp@,d0			| function code from ssw
	btst	#8,d0			| data fault?
	jne	Lbe10a
	moveq	#1,d0			| user program access FC
					| (we don't separate data/program)
	btst	#5,a1@			| supervisor mode?
	jeq	Lbe10a			| if no, done
	moveq	#5,d0			| else supervisor program access
Lbe10a:
	ptestr	d0,a0@,#7		| do a table search
	pmove	psr,sp@			| save result
	movb	sp@,d1
	btst	#2,d1			| invalid? (incl. limit viol and berr)
	jeq	Lmightnotbemerr		| no -> wp check
	btst	#7,d1			| is it MMU table berr?
	jeq	Lismerr			| no, must be fast
	jra	Lisberr1		| real bus err needs not be fast
Lmightnotbemerr:
	btst	#3,d1			| write protect bit set?
	jeq	Lisberr1		| no, must be bus error
	movl	sp@,d0			| ssw into low word of d0
	andw	#0xc0,d0		| write protect is set on page:
	cmpw	#0x40,d0		| was it read cycle?
	jeq	Lisberr1		| yes, was not WPE, must be bus err
/* End of MMU-specific bus error code. */

Lismerr:
	movl	#T_MMUFLT,sp@-		| show that we are an MMU fault
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisaerr:
	movl	#T_ADDRERR,sp@-		| mark address error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisberr1:
	clrw	sp@			| re-clear pad word
Lisberr:
	movl	#T_BUSERR,sp@-		| mark bus error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
297
/*
 * FP exceptions.
 * These two entry points hand the faulting FP instruction to the
 * software FP emulator via the common fault path (trap types above).
 */
GLOBAL(fpfline)
	clrl	sp@-			| stack adjust count
	moveml	#0xFFFF,sp@-		| save registers
	moveq	#T_FPEMULI,d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

GLOBAL(fpunsupp)
	clrl	sp@-			| stack adjust count
	moveml	#0xFFFF,sp@-		| save registers
	moveq	#T_FPEMULD,d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

/*
 * Handles all other FP coprocessor exceptions.
 * Note that since some FP exceptions generate mid-instruction frames
 * and may cause signal delivery, we need to test for stack adjustment
 * after the trap call.
 */
GLOBAL(fpfault)
	clrl	sp@-		| stack adjust count
	moveml	#0xFFFF,sp@-	| save user registers
	movl	usp,a0		| and save
	movl	a0,sp@(FR_SP)	|   the user stack pointer
	clrl	sp@-		| no VA arg
	movl	_C_LABEL(curpcb),a0	| current pcb
	lea	a0@(PCB_FPCTX),a0 | address of FP savearea
	fsave	a0@		| save state
	tstb	a0@		| null state frame?
	jeq	Lfptnull	| yes, safe
	clrw	d0		| no, need to tweak BIU
	movb	a0@(1),d0	| get frame size
	bset	#3,a0@(0,d0:w)	| set exc_pend bit of BIU
Lfptnull:
	fmovem	fpsr,sp@-	| push fpsr as code argument
	frestore a0@		| restore state
	movl	#T_FPERR,sp@-	| push type arg
	jra	_ASM_LABEL(faultstkadj) | call trap and deal with stack cleanup
338
/*
 * Other exceptions only cause four and six word stack frame and require
 * no post-trap stack adjustment.
 */
GLOBAL(badtrap)
	clrl	sp@-			| stack adjust count
	moveml	#0xFFFF,sp@-		| save std frame regs
	jbsr	_C_LABEL(straytrap)	| report
	moveml	sp@+,#0xFFFF		| restore regs
	addql	#4, sp			| stack adjust count
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trap 0 is for system calls.
 * On entry d0 holds the syscall number (see push below).
 */
GLOBAL(trap0)
	clrl	sp@-			| stack adjust count
	moveml	#0xFFFF,sp@-		| save user registers
	movl	usp,a0			| save the user SP
	movl	a0,sp@(FR_SP)		|   in the savearea
	movl	d0,sp@-			| push syscall number
	jbsr	_C_LABEL(syscall)	| handle it
	addql	#4,sp			| pop syscall arg
	movl	sp@(FR_SP),a0		| grab and restore
	movl	a0,usp			|   user SP
	moveml	sp@+,#0x7FFF		| restore most registers
	addql	#8,sp			| pop SP and stack adjust
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trap 12 is the entry point for the cachectl "syscall"
 *	cachectl(command, addr, length)
 * command in d0, addr in a1, length in d1
 */
GLOBAL(trap12)
	movl	_C_LABEL(curproc),sp@-	| push curproc pointer
	movl	d1,sp@-			| push length
	movl	a1,sp@-			| push addr
	movl	d0,sp@-			| push command
	jbsr	_C_LABEL(cachectl1)	| do it
	lea	sp@(16),sp		| pop args
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trace (single-step) trap.  Kernel-mode is special.
 * User mode traps are simply passed on to trap().
 */
GLOBAL(trace)
	clrl	sp@-			| stack adjust count
	moveml	#0xFFFF,sp@-		| save all registers
	moveq	#T_TRACE,d0		| d0 = trap type for kbrkpt/fault
	btst	#5,sp@(FR_HW)		| was supervisor mode?
	jne	_ASM_LABEL(kbrkpt)	|  yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault

/*
 * Trap 15 is used for:
 *	- GDB breakpoints (in user programs)
 *	- KGDB breakpoints (in the kernel)
 *	- trace traps for SUN binaries (not fully supported yet)
 * User mode traps are simply passed to trap().
 */
GLOBAL(trap15)
	clrl	sp@-			| stack adjust count
	moveml	#0xFFFF,sp@-		| save all registers
	moveq	#T_TRAP15,d0		| d0 = trap type for kbrkpt/fault
	btst	#5,sp@(FR_HW)		| was supervisor mode?
	jne	_ASM_LABEL(kbrkpt)	|  yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault
408
ASLOCAL(kbrkpt)
	| Kernel-mode breakpoint or trace trap. (d0=trap_type)
	| Save the system sp rather than the user sp.
	movw	#PSL_HIGHIPL,sr		| lock out interrupts
	lea	sp@(FR_SIZE),a6		| Save stack pointer
	movl	a6,sp@(FR_SP)		|  from before trap

	| If we are not on tmpstk switch to it.
	| (so debugger can change the stack pointer)
	movl	a6,d1
	cmpl	#_ASM_LABEL(tmpstk),d1
	jls	Lbrkpt2 		| already on tmpstk
	| Copy frame to the temporary stack
	movl	sp,a0			| a0=src
	lea	_ASM_LABEL(tmpstk)-96,a1	| a1=dst
	movl	a1,sp			| sp=new frame
	moveq	#FR_SIZE,d1		| d1 = bytes remaining to copy
Lbrkpt1:
	movl	a0@+,a1@+		| copy one long at a time
	subql	#4,d1
	bgt	Lbrkpt1

Lbrkpt2:
	| Call the trap handler for the kernel debugger.
	| Do not call trap() to handle it, so that we can
	| set breakpoints in trap() if we want.  We know
	| the trap type is either T_TRACE or T_BREAKPOINT.
	movl	d0,sp@-			| push trap type
	jbsr	_C_LABEL(trap_kdebug)
	addql	#4,sp			| pop args

	| The stack pointer may have been modified, or
	| data below it modified (by kgdb push call),
	| so push the hardware frame at the current sp
	| before restoring registers and returning.
	movl	sp@(FR_SP),a0		| modified sp
	lea	sp@(FR_SIZE),a1		| end of our frame
	movl	a1@-,a0@-		| copy 2 longs with
	movl	a1@-,a0@-		| ... predecrement
	movl	a0,sp@(FR_SP)		| sp = h/w frame
	moveml	sp@+,#0x7FFF		| restore all but sp
	movl	sp@,sp			| ... and sp
	rte				| all done
452
453/* Use common m68k sigreturn */
454#include <m68k/m68k/sigreturn.s>
455
/*
 * Interrupt handlers.  Most are auto-vectored,
 * and hard-wired the same way on all sun3 models.
 * Format in the stack is:
 *   d0,d1,a0,a1, sr, pc, vo
 */

| Save/restore only d0,d1,a0,a1 (the registers listed in the stack
| format above) around the C interrupt handlers.
#define INTERRUPT_SAVEREG \
	moveml	#0xC0C0,sp@-

#define INTERRUPT_RESTORE \
	moveml	sp@+,#0x0303

/*
 * This is the common auto-vector interrupt handler,
 * for which the CPU provides the vector=0x18+level.
 * These are installed in the interrupt vector table.
 */
	.align	2
GLOBAL(_isr_autovec)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_autovec)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

/* clock: see clock.c */
	.align	2
GLOBAL(_isr_clock)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(clock_intr)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

| Handler for all vectored interrupts (i.e. VME interrupts)
	.align	2
GLOBAL(_isr_vectored)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_vectored)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

#undef	INTERRUPT_SAVEREG
#undef	INTERRUPT_RESTORE

/* interrupt counters (needed by vmstat) */
GLOBAL(intrnames)
	.asciz	"spur"	| 0
	.asciz	"lev1"	| 1
	.asciz	"lev2"	| 2
	.asciz	"lev3"	| 3
	.asciz	"lev4"	| 4
	.asciz	"clock"	| 5
	.asciz	"lev6"	| 6
	.asciz	"nmi"	| 7
GLOBAL(eintrnames)

	.data
	.even
GLOBAL(intrcnt)
	.long	0,0,0,0,0,0,0,0,0,0
GLOBAL(eintrcnt)
	.text
518
/*
 * Emulation of VAX REI instruction.
 *
 * This code is (mostly) un-altered from the hp300 code,
 * except that sun machines do not need a simulated SIR
 * because they have a real software interrupt register.
 *
 * This code deals with checking for and servicing ASTs
 * (profiling, scheduling) and software interrupts (network, softclock).
 * We check for ASTs first, just like the VAX.  To avoid excess overhead
 * the T_ASTFLT handling code will also check for software interrupts so we
 * do not have to do it here.  After identifying that we need an AST we
 * drop the IPL to allow device interrupts.
 *
 * This code is complicated by the fact that sendsig may have been called
 * necessitating a stack cleanup.
 */

ASGLOBAL(rei)
#ifdef	DIAGNOSTIC
	tstl	_C_LABEL(panicstr)	| have we panicked?
	jne	Ldorte			| yes, do not make matters worse
#endif
	tstl	_C_LABEL(astpending)	| AST pending?
	jeq	Ldorte			| no, done
Lrei1:
	btst	#5,sp@			| yes, are we returning to user mode?
	jne	Ldorte			| no, done
	movw	#PSL_LOWIPL,sr		| lower SPL
	clrl	sp@-			| stack adjust
	moveml	#0xFFFF,sp@-		| save all registers
	movl	usp,a1			| including
	movl	a1,sp@(FR_SP)		|    the users SP
	clrl	sp@-			| VA == none
	clrl	sp@-			| code == none
	movl	#T_ASTFLT,sp@-		| type == async system trap
	jbsr	_C_LABEL(trap)		| go handle it
	lea	sp@(12),sp		| pop value args
	movl	sp@(FR_SP),a0		| restore user SP
	movl	a0,usp			|   from save area
	movw	sp@(FR_ADJ),d0		| need to adjust stack?
	jne	Laststkadj		| yes, go to it
	moveml	sp@+,#0x7FFF		| no, restore most user regs
	addql	#8,sp			| toss SP and stack adjust
	rte				| and do real RTE
Laststkadj:
	lea	sp@(FR_HW),a1		| pointer to HW frame
	addql	#8,a1			| source pointer
	movl	a1,a0			| source
	addw	d0,a0			|  + hole size = dest pointer
	movl	a1@-,a0@-		| copy
	movl	a1@-,a0@-		|  8 bytes
	movl	a0,sp@(FR_SP)		| new SSP
	moveml	sp@+,#0x7FFF		| restore user registers
	movl	sp@,sp			| and our SP
Ldorte:
	rte				| real return
576
577/*
578 * Initialization is at the beginning of this file, because the
579 * kernel entry point needs to be at zero for compatibility with
580 * the Sun boot loader.  This works on Sun machines because the
581 * interrupt vector table for reset is NOT at address zero.
582 * (The MMU has a "boot" bit that forces access to the PROM)
583 */
584
585/*
586 * Use common m68k sigcode.
587 */
588#include <m68k/m68k/sigcode.s>
589
590	.text
591
592/*
593 * Primitives
594 */
595
596/*
597 * Use common m68k support routines.
598 */
599#include <m68k/m68k/support.s>
600
| Nonzero when a reschedule has been requested; cleared by cpu_switch().
BSS(want_resched,4)
602
603/*
604 * Use common m68k process manipulation routines.
605 */
606#include <m68k/m68k/proc_subr.s>
607
| Message for Lbadsw panic
Lsw0:
	.asciz	"cpu_switch"
	.even

	.data
| Pointer to the PCB of the currently running process.
GLOBAL(masterpaddr)		| XXX compatibility (debuggers)
GLOBAL(curpcb)
	.long	0
| Throw-away PCB used by switch_exit() while the old one is being freed.
ASBSS(nullpcb,SIZEOF_PCB)
	.text
619
/*
 * At exit of a process, do a cpu_switch for the last time.
 * Switch to a safe stack and PCB, and select a new process to run.  The
 * old stack and u-area will be freed by the reaper.
 */
ENTRY(switch_exit)
	movl	sp@(4),a0		| struct proc *p
					| save state into garbage pcb
	movl	#_ASM_LABEL(nullpcb),_C_LABEL(curpcb)
	lea	_ASM_LABEL(tmpstk),sp	| goto a tmp stack

	/* Schedule the vmspace and stack to be freed. */
	movl	a0,sp@-			| exit2(p)
	jbsr	_C_LABEL(exit2)

	/* Don't pop the proc; pass it to cpu_switch(). */

	jra	_C_LABEL(cpu_switch)
638
/*
 * When no processes are on the runq, cpu_switch() branches to idle
 * to wait for something to come ready.
 */
	.data
GLOBAL(Idle_count)
	.long	0
	.text

Lidle:
	stop	#PSL_LOWIPL		| wait for an interrupt
GLOBAL(_Idle)				| See clock.c
	movw	#PSL_HIGHIPL,sr		| block interrupts while checking
	addql	#1, _C_LABEL(Idle_count)
	tstl	_C_LABEL(whichqs)	| any process become runnable?
	jeq	Lidle			| no, keep waiting
	movw	#PSL_LOWIPL,sr
	jra	Lsw1			| yes, rescan the run queues

| Run-queue corruption detected: panic("cpu_switch").
Lbadsw:
	movl	#Lsw0,sp@-
	jbsr	_C_LABEL(panic)
	/*NOTREACHED*/
662
/*
 * cpu_switch()
 * Hacked for sun3
 * XXX - Arg 1 is a proc pointer (curproc) but this doesn't use it.
 * XXX - Should we use p->p_addr instead of curpcb? -gwr
 */
ENTRY(cpu_switch)
	movl	_C_LABEL(curpcb),a1	| current pcb
	movw	sr,a1@(PCB_PS)		| save sr before changing ipl
#ifdef notyet
	movl	_C_LABEL(curproc),sp@-	| remember last proc running
#endif
	clrl	_C_LABEL(curproc)

Lsw1:
	/*
	 * Find the highest-priority queue that isn't empty,
	 * then take the first proc from that queue.
	 */
	clrl	d0			| d0 = queue number
	lea	_C_LABEL(whichqs),a0
	movl	a0@,d1			| d1 = run-queue bitmask
Lswchk:
	btst	d0,d1
	jne	Lswfnd
	addqb	#1,d0
	cmpb	#32,d0
	jne	Lswchk
	jra	_C_LABEL(_Idle)		| all queues empty
Lswfnd:
	movw	#PSL_HIGHIPL,sr		| lock out interrupts
	movl	a0@,d1			| and check again...
	bclr	d0,d1
	jeq	Lsw1			| proc moved, rescan
	movl	d1,a0@			| update whichqs
	moveq	#1,d1			| double check for higher priority
	lsll	d0,d1			| process (which may have snuck in
	subql	#1,d1			| while we were finding this one)
	andl	a0@,d1
	jeq	Lswok			| no one got in, continue
	movl	a0@,d1
	bset	d0,d1			| otherwise put this one back
	movl	d1,a0@
	jra	Lsw1			| and rescan
Lswok:
	movl	d0,d1
	lslb	#3,d1			| convert queue number to index
	addl	#_qs,d1			| locate queue (q)
	movl	d1,a1
	cmpl	a1@(P_FORW),a1		| anyone on queue?
	jeq	Lbadsw			| no, panic
	movl	a1@(P_FORW),a0		| p = q->p_forw
	movl	a0@(P_FORW),a1@(P_FORW)	| q->p_forw = p->p_forw
	movl	a0@(P_FORW),a1		| q = p->p_forw
	movl	a0@(P_BACK),a1@(P_BACK)	| q->p_back = p->p_back
	cmpl	a0@(P_FORW),d1		| anyone left on queue?
	jeq	Lsw2			| no, skip
	movl	_C_LABEL(whichqs),d1
	bset	d0,d1			| yes, reset bit
	movl	d1,_C_LABEL(whichqs)
Lsw2:
	movl	a0,_C_LABEL(curproc)
	clrl	_C_LABEL(want_resched)
#ifdef notyet
	movl	sp@+,a1			| XXX - Make this work!
	cmpl	a0,a1			| switching to same proc?
	jeq	Lswdone			| yes, skip save and restore
#endif
	/*
	 * Save state of previous process in its pcb.
	 */
	movl	_C_LABEL(curpcb),a1
	moveml	#0xFCFC,a1@(PCB_REGS)	| save non-scratch registers
	movl	usp,a2			| grab USP (a2 has been saved)
	movl	a2,a1@(PCB_USP)		| and save it

	tstl	_C_LABEL(fputype)	| Do we have an fpu?
	jeq	Lswnofpsave		| No?  Then don't try save.
	lea	a1@(PCB_FPCTX),a2	| pointer to FP save area
	fsave	a2@			| save FP state
	tstb	a2@			| null state frame?
	jeq	Lswnofpsave		| yes, all done
	fmovem	fp0-fp7,a2@(FPF_REGS)		| save FP general regs
	fmovem	fpcr/fpsr/fpi,a2@(FPF_FPCR)	| save FP control regs
Lswnofpsave:

	/*
	 * Now that we have saved all the registers that must be
	 * preserved, we are free to use those registers until
	 * we load the registers for the switched-to process.
	 * In this section, keep:  a0=curproc, a1=curpcb
	 */

#ifdef DIAGNOSTIC
	tstl	a0@(P_WCHAN)
	jne	Lbadsw
	cmpb	#SRUN,a0@(P_STAT)
	jne	Lbadsw
#endif
	clrl	a0@(P_BACK)		| clear back link
	movl	a0@(P_ADDR),a1		| get p_addr
	movl	a1,_C_LABEL(curpcb)

	/*
	 * Load the new VM context (new MMU root pointer)
	 */
	movl	a0@(P_VMSPACE),a2	| vm = p->p_vmspace
#ifdef DIAGNOSTIC
	tstl	a2			| vm == VM_MAP_NULL?
	jeq	Lbadsw			| panic
#endif
#ifdef PMAP_DEBUG
	/* When debugging just call _pmap_switch(). */
	movl	a2@(VM_PMAP),a2 	| pmap = vm->vm_map.pmap
	pea	a2@			| push pmap
	jbsr	_C_LABEL(_pmap_switch)	| _pmap_switch(pmap)
	addql	#4,sp
	movl	_C_LABEL(curpcb),a1	| restore p_addr
#else
	/* Otherwise, use this inline version. */
	lea	_C_LABEL(kernel_crp), a3 | our CPU Root Ptr. (CRP)
	movl	a2@(VM_PMAP),a2 	| pmap = vm->vm_map.pmap
	movl	a2@(PM_A_PHYS),d0	| phys = pmap->pm_a_phys
	cmpl	a3@(4),d0		|  == kernel_crp.rp_addr ?
	jeq	Lsame_mmuctx		| skip loadcrp/flush
	/* OK, it is a new MMU context.  Load it up. */
	movl	d0,a3@(4)
	movl	#CACHE_CLR,d0
	movc	d0,cacr			| invalidate cache(s)
	pflusha				| flush entire TLB
	pmove	a3@,crp			| load new user root pointer
Lsame_mmuctx:
#endif

	/*
	 * Reload the registers for the new process.
	 * After this point we can only use d0,d1,a0,a1
	 */
	moveml	a1@(PCB_REGS),#0xFCFC	| reload registers
	movl	a1@(PCB_USP),a0
	movl	a0,usp			| and USP

	tstl	_C_LABEL(fputype)	| If we don't have an fpu,
	jeq	Lres_skip		|  don't try to restore it.
	lea	a1@(PCB_FPCTX),a0	| pointer to FP save area
	tstb	a0@			| null state frame?
	jeq	Lresfprest		| yes, easy
	fmovem	a0@(FPF_FPCR),fpcr/fpsr/fpi	| restore FP control regs
	fmovem	a0@(FPF_REGS),fp0-fp7		| restore FP general regs
Lresfprest:
	frestore a0@			| restore state
Lres_skip:
	movw	a1@(PCB_PS),d0		| no, restore PS
#ifdef DIAGNOSTIC
	btst	#13,d0			| supervisor mode?
	jeq	Lbadsw			| no? panic!
#endif
	movw	d0,sr			| OK, restore PS
	moveq	#1,d0			| return 1 (for alternate returns)
	rts
823
/*
 * savectx(pcb)
 * Update pcb, saving current processor state.
 * Saves SR, USP, the non-scratch registers, and (if an FPU is
 * present and active) the FP state, into the given pcb.
 */
ENTRY(savectx)
	movl	sp@(4),a1		| a1 = arg1: struct pcb *
	movw	sr,a1@(PCB_PS)
	movl	usp,a0			| grab USP
	movl	a0,a1@(PCB_USP)		| and save it
	moveml	#0xFCFC,a1@(PCB_REGS)	| save non-scratch registers

	tstl	_C_LABEL(fputype)	| Do we have FPU?
	jeq	Lsavedone		| No?  Then don't save state.
	lea	a1@(PCB_FPCTX),a0	| pointer to FP save area
	fsave	a0@			| save FP state
	tstb	a0@			| null state frame?
	jeq	Lsavedone		| yes, all done
	fmovem	fp0-fp7,a0@(FPF_REGS)		| save FP general regs
	fmovem	fpcr/fpsr/fpi,a0@(FPF_FPCR)	| save FP control regs
Lsavedone:
	moveq	#0,d0			| return 0
	rts
846
/* suline() */

#ifdef DEBUG
	.data
| When nonzero, TBIS/TBIAS/TBIAU below fall back to flushing
| the entire TLB (conservative debugging mode).
ASGLOBAL(fulltflush)
	.long	0
ASGLOBAL(fullcflush)
	.long	0
	.text
#endif
857
/*
 * Invalidate entire TLB.
 */
ENTRY(TBIA)
_C_LABEL(_TBIA):
	pflusha				| flush all TLB entries
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip d-cache
	rts

/*
 * Invalidate any TLB entry for given VA (TB Invalidate Single)
 */
ENTRY(TBIS)
#ifdef DEBUG
	tstl	_ASM_LABEL(fulltflush)	| being conservative?
	jne	_C_LABEL(_TBIA)		| yes, flush entire TLB
#endif
	movl	sp@(4),a0		| arg1: VA to invalidate
	pflush	#0,#0,a0@		| flush address from both sides
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip data cache
	rts

/*
 * Invalidate supervisor side of TLB
 */
ENTRY(TBIAS)
#ifdef DEBUG
	tstl	_ASM_LABEL(fulltflush)	| being conservative?
	jne	_C_LABEL(_TBIA)		| yes, flush everything
#endif
	pflush	#4,#4			| flush supervisor TLB entries
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip d-cache
	rts

/*
 * Invalidate user side of TLB
 */
ENTRY(TBIAU)
#ifdef DEBUG
	tstl	_ASM_LABEL(fulltflush)	| being conservative?
	jne	_C_LABEL(_TBIA)		| yes, flush everything
#endif
	pflush	#0,#4			| flush user TLB entries
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip d-cache
	rts

/*
 * Invalidate instruction cache
 */
ENTRY(ICIA)
	movl	#IC_CLEAR,d0
	movc	d0,cacr			| invalidate i-cache
	rts

/*
 * Invalidate data cache.
 * NOTE: we do not flush 68030 on-chip cache as there are no aliasing
 * problems with DC_WA.  The only cases we have to worry about are context
 * switch and TLB changes, both of which are handled "in-line" in resume
 * and TBI*.
 */
ENTRY(DCIA)
__DCIA:
	rts				| no-op (see NOTE above)

ENTRY(DCIS)
__DCIS:
	rts				| no-op (see NOTE above)

/*
 * Invalidate data cache.
 */
ENTRY(DCIU)
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip d-cache
	rts

/* ICPL, ICPP, DCPL, DCPP, DCPA, DCFL, DCFP */

ENTRY(PCIA)
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip d-cache
	rts

| External-cache enable/disable: no-op stubs here.
ENTRY(ecacheon)
	rts

ENTRY(ecacheoff)
	rts
951
/*
 * Get callers current SP value.
 * Note that simply taking the address of a local variable in a C function
 * doesn't work because callee saved registers may be outside the stack frame
 * defined by A6 (e.g. GCC generated code).
 *
 * [I don't think the ENTRY() macro will do the right thing with this -- glass]
 */
GLOBAL(getsp)
	movl	sp,d0			| get current SP
	addql	#4,d0			| compensate for return address
	rts

| Return the source function code register.
ENTRY(getsfc)
	movc	sfc,d0
	rts

| Return the destination function code register.
ENTRY(getdfc)
	movc	dfc,d0
	rts

| Return the vector base register.
ENTRY(getvbr)
	movc vbr, d0
	rts

| Set the vector base register to arg1.
ENTRY(setvbr)
	movl sp@(4), d0
	movc d0, vbr
	rts

/*
 * Load a new CPU Root Pointer (CRP) into the MMU.
 *	void	loadcrp(struct mmu_rootptr *);
 */
ENTRY(loadcrp)
	movl	sp@(4),a0		| arg1: &CRP
	movl	#CACHE_CLR,d0
	movc	d0,cacr			| invalidate cache(s)
	pflusha				| flush entire TLB
	pmove	a0@,crp			| load new user root pointer
	rts

/*
 * Get the physical address of the PTE for a given VA.
 */
ENTRY(ptest_addr)
	movl	sp@(4),a0		| arg1: VA
	ptestr	#5,a0@,#7,a1		| a1 = addr of PTE
	movl	a1,d0			| return it in d0
	rts
1002
/*
 * Set processor priority level calls.  Most are implemented with
 * inline asm expansions.  However, we need one instantiation here
 * in case some non-optimized code makes external references.
 * Most places will use the inlined functions param.h supplies.
 */

| Return the current status register (priority level) in d0.
ENTRY(_getsr)
	clrl	d0
	movw	sr,d0
	rts

| Set SR to arg1; return the previous SR in d0.
ENTRY(_spl)
	clrl	d0
	movw	sr,d0
	movl	sp@(4),d1
	movw	d1,sr
	rts

| Like _spl(), but never lowers the current interrupt priority.
ENTRY(_splraise)
	clrl	d0
	movw	sr,d0
	movl	d0,d1
	andl	#PSL_HIGHIPL,d1 	| old &= PSL_HIGHIPL
	cmpl	sp@(4),d1		| (old - new)
	bge	Lsplr			| already at or above requested level
	movl	sp@(4),d1
	movw	d1,sr
Lsplr:
	rts
1033
/*
 * Save and restore 68881 state.
 * A null fsave state frame (first byte zero) means the FPU
 * holds no state worth saving/restoring.
 */
ENTRY(m68881_save)
	movl	sp@(4),a0		| save area pointer
	fsave	a0@			| save state
	tstb	a0@			| null state frame?
	jeq	Lm68881sdone		| yes, all done
	fmovem fp0-fp7,a0@(FPF_REGS)		| save FP general regs
	fmovem fpcr/fpsr/fpi,a0@(FPF_FPCR)	| save FP control regs
Lm68881sdone:
	rts

ENTRY(m68881_restore)
	movl	sp@(4),a0		| save area pointer
	tstb	a0@			| null state frame?
	jeq	Lm68881rdone		| yes, easy
	fmovem	a0@(FPF_FPCR),fpcr/fpsr/fpi	| restore FP control regs
	fmovem	a0@(FPF_REGS),fp0-fp7		| restore FP general regs
Lm68881rdone:
	frestore a0@			| restore state
	rts
1056
/*
 * _delay(unsigned N)
 * Delay for at least (N/256) microseconds.
 * This routine depends on the variable:  delay_divisor
 * which should be set based on the CPU clock rate.
 * XXX: Currently this is set based on the CPU model,
 * XXX: but this should be determined at run time...
 *
 * NOTE: the loop below is timing-critical; do not change
 * the instruction sequence.
 */
GLOBAL(_delay)
	| d0 = arg = (usecs << 8)
	movl	sp@(4),d0
	| d1 = delay_divisor;
	movl	_C_LABEL(delay_divisor),d1
L_delay:
	subl	d1,d0
	jgt	L_delay
	rts
1074
1075
| Define some addresses, mostly so DDB can print useful info.
| Not using _C_LABEL() here because these symbols are never
| referenced by any C code, and if the leading underscore
| ever goes away, these lines turn into syntax errors...
| (.set creates absolute assembler symbols in the symbol table.)
	.set	_KERNBASE,KERNBASE
	.set	_MONSTART,SUN3X_MONSTART
	.set	_PROM_BASE,SUN3X_PROM_BASE
	.set	_MONEND,SUN3X_MONEND
1084
1085|The end!
1086