locore.s revision 1.8
1/*	$NetBSD: locore.s,v 1.8 1997/02/12 23:06:27 gwr Exp $	*/
2
3/*
4 * Copyright (c) 1988 University of Utah.
5 * Copyright (c) 1980, 1990, 1993
6 *	The Regents of the University of California.  All rights reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * the Systems Programming Group of the University of Utah Computer
10 * Science Department.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 *    notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 *    notice, this list of conditions and the following disclaimer in the
19 *    documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 *    must display the following acknowledgement:
22 *	This product includes software developed by the University of
23 *	California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
25 *    may be used to endorse or promote products derived from this software
26 *    without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 *	from: Utah $Hdr: locore.s 1.66 92/12/22$
41 *	@(#)locore.s	8.6 (Berkeley) 5/27/94
42 */
43
44#include "assym.h"
45#include <machine/trap.h>
46
47| Remember this is a fun project!
48
49	.data
50	.globl	_mon_crp
51_mon_crp:
52	.long	0,0
53
54| This is for kvm_mkdb, and should be the address of the beginning
55| of the kernel text segment (not necessarily the same as kernbase).
56	.text
57	.globl	_kernel_text
58_kernel_text:
59
60| This is the entry point, as well as the end of the temporary stack
61| used during process switch (one 8K page ending at start)
62	.globl tmpstk
63tmpstk:
64	.globl start
65start:
66| The first step, after disabling interrupts, is to map enough of the kernel
67| into high virtual address space so that we can use position dependent code.
68| This is a tricky task on the sun3x because the MMU is already enabled and
69| the ROM monitor provides no indication of where the root MMU table is mapped.
70| Therefore we must use one of the 68030's 'transparent translation' registers
71| to define a range in the address space where the MMU translation is
72| turned off.  Once this is complete we can modify the MMU table directly
73| without the need for it to be mapped into virtual memory.
74| All code must be position independent until otherwise noted, as the
75| boot loader has loaded us into low memory but all the symbols in this
76| code have been linked high.
77	movw	#PSL_HIGHIPL, sr	| no interrupts
78	movl	#KERNBASE, a5		| for vtop conversion
79	lea	_mon_crp, a0		| where to store the CRP
80	subl	a5, a0
81	| Note: borrowing mon_crp for tt0 setup...
82	movl	#0x3F8107, a0@		| map the low 1GB v=p with the
83	pmove	a0@, tt0		| transparent translation reg0
84
85| In order to map the kernel into high memory we will copy the root table
86| entry which maps the 16 megabytes of memory starting at 0x0 into the
87| entry which maps the 16 megabytes starting at KERNBASE.
88	pmove	crp, a0@		| Get monitor CPU root pointer
89	movl	a0@(4), a1		| 2nd word is PA of level A table
90
91	movl	a1, a0			| compute the descriptor address
92	addl	#0x3e0, a1		| for VA starting at KERNBASE
93	movl	a0@, a1@		| copy descriptor type
94	movl	a0@(4), a1@(4)		| copy physical address
95
96| Kernel is now double mapped at zero and KERNBASE.
97| Force a long jump to the relocated code (high VA).
98	movl	#IC_CLEAR, d0		| Flush the I-cache
99	movc	d0, cacr
100	jmp L_high_code:l		| long jump
101
102L_high_code:
103| We are now running in the correctly relocated kernel, so
104| we are no longer restricted to position-independent code.
105| It is handy to leave transparent translation enabled while
106| for the low 1GB while __bootstrap() is doing its thing.
107
108| Do bootstrap stuff needed before main() gets called.
109| Our boot loader leaves a copy of the kernel's exec header
110| just before the start of the kernel text segment, so the
111| kernel can sanity-check the DDB symbols at [end...esym].
112| Pass the struct exec at tmpstk-32 to __bootstrap().
113| Also, make sure the initial frame pointer is zero so that
114| the backtrace algorithm used by KGDB terminates nicely.
115	lea	tmpstk-32, sp
116	movl	#0,a6
117	jsr	__bootstrap		| See _startup.c
118
119| Now turn off the transparent translation of the low 1GB.
120| (this also flushes the ATC)
121	clrl	sp@-
122	pmove	sp@,tt0
123	addql	#4,sp
124
125| Now that __bootstrap() is done using the PROM functions,
126| we can safely set the sfc/dfc to something != FC_CONTROL
127	moveq	#FC_USERD, d0		| make movs access "user data"
128	movc	d0, sfc			| space for copyin/copyout
129	movc	d0, dfc
130
131| Setup process zero user/kernel stacks.
132	movl	_proc0paddr,a1		| get proc0 pcb addr
133	lea	a1@(USPACE-4),sp	| set SSP to last word
134	movl	#USRSTACK-4,a2
135	movl	a2,usp			| init user SP
136
137| Note curpcb was already set in __bootstrap().
138| Will do fpu initialization during autoconfig (see fpu.c)
139| The interrupt vector table and stack are now ready.
140| Interrupts will be enabled later, AFTER  autoconfiguration
141| is finished, to avoid spurrious interrupts.
142
143/*
144 * Final preparation for calling main.
145 *
146 * Create a fake exception frame that returns to user mode,
147 * and save its address in p->p_md.md_regs for cpu_fork().
148 * The new frames for process 1 and 2 will be adjusted by
149 * cpu_set_kpc() to arrange for a call to a kernel function
150 * before the new process does its rte out to user mode.
151 */
152	clrw	sp@-			| tf_format,tf_vector
153	clrl	sp@-			| tf_pc (filled in later)
154	movw	#PSL_USER,sp@-		| tf_sr for user mode
155	clrl	sp@-			| tf_stackadj
156	lea	sp@(-64),sp		| tf_regs[16]
157	movl	sp,a1			| a1=trapframe
158	lea	_proc0,a0		| proc0.p_md.md_regs =
159	movl	a1,a0@(P_MDREGS)	|   trapframe
160	movl	a2,a1@(FR_SP)		| a2 == usp (from above)
161	pea	a1@			| push &trapframe
162	jbsr	_main			| main(&trapframe)
163	addql	#4,sp			| help DDB backtrace
164	trap	#15			| should not get here
165
166| This is used by cpu_fork() to return to user mode.
167| It is called with SP pointing to a struct trapframe.
168	.globl	_proc_do_uret
169_proc_do_uret:
170	movl	sp@(FR_SP),a0		| grab and load
171	movl	a0,usp			|   user SP
172	moveml	sp@+,#0x7FFF		| load most registers (all but SSP)
173	addql	#8,sp			| pop SSP and stack adjust count
174	rte
175
176/*
177 * proc_trampoline:
178 * This is used by cpu_set_kpc() to "push" a function call onto the
179 * kernel stack of some process, very much like a signal delivery.
180 * When we get here, the stack has:
181 *
182 * SP+8:	switchframe from before cpu_set_kpc
183 * SP+4:	void *proc;
184 * SP:  	u_long func;
185 *
186 * On entry, the switchframe pushed by cpu_set_kpc has already been
187 * popped off the stack, so all this needs to do is pop the function
188 * pointer into a register, call it, then pop the arg, and finally
189 * return using the switchframe that remains on the stack.
190 */
191	.globl	_proc_trampoline
192_proc_trampoline:
193	movl	sp@+,a0			| function pointer
194	jbsr	a0@			| (*func)(procp)
195	addql	#4,sp			| toss the arg
196	rts				| as cpu_switch would do
197
198| That is all the assembly startup code we need on the sun3x!
199| The rest of this is like the hp300/locore.s where possible.
200
201/*
202 * Trap/interrupt vector routines
203 */
204
205	.globl _buserr, _addrerr, _illinst, _zerodiv, _chkinst
206	.globl _trapvinst, _privinst, _trace, _badtrap, _fmterr
207	.globl _trap0, _trap1, _trap2, _trap12, _trap15
208	.globl _coperr, _fpfline, _fpunsupp
209
210	.globl	_trap, _nofault, _longjmp
211_buserr:
212	tstl	_nofault		| device probe?
213	jeq	_addrerr		| no, handle as usual
214	movl	_nofault,sp@-		| yes,
215	jbsr	_longjmp		|  longjmp(nofault)
216_addrerr:
217	clrl	sp@-			| stack adjust count
218	moveml	#0xFFFF,sp@-		| save user registers
219	movl	usp,a0			| save the user SP
220	movl	a0,sp@(FR_SP)		|   in the savearea
221	lea	sp@(FR_HW),a1		| grab base of HW berr frame
222	moveq	#0,d0
223	movw	a1@(10),d0		| grab SSW for fault processing
224	btst	#12,d0			| RB set?
225	jeq	LbeX0			| no, test RC
226	bset	#14,d0			| yes, must set FB
227	movw	d0,a1@(10)		| for hardware too
228LbeX0:
229	btst	#13,d0			| RC set?
230	jeq	LbeX1			| no, skip
231	bset	#15,d0			| yes, must set FC
232	movw	d0,a1@(10)		| for hardware too
233LbeX1:
234	btst	#8,d0			| data fault?
235	jeq	Lbe0			| no, check for hard cases
236	movl	a1@(16),d1		| fault address is as given in frame
237	jra	Lbe10			| thats it
238Lbe0:
239	btst	#4,a1@(6)		| long (type B) stack frame?
240	jne	Lbe4			| yes, go handle
241	movl	a1@(2),d1		| no, can use save PC
242	btst	#14,d0			| FB set?
243	jeq	Lbe3			| no, try FC
244	addql	#4,d1			| yes, adjust address
245	jra	Lbe10			| done
246Lbe3:
247	btst	#15,d0			| FC set?
248	jeq	Lbe10			| no, done
249	addql	#2,d1			| yes, adjust address
250	jra	Lbe10			| done
251Lbe4:
252	movl	a1@(36),d1		| long format, use stage B address
253	btst	#15,d0			| FC set?
254	jeq	Lbe10			| no, all done
255	subql	#2,d1			| yes, adjust address
256Lbe10:
257	movl	d1,sp@-			| push fault VA
258	movl	d0,sp@-			| and padded SSW
259	movw	a1@(6),d0		| get frame format/vector offset
260	andw	#0x0FFF,d0		| clear out frame format
261	cmpw	#12,d0			| address error vector?
262	jeq	Lisaerr			| yes, go to it
263
264/* MMU-specific code to determine reason for bus error. */
265	movl	d1,a0			| fault address
266	movl	sp@,d0			| function code from ssw
267	btst	#8,d0			| data fault?
268	jne	Lbe10a
269	movql	#1,d0			| user program access FC
270					| (we dont separate data/program)
271	btst	#5,a1@			| supervisor mode?
272	jeq	Lbe10a			| if no, done
273	movql	#5,d0			| else supervisor program access
274Lbe10a:
275	ptestr	d0,a0@,#7		| do a table search
276	pmove	psr,sp@			| save result
277	movb	sp@,d1
278	btst	#2,d1			| invalid? (incl. limit viol and berr)
279	jeq	Lmightnotbemerr		| no -> wp check
280	btst	#7,d1			| is it MMU table berr?
281	jeq	Lismerr			| no, must be fast
282	jra	Lisberr1		| real bus err needs not be fast
283Lmightnotbemerr:
284	btst	#3,d1			| write protect bit set?
285	jeq	Lisberr1		| no, must be bus error
286	movl	sp@,d0			| ssw into low word of d0
287	andw	#0xc0,d0		| write protect is set on page:
288	cmpw	#0x40,d0		| was it read cycle?
289	jeq	Lisberr1		| yes, was not WPE, must be bus err
290/* End of MMU-specific bus error code. */
291
292Lismerr:
293	movl	#T_MMUFLT,sp@-		| show that we are an MMU fault
294	jra	Ltrapnstkadj		| and deal with it
295Lisaerr:
296	movl	#T_ADDRERR,sp@-		| mark address error
297	jra	Ltrapnstkadj		| and deal with it
298Lisberr1:
299	clrw	sp@			| re-clear pad word
300Lisberr:
301	movl	#T_BUSERR,sp@-		| mark bus error
302Ltrapnstkadj:
303	jbsr	_trap			| handle the error
304	lea	sp@(12),sp		| pop value args
305	movl	sp@(FR_SP),a0		| restore user SP
306	movl	a0,usp			|   from save area
307	movw	sp@(FR_ADJ),d0		| need to adjust stack?
308	jne	Lstkadj			| yes, go to it
309	moveml	sp@+,#0x7FFF		| no, restore most user regs
310	addql	#8,sp			| toss SSP and stkadj
311	jra	rei			| all done
312Lstkadj:
313	lea	sp@(FR_HW),a1		| pointer to HW frame
314	addql	#8,a1			| source pointer
315	movl	a1,a0			| source
316	addw	d0,a0			|  + hole size = dest pointer
317	movl	a1@-,a0@-		| copy
318	movl	a1@-,a0@-		|  8 bytes
319	movl	a0,sp@(FR_SP)		| new SSP
320	moveml	sp@+,#0x7FFF		| restore user registers
321	movl	sp@,sp			| and our SP
322	jra	rei			| all done
323
324/*
325 * FP exceptions.
326 */
327_fpfline:
328	clrl	sp@-			| stack adjust count
329	moveml	#0xFFFF,sp@-		| save registers
330	moveq	#T_FPEMULI,d0		| denote as FP emulation trap
331	jra	fault			| do it
332
333_fpunsupp:
334	clrl	sp@-			| stack adjust count
335	moveml	#0xFFFF,sp@-		| save registers
336	moveq	#T_FPEMULD,d0		| denote as FP emulation trap
337	jra	fault			| do it
338
339/*
340 * Handles all other FP coprocessor exceptions.
341 * Note that since some FP exceptions generate mid-instruction frames
342 * and may cause signal delivery, we need to test for stack adjustment
343 * after the trap call.
344 */
345	.globl	_fpfault
346_fpfault:
347	clrl	sp@-		| stack adjust count
348	moveml	#0xFFFF,sp@-	| save user registers
349	movl	usp,a0		| and save
350	movl	a0,sp@(FR_SP)	|   the user stack pointer
351	clrl	sp@-		| no VA arg
352	movl	_curpcb,a0	| current pcb
353	lea	a0@(PCB_FPCTX),a0 | address of FP savearea
354	fsave	a0@		| save state
355	tstb	a0@		| null state frame?
356	jeq	Lfptnull	| yes, safe
357	clrw	d0		| no, need to tweak BIU
358	movb	a0@(1),d0	| get frame size
359	bset	#3,a0@(0,d0:w)	| set exc_pend bit of BIU
360Lfptnull:
361	fmovem	fpsr,sp@-	| push fpsr as code argument
362	frestore a0@		| restore state
363	movl	#T_FPERR,sp@-	| push type arg
364	jra	Ltrapnstkadj	| call trap and deal with stack cleanup
365
366/*
367 * Coprocessor and format errors can generate mid-instruction stack
368 * frames and cause signal delivery hence we need to check for potential
369 * stack adjustment.
370 */
371_coperr:
372	clrl	sp@-		| stack adjust count
373	moveml	#0xFFFF,sp@-
374	movl	usp,a0		| get and save
375	movl	a0,sp@(FR_SP)	|   the user stack pointer
376	clrl	sp@-		| no VA arg
377	clrl	sp@-		| or code arg
378	movl	#T_COPERR,sp@-	| push trap type
379	jra	Ltrapnstkadj	| call trap and deal with stack adjustments
380
381_fmterr:
382	clrl	sp@-		| stack adjust count
383	moveml	#0xFFFF,sp@-
384	movl	usp,a0		| get and save
385	movl	a0,sp@(FR_SP)	|   the user stack pointer
386	clrl	sp@-		| no VA arg
387	clrl	sp@-		| or code arg
388	movl	#T_FMTERR,sp@-	| push trap type
389	jra	Ltrapnstkadj	| call trap and deal with stack adjustments
390
391/*
392 * Other exceptions only cause four and six word stack frame and require
393 * no post-trap stack adjustment.
394 */
395_illinst:
396	clrl	sp@-
397	moveml	#0xFFFF,sp@-
398	moveq	#T_ILLINST,d0
399	jra	fault
400
401_zerodiv:
402	clrl	sp@-
403	moveml	#0xFFFF,sp@-
404	moveq	#T_ZERODIV,d0
405	jra	fault
406
407_chkinst:
408	clrl	sp@-
409	moveml	#0xFFFF,sp@-
410	moveq	#T_CHKINST,d0
411	jra	fault
412
413_trapvinst:
414	clrl	sp@-
415	moveml	#0xFFFF,sp@-
416	moveq	#T_TRAPVINST,d0
417	jra	fault
418
419_privinst:
420	clrl	sp@-
421	moveml	#0xFFFF,sp@-
422	moveq	#T_PRIVINST,d0
423	jra	fault
424
425	.globl	fault
426fault:
427	movl	usp,a0			| get and save
428	movl	a0,sp@(FR_SP)		|   the user stack pointer
429	clrl	sp@-			| no VA arg
430	clrl	sp@-			| or code arg
431	movl	d0,sp@-			| push trap type
432	jbsr	_trap			| handle trap
433	lea	sp@(12),sp		| pop value args
434	movl	sp@(FR_SP),a0		| restore
435	movl	a0,usp			|   user SP
436	moveml	sp@+,#0x7FFF		| restore most user regs
437	addql	#8,sp			| pop SP and stack adjust
438	jra	rei			| all done
439
440	.globl	_straytrap
441_badtrap:
442	clrl	sp@-			| stack adjust count
443	moveml	#0xFFFF,sp@-		| save std frame regs
444	jbsr	_straytrap		| report
445	moveml	sp@+,#0xFFFF		| restore regs
446	addql	#4, sp			| stack adjust count
447	jra	rei			| all done
448
449/*
450 * Trap 0 is for system calls
451 */
452	.globl	_syscall
453_trap0:
454	clrl	sp@-			| stack adjust count
455	moveml	#0xFFFF,sp@-		| save user registers
456	movl	usp,a0			| save the user SP
457	movl	a0,sp@(FR_SP)		|   in the savearea
458	movl	d0,sp@-			| push syscall number
459	jbsr	_syscall		| handle it
460	addql	#4,sp			| pop syscall arg
461	movl	sp@(FR_SP),a0		| grab and restore
462	movl	a0,usp			|   user SP
463	moveml	sp@+,#0x7FFF		| restore most registers
464	addql	#8,sp			| pop SP and stack adjust
465	jra	rei			| all done
466
467/*
468 * Trap 1 is either:
469 * sigreturn (native NetBSD executable)
470 * breakpoint (HPUX executable)
471 */
472_trap1:
473#if 0 /* COMPAT_HPUX */
474	/* If process is HPUX, this is a user breakpoint. */
475	jne	trap15			| breakpoint
476#endif
477	/* fall into sigreturn */
478
479/*
480 * The sigreturn() syscall comes here.  It requires special handling
481 * because we must open a hole in the stack to fill in the (possibly much
482 * larger) original stack frame.
483 */
484sigreturn:
485	lea	sp@(-84),sp		| leave enough space for largest frame
486	movl	sp@(84),sp@		| move up current 8 byte frame
487	movl	sp@(88),sp@(4)
488	movl	#84,sp@-		| default: adjust by 84 bytes
489	moveml	#0xFFFF,sp@-		| save user registers
490	movl	usp,a0			| save the user SP
491	movl	a0,sp@(FR_SP)		|   in the savearea
492	movl	#SYS_sigreturn,sp@-	| push syscall number
493	jbsr	_syscall		| handle it
494	addql	#4,sp			| pop syscall#
495	movl	sp@(FR_SP),a0		| grab and restore
496	movl	a0,usp			|   user SP
497	lea	sp@(FR_HW),a1		| pointer to HW frame
498	movw	sp@(FR_ADJ),d0		| do we need to adjust the stack?
499	jeq	Lsigr1			| no, just continue
500	moveq	#92,d1			| total size
501	subw	d0,d1			|  - hole size = frame size
502	lea	a1@(92),a0		| destination
503	addw	d1,a1			| source
504	lsrw	#1,d1			| convert to word count
505	subqw	#1,d1			| minus 1 for dbf
506Lsigrlp:
507	movw	a1@-,a0@-		| copy a word
508	dbf	d1,Lsigrlp		| continue
509	movl	a0,a1			| new HW frame base
510Lsigr1:
511	movl	a1,sp@(FR_SP)		| new SP value
512	moveml	sp@+,#0x7FFF		| restore user registers
513	movl	sp@,sp			| and our SP
514	jra	rei			| all done
515
516/*
517 * Trap 2 is one of:
518 * NetBSD: not used (ignore)
519 * SunOS:  Some obscure FPU operation
520 * HPUX:   sigreturn
521 */
522_trap2:
523#if 0 /* COMPAT_HPUX */
524	/* XXX:	If HPUX, this is a user breakpoint. */
525	jne	sigreturn
526#endif
527	/* fall into trace (NetBSD or SunOS) */
528
529/*
530 * Trace (single-step) trap.  Kernel-mode is special.
531 * User mode traps are simply passed on to trap().
532 */
533_trace:
534	clrl	sp@-			| stack adjust count
535	moveml	#0xFFFF,sp@-
536	moveq	#T_TRACE,d0
537	movw	sp@(FR_HW),d1		| get PSW
538	andw	#PSL_S,d1		| from system mode?
539	jne	kbrkpt			| yes, kernel breakpoint
540	jra	fault			| no, user-mode fault
541
542/*
543 * Trap 15 is used for:
544 *	- GDB breakpoints (in user programs)
545 *	- KGDB breakpoints (in the kernel)
546 *	- trace traps for SUN binaries (not fully supported yet)
547 * User mode traps are passed simply passed to trap()
548 */
549_trap15:
550	clrl	sp@-			| stack adjust count
551	moveml	#0xFFFF,sp@-
552	moveq	#T_TRAP15,d0
553	movw	sp@(FR_HW),d1		| get PSW
554	andw	#PSL_S,d1		| from system mode?
555	jne	kbrkpt			| yes, kernel breakpoint
556	jra	fault			| no, user-mode fault
557
558kbrkpt:	| Kernel-mode breakpoint or trace trap. (d0=trap_type)
559	| Save the system sp rather than the user sp.
560	movw	#PSL_HIGHIPL,sr		| lock out interrupts
561	lea	sp@(FR_SIZE),a6		| Save stack pointer
562	movl	a6,sp@(FR_SP)		|  from before trap
563
564	| If we are not on tmpstk switch to it.
565	| (so debugger can change the stack pointer)
566	movl	a6,d1
567	cmpl	#tmpstk,d1
568	jls	Lbrkpt2 		| already on tmpstk
569	| Copy frame to the temporary stack
570	movl	sp,a0			| a0=src
571	lea	tmpstk-96,a1		| a1=dst
572	movl	a1,sp			| sp=new frame
573	moveq	#FR_SIZE,d1
574Lbrkpt1:
575	movl	a0@+,a1@+
576	subql	#4,d1
577	bgt	Lbrkpt1
578
579Lbrkpt2:
580	| Call the special kernel debugger trap handler.
581	| Do not call trap() to handle it, so that we can
582	| set breakpoints in trap() if we want.  We know
583	| the trap type is either T_TRACE or T_BREAKPOINT.
584	movl	d0,sp@-			| push trap type
585	jbsr	_trap_kdebug
586	addql	#4,sp			| pop args
587
588	| The stack pointer may have been modified, or
589	| data below it modified (by kgdb push call),
590	| so push the hardware frame at the current sp
591	| before restoring registers and returning.
592	movl	sp@(FR_SP),a0		| modified sp
593	lea	sp@(FR_SIZE),a1		| end of our frame
594	movl	a1@-,a0@-		| copy 2 longs with
595	movl	a1@-,a0@-		| ... predecrement
596	movl	a0,sp@(FR_SP)		| sp = h/w frame
597	moveml	sp@+,#0x7FFF		| restore all but sp
598	movl	sp@,sp			| ... and sp
599	rte				| all done
600
601/*
602 * Trap 12 is the entry point for the cachectl "syscall"
603 *	cachectl(command, addr, length)
604 * command in d0, addr in a1, length in d1
605 */
606	.globl	_cachectl
607_trap12:
608	movl	d1,sp@-			| push length
609	movl	a1,sp@-			| push addr
610	movl	d0,sp@-			| push command
611	jbsr	_cachectl		| do it
612	lea	sp@(12),sp		| pop args
613	jra	rei			| all done
614
615/*
616 * Interrupt handlers.  Most are auto-vectored,
617 * and hard-wired the same way on all sun3 models.
618 * Format in the stack is:
619 *   d0,d1,a0,a1, sr, pc, vo
620 */
621
622#define INTERRUPT_SAVEREG \
623	moveml	#0xC0C0,sp@-
624
625#define INTERRUPT_RESTORE \
626	moveml	sp@+,#0x0303
627
628/*
629 * This is the common auto-vector interrupt handler,
630 * for which the CPU provides the vector=0x18+level.
631 * These are installed in the interrupt vector table.
632 */
633	.align	2
634	.globl	__isr_autovec, _isr_autovec
635__isr_autovec:
636	INTERRUPT_SAVEREG
637	jbsr	_isr_autovec
638	INTERRUPT_RESTORE
639	jra	rei
640
641/* clock: see clock.c */
642	.align	2
643	.globl	__isr_clock, _clock_intr
644__isr_clock:
645	INTERRUPT_SAVEREG
646	jbsr	_clock_intr
647	INTERRUPT_RESTORE
648	jra	rei
649
650| Handler for all vectored interrupts (i.e. VME interrupts)
651	.align	2
652	.globl	__isr_vectored, _isr_vectored
653__isr_vectored:
654	INTERRUPT_SAVEREG
655	jbsr	_isr_vectored
656	INTERRUPT_RESTORE
657	jra	rei
658
659#undef	INTERRUPT_SAVEREG
660#undef	INTERRUPT_RESTORE
661
662/* interrupt counters (needed by vmstat) */
663	.globl	_intrcnt,_eintrcnt,_intrnames,_eintrnames
664_intrnames:
665	.asciz	"spur"	| 0
666	.asciz	"lev1"	| 1
667	.asciz	"lev2"	| 2
668	.asciz	"lev3"	| 3
669	.asciz	"lev4"	| 4
670	.asciz	"clock"	| 5
671	.asciz	"lev6"	| 6
672	.asciz	"nmi"	| 7
673_eintrnames:
674
675	.data
676	.even
677_intrcnt:
678	.long	0,0,0,0,0,0,0,0,0,0
679_eintrcnt:
680	.text
681
682/*
683 * Emulation of VAX REI instruction.
684 *
685 * This code is (mostly) un-altered from the hp300 code,
686 * except that sun machines do not need a simulated SIR
687 * because they have a real software interrupt register.
688 *
689 * This code deals with checking for and servicing ASTs
690 * (profiling, scheduling) and software interrupts (network, softclock).
691 * We check for ASTs first, just like the VAX.  To avoid excess overhead
692 * the T_ASTFLT handling code will also check for software interrupts so we
693 * do not have to do it here.  After identifying that we need an AST we
694 * drop the IPL to allow device interrupts.
695 *
696 * This code is complicated by the fact that sendsig may have been called
697 * necessitating a stack cleanup.
698 */
699
700	.globl	_astpending
701	.globl	rei
702rei:
703#ifdef	DIAGNOSTIC
704	tstl	_panicstr		| have we paniced?
705	jne	Ldorte			| yes, do not make matters worse
706#endif
707	tstl	_astpending		| AST pending?
708	jeq	Ldorte			| no, done
709Lrei1:
710	btst	#5,sp@			| yes, are we returning to user mode?
711	jne	Ldorte			| no, done
712	movw	#PSL_LOWIPL,sr		| lower SPL
713	clrl	sp@-			| stack adjust
714	moveml	#0xFFFF,sp@-		| save all registers
715	movl	usp,a1			| including
716	movl	a1,sp@(FR_SP)		|    the users SP
717	clrl	sp@-			| VA == none
718	clrl	sp@-			| code == none
719	movl	#T_ASTFLT,sp@-		| type == async system trap
720	jbsr	_trap			| go handle it
721	lea	sp@(12),sp		| pop value args
722	movl	sp@(FR_SP),a0		| restore user SP
723	movl	a0,usp			|   from save area
724	movw	sp@(FR_ADJ),d0		| need to adjust stack?
725	jne	Laststkadj		| yes, go to it
726	moveml	sp@+,#0x7FFF		| no, restore most user regs
727	addql	#8,sp			| toss SP and stack adjust
728	rte				| and do real RTE
729Laststkadj:
730	lea	sp@(FR_HW),a1		| pointer to HW frame
731	addql	#8,a1			| source pointer
732	movl	a1,a0			| source
733	addw	d0,a0			|  + hole size = dest pointer
734	movl	a1@-,a0@-		| copy
735	movl	a1@-,a0@-		|  8 bytes
736	movl	a0,sp@(FR_SP)		| new SSP
737	moveml	sp@+,#0x7FFF		| restore user registers
738	movl	sp@,sp			| and our SP
739Ldorte:
740	rte				| real return
741
742/*
743 * Initialization is at the beginning of this file, because the
744 * kernel entry point needs to be at zero for compatibility with
745 * the Sun boot loader.  This works on Sun machines because the
746 * interrupt vector table for reset is NOT at address zero.
747 * (The MMU has a "boot" bit that forces access to the PROM)
748 */
749
750/*
751 * Signal "trampoline" code (18 bytes).  Invoked from RTE setup by sendsig().
752 *
753 * Stack looks like:
754 *
755 *	sp+0 ->	signal number
756 *	sp+4	signal specific code
757 *	sp+8	pointer to signal context frame (scp)
758 *	sp+12	address of handler
759 *	sp+16	saved hardware state
760 *			.
761 *			.
762 *	scp+0->	beginning of signal context frame
763 */
764	.globl	_sigcode, _esigcode
765	.data
766	.align	2
767_sigcode:	/* Found at address: 0x0DFFffdc */
768	movl	sp@(12),a0		| signal handler addr	(4 bytes)
769	jsr	a0@			| call signal handler	(2 bytes)
770	addql	#4,sp			| pop signo		(2 bytes)
771	trap	#1			| special syscall entry	(2 bytes)
772	movl	d0,sp@(4)		| save errno		(4 bytes)
773	moveq	#1,d0			| syscall == exit	(2 bytes)
774	trap	#0			| exit(errno)		(2 bytes)
775	.align	2
776_esigcode:
777	.text
778
779/* XXX - hp300 still has icode here... */
780
781/*
782 * Primitives
783 */
784#include <machine/asm.h>
785
786/*
787 * non-local gotos
788 */
789ENTRY(setjmp)
790	movl	sp@(4),a0	| savearea pointer
791	moveml	#0xFCFC,a0@	| save d2-d7/a2-a7
792	movl	sp@,a0@(48)	| and return address
793	moveq	#0,d0		| return 0
794	rts
795
796ENTRY(longjmp)
797	movl	sp@(4),a0
798	moveml	a0@+,#0xFCFC
799	movl	a0@,sp@
800	moveq	#1,d0
801	rts
802
803/*
804 * The following primitives manipulate the run queues.
805 * _whichqs tells which of the 32 queues _qs have processes in them.
806 * Setrunqueue puts processes into queues, Remrunqueue removes them
807 * from queues.  The running process is on no queue, other processes
808 * are on a queue related to p->p_priority, divided by 4 actually to
809 * shrink the 0-127 range of priorities into the 32 available queues.
810 */
811
812	.globl	_whichqs,_qs,_cnt,_panic
813	.globl	_curproc
814	.comm	_want_resched,4
815
816/*
817 * setrunqueue(p)
818 *
819 * Call should be made at splclock(), and p->p_stat should be SRUN
820 */
821ENTRY(setrunqueue)
822	movl	sp@(4),a0
823#ifdef DIAGNOSTIC
824	tstl	a0@(P_BACK)
825	jne	Lset1
826	tstl	a0@(P_WCHAN)
827	jne	Lset1
828	cmpb	#SRUN,a0@(P_STAT)
829	jne	Lset1
830#endif
831	clrl	d0
832	movb	a0@(P_PRIORITY),d0
833	lsrb	#2,d0
834	movl	_whichqs,d1
835	bset	d0,d1
836	movl	d1,_whichqs
837	lslb	#3,d0
838	addl	#_qs,d0
839	movl	d0,a0@(P_FORW)
840	movl	d0,a1
841	movl	a1@(P_BACK),a0@(P_BACK)
842	movl	a0,a1@(P_BACK)
843	movl	a0@(P_BACK),a1
844	movl	a0,a1@(P_FORW)
845	rts
846#ifdef DIAGNOSTIC
847Lset1:
848	movl	#Lset2,sp@-
849	jbsr	_panic
850Lset2:
851	.asciz	"setrunqueue"
852	.even
853#endif
854
855/*
856 * remrunqueue(p)
857 *
858 * Call should be made at splclock().
859 */
860ENTRY(remrunqueue)
861	movl	sp@(4),a0		| proc *p
862	clrl	d0
863	movb	a0@(P_PRIORITY),d0
864	lsrb	#2,d0
865	movl	_whichqs,d1
866	bclr	d0,d1			| if ((d1 & (1 << d0)) == 0)
867	jeq	Lrem2			|   panic (empty queue)
868	movl	d1,_whichqs
869	movl	a0@(P_FORW),a1
870	movl	a0@(P_BACK),a1@(P_BACK)
871	movl	a0@(P_BACK),a1
872	movl	a0@(P_FORW),a1@(P_FORW)
873	movl	#_qs,a1
874	movl	d0,d1
875	lslb	#3,d1
876	addl	d1,a1
877	cmpl	a1@(P_FORW),a1
878	jeq	Lrem1
879	movl	_whichqs,d1
880	bset	d0,d1
881	movl	d1,_whichqs
882Lrem1:
883	clrl	a0@(P_BACK)
884	rts
885Lrem2:
886	movl	#Lrem3,sp@-
887	jbsr	_panic
888Lrem3:
889	.asciz	"remrunqueue"
890	.even
891
892| Message for Lbadsw panic
893Lsw0:
894	.asciz	"cpu_switch"
895	.even
896
897	.globl	_curpcb
898	.globl	_masterpaddr	| XXX compatibility (debuggers)
899	.data
900_masterpaddr:			| XXX compatibility (debuggers)
901_curpcb:
902	.long	0
903	.comm	nullpcb,SIZEOF_PCB
904	.text
905
906/*
907 * At exit of a process, do a cpu_switch for the last time.
908 * Switch to a safe stack and PCB, and deallocate the process's resources.
909 * The ipl is high enough to prevent the memory from being reallocated.
910 */
911ENTRY(switch_exit)
912	movl	sp@(4),a0		| struct proc *p
913	movl	#nullpcb,_curpcb	| save state into garbage pcb
914	lea	tmpstk,sp		| goto a tmp stack
915	movl	a0,sp@-			| pass proc ptr down
916
917	/* Free old process's u-area. */
918	movl	#USPACE,sp@-		| size of u-area
919	movl	a0@(P_ADDR),sp@-	| address of process's u-area
920	movl	_kernel_map,sp@-	| map it was allocated in
921	jbsr	_kmem_free		| deallocate it
922	lea	sp@(12),sp		| pop args
923
924	jra	_cpu_switch
925
926/*
927 * When no processes are on the runq, cpu_switch() branches to idle
928 * to wait for something to come ready.
929 */
930	.data
931	.globl _Idle_count
932_Idle_count:
933	.long	0
934	.text
935
936	.globl	Idle
937Lidle:
938	stop	#PSL_LOWIPL
939Idle:
940	movw	#PSL_HIGHIPL,sr
941	addql	#1, _Idle_count
942	tstl	_whichqs
943	jeq	Lidle
944	movw	#PSL_LOWIPL,sr
945	jra	Lsw1
946
947Lbadsw:
948	movl	#Lsw0,sp@-
949	jbsr	_panic
950	/*NOTREACHED*/
951
952/*
953 * cpu_switch()
954 * Hacked for sun3
955 * XXX - Arg 1 is a proc pointer (curproc) but this doesn't use it.
956 * XXX - Sould we use p->p_addr instead of curpcb? -gwr
957 */
958ENTRY(cpu_switch)
959	movl	_curpcb,a1		| current pcb
960	movw	sr,a1@(PCB_PS)		| save sr before changing ipl
961#ifdef notyet
962	movl	_curproc,sp@-		| remember last proc running
963#endif
964	clrl	_curproc
965
966Lsw1:
967	/*
968	 * Find the highest-priority queue that isn't empty,
969	 * then take the first proc from that queue.
970	 */
971	clrl	d0
972	lea	_whichqs,a0
973	movl	a0@,d1
974Lswchk:
975	btst	d0,d1
976	jne	Lswfnd
977	addqb	#1,d0
978	cmpb	#32,d0
979	jne	Lswchk
980	jra	Idle
981Lswfnd:
982	movw	#PSL_HIGHIPL,sr		| lock out interrupts
983	movl	a0@,d1			| and check again...
984	bclr	d0,d1
985	jeq	Lsw1			| proc moved, rescan
986	movl	d1,a0@			| update whichqs
987	moveq	#1,d1			| double check for higher priority
988	lsll	d0,d1			| process (which may have snuck in
989	subql	#1,d1			| while we were finding this one)
990	andl	a0@,d1
991	jeq	Lswok			| no one got in, continue
992	movl	a0@,d1
993	bset	d0,d1			| otherwise put this one back
994	movl	d1,a0@
995	jra	Lsw1			| and rescan
996Lswok:
997	movl	d0,d1
998	lslb	#3,d1			| convert queue number to index
999	addl	#_qs,d1			| locate queue (q)
1000	movl	d1,a1
1001	cmpl	a1@(P_FORW),a1		| anyone on queue?
1002	jeq	Lbadsw			| no, panic
1003	movl	a1@(P_FORW),a0		| p = q->p_forw
1004	movl	a0@(P_FORW),a1@(P_FORW)	| q->p_forw = p->p_forw
1005	movl	a0@(P_FORW),a1		| q = p->p_forw
1006	movl	a0@(P_BACK),a1@(P_BACK)	| q->p_back = p->p_back
1007	cmpl	a0@(P_FORW),d1		| anyone left on queue?
1008	jeq	Lsw2			| no, skip
1009	movl	_whichqs,d1
1010	bset	d0,d1			| yes, reset bit
1011	movl	d1,_whichqs
1012Lsw2:
1013	movl	a0,_curproc
1014	clrl	_want_resched
1015#ifdef notyet
1016	movl	sp@+,a1			| XXX - Make this work!
1017	cmpl	a0,a1			| switching to same proc?
1018	jeq	Lswdone			| yes, skip save and restore
1019#endif
1020	/*
1021	 * Save state of previous process in its pcb.
1022	 */
1023	movl	_curpcb,a1
1024	moveml	#0xFCFC,a1@(PCB_REGS)	| save non-scratch registers
1025	movl	usp,a2			| grab USP (a2 has been saved)
1026	movl	a2,a1@(PCB_USP)		| and save it
1027
1028	tstl	_fpu_type		| Do we have an fpu?
1029	jeq	Lswnofpsave		| No?  Then don't try save.
1030	lea	a1@(PCB_FPCTX),a2	| pointer to FP save area
1031	fsave	a2@			| save FP state
1032	tstb	a2@			| null state frame?
1033	jeq	Lswnofpsave		| yes, all done
1034	fmovem	fp0-fp7,a2@(FPF_REGS)		| save FP general regs
1035	fmovem	fpcr/fpsr/fpi,a2@(FPF_FPCR)	| save FP control regs
1036Lswnofpsave:
1037
1038	/*
1039	 * Now that we have saved all the registers that must be
1040	 * preserved, we are free to use those registers until
1041	 * we load the registers for the switched-to process.
1042	 * In this section, keep:  a0=curproc, a1=curpcb
1043	 */
1044
1045#ifdef DIAGNOSTIC
1046	tstl	a0@(P_WCHAN)
1047	jne	Lbadsw
1048	cmpb	#SRUN,a0@(P_STAT)
1049	jne	Lbadsw
1050#endif
1051	clrl	a0@(P_BACK)		| clear back link
1052	movl	a0@(P_ADDR),a1		| get p_addr
1053	movl	a1,_curpcb
1054
1055	/*
1056	 * Load the new VM context (new MMU root pointer)
1057	 */
1058	movl	a0@(P_VMSPACE),a2	| vm = p->p_vmspace
1059#ifdef DIAGNOSTIC
1060	tstl	a2			| map == VM_MAP_NULL?
1061	jeq	Lbadsw			| panic
1062#endif
1063#ifdef PMAP_DEBUG
1064	/*
1065	 * Just call pmap_activate() for now.  Later on,
1066	 * use the in-line version below (for speed).
1067	 */
1068	lea	a2@(VM_PMAP),a2 	| pmap = &vmspace.vm_pmap
1069	pea	a2@			| push pmap
1070	jbsr	_pmap_activate		| pmap_activate(pmap)
1071	addql	#4,sp
1072	movl	_curpcb,a1		| restore p_addr
1073#else
1074	/* XXX - Later, use this inline version. */
1075	/* Just load the new CPU Root Pointer (MMU) */
1076	lea	_kernel_crp, a3		| our CPU Root Ptr. (CRP)
1077	lea	a2@(VM_PMAP),a2 	| pmap = &vmspace.vm_pmap
1078	movl	a2@(PM_A_PHYS),d0	| phys = pmap->pm_a_phys
1079	cmpl	a3(4),d0		|  == kernel_crp.rp_addr ?
1080	jeq	Lsame_mmuctx		| skip loadcrp/flush
1081	/* OK, it is a new MMU context.  Load it up. */
1082	movl	d0,a3(4)
1083	movl	#CACHE_CLR,d0
1084	movc	d0,cacr			| invalidate cache(s)
1085	pflusha				| flush entire TLB
1086	pmove	a3@,crp			| load new user root pointer
1087Lsame_mmuctx:
1088#endif
1089
1090	/*
1091	 * Reload the registers for the new process.
1092	 * After this point we can only use d0,d1,a0,a1
1093	 */
1094	moveml	a1@(PCB_REGS),#0xFCFC	| reload registers
1095	movl	a1@(PCB_USP),a0
1096	movl	a0,usp			| and USP
1097
1098	tstl	_fpu_type		| If we don't have an fpu,
1099	jeq	Lres_skip		|  don't try to restore it.
1100	lea	a1@(PCB_FPCTX),a0	| pointer to FP save area
1101	tstb	a0@			| null state frame?
1102	jeq	Lresfprest		| yes, easy
1103	fmovem	a0@(FPF_FPCR),fpcr/fpsr/fpi	| restore FP control regs
1104	fmovem	a0@(FPF_REGS),fp0-fp7		| restore FP general regs
1105Lresfprest:
1106	frestore a0@			| restore state
1107Lres_skip:
1108	movw	a1@(PCB_PS),d0		| no, restore PS
1109#ifdef DIAGNOSTIC
1110	btst	#13,d0			| supervisor mode?
1111	jeq	Lbadsw			| no? panic!
1112#endif
1113	movw	d0,sr			| OK, restore PS
1114	moveq	#1,d0			| return 1 (for alternate returns)
1115	rts
1116
1117/*
1118 * savectx(pcb)
1119 * Update pcb, saving current processor state.
1120 */
1121ENTRY(savectx)
1122	movl	sp@(4),a1
1123	movw	sr,a1@(PCB_PS)
1124	movl	usp,a0			| grab USP
1125	movl	a0,a1@(PCB_USP)		| and save it
1126	moveml	#0xFCFC,a1@(PCB_REGS)	| save non-scratch registers
1127
1128	tstl	_fpu_type		| Do we have FPU?
1129	jeq	Lsavedone		| No?  Then don't save state.
1130	lea	a1@(PCB_FPCTX),a0	| pointer to FP save area
1131	fsave	a0@			| save FP state
1132	tstb	a0@			| null state frame?
1133	jeq	Lsavedone		| yes, all done
1134	fmovem	fp0-fp7,a0@(FPF_REGS)		| save FP general regs
1135	fmovem	fpcr/fpsr/fpi,a0@(FPF_FPCR)	| save FP control regs
1136Lsavedone:
1137	moveq	#0,d0			| return 0
1138	rts
1139
1140/* suline() `040 only */
1141
1142#ifdef DEBUG
1143	.data
1144	.globl	fulltflush, fullcflush
1145fulltflush:
1146	.long	0
1147fullcflush:
1148	.long	0
1149	.text
1150#endif
1151
1152/*
1153 * Invalidate entire TLB.
1154 */
1155ENTRY(TBIA)
1156__TBIA:
1157	pflusha
1158	movl	#DC_CLEAR,d0
1159	movc	d0,cacr			| invalidate on-chip d-cache
1160	rts
1161
1162/*
1163 * Invalidate any TLB entry for given VA (TB Invalidate Single)
1164 */
1165ENTRY(TBIS)
1166#ifdef DEBUG
1167	tstl	fulltflush		| being conservative?
1168	jne	__TBIA			| yes, flush entire TLB
1169#endif
1170	movl	sp@(4),a0
1171	pflush	#0,#0,a0@		| flush address from both sides
1172	movl	#DC_CLEAR,d0
1173	movc	d0,cacr			| invalidate on-chip data cache
1174	rts
1175
1176/*
1177 * Invalidate supervisor side of TLB
1178 */
1179ENTRY(TBIAS)
1180#ifdef DEBUG
1181	tstl	fulltflush		| being conservative?
1182	jne	__TBIA			| yes, flush everything
1183#endif
1184	pflush	#4,#4			| flush supervisor TLB entries
1185	movl	#DC_CLEAR,d0
1186	movc	d0,cacr			| invalidate on-chip d-cache
1187	rts
1188
1189/*
1190 * Invalidate user side of TLB
1191 */
1192ENTRY(TBIAU)
1193#ifdef DEBUG
1194	tstl	fulltflush		| being conservative?
1195	jne	__TBIA			| yes, flush everything
1196#endif
1197	pflush	#0,#4			| flush user TLB entries
1198	movl	#DC_CLEAR,d0
1199	movc	d0,cacr			| invalidate on-chip d-cache
1200	rts
1201
1202/*
1203 * Invalidate instruction cache
1204 */
1205ENTRY(ICIA)
1206	movl	#IC_CLEAR,d0
1207	movc	d0,cacr			| invalidate i-cache
1208	rts
1209
1210/*
1211 * Invalidate data cache.
1212 * NOTE: we do not flush 68030 on-chip cache as there are no aliasing
1213 * problems with DC_WA.  The only cases we have to worry about are context
1214 * switch and TLB changes, both of which are handled "in-line" in resume
1215 * and TBI*.
1216 */
1217ENTRY(DCIA)
1218__DCIA:
1219	rts
1220
1221ENTRY(DCIS)
1222__DCIS:
1223	rts
1224
1225/*
1226 * Invalidate data cache.
1227 */
1228ENTRY(DCIU)
1229	rts
1230
1231/* ICPL, ICPP, DCPL, DCPP, DCPA, DCFL, DCFP */
1232
1233ENTRY(PCIA)
1234	movl	#DC_CLEAR,d0
1235	movc	d0,cacr			| invalidate on-chip d-cache
1236	rts
1237
1238ENTRY(ecacheon)
1239	rts
1240
1241ENTRY(ecacheoff)
1242	rts
1243
1244/*
1245 * Get callers current SP value.
1246 * Note that simply taking the address of a local variable in a C function
1247 * doesn't work because callee saved registers may be outside the stack frame
1248 * defined by A6 (e.g. GCC generated code).
1249 *
1250 * [I don't think the ENTRY() macro will do the right thing with this -- glass]
1251 */
1252	.globl	_getsp
1253_getsp:
1254	movl	sp,d0			| get current SP
1255	addql	#4,d0			| compensate for return address
1256	rts
1257
1258ENTRY(getsfc)
1259	movc	sfc,d0
1260	rts
1261
1262ENTRY(getdfc)
1263	movc	dfc,d0
1264	rts
1265
1266ENTRY(getvbr)
1267	movc vbr, d0
1268	rts
1269
1270ENTRY(setvbr)
1271	movl sp@(4), d0
1272	movc d0, vbr
1273	rts
1274
1275/*
1276 * Load a new CPU Root Pointer (CRP) into the MMU.
1277 *	void	loadcrp(struct mmu_rootptr *);
1278 */
1279ENTRY(loadcrp)
1280	movl	sp@(4),a0		| arg1: &CRP
1281	movl	#CACHE_CLR,d0
1282	movc	d0,cacr			| invalidate cache(s)
1283	pflusha				| flush entire TLB
1284	pmove	a0@,crp			| load new user root pointer
1285	rts
1286
1287/*
1288 * Set processor priority level calls.  Most are implemented with
1289 * inline asm expansions.  However, we need one instantiation here
1290 * in case some non-optimized code makes external references.
1291 * Most places will use the inlined function param.h supplies.
1292 */
1293
1294ENTRY(_spl)
1295	movl	sp@(4),d1
1296	clrl	d0
1297	movw	sr,d0
1298	movw	d1,sr
1299	rts
1300
1301ENTRY(getsr)
1302	moveq	#0, d0
1303	movw	sr, d0
1304	rts
1305
1306ENTRY(_insque)
1307	movw	sr,d0
1308	movw	#PSL_HIGHIPL,sr		| atomic
1309	movl	sp@(8),a0		| where to insert (after)
1310	movl	sp@(4),a1		| element to insert (e)
1311	movl	a0@,a1@			| e->next = after->next
1312	movl	a0,a1@(4)		| e->prev = after
1313	movl	a1,a0@			| after->next = e
1314	movl	a1@,a0
1315	movl	a1,a0@(4)		| e->next->prev = e
1316	movw	d0,sr
1317	rts
1318
1319ENTRY(_remque)
1320	movw	sr,d0
1321	movw	#PSL_HIGHIPL,sr		| atomic
1322	movl	sp@(4),a0		| element to remove (e)
1323	movl	a0@,a1
1324	movl	a0@(4),a0
1325	movl	a0,a1@(4)		| e->next->prev = e->prev
1326	movl	a1,a0@			| e->prev->next = e->next
1327	movw	d0,sr
1328	rts
1329
1330/*
1331 * Save and restore 68881 state.
1332 */
1333ENTRY(m68881_save)
1334	movl	sp@(4),a0		| save area pointer
1335	fsave	a0@			| save state
1336	tstb	a0@			| null state frame?
1337	jeq	Lm68881sdone		| yes, all done
1338	fmovem fp0-fp7,a0@(FPF_REGS)		| save FP general regs
1339	fmovem fpcr/fpsr/fpi,a0@(FPF_FPCR)	| save FP control regs
1340Lm68881sdone:
1341	rts
1342
1343ENTRY(m68881_restore)
1344	movl	sp@(4),a0		| save area pointer
1345	tstb	a0@			| null state frame?
1346	jeq	Lm68881rdone		| yes, easy
1347	fmovem	a0@(FPF_FPCR),fpcr/fpsr/fpi	| restore FP control regs
1348	fmovem	a0@(FPF_REGS),fp0-fp7		| restore FP general regs
1349Lm68881rdone:
1350	frestore a0@			| restore state
1351	rts
1352
1353/*
1354 * _delay(unsigned N)
1355 * Delay for at least (N/256) microseconds.
1356 * This routine depends on the variable:  delay_divisor
1357 * which should be set based on the CPU clock rate.
1358 * XXX: Currently this is set in sun3_startup.c based on the
1359 * XXX: CPU model but this should be determined at run time...
1360 */
1361	.globl	__delay
1362__delay:
1363	| d0 = arg = (usecs << 8)
1364	movl	sp@(4),d0
1365	| d1 = delay_divisor;
1366	movl	_delay_divisor,d1
1367L_delay:
1368	subl	d1,d0
1369	jgt	L_delay
1370	rts
1371
1372
1373| Define some addresses, mostly so DDB can print useful info.
1374	.globl	_kernbase
1375	.set	_kernbase,KERNBASE
1376	.globl	_dvma_base
1377	.set	_dvma_base,DVMA_SPACE_START
1378	.globl	_prom_start
1379	.set	_prom_start,MONSTART
1380	.globl	_prom_base
1381	.set	_prom_base,PROM_BASE
1382
1383|The end!
1384