/*	$NetBSD: locore.s,v 1.74 2024/01/14 22:34:54 thorpej Exp $	*/

/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1980, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah $Hdr: locore.s 1.66 92/12/22$
 *	@(#)locore.s	8.6 (Berkeley) 5/27/94
 */

#include "opt_compat_netbsd.h"
#include "opt_compat_sunos.h"
#include "opt_kgdb.h"
#include "opt_lockdebug.h"

#include "assym.h"
#include <machine/asm.h>
#include <machine/trap.h>

| Remember this is a fun project!

	.data
GLOBAL(mon_crp)
	.long	0,0

| This is for kvm_mkdb, and should be the address of the beginning
| of the kernel text segment (not necessarily the same as kernbase).
	.text
GLOBAL(kernel_text)

| This is the entry point, as well as the end of the temporary stack
| used during process switch (one 8K page ending at start)
ASGLOBAL(tmpstk)
ASGLOBAL(start)

| The first step, after disabling interrupts, is to map enough of the kernel
| into high virtual address space so that we can use position dependent code.
| This is a tricky task on the sun3x because the MMU is already enabled and
| the ROM monitor provides no indication of where the root MMU table is mapped.
| Therefore we must use one of the 68030's 'transparent translation' registers
| to define a range in the address space where the MMU translation is
| turned off.  Once this is complete we can modify the MMU table directly
| without the need for it to be mapped into virtual memory.
| All code must be position independent until otherwise noted, as the
| boot loader has loaded us into low memory but all the symbols in this
| code have been linked high.
	movw	#PSL_HIGHIPL,%sr	| no interrupts
	movl	#KERNBASE3X,%a5		| for vtop conversion
	lea	_C_LABEL(mon_crp),%a0	| where to store the CRP
	subl	%a5,%a0
	| Note: borrowing mon_crp for tt0 setup...
	movl	#0x3F8107,%a0@		| map the low 1GB v=p with the
	.long	0xf0100800		| transparent translation reg0
					| [ pmove a0@, tt0 ]
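| (The 0x3F8107 value above, read against the 68030 TTx register
| layout, comes apart as: address base 0x00 with mask 0x3F, i.e.
| the low 1GB; E=1 to enable; RWM=1 so both reads and writes match;
| FC base 0 with FC mask 7 so all function codes match.  The raw
| .long is the pmove-to-%tt0 opcode written out by hand.  This
| breakdown is decoded here from the constant; double-check it
| against the 68030 manual before relying on it.)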
| In order to map the kernel into high memory we will copy the root table
| entry which maps the 16 megabytes of memory starting at 0x0 into the
| entry which maps the 16 megabytes starting at KERNBASE.
	pmove	%crp,%a0@		| Get monitor CPU root pointer
	movl	%a0@(4),%a1		| 2nd word is PA of level A table

	movl	%a1,%a0			| compute the descriptor address
	addl	#0x3e0,%a1		| for VA starting at KERNBASE
	movl	%a0@,%a1@		| copy descriptor type
	movl	%a0@(4),%a1@(4)		| copy physical address

| Kernel is now double mapped at zero and KERNBASE.
| Force a long jump to the relocated code (high VA).
	movl	#IC_CLEAR,%d0		| Flush the I-cache
	movc	%d0,%cacr
	jmp L_high_code:l		| long jump

L_high_code:
| We are now running in the correctly relocated kernel, so
| we are no longer restricted to position-independent code.
| It is handy to leave transparent translation enabled for the
| low 1GB while _bootstrap() is doing its thing.

| Do bootstrap stuff needed before main() gets called.
| Our boot loader leaves a copy of the kernel's exec header
| just before the start of the kernel text segment, so the
| kernel can sanity-check the DDB symbols at [end...esym].
| Pass the struct exec at tmpstk-32 to _bootstrap().
| Also, make sure the initial frame pointer is zero so that
| the backtrace algorithm used by KGDB terminates nicely.
	lea	_ASM_LABEL(tmpstk)-32,%sp
	movl	#0,%a6
	jsr	_C_LABEL(_bootstrap)	| See locore2.c

| Now turn off the transparent translation of the low 1GB.
| (this also flushes the ATC)
	clrl	%sp@-
	.long	0xf0170800		| pmove	sp@,tt0
	addql	#4,%sp

| Now that _bootstrap() is done using the PROM functions,
| we can safely set the sfc/dfc to something != FC_CONTROL
	moveq	#FC_USERD,%d0		| make movs access "user data"
	movc	%d0,%sfc		| space for copyin/copyout
	movc	%d0,%dfc

| Setup process zero user/kernel stacks.
	lea	_C_LABEL(lwp0),%a0	| get lwp0
	movl	%a0@(L_PCB),%a1		| XXXuvm_lwp_getuarea
	lea	%a1@(USPACE-4),%sp	| set SSP to last word
	movl	#USRSTACK3X-4,%a2
	movl	%a2,%usp		| init user SP

| Note curpcb was already set in _bootstrap().
| Will do fpu initialization during autoconfig (see fpu.c)
| The interrupt vector table and stack are now ready.
| Interrupts will be enabled later, AFTER autoconfiguration
| is finished, to avoid spurious interrupts.

/*
 * Create a fake exception frame so that cpu_lwp_fork() can copy it.
 * main() never returns; we exit to user mode from a forked process
 * later on.
 */
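| The pushes below build the frame bottom-up; from low to high
| address it ends up as: tf_regs[16] (64 bytes), the stack adjust
| word, tf_sr, tf_pc, and the format/vector word.  (Layout read
| straight off the pushes and their comments; see the m68k frame
| definitions for the authoritative struct.)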
	clrw	%sp@-			| tf_format,tf_vector
	clrl	%sp@-			| tf_pc (filled in later)
	movw	#PSL_USER,%sp@-		| tf_sr for user mode
	clrl	%sp@-			| tf_stackadj
	lea	%sp@(-64),%sp		| tf_regs[16]
	movl	%a1,%a0@(L_MD_REGS)	| lwp0.l_md.md_regs = trapframe
	jbsr	_C_LABEL(main)		| main(&trapframe)
	PANIC("main() returned")

| That is all the assembly startup code we need on the sun3x!
| The rest of this is like the hp300/locore.s where possible.

/*
 * Trap/interrupt vector routines
 */
#include <m68k/m68k/trap_subr.s>

GLOBAL(buserr)
	tstl	_C_LABEL(nofault)	| device probe?
	jeq	_C_LABEL(addrerr)	| no, handle as usual
	movl	_C_LABEL(nofault),%sp@-	| yes,
	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
GLOBAL(addrerr)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|   in the savearea
	lea	%sp@(FR_HW),%a1		| grab base of HW berr frame
	moveq	#0,%d0
	movw	%a1@(10),%d0		| grab SSW for fault processing
	btst	#12,%d0			| RB set?
	jeq	LbeX0			| no, test RC
	bset	#14,%d0			| yes, must set FB
	movw	%d0,%a1@(10)		| for hardware too
LbeX0:
	btst	#13,%d0			| RC set?
	jeq	LbeX1			| no, skip
	bset	#15,%d0			| yes, must set FC
	movw	%d0,%a1@(10)		| for hardware too
LbeX1:
	btst	#8,%d0			| data fault?
	jeq	Lbe0			| no, check for hard cases
	movl	%a1@(16),%d1		| fault address is as given in frame
	jra	Lbe10			| that's it
Lbe0:
	btst	#4,%a1@(6)		| long (type B) stack frame?
	jne	Lbe4			| yes, go handle
	movl	%a1@(2),%d1		| no, can use save PC
	btst	#14,%d0			| FB set?
	jeq	Lbe3			| no, try FC
	addql	#4,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe3:
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, done
	addql	#2,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe4:
	movl	%a1@(36),%d1		| long format, use stage B address
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, all done
	subql	#2,%d1			| yes, adjust address
Lbe10:
	movl	%d1,%sp@-		| push fault VA
	movl	%d0,%sp@-		| and padded SSW
	movw	%a1@(6),%d0		| get frame format/vector offset
	andw	#0x0FFF,%d0		| clear out frame format
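	| (a vector offset of 12 is exception vector 3, the 68k
	|  address error vector: offset = vector number * 4)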
	cmpw	#12,%d0			| address error vector?
	jeq	Lisaerr			| yes, go to it

/* MMU-specific code to determine reason for bus error. */
	movl	%d1,%a0			| fault address
	movl	%sp@,%d0		| function code from ssw
	btst	#8,%d0			| data fault?
	jne	Lbe10a
	movql	#1,%d0			| user program access FC
					| (we don't separate data/program)
	btst	#5,%a1@			| supervisor mode?
	jeq	Lbe10a			| if no, done
	movql	#5,%d0			| else supervisor program access
Lbe10a:
	ptestr	%d0,%a0@,#7		| do a table search
	pmove	%psr,%sp@		| save result
	movb	%sp@,%d1
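	| (%d1 now holds the high byte of the MMU status register:
	|  bit 7 = B (bus error), bit 3 = W (write protected),
	|  bit 2 = I (invalid), per the 68030 PTEST result layout)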
	btst	#2,%d1			| invalid? (incl. limit viol and berr)
	jeq	Lmightnotbemerr		| no -> wp check
	btst	#7,%d1			| is it MMU table berr?
	jeq	Lismerr			| no, must be fast
	jra	Lisberr1		| real bus err needs not be fast
Lmightnotbemerr:
	btst	#3,%d1			| write protect bit set?
	jeq	Lisberr1		| no, must be bus error
	movl	%sp@,%d0		| ssw into low word of d0
	andw	#0xc0,%d0		| write protect is set on page:
	cmpw	#0x40,%d0		| was it read cycle?
	jeq	Lisberr1		| yes, was not WPE, must be bus err
/* End of MMU-specific bus error code. */

Lismerr:
	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisaerr:
	movl	#T_ADDRERR,%sp@-	| mark address error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisberr1:
	clrw	%sp@			| re-clear pad word
Lisberr:
	movl	#T_BUSERR,%sp@-		| mark bus error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it

/*
 * FP exceptions.
 */
GLOBAL(fpfline)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULI,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

GLOBAL(fpunsupp)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

/*
 * Handles all other FP coprocessor exceptions.
 * Note that since some FP exceptions generate mid-instruction frames
 * and may cause signal delivery, we need to test for stack adjustment
 * after the trap call.
 */
GLOBAL(fpfault)
	clrl	%sp@-		| stack adjust count
	moveml	#0xFFFF,%sp@-	| save user registers
	movl	%usp,%a0	| and save
	movl	%a0,%sp@(FR_SP)	|   the user stack pointer
	clrl	%sp@-		| no VA arg
	movl	_C_LABEL(curpcb),%a0	| current pcb
	lea	%a0@(PCB_FPCTX),%a0 | address of FP savearea
	fsave	%a0@		| save state
	tstb	%a0@		| null state frame?
	jeq	Lfptnull	| yes, safe
	clrw	%d0		| no, need to tweak BIU
	movb	%a0@(1),%d0	| get frame size
	bset	#3,%a0@(0,%d0:w) | set exc_pend bit of BIU
Lfptnull:
	fmovem	%fpsr,%sp@-	| push fpsr as code argument
	frestore %a0@		| restore state
	movl	#T_FPERR,%sp@-	| push type arg
	jra	_ASM_LABEL(faultstkadj) | call trap and deal with stack cleanup

/*
 * Other exceptions only cause four and six word stack frames and require
 * no post-trap stack adjustment.
 */
GLOBAL(badtrap)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save std frame regs
	jbsr	_C_LABEL(straytrap)	| report
	moveml	%sp@+,#0xFFFF		| restore regs
	addql	#4,%sp			| stack adjust count
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trap 0 is for system calls
 */
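| The syscall number arrives in %d0 and is pushed as the single C
| argument to syscall(); fetching the syscall arguments themselves
| is left to the C side.  (That last point is a summary of how the
| MD syscall glue is expected to work, not something shown here.)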
GLOBAL(trap0)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|   in the savearea
	movl	%d0,%sp@-		| push syscall number
	jbsr	_C_LABEL(syscall)	| handle it
	addql	#4,%sp			| pop syscall arg
	movl	%sp@(FR_SP),%a0		| grab and restore
	movl	%a0,%usp		|   user SP
	moveml	%sp@+,#0x7FFF		| restore most registers
	addql	#8,%sp			| pop SP and stack adjust
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trap 12 is the entry point for the cachectl "syscall"
 *	cachectl(command, addr, length)
 * command in d0, addr in a1, length in d1
 */
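| The pushes below build the C argument list right to left, so the
| call reads as cachectl1(command, addr, length, p).  (Argument
| order inferred from the pushes; see the cachectl1() definition in
| the shared m68k code for the real prototype.)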
GLOBAL(trap12)
	movl	_C_LABEL(curlwp),%a0
	movl	%a0@(L_PROC),%sp@-	| push curproc pointer
	movl	%d1,%sp@-		| push length
	movl	%a1,%sp@-		| push addr
	movl	%d0,%sp@-		| push command
	jbsr	_C_LABEL(cachectl1)	| do it
	lea	%sp@(16),%sp		| pop args
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trace (single-step) trap.  Kernel-mode is special.
 * User mode traps are simply passed on to trap().
 */
GLOBAL(trace)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRACE,%d0

	| Check PSW and see what happened.
	|   T=0 S=0	(should not happen)
	|   T=1 S=0	trace trap from user mode
	|   T=0 S=1	trace trap on a trap instruction
	|   T=1 S=1	trace trap from system mode (kernel breakpoint)

	movw	%sp@(FR_HW),%d1		| get PSW
	notw	%d1			| XXX no support for T0 on 680[234]0
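	| (complementing first makes the AND below come up zero only
	|  when both T and S were set, i.e. a kernel-mode trace trap)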
	andw	#PSL_TS,%d1		| from system mode (T=1, S=1)?
	jeq	_ASM_LABEL(kbrkpt)	|  yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault

/*
 * Trap 15 is used for:
 *	- GDB breakpoints (in user programs)
 *	- KGDB breakpoints (in the kernel)
 *	- trace traps for SUN binaries (not fully supported yet)
 * User mode traps are simply passed to trap().
 */
GLOBAL(trap15)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRAP15,%d0
	btst	#5,%sp@(FR_HW)		| was supervisor mode?
	jne	_ASM_LABEL(kbrkpt)	|  yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault

ASLOCAL(kbrkpt)
	| Kernel-mode breakpoint or trace trap. (%d0=trap_type)
	| Save the system sp rather than the user sp.
	movw	#PSL_HIGHIPL,%sr	| lock out interrupts
	lea	%sp@(FR_SIZE),%a6	| Save stack pointer
	movl	%a6,%sp@(FR_SP)		|  from before trap

	| If we are not on tmpstk switch to it.
	| (so debugger can change the stack pointer)
	movl	%a6,%d1
	cmpl	#_ASM_LABEL(tmpstk),%d1
	jls	Lbrkpt2 		| already on tmpstk
	| Copy frame to the temporary stack
	movl	%sp,%a0			| %a0=src
	lea	_ASM_LABEL(tmpstk)-96,%a1 | %a1=dst
	movl	%a1,%sp			| sp=new frame
	moveq	#FR_SIZE,%d1
Lbrkpt1:
	movl	%a0@+,%a1@+
	subql	#4,%d1
	bgt	Lbrkpt1

Lbrkpt2:
	| Call the trap handler for the kernel debugger.
	| Do not call trap() to handle it, so that we can
	| set breakpoints in trap() if we want.  We know
	| the trap type is either T_TRACE or T_BREAKPOINT.
	movl	%d0,%sp@-		| push trap type
	jbsr	_C_LABEL(trap_kdebug)
	addql	#4,%sp			| pop args

	| The stack pointer may have been modified, or
	| data below it modified (by kgdb push call),
	| so push the hardware frame at the current sp
	| before restoring registers and returning.
	movl	%sp@(FR_SP),%a0		| modified sp
	lea	%sp@(FR_SIZE),%a1	| end of our frame
	movl	%a1@-,%a0@-		| copy 2 longs with
	movl	%a1@-,%a0@-		| ... predecrement
	movl	%a0,%sp@(FR_SP)		| sp = h/w frame
	moveml	%sp@+,#0x7FFF		| restore all but sp
	movl	%sp@,%sp		| ... and sp
	rte				| all done

/* Use common m68k sigreturn */
#include <m68k/m68k/sigreturn.s>

/*
 * Interrupt handlers.  Most are auto-vectored,
 * and hard-wired the same way on all sun3 models.
 * Format in the stack is:
 *   %d0,%d1,%a0,%a1, sr, pc, vo
 */

/* clock: see clock.c */
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_clock)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(clock_intr)
	INTERRUPT_RESTOREREG
	jra	_ASM_LABEL(rei)

/* interrupt counters (needed by vmstat) */
GLOBAL(intrnames)
	.asciz	"spur"	| 0
	.asciz	"lev1"	| 1
	.asciz	"lev2"	| 2
	.asciz	"lev3"	| 3
	.asciz	"lev4"	| 4
	.asciz	"clock"	| 5
	.asciz	"lev6"	| 6
	.asciz	"nmi"	| 7
GLOBAL(eintrnames)

	.data
	.even
GLOBAL(intrcnt)
	.long	0,0,0,0,0,0,0,0
GLOBAL(eintrcnt)
	.text

/*
 * Emulation of VAX REI instruction.
 *
 * This code is (mostly) un-altered from the hp300 code,
 * except that sun machines do not need a simulated SIR
 * because they have a real software interrupt register.
 *
 * This code deals with checking for and servicing ASTs
 * (profiling, scheduling) and software interrupts (network, softclock).
 * We check for ASTs first, just like the VAX.  To avoid excess overhead
 * the T_ASTFLT handling code will also check for software interrupts so we
 * do not have to do it here.  After identifying that we need an AST we
 * drop the IPL to allow device interrupts.
 *
 * This code is complicated by the fact that sendsig may have been called
 * necessitating a stack cleanup.
 */

ASGLOBAL(rei)
#ifdef	DIAGNOSTIC
	tstl	_C_LABEL(panicstr)	| have we panicked?
	jne	Ldorte			| yes, do not make matters worse
#endif
	tstl	_C_LABEL(astpending)	| AST pending?
	jeq	Ldorte			| no, done
Lrei1:
	btst	#5,%sp@			| yes, are we returning to user mode?
	jne	Ldorte			| no, done
	movw	#PSL_LOWIPL,%sr		| lower SPL
	clrl	%sp@-			| stack adjust
	moveml	#0xFFFF,%sp@-		| save all registers
	movl	%usp,%a1		| including
	movl	%a1,%sp@(FR_SP)		|    the users SP
	clrl	%sp@-			| VA == none
	clrl	%sp@-			| code == none
	movl	#T_ASTFLT,%sp@-		| type == async system trap
	pea	%sp@(12)		| fp == address of trap frame
	jbsr	_C_LABEL(trap)		| go handle it
	lea	%sp@(16),%sp		| pop value args
	movl	%sp@(FR_SP),%a0		| restore user SP
	movl	%a0,%usp		|   from save area
	movw	%sp@(FR_ADJ),%d0	| need to adjust stack?
	jne	Laststkadj		| yes, go to it
	moveml	%sp@+,#0x7FFF		| no, restore most user regs
	addql	#8,%sp			| toss SP and stack adjust
	rte				| and do real RTE
Laststkadj:
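	| The stack adjust count is non-zero, so the exception frame
	| must shrink: copy the 8-byte hardware frame up over the hole,
	| point the saved SP at the copy, and return through it.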
	lea	%sp@(FR_HW),%a1		| pointer to HW frame
	addql	#8,%a1			| source pointer
	movl	%a1,%a0			| source
	addw	%d0,%a0			|  + hole size = dest pointer
	movl	%a1@-,%a0@-		| copy
	movl	%a1@-,%a0@-		|  8 bytes
	movl	%a0,%sp@(FR_SP)		| new SSP
	moveml	%sp@+,#0x7FFF		| restore user registers
	movl	%sp@,%sp		| and our SP
Ldorte:
	rte				| real return

/*
 * Initialization is at the beginning of this file, because the
 * kernel entry point needs to be at zero for compatibility with
 * the Sun boot loader.  This works on Sun machines because the
 * interrupt vector table for reset is NOT at address zero.
 * (The MMU has a "boot" bit that forces access to the PROM)
 */

/*
 * Use common m68k sigcode.
 */
#include <m68k/m68k/sigcode.s>
#ifdef COMPAT_SUNOS
#include <m68k/m68k/sunos_sigcode.s>
#endif

	.text

/*
 * Primitives
 */

/*
 * Use common m68k support routines.
 */
#include <m68k/m68k/support.s>

/*
 * Use common m68k process/lwp switch and context save subroutines.
 */
#define FPCOPROC	/* XXX: Temp. Reqd. */
#include <m68k/m68k/switch_subr.s>


/* suline() */

#ifdef DEBUG
	.data
ASGLOBAL(fulltflush)
	.long	0
ASGLOBAL(fullcflush)
	.long	0
	.text
#endif

ENTRY(ecacheon)
	rts

ENTRY(ecacheoff)
	rts

/*
 * Load a new CPU Root Pointer (CRP) into the MMU.
 *	void	loadcrp(struct mmu_rootptr *);
 */
ENTRY(loadcrp)
	movl	%sp@(4),%a0		| arg1: &CRP
	movl	#CACHE_CLR,%d0
	movc	%d0,%cacr		| invalidate cache(s)
	pflusha				| flush entire TLB
	pmove	%a0@,%crp		| load new user root pointer
	rts

ENTRY(getcrp)
	movl	%sp@(4),%a0		| arg1: &crp
	pmove	%crp,%a0@		| *crpp = %crp
	rts

/*
 * Get the physical address of the PTE for a given VA.
 */
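| (Operand gloss for the ptestr below: function code #5 selects
|  supervisor data space and level #7 walks the translation tables
|  all the way down; PTEST with an address-register operand leaves
|  the address of the last descriptor fetched there.  See the
|  68030 manual for the definitive description.)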
ENTRY(ptest_addr)
	movl	%sp@(4),%a1		| VA
	ptestr	#5,%a1@,#7,%a0		| %a0 = addr of PTE
	movl	%a0,%d0			| Result in %d0 (not a pointer return)
	rts

/*
 * _delay(unsigned N)
 * Delay for at least (N/256) microseconds.
 * This routine depends on the variable:  delay_divisor
 * which should be set based on the CPU clock rate.
 * XXX: Currently this is set based on the CPU model,
 * XXX: but this should be determined at run time...
 */
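| Arithmetic sketch: with N = usecs * 256, the loop below runs about
| N / delay_divisor iterations, so delay_divisor is in effect the
| number of 1/256 us units one subl/jgt iteration costs on this CPU.
| (A reading of the loop, not a calibrated figure.)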
GLOBAL(_delay)
	| %d0 = arg = (usecs << 8)
	movl	%sp@(4),%d0
	| %d1 = delay_divisor;
	movl	_C_LABEL(delay_divisor),%d1
	jra	L_delay			/* Jump into the loop! */

	/*
	 * Align the branch target of the loop to a half-line (8-byte)
	 * boundary to minimize cache effects.  This guarantees both
	 * that there will be no prefetch stalls due to cache line burst
	 * operations and that the loop will run from a single cache
	 * half-line.
	 */
#ifdef __ELF__
	.align	8
#else
	.align	3
#endif
L_delay:
	subl	%d1,%d0
	jgt	L_delay
	rts

| Define some addresses, mostly so DDB can print useful info.
| Not using _C_LABEL() here because these symbols are never
| referenced by any C code, and if the leading underscore
| ever goes away, these lines turn into syntax errors...
	.set	_KERNBASE3X,KERNBASE3X
	.set	_MONSTART,SUN3X_MONSTART
	.set	_PROM_BASE,SUN3X_PROM_BASE
	.set	_MONEND,SUN3X_MONEND

|The end!
