/*	$NetBSD: locore.s,v 1.78 2024/01/17 12:33:51 thorpej Exp $	*/

/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1980, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah $Hdr: locore.s 1.66 92/12/22$
 *	@(#)locore.s	8.6 (Berkeley) 5/27/94
 */

#include "opt_compat_netbsd.h"
#include "opt_compat_sunos.h"
#include "opt_kgdb.h"
#include "opt_lockdebug.h"

#include "assym.h"
#include <machine/asm.h>
#include <machine/trap.h>

| Remember this is a fun project!

	.data
GLOBAL(mon_crp)
	.long	0,0

| This is for kvm_mkdb, and should be the address of the beginning
| of the kernel text segment (not necessarily the same as kernbase).
	.text
GLOBAL(kernel_text)

| This is the entry point, as well as the end of the temporary stack
| used during process switch (one 8K page ending at start)
ASGLOBAL(tmpstk)
ASGLOBAL(start)

| The first step, after disabling interrupts, is to map enough of the kernel
| into high virtual address space so that we can use position-dependent code.
| This is a tricky task on the sun3x because the MMU is already enabled and
| the ROM monitor provides no indication of where the root MMU table is mapped.
| Therefore we must use one of the 68030's 'transparent translation' registers
| to define a range in the address space where the MMU translation is
| turned off.  Once this is complete we can modify the MMU table directly
| without the need for it to be mapped into virtual memory.
| All code must be position-independent until otherwise noted, as the
| boot loader has loaded us into low memory but all the symbols in this
| code have been linked high.
	movw	#PSL_HIGHIPL,%sr	| no interrupts
	movl	#KERNBASE3X,%a5		| for vtop conversion
	lea	_C_LABEL(mon_crp),%a0	| where to store the CRP
	subl	%a5,%a0
	| Note: borrowing mon_crp for tt0 setup...
	movl	#0x3F8107,%a0@		| map the low 1GB v=p with the
	.long	0xf0100800		| transparent translation reg0
					| [ pmove a0@, tt0 ]
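	| (A rough breakdown of the 0x3F8107 TT0 value above: logical base
	| 0x00 with address mask 0x3F covers 0x00000000-0x3FFFFFFF, i.e. the
	| low 1GB; bit 15 enables the register; the remaining bits make it
	| match reads and writes in all function code spaces.)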
| In order to map the kernel into high memory we will copy the root table
| entry which maps the 16 megabytes of memory starting at 0x0 into the
| entry which maps the 16 megabytes starting at KERNBASE.
	pmove	%crp,%a0@		| Get monitor CPU root pointer
	movl	%a0@(4),%a1		| 2nd word is PA of level A table

	movl	%a1,%a0			| compute the descriptor address
	addl	#0x3e0,%a1		| for VA starting at KERNBASE
	movl	%a0@,%a1@		| copy descriptor type
	movl	%a0@(4),%a1@(4)		| copy physical address
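	| (0x3e0 above is 124 * 8: with 8-byte long-format descriptors,
	| slot 124 of the level A table is the one covering KERNBASE3X,
	| 0xF8000000, given the PROM's MMU configuration.)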

| Kernel is now double mapped at zero and KERNBASE.
| Force a long jump to the relocated code (high VA).
	movl	#IC_CLEAR,%d0		| Flush the I-cache
	movc	%d0,%cacr
	jmp L_high_code:l		| long jump
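| The ":l" suffix forces an absolute long jump, so the target is the
| linked (high) address of L_high_code rather than a PC-relative
| displacement from the low address we are currently running at.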

L_high_code:
| We are now running in the correctly relocated kernel, so
| we are no longer restricted to position-independent code.
| It is handy to leave transparent translation enabled for
| the low 1GB while _bootstrap() is doing its thing.

| Do bootstrap stuff needed before main() gets called.
| Our boot loader leaves a copy of the kernel's exec header
| just before the start of the kernel text segment, so the
| kernel can sanity-check the DDB symbols at [end...esym].
| Pass the struct exec at tmpstk-32 to _bootstrap().
| Also, make sure the initial frame pointer is zero so that
| the backtrace algorithm used by KGDB terminates nicely.
	lea	_ASM_LABEL(tmpstk)-32,%sp
	movl	#0,%a6
	jsr	_C_LABEL(_bootstrap)	| See locore2.c

| Now turn off the transparent translation of the low 1GB.
| (this also flushes the ATC)
	clrl	%sp@-
	.long	0xf0170800		| pmove	sp@,tt0
	addql	#4,%sp

| Now that _bootstrap() is done using the PROM functions,
| we can safely set the sfc/dfc to something != FC_CONTROL
	moveq	#FC_USERD,%d0		| make movs access "user data"
	movc	%d0,%sfc		| space for copyin/copyout
	movc	%d0,%dfc

| Setup process zero user/kernel stacks.
	lea	_C_LABEL(lwp0),%a0	| get lwp0
	movl	%a0@(L_PCB),%a1		| XXXuvm_lwp_getuarea
	lea	%a1@(USPACE-4),%sp	| set SSP to last word
	movl	#USRSTACK3X-4,%a2
	movl	%a2,%usp		| init user SP

| Note curpcb was already set in _bootstrap().
| Will do fpu initialization during autoconfig (see fpu.c)
| The interrupt vector table and stack are now ready.
| Interrupts will be enabled later, AFTER autoconfiguration
| is finished, to avoid spurious interrupts.

/*
 * Create a fake exception frame so that cpu_lwp_fork() can copy it.
 * main() never returns; we exit to user mode from a forked process
 * later on.
 */
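| The frame is built from the top down: format/vector word, PC, SR, and
| stack adjust are pushed, then space is reserved for the 16 saved
| registers, leaving %sp pointing at the base of a struct trapframe.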
	clrw	%sp@-			| tf_format,tf_vector
	clrl	%sp@-			| tf_pc (filled in later)
	movw	#PSL_USER,%sp@-		| tf_sr for user mode
	clrl	%sp@-			| tf_stackadj
	lea	%sp@(-64),%sp		| tf_regs[16]
	movl	%a1,%a0@(L_MD_REGS)	| lwp0.l_md.md_regs = trapframe
	jbsr	_C_LABEL(main)		| main(&trapframe)
	PANIC("main() returned")

| That is all the assembly startup code we need on the sun3x!
| The rest of this is like the hp300/locore.s where possible.

/*
 * Trap/interrupt vector routines
 */
#include <m68k/m68k/trap_subr.s>

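/*
 * Bus error and address error handlers.  If a device probe is in
 * progress (nofault is set), a bus error just longjmp()s back to the
 * probe code.  Otherwise the hardware fault frame is decoded to find
 * the fault address, the MMU is asked to classify the fault, and the
 * result is handed to trap() as T_MMUFLT, T_ADDRERR or T_BUSERR via
 * faultstkadj.
 */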
GLOBAL(buserr)
	tstl	_C_LABEL(nofault)	| device probe?
	jeq	_C_LABEL(addrerr)	| no, handle as usual
	movl	_C_LABEL(nofault),%sp@-	| yes,
	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
GLOBAL(addrerr)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|   in the savearea
	lea	%sp@(FR_HW),%a1		| grab base of HW berr frame
	moveq	#0,%d0
	movw	%a1@(10),%d0		| grab SSW for fault processing
	btst	#12,%d0			| RB set?
	jeq	LbeX0			| no, test RC
	bset	#14,%d0			| yes, must set FB
	movw	%d0,%a1@(10)		| for hardware too
LbeX0:
	btst	#13,%d0			| RC set?
	jeq	LbeX1			| no, skip
	bset	#15,%d0			| yes, must set FC
	movw	%d0,%a1@(10)		| for hardware too
LbeX1:
	btst	#8,%d0			| data fault?
	jeq	Lbe0			| no, check for hard cases
	movl	%a1@(16),%d1		| fault address is as given in frame
	jra	Lbe10			| that's it
Lbe0:
	btst	#4,%a1@(6)		| long (type B) stack frame?
	jne	Lbe4			| yes, go handle
	movl	%a1@(2),%d1		| no, can use saved PC
	btst	#14,%d0			| FB set?
	jeq	Lbe3			| no, try FC
	addql	#4,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe3:
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, done
	addql	#2,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe4:
	movl	%a1@(36),%d1		| long format, use stage B address
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, all done
	subql	#2,%d1			| yes, adjust address
Lbe10:
	movl	%d1,%sp@-		| push fault VA
	movl	%d0,%sp@-		| and padded SSW
	movw	%a1@(6),%d0		| get frame format/vector offset
	andw	#0x0FFF,%d0		| clear out frame format
	cmpw	#12,%d0			| address error vector?
	jeq	Lisaerr			| yes, go to it

/* MMU-specific code to determine reason for bus error. */
	movl	%d1,%a0			| fault address
	movl	%sp@,%d0		| function code from ssw
	btst	#8,%d0			| data fault?
	jne	Lbe10a
	movql	#1,%d0			| user program access FC
					| (we don't separate data/program)
	btst	#5,%a1@			| supervisor mode?
	jeq	Lbe10a			| if no, done
	movql	#5,%d0			| else supervisor program access
Lbe10a:
	ptestr	%d0,%a0@,#7		| do a table search
	pmove	%psr,%sp@		| save result
	movb	%sp@,%d1
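	| %d1 now holds the high byte of the 68030 MMU status register:
	| bit 7 = B (bus error), bit 3 = W (write protected),
	| bit 2 = I (invalid descriptor); these are tested below.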
	btst	#2,%d1			| invalid? (incl. limit viol and berr)
	jeq	Lmightnotbemerr		| no -> wp check
	btst	#7,%d1			| is it MMU table berr?
	jeq	Lismerr			| no, must be fast
	jra	Lisberr1		| real bus err needs not be fast
Lmightnotbemerr:
	btst	#3,%d1			| write protect bit set?
	jeq	Lisberr1		| no, must be bus error
	movl	%sp@,%d0		| ssw into low word of d0
	andw	#0xc0,%d0		| write protect is set on page:
	cmpw	#0x40,%d0		| was it read cycle?
	jeq	Lisberr1		| yes, was not WPE, must be bus err
/* End of MMU-specific bus error code. */

Lismerr:
	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisaerr:
	movl	#T_ADDRERR,%sp@-	| mark address error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisberr1:
	clrw	%sp@			| re-clear pad word
Lisberr:
	movl	#T_BUSERR,%sp@-		| mark bus error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it

/*
 * FP exceptions.
 */
GLOBAL(fpfline)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULI,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

GLOBAL(fpunsupp)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

/*
 * Handles all other FP coprocessor exceptions.
 * Note that since some FP exceptions generate mid-instruction frames
 * and may cause signal delivery, we need to test for stack adjustment
 * after the trap call.
 */
GLOBAL(fpfault)
	clrl	%sp@-		| stack adjust count
	moveml	#0xFFFF,%sp@-	| save user registers
	movl	%usp,%a0	| and save
	movl	%a0,%sp@(FR_SP)	|   the user stack pointer
	clrl	%sp@-		| no VA arg
	movl	_C_LABEL(curpcb),%a0	| current pcb
	lea	%a0@(PCB_FPCTX),%a0 | address of FP savearea
	fsave	%a0@		| save state
	tstb	%a0@		| null state frame?
	jeq	Lfptnull	| yes, safe
	clrw	%d0		| no, need to tweak BIU
	movb	%a0@(1),%d0	| get frame size
	bset	#3,%a0@(0,%d0:w) | set exc_pend bit of BIU
Lfptnull:
	fmovem	%fpsr,%sp@-	| push fpsr as code argument
	frestore %a0@		| restore state
	movl	#T_FPERR,%sp@-	| push type arg
	jra	_ASM_LABEL(faultstkadj) | call trap and deal with stack cleanup

/*
 * Other exceptions only cause four- and six-word stack frames and require
 * no post-trap stack adjustment.
 */
GLOBAL(badtrap)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save std frame regs
	jbsr	_C_LABEL(straytrap)	| report
	moveml	%sp@+,#0xFFFF		| restore regs
	addql	#4,%sp			| stack adjust count
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trap 0 is for system calls
 */
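| Only the syscall number (in %d0) is passed to syscall() here; the
| C code fetches the arguments itself.  On the way out, the #0x7FFF
| mask restores %d0-%d7/%a0-%a6 but not the saved SP slot, which has
| already been copied back into %usp.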
GLOBAL(trap0)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|   in the savearea
	movl	%d0,%sp@-		| push syscall number
	jbsr	_C_LABEL(syscall)	| handle it
	addql	#4,%sp			| pop syscall arg
	movl	%sp@(FR_SP),%a0		| grab and restore
	movl	%a0,%usp		|   user SP
	moveml	%sp@+,#0x7FFF		| restore most registers
	addql	#8,%sp			| pop SP and stack adjust
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trap 12 is the entry point for the cachectl "syscall"
 *	cachectl(command, addr, length)
 * command in d0, addr in a1, length in d1
 */
GLOBAL(trap12)
	movl	_C_LABEL(curlwp),%a0
	movl	%a0@(L_PROC),%sp@-	| push curproc pointer
	movl	%d1,%sp@-		| push length
	movl	%a1,%sp@-		| push addr
	movl	%d0,%sp@-		| push command
	jbsr	_C_LABEL(cachectl1)	| do it
	lea	%sp@(16),%sp		| pop args
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trace (single-step) trap.  Kernel-mode is special.
 * User mode traps are simply passed on to trap().
 */
GLOBAL(trace)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRACE,%d0

	| Check PSW and see what happened.
	|   T=0 S=0	(should not happen)
	|   T=1 S=0	trace trap from user mode
	|   T=0 S=1	trace trap on a trap instruction
	|   T=1 S=1	trace trap from system mode (kernel breakpoint)

	movw	%sp@(FR_HW),%d1		| get PSW
	notw	%d1			| XXX no support for T0 on 680[234]0
	andw	#PSL_TS,%d1		| from system mode (T=1, S=1)?
	jeq	_ASM_LABEL(kbrkpt)	|  yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault

/*
 * Trap 15 is used for:
 *	- GDB breakpoints (in user programs)
 *	- KGDB breakpoints (in the kernel)
 *	- trace traps for SUN binaries (not fully supported yet)
 * User mode traps are simply passed to trap().
 */
GLOBAL(trap15)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRAP15,%d0
	btst	#5,%sp@(FR_HW)		| was supervisor mode?
	jne	_ASM_LABEL(kbrkpt)	|  yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault

ASLOCAL(kbrkpt)
	| Kernel-mode breakpoint or trace trap. (%d0=trap_type)
	| Save the system sp rather than the user sp.
	movw	#PSL_HIGHIPL,%sr	| lock out interrupts
	lea	%sp@(FR_SIZE),%a6	| Save stack pointer
	movl	%a6,%sp@(FR_SP)		|  from before trap

	| If we are not on tmpstk switch to it.
	| (so debugger can change the stack pointer)
	movl	%a6,%d1
	cmpl	#_ASM_LABEL(tmpstk),%d1
	jls	Lbrkpt2			| already on tmpstk
	| Copy frame to the temporary stack
	movl	%sp,%a0			| %a0=src
	lea	_ASM_LABEL(tmpstk)-96,%a1 | %a1=dst
	movl	%a1,%sp			| sp=new frame
	moveq	#FR_SIZE,%d1
Lbrkpt1:
	movl	%a0@+,%a1@+
	subql	#4,%d1
	bgt	Lbrkpt1

Lbrkpt2:
	| Call the trap handler for the kernel debugger.
	| Do not call trap() to handle it, so that we can
	| set breakpoints in trap() if we want.  We know
	| the trap type is either T_TRACE or T_BREAKPOINT.
	movl	%d0,%sp@-		| push trap type
	jbsr	_C_LABEL(trap_kdebug)
	addql	#4,%sp			| pop args

	| The stack pointer may have been modified, or
	| data below it modified (by kgdb push call),
	| so push the hardware frame at the current sp
	| before restoring registers and returning.
	movl	%sp@(FR_SP),%a0		| modified sp
	lea	%sp@(FR_SIZE),%a1	| end of our frame
	movl	%a1@-,%a0@-		| copy 2 longs with
	movl	%a1@-,%a0@-		| ... predecrement
	movl	%a0,%sp@(FR_SP)		| sp = h/w frame
	moveml	%sp@+,#0x7FFF		| restore all but sp
	movl	%sp@,%sp		| ... and sp
	rte				| all done

/*
 * Interrupt handlers.  Most are auto-vectored,
 * and hard-wired the same way on all sun3 models.
 * Format in the stack is:
 *   %d0,%d1,%a0,%a1, sr, pc, vo
 */

/* clock: see clock.c */
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_clock)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(clock_intr)
	INTERRUPT_RESTOREREG
	jra	_ASM_LABEL(rei)

/*
 * Emulation of VAX REI instruction.
 *
 * This code is (mostly) un-altered from the hp300 code,
 * except that sun machines do not need a simulated SIR
 * because they have a real software interrupt register.
 *
 * This code deals with checking for and servicing ASTs
 * (profiling, scheduling) and software interrupts (network, softclock).
 * We check for ASTs first, just like the VAX.  To avoid excess overhead
 * the T_ASTFLT handling code will also check for software interrupts so we
 * do not have to do it here.  After identifying that we need an AST we
 * drop the IPL to allow device interrupts.
 *
 * This code is complicated by the fact that sendsig may have been called
 * necessitating a stack cleanup.
 */

ASGLOBAL(rei)
#ifdef	DIAGNOSTIC
	tstl	_C_LABEL(panicstr)	| have we panicked?
	jne	Ldorte			| yes, do not make matters worse
#endif
	tstl	_C_LABEL(astpending)	| AST pending?
	jeq	Ldorte			| no, done
Lrei1:
	btst	#5,%sp@			| yes, are we returning to user mode?
	jne	Ldorte			| no, done
	movw	#PSL_LOWIPL,%sr		| lower SPL
	clrl	%sp@-			| stack adjust
	moveml	#0xFFFF,%sp@-		| save all registers
	movl	%usp,%a1		| including
	movl	%a1,%sp@(FR_SP)		|    the user's SP
	clrl	%sp@-			| VA == none
	clrl	%sp@-			| code == none
	movl	#T_ASTFLT,%sp@-		| type == async system trap
	pea	%sp@(12)		| fp == address of trap frame
	jbsr	_C_LABEL(trap)		| go handle it
	lea	%sp@(16),%sp		| pop value args
	movl	%sp@(FR_SP),%a0		| restore user SP
	movl	%a0,%usp		|   from save area
	movw	%sp@(FR_ADJ),%d0	| need to adjust stack?
	jne	Laststkadj		| yes, go to it
	moveml	%sp@+,#0x7FFF		| no, restore most user regs
	addql	#8,%sp			| toss SP and stack adjust
	rte				| and do real RTE
Laststkadj:
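	| sendsig() changed the frame size; FR_ADJ (in %d0) is the number
	| of bytes to drop.  Copy the 8-byte hardware frame (SR, PC, and
	| format/vector word) to its new position, then restore registers
	| and switch to the adjusted stack before the RTE.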
	lea	%sp@(FR_HW),%a1		| pointer to HW frame
	addql	#8,%a1			| source pointer
	movl	%a1,%a0			| source
	addw	%d0,%a0			|  + hole size = dest pointer
	movl	%a1@-,%a0@-		| copy
	movl	%a1@-,%a0@-		|  8 bytes
	movl	%a0,%sp@(FR_SP)		| new SSP
	moveml	%sp@+,#0x7FFF		| restore user registers
	movl	%sp@,%sp		| and our SP
Ldorte:
	rte				| real return

/*
 * Initialization is at the beginning of this file, because the
 * kernel entry point needs to be at zero for compatibility with
 * the Sun boot loader.  This works on Sun machines because the
 * interrupt vector table for reset is NOT at address zero.
 * (The MMU has a "boot" bit that forces access to the PROM)
 */

/*
 * Primitives
 */

/*
 * Use common m68k process/lwp switch and context save subroutines.
 */
#define FPCOPROC	/* XXX: Temp. Reqd. */
#include <m68k/m68k/switch_subr.s>


/* suline() */

#ifdef DEBUG
	.data
ASGLOBAL(fulltflush)
	.long	0
ASGLOBAL(fullcflush)
	.long	0
	.text
#endif

ENTRY(ecacheon)
	rts

ENTRY(ecacheoff)
	rts

/*
 * Load a new CPU Root Pointer (CRP) into the MMU.
 *	void	loadcrp(struct mmu_rootptr *);
 */
ENTRY(loadcrp)
	movl	%sp@(4),%a0		| arg1: &CRP
	movl	#CACHE_CLR,%d0
	movc	%d0,%cacr		| invalidate cache(s)
	pflusha				| flush entire TLB
	pmove	%a0@,%crp		| load new user root pointer
	rts

ENTRY(getcrp)
	movl	%sp@(4),%a0		| arg1: &crp
	pmove	%crp,%a0@		| *crpp = %crp
	rts

/*
 * Get the physical address of the PTE for a given VA.
 */
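| ptestr with function code #5 (supervisor data space) and level #7
| walks the translation tables all the way down; on the 68030 the %a0
| operand then receives the physical address of the last descriptor
| fetched, i.e. the PTE for the given VA.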
ENTRY(ptest_addr)
	movl	%sp@(4),%a1		| VA
	ptestr	#5,%a1@,#7,%a0		| %a0 = addr of PTE
	movl	%a0,%d0			| Result in %d0 (not a pointer return)
	rts

/*
 * _delay(unsigned N)
 * Delay for at least (N/256) microseconds.
 * This routine depends on the variable:  delay_divisor
 * which should be set based on the CPU clock rate.
 * XXX: Currently this is set based on the CPU model,
 * XXX: but this should be determined at run time...
 */
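| The loop below runs for roughly N/delay_divisor iterations, i.e.
| (usecs * 256) / delay_divisor, so delay_divisor should hold the
| number of 1/256-microsecond units one loop iteration takes.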
GLOBAL(_delay)
	| %d0 = arg = (usecs << 8)
	movl	%sp@(4),%d0
	| %d1 = delay_divisor;
	movl	_C_LABEL(delay_divisor),%d1
	jra	L_delay			/* Jump into the loop! */

	/*
	 * Align the branch target of the loop to a half-line (8-byte)
	 * boundary to minimize cache effects.  This guarantees both
	 * that there will be no prefetch stalls due to cache line burst
	 * operations and that the loop will run from a single cache
	 * half-line.
	 */
#ifdef __ELF__
	.align	8
#else
	.align	3
#endif
L_delay:
	subl	%d1,%d0
	jgt	L_delay
	rts

| Define some addresses, mostly so DDB can print useful info.
| Not using _C_LABEL() here because these symbols are never
| referenced by any C code, and if the leading underscore
| ever goes away, these lines turn into syntax errors...
	.set	_KERNBASE3X,KERNBASE3X
	.set	_MONSTART,SUN3X_MONSTART
	.set	_PROM_BASE,SUN3X_PROM_BASE
	.set	_MONEND,SUN3X_MONEND

|The end!