1/*	$NetBSD: locore.s,v 1.107 2011/11/26 14:05:53 tsutsui Exp $	*/
2
3/*
4 * Copyright (c) 1988 University of Utah.
5 * Copyright (c) 1980, 1990, 1993
6 *	The Regents of the University of California.  All rights reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * the Systems Programming Group of the University of Utah Computer
10 * Science Department.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 *    notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 *    notice, this list of conditions and the following disclaimer in the
19 *    documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 *    may be used to endorse or promote products derived from this software
22 *    without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * from: Utah $Hdr: locore.s 1.66 92/12/22$
37 *
38 *	@(#)locore.s	8.6 (Berkeley) 5/27/94
39 */
40
41#include "opt_compat_netbsd.h"
42#include "opt_compat_svr4.h"
43#include "opt_compat_sunos.h"
44#include "opt_ddb.h"
45#include "opt_fpsp.h"
46#include "opt_kgdb.h"
47#include "opt_lockdebug.h"
48#include "opt_m68k_arch.h"
49
50#include "ite.h"
51#include "fd.h"
52#include "par.h"
53#include "assym.h"
54#include "ksyms.h"
55
56#include <machine/asm.h>
57
58| This is for kvm_mkdb, and should be the address of the beginning
59| of the kernel text segment (not necessarily the same as kernbase).
	.text
GLOBAL(kernel_text)

/*
 * Temporary stack for a variety of purposes.
 * Try and make this the first thing in the data segment so it
 * is page aligned.  Note that if we overflow here, we run into
 * our text segment.
 */
	.data
	.space	PAGE_SIZE
ASLOCAL(tmpstk)
72
73#include <x68k/x68k/vectors.s>
74
75	.text
76/*
77 * This is where we wind up if the kernel jumps to location 0.
78 * (i.e. a bogus PC)  This is known to immediately follow the vector
79 * table and is hence at 0x400 (see reset vector in vectors.s).
80 */
81	PANIC("kernel jump to zero")
82	/* NOTREACHED */
83
84/*
85 * Trap/interrupt vector routines
86 */
87#include <m68k/m68k/trap_subr.s>
88
/*
 * Bus error handler.
 * If a device probe is in progress (nofault non-zero) we longjmp back
 * to the probe code rather than treating this as a real fault.
 * On a 68040/68060-style MMU the hardware fault frame is decoded here
 * to recover the fault VA and SSW/FSLW; other CPU types share the
 * addrerr code below.
 */
ENTRY_NOPROFILE(buserr)
ENTRY_NOPROFILE(buserr60)		| XXX
	tstl	_C_LABEL(nofault)	| device probe?
	jeq	Lberr			| no, handle as usual
	movl	_C_LABEL(nofault),%sp@-	| yes,
	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
Lberr:
#if defined(M68040) || defined(M68060)
	cmpl	#MMU_68040,_C_LABEL(mmutype) | 68040/060?
	jne	_C_LABEL(addrerr)	| no, skip
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|   in the savearea
	lea	%sp@(FR_HW),%a1		| grab base of HW berr frame
#if defined(M68060)
	cmpl	#CPU_68060,_C_LABEL(cputype) | 68060?
	jne	Lbenot060
	movel	%a1@(12),%d0		| grab FSLW (fault status long word)
	btst	#2,%d0			| branch prediction error?
	jeq	Lnobpe			| no, skip
	movc	%cacr,%d1
	orl	#IC60_CABC,%d1		| clear all branch cache entries
	movc	%d1,%cacr
	movl	%d0,%d1
	andl	#0x7ffd,%d1		| check other faults
	jeq	_ASM_LABEL(faultstkadjnotrap2)
Lnobpe:
| XXX this is not needed.
|	movl	%d0,%sp@		| code is FSLW now.

| we need to adjust for misaligned addresses
	movl	%a1@(8),%d1		| grab VA
	btst	#27,%d0			| check for mis-aligned access
	jeq	Lberr3			| no, skip
	addl	#28,%d1			| yes, get into next page
					| operand case: 3,
					| instruction case: 4+12+12
					| XXX instr. case not done yet
	andl	#PG_FRAME,%d1		| and truncate
Lberr3:
	movl	%d1,%sp@-		| push fault VA
	movl	%d0,%sp@-		| and FSLW
	andw	#0x1f80,%d0		| any MMU fault bits set?
	jeq	Lisberr			| no, real bus error
	jra	Lismerr			| yes, treat as MMU fault
Lbenot060:
#endif
	moveq	#0,%d0
	movw	%a1@(12),%d0		| grab SSW
	movl	%a1@(20),%d1		| and fault VA
	btst	#11,%d0			| check for mis-aligned access
	jeq	Lberr2			| no, skip
	addl	#3,%d1			| yes, get into next page
	andl	#PG_FRAME,%d1		| and truncate
Lberr2:
	movl	%d1,%sp@-		| push fault VA
	movl	%d0,%sp@-		| and padded SSW
	btst	#10,%d0			| ATC bit set?
	jeq	Lisberr			| no, must be a real bus error
	movc	%dfc,%d1		| yes, get MMU fault
	movc	%d0,%dfc		| store faulting function code
	movl	%sp@(4),%a0		| get faulting address
	.word	0xf568			| ptestr a0@
	movc	%d1,%dfc
	.long	0x4e7a0805		| movc mmusr,d0
	movw	%d0,%sp@		| save (ONLY LOW 16 BITS!)
	jra	Lismerr
#endif
/*
 * Address error handler (also the bus-error tail for non-040 MMUs).
 * Decodes the 68020/030 bus/address error stack frame (and the simpler
 * 68040 address-error frame) to recover the fault VA and SSW, probes
 * the MMU with ptest, and classifies the fault as an MMU fault,
 * address error, or bus error before handing it to trap() via
 * faultstkadj.
 */
ENTRY_NOPROFILE(addrerr)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|   in the savearea
	lea	%sp@(FR_HW),%a1		| grab base of HW berr frame
#if defined(M68040) || defined(M68060)
	cmpl	#MMU_68040,_C_LABEL(mmutype) | 68040?
	jne	Lbenot040		| no, skip
	movl	%a1@(8),%sp@-		| yes, push fault address
	clrl	%sp@-			| no SSW for address fault
	jra	Lisaerr			| go deal with it
Lbenot040:
#endif
	moveq	#0,%d0
	movw	%a1@(10),%d0		| grab SSW for fault processing
	btst	#12,%d0			| RB set?
	jeq	LbeX0			| no, test RC
	bset	#14,%d0			| yes, must set FB
	movw	%d0,%a1@(10)		| for hardware too
LbeX0:
	btst	#13,%d0			| RC set?
	jeq	LbeX1			| no, skip
	bset	#15,%d0			| yes, must set FC
	movw	%d0,%a1@(10)		| for hardware too
LbeX1:
	btst	#8,%d0			| data fault?
	jeq	Lbe0			| no, check for hard cases
	movl	%a1@(16),%d1		| fault address is as given in frame
	jra	Lbe10			| that's it
Lbe0:
	btst	#4,%a1@(6)		| long (type B) stack frame?
	jne	Lbe4			| yes, go handle
	movl	%a1@(2),%d1		| no, can use save PC
	btst	#14,%d0			| FB set?
	jeq	Lbe3			| no, try FC
	addql	#4,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe3:
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, done
	addql	#2,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe4:
	movl	%a1@(36),%d1		| long format, use stage B address
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, all done
	subql	#2,%d1			| yes, adjust address
Lbe10:
	movl	%d1,%sp@-		| push fault VA
	movl	%d0,%sp@-		| and padded SSW
	movw	%a1@(6),%d0		| get frame format/vector offset
	andw	#0x0FFF,%d0		| clear out frame format
	cmpw	#12,%d0			| address error vector?
	jeq	Lisaerr			| yes, go to it
	movl	%d1,%a0			| fault address
	movl	%sp@,%d0		| function code from ssw
	btst	#8,%d0			| data fault?
	jne	Lbe10a
	movql	#1,%d0			| user program access FC
					| (we don't separate data/program)
	btst	#5,%a1@			| supervisor mode?
	jeq	Lbe10a			| if no, done
	movql	#5,%d0			| else supervisor program access
Lbe10a:
	ptestr	%d0,%a0@,#7		| do a table search
	pmove	%psr,%sp@		| save result
	movb	%sp@,%d1
	btst	#2,%d1			| invalid? (incl. limit viol and berr)
	jeq	Lmightnotbemerr		| no -> wp check
	btst	#7,%d1			| is it MMU table berr?
	jeq	Lismerr			| no, must be fast
	jra	Lisberr1		| real bus err needs not be fast
Lmightnotbemerr:
	btst	#3,%d1			| write protect bit set?
	jeq	Lisberr1		| no, must be bus error
	movl	%sp@,%d0		| ssw into low word of d0
	andw	#0xc0,%d0		| write protect is set on page:
	cmpw	#0x40,%d0		| was it read cycle?
	jeq	Lisberr1		| yes, was not WPE, must be bus err
Lismerr:
	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisaerr:
	movl	#T_ADDRERR,%sp@-	| mark address error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisberr1:
	clrw	%sp@			| re-clear pad word
Lisberr:
	movl	#T_BUSERR,%sp@-		| mark bus error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
249
250/*
251 * FP exceptions.
252 */
253#include "opt_fpu_emulate.h"
/*
 * Unimplemented FP instruction.  On a 68040 FPU, format-2 frames are
 * handed to the FPSP emulation package (if configured); otherwise the
 * exception becomes an FP-emulation trap (FPU_EMULATE) or an illegal
 * instruction.
 */
ENTRY_NOPROFILE(fpfline)
#if defined(M68040)
	cmpl	#FPU_68040,_C_LABEL(fputype) | 68040 FPU?
	jne	Lfp_unimp		| no, skip FPSP
	cmpw	#0x202c,%sp@(6)		| format type 2?
	jne	_C_LABEL(illinst)	| no, not an FP emulation
#ifdef FPSP
	jmp	_ASM_LABEL(fpsp_unimp)	| yes, go handle it
#else
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULI,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it
#endif
Lfp_unimp:
#endif
#ifdef FPU_EMULATE
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it
#else
	jra	_C_LABEL(illinst)
#endif
278
/*
 * Unsupported FP data type.  Same dispatch strategy as fpfline above:
 * FPSP on a 68040 if configured, else FP emulation or illegal
 * instruction.
 */
ENTRY_NOPROFILE(fpunsupp)
#if defined(M68040)
	cmpl	#FPU_68040,_C_LABEL(fputype) | 68040?
	jne	Lfp_unsupp		| no, skip FPSP
#ifdef FPSP
	jmp	_ASM_LABEL(fpsp_unsupp)	| yes, go handle it
#else
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it
#endif
Lfp_unsupp:
#endif
#ifdef FPU_EMULATE
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it
#else
	jra	_C_LABEL(illinst)
#endif
301
302/*
303 * Handles all other FP coprocessor exceptions.
304 * Note that since some FP exceptions generate mid-instruction frames
305 * and may cause signal delivery, we need to test for stack adjustment
306 * after the trap call.
307 */
/*
 * FP coprocessor fault: save the FP state frame into the current
 * pcb, push the FPSR as the code argument, and raise T_FPERR via
 * faultstkadj (which handles any mid-instruction stack adjustment).
 */
ENTRY_NOPROFILE(fpfault)
	clrl	%sp@-		| stack adjust count
	moveml	#0xFFFF,%sp@-	| save user registers
	movl	%usp,%a0	| and save
	movl	%a0,%sp@(FR_SP)	|   the user stack pointer
	clrl	%sp@-		| no VA arg
	movl	_C_LABEL(curpcb),%a0 | current pcb
	lea	%a0@(PCB_FPCTX),%a0 | address of FP savearea
	fsave	%a0@		| save state
#if defined(M68040) || defined(M68060)
	/* always null state frame on 68040, 68060 */
	cmpl	#FPU_68040,_C_LABEL(fputype)
	jge	Lfptnull
#endif
	tstb	%a0@		| null state frame?
	jeq	Lfptnull	| yes, safe
	clrw	%d0		| no, need to tweak BIU
	movb	%a0@(1),%d0	| get frame size
	bset	#3,%a0@(0,%d0:w) | set exc_pend bit of BIU
Lfptnull:
	fmovem	%fpsr,%sp@-	| push fpsr as code argument
	frestore %a0@		| restore state
	movl	#T_FPERR,%sp@-	| push type arg
	jra	_ASM_LABEL(faultstkadj)	| call trap and deal with stack cleanup
332
333/*
334 * Other exceptions only cause four and six word stack frame and require
335 * no post-trap stack adjustment.
336 */
337
/*
 * Default handler for unexpected vectors: report the PC and vector
 * offset via straytrap(), then return from the exception.
 */
ENTRY_NOPROFILE(badtrap)
	moveml	#0xC0C0,%sp@-		| save scratch regs (d0-d1/a0-a1)
	movw	%sp@(22),%sp@-		| push exception vector info
	clrw	%sp@-			|   padded to longword
	movl	%sp@(22),%sp@-		| and PC
	jbsr	_C_LABEL(straytrap)	| report
	addql	#8,%sp			| pop args
	moveml	%sp@+,#0x0303		| restore regs
	jra	_ASM_LABEL(rei)		| all done
347
/*
 * System call trap (trap #0); syscall number in d0.
 * Saves the full register state, calls syscall(), then checks for
 * pending ASTs and software interrupts before returning to user
 * mode (jumping into the rei code at Lrei2/Lsir1, which expect the
 * registers already saved).
 */
ENTRY_NOPROFILE(trap0)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|   in the savearea
	movl	%d0,%sp@-		| push syscall number
	jbsr	_C_LABEL(syscall)	| handle it
	addql	#4,%sp			| pop syscall arg
	tstl	_C_LABEL(astpending)	| AST pending?
	jne	Lrei2			| yes, handle it in the rei code
	tstb	_C_LABEL(ssir)		| software interrupt pending?
	jeq	Ltrap1			| no, return directly
	movw	#SPL1,%sr		| block softint dispatch while we check
	tstb	_C_LABEL(ssir)		| still pending?
	jne	Lsir1			| yes, go service it
Ltrap1:
	movl	%sp@(FR_SP),%a0		| grab and restore
	movl	%a0,%usp		|   user SP
	moveml	%sp@+,#0x7FFF		| restore most registers
	addql	#8,%sp			| pop SP and stack adjust
	rte
369
370/*
371 * Trap 12 is the entry point for the cachectl "syscall" (both HPUX & BSD)
372 *	cachectl(command, addr, length)
373 * command in d0, addr in a1, length in d1
374 */
ENTRY_NOPROFILE(trap12)
	movl	_C_LABEL(curlwp),%a0
	movl	%a0@(L_PROC),%sp@-	| push curproc pointer
	movl	%d1,%sp@-		| push length
	movl	%a1,%sp@-		| push addr
	movl	%d0,%sp@-		| push command
	jbsr	_C_LABEL(cachectl1)	| cachectl1(cmd, addr, len, p)
	lea	%sp@(16),%sp		| pop args
	jra	_ASM_LABEL(rei)		| all done
384
385/*
386 * Trace (single-step) trap.  Kernel-mode is special.
387 * User mode traps are simply passed on to trap().
388 */
ENTRY_NOPROFILE(trace)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save all registers
	moveq	#T_TRACE,%d0		| trap type for fault()/Lkbrkpt

	| Check PSW and see what happened.
	|   T=0 S=0	(should not happen)
	|   T=1 S=0	trace trap from user mode
	|   T=0 S=1	trace trap on a trap instruction
	|   T=1 S=1	trace trap from system mode (kernel breakpoint)

	movw	%sp@(FR_HW),%d1		| get PSW
	notw	%d1			| XXX no support for T0 on 680[234]0
	andw	#PSL_TS,%d1		| from system mode (T=1, S=1)?
	jeq	Lkbrkpt			| yes, kernel breakpoint
	jra	_ASM_LABEL(fault)	| no, user-mode fault
405
406/*
407 * Trap 15 is used for:
408 *	- GDB breakpoints (in user programs)
409 *	- KGDB breakpoints (in the kernel)
410 *	- trace traps for SUN binaries (not fully supported yet)
411 * User mode traps are simply passed to trap().
412 */
ENTRY_NOPROFILE(trap15)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save all registers
	moveq	#T_TRAP15,%d0		| trap type for fault()/Lkbrkpt
	movw	%sp@(FR_HW),%d1		| get PSW
	andw	#PSL_S,%d1		| from system mode?
	jne	Lkbrkpt			| yes, kernel breakpoint
	jra	_ASM_LABEL(fault)	| no, user-mode fault
421
Lkbrkpt: | Kernel-mode breakpoint or trace trap. (d0=trap_type)
	| Save the system sp rather than the user sp.
	movw	#PSL_HIGHIPL,%sr	| lock out interrupts
	lea	%sp@(FR_SIZE),%a6	| Save stack pointer
	movl	%a6,%sp@(FR_SP)		|  from before trap

	| If we are not on tmpstk switch to it.
	| (so debugger can change the stack pointer)
	movl	%a6,%d1
	cmpl	#_ASM_LABEL(tmpstk),%d1
	jls	Lbrkpt2			| already on tmpstk
	| Copy frame to the temporary stack
	movl	%sp,%a0			| a0=src
	lea	_ASM_LABEL(tmpstk)-96,%a1 | a1=dst
	movl	%a1,%sp			| sp=new frame
	moveq	#FR_SIZE,%d1		| d1 = bytes remaining to copy
Lbrkpt1:
	movl	%a0@+,%a1@+
	subql	#4,%d1
	jgt	Lbrkpt1

Lbrkpt2:
	| Call the trap handler for the kernel debugger.
	| Do not call trap() to do it, so that we can
	| set breakpoints in trap() if we want.  We know
	| the trap type is either T_TRACE or T_BREAKPOINT.
	| If we have both DDB and KGDB, let KGDB see it first,
	| because KGDB will just return 0 if not connected.
	| Save args in d2, a2
	movl	%d0,%d2			| trap type
	movl	%sp,%a2			| frame ptr
#ifdef KGDB
	| Let KGDB handle it (if connected)
	movl	%a2,%sp@-		| push frame ptr
	movl	%d2,%sp@-		| push trap type
	jbsr	_C_LABEL(kgdb_trap)	| handle the trap
	addql	#8,%sp			| pop args
	cmpl	#0,%d0			| did kgdb handle it?
	jne	Lbrkpt3			| yes, done
#endif
#ifdef DDB
	| Let DDB handle it
	movl	%a2,%sp@-		| push frame ptr
	movl	%d2,%sp@-		| push trap type
	jbsr	_C_LABEL(kdb_trap)	| handle the trap
	addql	#8,%sp			| pop args
#if 0	/* not needed on hp300 */
	cmpl	#0,%d0			| did ddb handle it?
	jne	Lbrkpt3			| yes, done
#endif
#endif
	/* Sun 3 drops into PROM here. */
Lbrkpt3:
	| The stack pointer may have been modified, or
	| data below it modified (by kgdb push call),
	| so push the hardware frame at the current sp
	| before restoring registers and returning.

	movl	%sp@(FR_SP),%a0		| modified sp
	lea	%sp@(FR_SIZE),%a1	| end of our frame
	movl	%a1@-,%a0@-		| copy 2 longs with
	movl	%a1@-,%a0@-		| ... predecrement
	movl	%a0,%sp@(FR_SP)		| sp = h/w frame
	moveml	%sp@+,#0x7FFF		| restore all but sp
	movl	%sp@,%sp		| ... and sp
	rte				| all done
488
489/* Use common m68k sigreturn */
490#include <m68k/m68k/sigreturn.s>
491
492/*
493 * Provide a generic interrupt dispatcher, only handle hardclock (int6)
494 * specially, to improve performance
495 */
496
/*
 * Spurious interrupt (level 0): just count it and return.
 */
ENTRY_NOPROFILE(spurintr)	/* level 0 */
	addql	#1,_C_LABEL(intrcnt)+0
	INTERRUPT_SAVEREG
	CPUINFO_INCREMENT(CI_NINTR)
	INTERRUPT_RESTOREREG
	rte				| XXX mfpcure (x680x0 hardware bug)

/*
 * Keyboard timer interrupt: nothing to do, dismiss it.
 */
ENTRY_NOPROFILE(kbdtimer)
	rte
506
/*
 * Serial (com) unit 0 interrupt: call comintr(0) if any com devices
 * are configured; count the interrupt either way.
 */
ENTRY_NOPROFILE(com0trap)
#include "com.h"
	INTERRUPT_SAVEREG
#if NXCOM > 0
	addql	#1,_C_LABEL(idepth)
	movel	#0,%sp@-		| unit number 0
	jbsr	_C_LABEL(comintr)	| comintr(0)
	addql	#4,%sp			| pop arg
	subql	#1,_C_LABEL(idepth)
#endif
	CPUINFO_INCREMENT(CI_NINTR)
	INTERRUPT_RESTOREREG
	addql	#1,_C_LABEL(intrcnt)+36	| count com interrupts
	jra	_ASM_LABEL(rei)		| all done (use _ASM_LABEL consistently)
521
/*
 * Serial (com) unit 1 interrupt: call comintr(1) if a second com
 * device is configured; count the interrupt either way.
 */
ENTRY_NOPROFILE(com1trap)
	INTERRUPT_SAVEREG
#if NXCOM > 1
	addql	#1,_C_LABEL(idepth)
	movel	#1,%sp@-		| unit number 1
	jbsr	_C_LABEL(comintr)	| comintr(1)
	addql	#4,%sp			| pop arg
	subql	#1,_C_LABEL(idepth)
#endif
	CPUINFO_INCREMENT(CI_NINTR)
	INTERRUPT_RESTOREREG
	addql	#1,_C_LABEL(intrcnt)+36	| count com interrupts
	jra	_ASM_LABEL(rei)		| all done (use _ASM_LABEL consistently)
535
/*
 * Internal I/O (intio) interrupt: pass a pointer to the hardware
 * exception frame to intio_intr().
 */
ENTRY_NOPROFILE(intiotrap)
	addql	#1,_C_LABEL(idepth)
	INTERRUPT_SAVEREG
	pea	%sp@(16-(FR_HW))	| XXX frame pointer for intio_intr
	jbsr	_C_LABEL(intio_intr)	| intio_intr(frame)
	addql	#4,%sp			| pop arg
	CPUINFO_INCREMENT(CI_NINTR)
	INTERRUPT_RESTOREREG
	subql	#1,_C_LABEL(idepth)
	jra	_ASM_LABEL(rei)		| all done (use _ASM_LABEL consistently)
546
/*
 * Generic auto-vectored interrupt levels 1-6: bump the per-vector
 * counter (indexed by vector offset) and call intrhand() with the
 * current SR pushed as its argument.
 */
ENTRY_NOPROFILE(lev1intr)
ENTRY_NOPROFILE(lev2intr)
ENTRY_NOPROFILE(lev3intr)
ENTRY_NOPROFILE(lev4intr)
ENTRY_NOPROFILE(lev5intr)
ENTRY_NOPROFILE(lev6intr)
	addql	#1,_C_LABEL(idepth)
	INTERRUPT_SAVEREG
Lnotdma:
	lea	_C_LABEL(intrcnt),%a0
	movw	%sp@(22),%d0		| use vector offset
	andw	#0xfff,%d0		|   sans frame type
	addql	#1,%a0@(-0x60,%d0:w)	|     to increment apropos counter
	movw	%sr,%sp@-		| push current SR value
	clrw	%sp@-			|    padded to longword
	jbsr	_C_LABEL(intrhand)	| handle interrupt
	addql	#4,%sp			| pop SR
	CPUINFO_INCREMENT(CI_NINTR)
	INTERRUPT_RESTOREREG
	subql	#1,_C_LABEL(idepth)
	jra	_ASM_LABEL(rei)
568
/*
 * Timer interrupt: count a hardclock tick and call hardclock() with
 * a pointer to the hardware frame as the clockframe argument.
 */
ENTRY_NOPROFILE(timertrap)
	addql	#1,_C_LABEL(idepth)
	INTERRUPT_SAVEREG		| save scratch registers
	addql	#1,_C_LABEL(intrcnt)+32	| count hardclock interrupts
	lea	%sp@(16),%a1		| a1 = &clockframe
	movl	%a1,%sp@-
	jbsr	_C_LABEL(hardclock)	| hardclock(&frame)
	addql	#4,%sp
	CPUINFO_INCREMENT(CI_NINTR)	| chalk up another interrupt
	INTERRUPT_RESTOREREG		| restore scratch registers
	subql	#1,_C_LABEL(idepth)
	jra	_ASM_LABEL(rei)		| all done
581
/*
 * Level 7 (non-maskable) interrupt: save the full register state,
 * call nmihand(), then restore and return via rei.
 */
ENTRY_NOPROFILE(lev7intr)
	addql	#1,_C_LABEL(idepth)
	addql	#1,_C_LABEL(intrcnt)+28
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	movl	%usp,%a0		| and save
	movl	%a0,%sp@(FR_SP)		|   the user stack pointer
	jbsr	_C_LABEL(nmihand)	| call handler
	movl	%sp@(FR_SP),%a0		| restore
	movl	%a0,%usp		|   user SP
	moveml	%sp@+,#0x7FFF		| and remaining registers
	addql	#8,%sp			| pop SP and stack adjust
	subql	#1,_C_LABEL(idepth)
	jra	_ASM_LABEL(rei)		| all done
596
597/*
598 * floppy ejection trap
599 */
600
ENTRY_NOPROFILE(fdeject)
	jra	_ASM_LABEL(rei)		| nothing to do here, just return
603
604/*
605 * Emulation of VAX REI instruction.
606 *
607 * This code deals with checking for and servicing ASTs
608 * (profiling, scheduling) and software interrupts (network, softclock).
609 * We check for ASTs first, just like the VAX.  To avoid excess overhead
610 * the T_ASTFLT handling code will also check for software interrupts so we
 * do not have to do it here.  After identifying that we need an AST we
612 * drop the IPL to allow device interrupts.
613 *
614 * This code is complicated by the fact that sendsig may have been called
615 * necessitating a stack cleanup.
616 */
ASENTRY_NOPROFILE(rei)
	tstl	_C_LABEL(astpending)	| AST pending?
	jeq	Lchksir			| no, go check for SIR
Lrei1:
	btst	#5,%sp@			| yes, are we returning to user mode?
	jne	Lchksir			| no, go check for SIR
	movw	#PSL_LOWIPL,%sr		| lower SPL
	clrl	%sp@-			| stack adjust
	moveml	#0xFFFF,%sp@-		| save all registers
	movl	%usp,%a1		| including
	movl	%a1,%sp@(FR_SP)		|    the users SP
Lrei2:					| (also entered from trap0, regs saved)
	clrl	%sp@-			| VA == none
	clrl	%sp@-			| code == none
	movl	#T_ASTFLT,%sp@-		| type == async system trap
	pea	%sp@(12)		| fp = trap frame address
	jbsr	_C_LABEL(trap)		| go handle it
	lea	%sp@(16),%sp		| pop value args
	movl	%sp@(FR_SP),%a0		| restore user SP
	movl	%a0,%usp		|   from save area
	movw	%sp@(FR_ADJ),%d0	| need to adjust stack?
	jne	Laststkadj		| yes, go to it
	moveml	%sp@+,#0x7FFF		| no, restore most user regs
	addql	#8,%sp			| toss SP and stack adjust
	rte				| and do real RTE
Laststkadj:
	lea	%sp@(FR_HW),%a1		| pointer to HW frame
	addql	#8,%a1			| source pointer
	movl	%a1,%a0			| source
	addw	%d0,%a0			|  + hole size = dest pointer
	movl	%a1@-,%a0@-		| copy
	movl	%a1@-,%a0@-		|  8 bytes
	movl	%a0,%sp@(FR_SP)		| new SSP
	moveml	%sp@+,#0x7FFF		| restore user registers
	movl	%sp@,%sp		| and our SP
	rte				| and do real RTE
Lchksir:
	tstb	_C_LABEL(ssir)		| SIR pending?
	jeq	Ldorte			| no, all done
	movl	%d0,%sp@-		| need a scratch register
	movw	%sp@(4),%d0		| get SR
	andw	#PSL_IPL7,%d0		| mask all but IPL
	jne	Lnosir			| came from interrupt, no can do
	movl	%sp@+,%d0		| restore scratch register
Lgotsir:
	movw	#SPL1,%sr		| prevent others from servicing int
	tstb	_C_LABEL(ssir)		| too late?
	jeq	Ldorte			| yes, oh well...
	clrl	%sp@-			| stack adjust
	moveml	#0xFFFF,%sp@-		| save all registers
	movl	%usp,%a1		| including
	movl	%a1,%sp@(FR_SP)		|    the users SP
Lsir1:					| (also entered from trap0, regs saved)
	clrl	%sp@-			| VA == none
	clrl	%sp@-			| code == none
	movl	#T_SSIR,%sp@-		| type == software interrupt
	pea	%sp@(12)		| fp = trap frame address
	jbsr	_C_LABEL(trap)		| go handle it
	lea	%sp@(16),%sp		| pop value args
	movl	%sp@(FR_SP),%a0		| restore
	movl	%a0,%usp		|   user SP
	moveml	%sp@+,#0x7FFF		| and all remaining registers
	addql	#8,%sp			| pop SP and stack adjust
	rte
Lnosir:
	movl	%sp@+,%d0		| restore scratch register
Ldorte:
	rte				| real return
685
686/*
687 * Macro to relocate a symbol, used before MMU is enabled.
688 */
689#define	_RELOC(var, ar)	\
690	lea	var,ar;	\
691	addl	%a5,ar
692
693#define	RELOC(var, ar)		_RELOC(_C_LABEL(var), ar)
694#define	ASRELOC(var, ar)	_RELOC(_ASM_LABEL(var), ar)
695
696/*
697 * Initialization
698 *
699 * A4 contains the address of the end of the symtab
700 * A5 contains physical load point from boot
701 * VBR contains zero from ROM.  Exceptions will continue to vector
702 * through ROM until MMU is turned on at which time they will vector
703 * through our table (vectors.s).
704 */
BSS(lowram,4)				| start of physical memory (set below)
BSS(esym,4)				| end of kernel symbol table (set below)

GLOBAL(_verspad)
	.word	0			| pad word (presumably keeps
					|   boot_version aligned -- confirm)
GLOBAL(boot_version)
	.word	X68K_BOOTIF_VERS	| boot interface version number
712
/*
 * Kernel entry point from the boot loader.  Runs with the MMU off,
 * so all symbol references go through RELOC()/ASRELOC() to add the
 * physical load offset (a5).  Per the comment above: a4 = end of
 * symtab, a5 = physical load point; firstpa/fphysize/esym are also
 * on the boot stack.
 */
ASENTRY_NOPROFILE(start)
	movw	#PSL_HIGHIPL,%sr	| no interrupts

	addql	#4,%sp			| toss word pushed by boot loader
	movel	%sp@+,%a5		| firstpa
	movel	%sp@+,%d5		| fphysize -- last page
	movel	%sp@,%a4		| esym

	RELOC(vectab,%a0)		| set Vector Base Register temporarily
	movc	%a0,%vbr

#if 0	/* XXX this should be done by the boot loader */
	RELOC(edata, %a0)		| clear out BSS
	movl	#_C_LABEL(end)-4,%d0	| (must be <= 256 kB)
	subl	#_C_LABEL(edata),%d0
	lsrl	#2,%d0
1:	clrl	%a0@+
	dbra	%d0,1b
#endif

	ASRELOC(tmpstk, %a0)
	movl	%a0,%sp			| give ourselves a temporary stack
	RELOC(esym, %a0)
#if 1
	movl	%a4,%a0@		| store end of symbol table
#else
	clrl	%a0@			| no symbol table, yet
#endif
	RELOC(lowram, %a0)
	movl	%a5,%a0@		| store start of physical memory

	movl	#CACHE_OFF,%d0
	movc	%d0,%cacr		| clear and disable on-chip cache(s)

/* determine our CPU/MMU combo - check for all regardless of kernel config */
	movl	#0x200,%d0		| data freeze bit
	movc	%d0,%cacr		|   only exists on 68030
	movc	%cacr,%d0		| read it back
	tstl	%d0			| zero?
	jeq	Lnot68030		| yes, we have 68020/68040/68060
	jra	Lstart1			| no, we have 68030
Lnot68030:
	bset	#31,%d0			| data cache enable bit
	movc	%d0,%cacr		|   only exists on 68040/68060
	movc	%cacr,%d0		| read it back
	tstl	%d0			| zero?
	jeq	Lis68020		| yes, we have 68020
	moveq	#0,%d0			| now turn it back off
	movec	%d0,%cacr		|   before we access any data
	.word	0xf4d8			| cinva bc - invalidate caches XXX
	bset	#30,%d0			| data cache no allocate mode bit
	movc	%d0,%cacr		|   only exists on 68060
	movc	%cacr,%d0		| read it back
	tstl	%d0			| zero?
	jeq	Lis68040		| yes, we have 68040
	RELOC(mmutype, %a0)		| no, we have 68060
	movl	#MMU_68040,%a0@		| with a 68040 compatible MMU
	RELOC(cputype, %a0)
	movl	#CPU_68060,%a0@		| and a 68060 CPU
	jra	Lstart1
Lis68040:
	RELOC(mmutype, %a0)
	movl	#MMU_68040,%a0@		| with a 68040 MMU
	RELOC(cputype, %a0)
	movl	#CPU_68040,%a0@		| and a 68040 CPU
	jra	Lstart1
Lis68020:
	RELOC(mmutype, %a0)
	movl	#MMU_68851,%a0@		| we have PMMU
	RELOC(cputype, %a0)
	movl	#CPU_68020,%a0@		| and a 68020 CPU

Lstart1:
/* initialize source/destination control registers for movs */
	moveq	#FC_USERD,%d0		| user space
	movc	%d0,%sfc		|   as source
	movc	%d0,%dfc		|   and destination of transfers
/* initialize memory sizes (for pmap_bootstrap) */
	movl	%d5,%d1			| last page
	moveq	#PGSHIFT,%d2
	lsrl	%d2,%d1			| convert to page (click) number
	RELOC(maxmem, %a0)
	movl	%d1,%a0@		| save as maxmem
	movl	%a5,%d0			| lowram value from ROM via boot
	lsrl	%d2,%d0			| convert to page number
	subl	%d0,%d1			| compute amount of RAM present
	RELOC(physmem, %a0)
	movl	%d1,%a0@		| and physmem
/* configure kernel and lwp0 VA space so we can get going */
#if NKSYMS || defined(DDB) || defined(LKM)
	RELOC(esym,%a0)			| end of static kernel text/data/syms
	movl	%a0@,%d5
	jne	Lstart2
#endif
	movl	#_C_LABEL(end),%d5	| end of static kernel text/data
Lstart2:
	addl	#PAGE_SIZE-1,%d5
	andl	#PG_FRAME,%d5		| round to a page
	movl	%d5,%a4
	addl	%a5,%a4			| convert to PA
	pea	%a5@			| firstpa
	pea	%a4@			| nextpa
	RELOC(pmap_bootstrap,%a0)
	jbsr	%a0@			| pmap_bootstrap(firstpa, nextpa)
	addql	#8,%sp
818
819/*
820 * Prepare to enable MMU.
 * Since the kernel is not mapped logical == physical we must ensure
 * that when the MMU is turned on, all prefetched addresses (including
 * the PC) are valid.  To guarantee that, we use the last physical
824 * page (which is conveniently mapped == VA) and load it up with enough
825 * code to defeat the prefetch, then we execute the jump back to here.
826 *
827 * Is this all really necessary, or am I paranoid??
828 */
/*
 * Turn on address translation.  The 68040/060 path loads srp and tc
 * directly (opcodes hand-assembled because older assemblers lack
 * them); the 020/030 path builds a root pointer in protorp and loads
 * it and the TC via pmove.
 */
	RELOC(Sysseg_pa, %a0)		| system segment table addr
	movl	%a0@,%d1		| read value (a PA)
	RELOC(mmutype, %a0)
	cmpl	#MMU_68040,%a0@		| 68040?
	jne	Lmotommu1		| no, skip
	.long	0x4e7b1807		| movc d1,srp
	jra	Lstploaddone
Lmotommu1:
	RELOC(protorp, %a0)
	movl	#0x80000202,%a0@	| nolimit + share global + 4 byte PTEs
	movl	%d1,%a0@(4)		| + segtable address
	pmove	%a0@,%srp		| load the supervisor root pointer
	movl	#0x80000002,%a0@	| reinit upper half for CRP loads
Lstploaddone:
	RELOC(mmutype, %a0)
	cmpl	#MMU_68040,%a0@		| 68040?
	jne	Lmotommu2		| no, skip
#include "opt_jupiter.h"
#ifdef JUPITER
	/* JUPITER-X: set system register "SUPER" bit */
	movl	#0x0200a240,%d0		| translate DRAM area transparently
	.long	0x4e7b0006		| movc d0,dtt0
	lea	0x00c00000,%a0		| a0: graphic VRAM
	lea	0x02c00000,%a1		| a1: graphic VRAM ( not JUPITER-X )
					|     DRAM ( JUPITER-X )
	movw	%a0@,%d0
	movw	%d0,%d1
	notw	%d1
	movw	%d1,%a1@
	movw	%d0,%a0@
	cmpw	%a1@,%d1		| JUPITER-X?
	jne	Ljupiterdone		| no, skip
	movl	#0x0100a240,%d0		| to access system register
	.long	0x4e7b0006		| movc d0,dtt0
	movb	#0x01,0x01800003	| set "SUPER" bit
Ljupiterdone:
#endif /* JUPITER */
	moveq	#0,%d0			| ensure TT regs are disabled
	.long	0x4e7b0004		| movc d0,itt0
	.long	0x4e7b0005		| movc d0,itt1
	.long	0x4e7b0006		| movc d0,dtt0
	.long	0x4e7b0007		| movc d0,dtt1
	.word	0xf4d8			| cinva bc
	.word	0xf518			| pflusha
#if PGSHIFT == 13
	movl	#0xc000,%d0		| enable translation, 8k pages
#else
	movl	#0x8000,%d0		| enable translation, 4k pages
#endif
	.long	0x4e7b0003		| movc d0,tc
#ifdef M68060
	RELOC(cputype, %a0)
	cmpl	#CPU_68060,%a0@		| 68060?
	jne	Lnot060cache
	movl	#1,%d0
	.long	0x4e7b0808		| movcl d0,pcr
	movl	#0xa0808000,%d0
	movc	%d0,%cacr		| enable store buffer, both caches
	jmp	Lenab1
Lnot060cache:
#endif
	movl	#0x80008000,%d0
	movc	%d0,%cacr		| turn on both caches
	jmp	Lenab1
Lmotommu2:
	pflusha
#if PGSHIFT == 13
	movl	#0x82d08b00,%sp@-	| value to load TC with
#else
	movl	#0x82c0aa00,%sp@-	| value to load TC with
#endif
	pmove	%sp@,%tc		| load it
901
902/*
903 * Should be running mapped from this point on
904 */
Lenab1:
/* set vector base in virtual address */
	movl	#_C_LABEL(vectab),%d0	| set Vector Base Register
	movc	%d0,%vbr
	lea	_ASM_LABEL(tmpstk),%sp	| temporary stack
/* call final pmap setup */
	jbsr	_C_LABEL(pmap_bootstrap_finalize)
/* set kernel stack, user SP */
	movl	_C_LABEL(lwp0uarea),%a1	| grab lwp0 uarea
	lea	%a1@(USPACE-4),%sp	| set kernel stack to end of area
	movl	#USRSTACK-4,%a2
	movl	%a2,%usp		| init user SP

/* detect FPU type */
	jbsr	_C_LABEL(fpu_probe)
	movl	%d0,_C_LABEL(fputype)
	tstl	_C_LABEL(fputype)	| Have an FPU?
	jeq	Lenab2			| No, skip.
	clrl	%a1@(PCB_FPCTX)		| ensure null FP context
	movl	%a1,%sp@-
	jbsr	_C_LABEL(m68881_restore) | restore it (does not kill a1)
	addql	#4,%sp
Lenab2:
	cmpl	#MMU_68040,_C_LABEL(mmutype)	| 68040?
	jeq	Ltbia040		| yes, cache already on
	pflusha
	tstl	_C_LABEL(mmutype)
	jpl	Lenab3			| 68851 implies no d-cache
	movl	#CACHE_ON,%d0
	movc	%d0,%cacr		| clear cache(s)
	jra	Lenab3
Ltbia040:
	.word	0xf518			| pflusha (68040 form)
Lenab3:
/* final setup for C code */
	movl	%d7,_C_LABEL(boothowto)	| save reboot flags
	movl	%d6,_C_LABEL(bootdev)	|   and boot device
	jbsr	_C_LABEL(x68k_init)	| additional pre-main initialization

/*
 * Create a fake exception frame so that cpu_lwp_fork() can copy it.
 * main() never returns; we exit to user mode from a forked process
 * later on.
 */
	clrw	%sp@-			| vector offset/frame type
	clrl	%sp@-			| PC - filled in by "execve"
	movw	#PSL_USER,%sp@-		| in user mode
	clrl	%sp@-			| stack adjust count and padding
	lea	%sp@(-64),%sp		| construct space for D0-D7/A0-A7
	lea	_C_LABEL(lwp0),%a0	| save pointer to frame
	movl	%sp,%a0@(L_MD_REGS)	|   in lwp0.p_md.md_regs

	jra	_C_LABEL(main)		| main()

	PANIC("main() returned")	| Yow!  Main returned!
	/* NOTREACHED */
961
962/*
963 * Use common m68k sigcode.
964 */
965#include <m68k/m68k/sigcode.s>
966#ifdef COMPAT_SUNOS
967#include <m68k/m68k/sunos_sigcode.s>
968#endif
969#ifdef COMPAT_SVR4
970#include <m68k/m68k/svr4_sigcode.s>
971#endif
972
973/*
974 * Primitives
975 */
976
977/*
978 * Use common m68k support routines.
979 */
980#include <m68k/m68k/support.s>
981
982/*
983 * Use common m68k process/lwp switch and context save subroutines.
984 */
985#define FPCOPROC	/* XXX: Temp. Reqd. */
986#include <m68k/m68k/switch_subr.s>
987
988
#if defined(M68040) || defined(M68060)
/*
 * suline(addr, line): copy one 16-byte line (four longwords, second
 * argument) to a user-space address (first argument) with movs
 * writes.  Returns 0 in d0 on success, -1 if a fault was taken
 * (caught via PCB_ONFAULT).
 */
ENTRY(suline)
	movl	%sp@(4),%a0		| address to write
	movl	_C_LABEL(curpcb),%a1	| current pcb
	movl	#Lslerr,%a1@(PCB_ONFAULT) | where to return to on a fault
	movl	%sp@(8),%a1		| address of line
	movl	%a1@+,%d0		| get lword
	movsl	%d0,%a0@+		| put lword
	nop				| sync
	movl	%a1@+,%d0		| get lword
	movsl	%d0,%a0@+		| put lword
	nop				| sync
	movl	%a1@+,%d0		| get lword
	movsl	%d0,%a0@+		| put lword
	nop				| sync
	movl	%a1@+,%d0		| get lword
	movsl	%d0,%a0@+		| put lword
	nop				| sync
	moveq	#0,%d0			| indicate no fault
	jra	Lsldone
Lslerr:
	moveq	#-1,%d0
Lsldone:
	movl	_C_LABEL(curpcb),%a1	| current pcb
	clrl	%a1@(PCB_ONFAULT)	| clear fault address
	rts
#endif
1016
/*
 * ecacheon() -- enable external cache.
 * No-op on this platform (no external cache to turn on); provided
 * because machine-independent code expects the entry point.
 */
ENTRY(ecacheon)
	rts
1019
/*
 * ecacheoff() -- disable external cache.
 * No-op on this platform (no external cache to turn off); provided
 * because machine-independent code expects the entry point.
 */
ENTRY(ecacheoff)
	rts
1022
1023/*
1024 * Load a new user segment table pointer.
1025 */
1026ENTRY(loadustp)
1027	movl	%sp@(4),%d0		| new USTP
1028	moveq	#PGSHIFT,%d1
1029	lsll	%d1,%d0			| convert to addr
1030#if defined(M68040) || defined(M68060)
1031	cmpl	#MMU_68040,_C_LABEL(mmutype) | 68040?
1032	jne	LmotommuC		| no, skip
1033	.word	0xf518			| pflusha
1034	.long	0x4e7b0806		| movc d0,urp
1035#ifdef M68060
1036	cmpl	#CPU_68060,_C_LABEL(cputype)
1037	jne	Lldno60
1038	movc	%cacr,%d0
1039	orl	#IC60_CUBC,%d0		| clear user branch cache entries
1040	movc	%d0,%cacr
1041Lldno60:
1042#endif
1043	rts
1044LmotommuC:
1045#endif
1046	pflusha				| flush entire TLB
1047	lea	_C_LABEL(protorp),%a0	| CRP prototype
1048	movl	%d0,%a0@(4)		| stash USTP
1049	pmove	%a0@,%crp		| load root pointer
1050	movl	#CACHE_CLR,%d0
1051	movc	%d0,%cacr		| invalidate cache(s)
1052	rts
1053
/*
 * ploadw(void *addr)
 *
 * Pre-load the MMU translation (TLB entry) for a write access to
 * the given address, via the 68030 "ploadw" instruction.  Skipped
 * on the 68040 (which has no PLOAD); a no-op when M68030 is not
 * configured.
 */
ENTRY(ploadw)
#if defined(M68030)
	movl	%sp@(4),%a0		| address to load
#if defined(M68040) || defined(M68060)
	cmpl	#MMU_68040,_C_LABEL(mmutype) | 68040?
	jeq	Lploadwskp		| yes, skip
#endif
	ploadw	#1,%a0@			| pre-load translation
Lploadwskp:
#endif
	rts
1065
1066/*
1067 * Set processor priority level calls.  Most are implemented with
1068 * inline asm expansions.  However, spl0 requires special handling
1069 * as we need to check for our emulated software interrupts.
1070 */
1071
/*
 * spl0() -- lower the interrupt priority level to 0.
 *
 * Returns the previous SR (zero-extended) in %d0.  If an emulated
 * software interrupt is pending (ssir non-zero), a fake RTE frame
 * is built on the stack and control enters the software-interrupt
 * handler at Lgotsir instead of returning directly; the handler's
 * eventual RTE returns to our caller.
 */
ENTRY(spl0)
	moveq	#0,%d0
	movw	%sr,%d0			| get old SR for return
	movw	#PSL_LOWIPL,%sr		| restore new SR
	tstb	_C_LABEL(ssir)		| software interrupt pending?
	jeq	Lspldone		| no, all done
	subql	#4,%sp			| make room for RTE frame
	movl	%sp@(4),%sp@(2)		| position return address
	clrw	%sp@(6)			| set frame type 0
	movw	#PSL_LOWIPL,%sp@	| and new SR
	jra	Lgotsir			| go handle it
Lspldone:
	rts
1085
1086/*
1087 * _delay(u_int N)
1088 *
1089 * Delay for at least (N/256) microseconds.
1090 * This routine depends on the variable:  delay_divisor
1091 * which should be set based on the CPU clock rate.
1092 */
ENTRY_NOPROFILE(_delay)
	| d0 = arg = (usecs << 8)
	movl	%sp@(4),%d0
	| d1 = delay_divisor (set from the CPU clock rate; see above)
	movl	_C_LABEL(delay_divisor),%d1
L_delay:
	subl	%d1,%d0			| burn one calibrated step
	jgt	L_delay			| loop until scaled count consumed
	rts
1102
1103/*
1104 * Handle the nitty-gritty of rebooting the machine.
1105 * Basically we just turn off the MMU and jump to the appropriate ROM routine.
1106 * Note that we must be running in an address range that is mapped one-to-one
1107 * logical to physical so that the PC is still valid immediately after the MMU
1108 * is turned off.  We have conveniently mapped the last page of physical
1109 * memory this way.
1110 */
ENTRY_NOPROFILE(doboot)
	movw	#PSL_HIGHIPL,%sr	| cut off any interrupts
	subal	%a1,%a1			| a1 = 0

	movl	#CACHE_OFF,%d0
#if defined(M68040) || defined(M68060)
	movl	_C_LABEL(mmutype),%d2	| d2 = mmutype
	addl	#(-1 * MMU_68040),%d2		| 68040? (leaves d2 == 0 iff so;
					| d2 is reused as a flag at Ldoreboot)
	jne	Ldoboot0		| no, skip
	.word	0xf4f8			| cpusha bc - push and invalidate caches
	nop				| sync pipeline after cpusha
	movl	#CACHE40_OFF,%d0
Ldoboot0:
#endif
	movc	%d0,%cacr		| disable on-chip cache(s)

	| ok, turn off MMU..
Ldoreboot:
#if defined(M68040) || defined(M68060)
	tstl	%d2			| 68040? (d2 cleared above iff so)
	jne	LmotommuF		| no, skip
	movc	%a1,%cacr		| caches off
	.long	0x4e7b9003		| movc a1(=0),tc ; disable MMU
	jra	Ldoreboot1
LmotommuF:
#endif
	clrl	%sp@			| build a zero TC value on the stack
	pmove	%sp@,%tc		| disable MMU
Ldoreboot1:
	moveml	0x00ff0000,#0x0101	| get RESET vectors in ROM
					|	(d0: ssp, a0: pc)
	moveml	#0x0101,%a1@		| put them at 0x0000 (for Xellent30)
	movc	%a1,%vbr		| reset Vector Base Register
	jmp	%a0@			| reboot X680x0
Lebootcode:
1146
1147/*
1148 * Misc. global variables.
1149 */
	.data
GLOBAL(machineid)
	.long	0		| default to X68030

GLOBAL(mmutype)
	.long	MMU_68030	| default to 030 internal MMU

GLOBAL(cputype)
	.long	CPU_68030	| default to 68030 CPU

#ifdef M68K_MMU_HP
GLOBAL(ectype)
	.long	EC_NONE		| external cache type, default to none
#endif

GLOBAL(fputype)
	.long	FPU_NONE	| default to no FPU; set from fpu_probe()
				|   result during startup

GLOBAL(protorp)
	.long	0,0		| prototype root pointer (CRP); slot 1
				|   gets the USTP in loadustp()

GLOBAL(intiobase)
	.long	0		| KVA of base of internal IO space

GLOBAL(intiolimit)
	.long	0		| KVA of end of internal IO space

#ifdef DEBUG
ASGLOBAL(fulltflush)
	.long	0		| debug: force full TLB flushes
ASGLOBAL(fullcflush)
	.long	0		| debug: force full cache flushes
#endif

/* interrupt counters */

| NOTE: intrnames and intrcnt must stay in sync: one NUL-terminated
| name below per counter slot in intrcnt (10 of each).
GLOBAL(intrnames)
	.asciz	"spur"
	.asciz	"lev1"
	.asciz	"lev2"
	.asciz	"lev3"
	.asciz	"lev4"
	.asciz	"lev5"
	.asciz	"lev6"
	.asciz	"nmi"
	.asciz	"clock"
	.asciz	"com"
GLOBAL(eintrnames)
	.even			| realign after odd-length strings

GLOBAL(intrcnt)
	.long	0,0,0,0,0,0,0,0,0,0	| one counter per name above
GLOBAL(eintrcnt)
1204