| locore.s revision 1.55
1/* $NetBSD: locore.s,v 1.55 2007/05/18 10:18:26 tsutsui Exp $ */ 2 3/* 4 * Copyright (c) 1980, 1990, 1993 5 * The Regents of the University of California. All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * the Systems Programming Group of the University of Utah Computer 9 * Science Department. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. Neither the name of the University nor the names of its contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 
34 * 35 * from: Utah $Hdr: locore.s 1.66 92/12/22$ 36 * @(#)locore.s 8.6 (Berkeley) 5/27/94 37 */ 38/* 39 * Copyright (c) 1988 University of Utah. 40 * 41 * This code is derived from software contributed to Berkeley by 42 * the Systems Programming Group of the University of Utah Computer 43 * Science Department. 44 * 45 * Redistribution and use in source and binary forms, with or without 46 * modification, are permitted provided that the following conditions 47 * are met: 48 * 1. Redistributions of source code must retain the above copyright 49 * notice, this list of conditions and the following disclaimer. 50 * 2. Redistributions in binary form must reproduce the above copyright 51 * notice, this list of conditions and the following disclaimer in the 52 * documentation and/or other materials provided with the distribution. 53 * 3. All advertising materials mentioning features or use of this software 54 * must display the following acknowledgement: 55 * This product includes software developed by the University of 56 * California, Berkeley and its contributors. 57 * 4. Neither the name of the University nor the names of its contributors 58 * may be used to endorse or promote products derived from this software 59 * without specific prior written permission. 60 * 61 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 62 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 63 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 64 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 65 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 66 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 67 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 68 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 69 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 70 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 71 * SUCH DAMAGE. 72 * 73 * from: Utah $Hdr: locore.s 1.66 92/12/22$ 74 * @(#)locore.s 8.6 (Berkeley) 5/27/94 75 */ 76 77#include "opt_compat_netbsd.h" 78#include "opt_compat_svr4.h" 79#include "opt_compat_sunos.h" 80#include "opt_kgdb.h" 81#include "opt_lockdebug.h" 82 83#include "assym.h" 84#include <machine/asm.h> 85#include <machine/trap.h> 86 87| Remember this is a fun project! 88 89 .data 90GLOBAL(mon_crp) 91 .long 0,0 92 93| This is for kvm_mkdb, and should be the address of the beginning 94| of the kernel text segment (not necessarily the same as kernbase). 95 .text 96GLOBAL(kernel_text) 97 98| This is the entry point, as well as the end of the temporary stack 99| used during process switch (one 8K page ending at start) 100ASGLOBAL(tmpstk) 101ASGLOBAL(start) 102 103| The first step, after disabling interrupts, is to map enough of the kernel 104| into high virtual address space so that we can use position dependent code. 105| This is a tricky task on the sun3x because the MMU is already enabled and 106| the ROM monitor provides no indication of where the root MMU table is mapped. 107| Therefore we must use one of the 68030's 'transparent translation' registers 108| to define a range in the address space where the MMU translation is 109| turned off. Once this is complete we can modify the MMU table directly 110| without the need for it to be mapped into virtual memory. 
111| All code must be position independent until otherwise noted, as the 112| boot loader has loaded us into low memory but all the symbols in this 113| code have been linked high. 114 movw #PSL_HIGHIPL,%sr | no interrupts 115 movl #KERNBASE,%a5 | for vtop conversion 116 lea _C_LABEL(mon_crp),%a0 | where to store the CRP 117 subl %a5,%a0 118 | Note: borrowing mon_crp for tt0 setup... 119 movl #0x3F8107,%a0@ | map the low 1GB v=p with the 120 .long 0xf0100800 | transparent translation reg0 121 | [ pmove a0@, tt0 ] 122| In order to map the kernel into high memory we will copy the root table 123| entry which maps the 16 megabytes of memory starting at 0x0 into the 124| entry which maps the 16 megabytes starting at KERNBASE. 125 pmove %crp,%a0@ | Get monitor CPU root pointer 126 movl %a0@(4),%a1 | 2nd word is PA of level A table 127 128 movl %a1,%a0 | compute the descriptor address 129 addl #0x3e0,%a1 | for VA starting at KERNBASE 130 movl %a0@,%a1@ | copy descriptor type 131 movl %a0@(4),%a1@(4) | copy physical address 132 133| Kernel is now double mapped at zero and KERNBASE. 134| Force a long jump to the relocated code (high VA). 135 movl #IC_CLEAR,%d0 | Flush the I-cache 136 movc %d0,%cacr 137 jmp L_high_code:l | long jump 138 139L_high_code: 140| We are now running in the correctly relocated kernel, so 141| we are no longer restricted to position-independent code. 142| It is handy to leave transparent translation enabled while 143| for the low 1GB while _bootstrap() is doing its thing. 144 145| Do bootstrap stuff needed before main() gets called. 146| Our boot loader leaves a copy of the kernel's exec header 147| just before the start of the kernel text segment, so the 148| kernel can sanity-check the DDB symbols at [end...esym]. 149| Pass the struct exec at tmpstk-32 to _bootstrap(). 150| Also, make sure the initial frame pointer is zero so that 151| the backtrace algorithm used by KGDB terminates nicely. 
152 lea _ASM_LABEL(tmpstk)-32,%sp 153 movl #0,%a6 154 jsr _C_LABEL(_bootstrap) | See locore2.c 155 156| Now turn off the transparent translation of the low 1GB. 157| (this also flushes the ATC) 158 clrl %sp@- 159 .long 0xf0170800 | pmove sp@,tt0 160 addql #4,%sp 161 162| Now that _bootstrap() is done using the PROM functions, 163| we can safely set the sfc/dfc to something != FC_CONTROL 164 moveq #FC_USERD,%d0 | make movs access "user data" 165 movc %d0,%sfc | space for copyin/copyout 166 movc %d0,%dfc 167 168| Setup process zero user/kernel stacks. 169 movl _C_LABEL(proc0paddr),%a1| get lwp0 pcb addr 170 lea %a1@(USPACE-4),%sp | set SSP to last word 171 movl #USRSTACK-4,%a2 172 movl %a2,%usp | init user SP 173 174| Note curpcb was already set in _bootstrap(). 175| Will do fpu initialization during autoconfig (see fpu.c) 176| The interrupt vector table and stack are now ready. 177| Interrupts will be enabled later, AFTER autoconfiguration 178| is finished, to avoid spurrious interrupts. 179 180/* 181 * Create a fake exception frame so that cpu_fork() can copy it. 182 * main() nevers returns; we exit to user mode from a forked process 183 * later on. 184 */ 185 clrw %sp@- | tf_format,tf_vector 186 clrl %sp@- | tf_pc (filled in later) 187 movw #PSL_USER,%sp@- | tf_sr for user mode 188 clrl %sp@- | tf_stackadj 189 lea %sp@(-64),%sp | tf_regs[16] 190 lea _C_LABEL(lwp0),%a0 | proc0.p_md.md_regs = 191 movl %a1,%a0@(L_MD_REGS) | trapframe 192 jbsr _C_LABEL(main) | main(&trapframe) 193 PANIC("main() returned") 194 195| That is all the assembly startup code we need on the sun3x! 196| The rest of this is like the hp300/locore.s where possible. 197 198/* 199 * Trap/interrupt vector routines 200 */ 201#include <m68k/m68k/trap_subr.s> 202 203GLOBAL(buserr) 204 tstl _C_LABEL(nofault) | device probe? 
205 jeq _C_LABEL(addrerr) | no, handle as usual 206 movl _C_LABEL(nofault),%sp@- | yes, 207 jbsr _C_LABEL(longjmp) | longjmp(nofault) 208GLOBAL(addrerr) 209 clrl %sp@- | stack adjust count 210 moveml #0xFFFF,%sp@- | save user registers 211 movl %usp,%a0 | save the user SP 212 movl %a0,%sp@(FR_SP) | in the savearea 213 lea %sp@(FR_HW),%a1 | grab base of HW berr frame 214 moveq #0,%d0 215 movw %a1@(10),%d0 | grab SSW for fault processing 216 btst #12,%d0 | RB set? 217 jeq LbeX0 | no, test RC 218 bset #14,%d0 | yes, must set FB 219 movw %d0,%a1@(10) | for hardware too 220LbeX0: 221 btst #13,%d0 | RC set? 222 jeq LbeX1 | no, skip 223 bset #15,%d0 | yes, must set FC 224 movw %d0,%a1@(10) | for hardware too 225LbeX1: 226 btst #8,%d0 | data fault? 227 jeq Lbe0 | no, check for hard cases 228 movl %a1@(16),%d1 | fault address is as given in frame 229 jra Lbe10 | thats it 230Lbe0: 231 btst #4,%a1@(6) | long (type B) stack frame? 232 jne Lbe4 | yes, go handle 233 movl %a1@(2),%d1 | no, can use save PC 234 btst #14,%d0 | FB set? 235 jeq Lbe3 | no, try FC 236 addql #4,%d1 | yes, adjust address 237 jra Lbe10 | done 238Lbe3: 239 btst #15,%d0 | FC set? 240 jeq Lbe10 | no, done 241 addql #2,%d1 | yes, adjust address 242 jra Lbe10 | done 243Lbe4: 244 movl %a1@(36),%d1 | long format, use stage B address 245 btst #15,%d0 | FC set? 246 jeq Lbe10 | no, all done 247 subql #2,%d1 | yes, adjust address 248Lbe10: 249 movl %d1,%sp@- | push fault VA 250 movl %d0,%sp@- | and padded SSW 251 movw %a1@(6),%d0 | get frame format/vector offset 252 andw #0x0FFF,%d0 | clear out frame format 253 cmpw #12,%d0 | address error vector? 254 jeq Lisaerr | yes, go to it 255 256/* MMU-specific code to determine reason for bus error. */ 257 movl %d1,%a0 | fault address 258 movl %sp@,%d0 | function code from ssw 259 btst #8,%d0 | data fault? 260 jne Lbe10a 261 movql #1,%d0 | user program access FC 262 | (we dont separate data/program) 263 btst #5,%a1@ | supervisor mode? 
264 jeq Lbe10a | if no, done 265 movql #5,%d0 | else supervisor program access 266Lbe10a: 267 ptestr %d0,%a0@,#7 | do a table search 268 pmove %psr,%sp@ | save result 269 movb %sp@,%d1 270 btst #2,%d1 | invalid? (incl. limit viol and berr) 271 jeq Lmightnotbemerr | no -> wp check 272 btst #7,%d1 | is it MMU table berr? 273 jeq Lismerr | no, must be fast 274 jra Lisberr1 | real bus err needs not be fast 275Lmightnotbemerr: 276 btst #3,%d1 | write protect bit set? 277 jeq Lisberr1 | no, must be bus error 278 movl %sp@,%d0 | ssw into low word of d0 279 andw #0xc0,%d0 | write protect is set on page: 280 cmpw #0x40,%d0 | was it read cycle? 281 jeq Lisberr1 | yes, was not WPE, must be bus err 282/* End of MMU-specific bus error code. */ 283 284Lismerr: 285 movl #T_MMUFLT,%sp@- | show that we are an MMU fault 286 jra _ASM_LABEL(faultstkadj) | and deal with it 287Lisaerr: 288 movl #T_ADDRERR,%sp@- | mark address error 289 jra _ASM_LABEL(faultstkadj) | and deal with it 290Lisberr1: 291 clrw %sp@ | re-clear pad word 292Lisberr: 293 movl #T_BUSERR,%sp@- | mark bus error 294 jra _ASM_LABEL(faultstkadj) | and deal with it 295 296/* 297 * FP exceptions. 298 */ 299GLOBAL(fpfline) 300 clrl %sp@- | stack adjust count 301 moveml #0xFFFF,%sp@- | save registers 302 moveq #T_FPEMULI,%d0 | denote as FP emulation trap 303 jra _ASM_LABEL(fault) | do it 304 305GLOBAL(fpunsupp) 306 clrl %sp@- | stack adjust count 307 moveml #0xFFFF,%sp@- | save registers 308 moveq #T_FPEMULD,%d0 | denote as FP emulation trap 309 jra _ASM_LABEL(fault) | do it 310 311/* 312 * Handles all other FP coprocessor exceptions. 313 * Note that since some FP exceptions generate mid-instruction frames 314 * and may cause signal delivery, we need to test for stack adjustment 315 * after the trap call. 
316 */ 317GLOBAL(fpfault) 318 clrl %sp@- | stack adjust count 319 moveml #0xFFFF,%sp@- | save user registers 320 movl %usp,%a0 | and save 321 movl %a0,%sp@(FR_SP) | the user stack pointer 322 clrl %sp@- | no VA arg 323 movl _C_LABEL(curpcb),%a0 | current pcb 324 lea %a0@(PCB_FPCTX),%a0 | address of FP savearea 325 fsave %a0@ | save state 326 tstb %a0@ | null state frame? 327 jeq Lfptnull | yes, safe 328 clrw %d0 | no, need to tweak BIU 329 movb %a0@(1),%d0 | get frame size 330 bset #3,%a0@(0,%d0:w) | set exc_pend bit of BIU 331Lfptnull: 332 fmovem %fpsr,%sp@- | push fpsr as code argument 333 frestore %a0@ | restore state 334 movl #T_FPERR,%sp@- | push type arg 335 jra _ASM_LABEL(faultstkadj) | call trap and deal with stack cleanup 336 337/* 338 * Other exceptions only cause four and six word stack frame and require 339 * no post-trap stack adjustment. 340 */ 341GLOBAL(badtrap) 342 clrl %sp@- | stack adjust count 343 moveml #0xFFFF,%sp@- | save std frame regs 344 jbsr _C_LABEL(straytrap) | report 345 moveml %sp@+,#0xFFFF | restore regs 346 addql #4,%sp | stack adjust count 347 jra _ASM_LABEL(rei) | all done 348 349/* 350 * Trap 0 is for system calls 351 */ 352GLOBAL(trap0) 353 clrl %sp@- | stack adjust count 354 moveml #0xFFFF,%sp@- | save user registers 355 movl %usp,%a0 | save the user SP 356 movl %a0,%sp@(FR_SP) | in the savearea 357 movl %d0,%sp@- | push syscall number 358 jbsr _C_LABEL(syscall) | handle it 359 addql #4,%sp | pop syscall arg 360 movl %sp@(FR_SP),%a0 | grab and restore 361 movl %a0,%usp | user SP 362 moveml %sp@+,#0x7FFF | restore most registers 363 addql #8,%sp | pop SP and stack adjust 364 jra _ASM_LABEL(rei) | all done 365 366/* 367 * Trap 12 is the entry point for the cachectl "syscall" 368 * cachectl(command, addr, length) 369 * command in d0, addr in a1, length in d1 370 */ 371GLOBAL(trap12) 372 movl _C_LABEL(curlwp),%a0 373 movl %a0@(L_PROC),%sp@- | push curproc pointer 374 movl %d1,%sp@- | push length 375 movl %a1,%sp@- | push addr 376 
movl %d0,%sp@- | push command 377 jbsr _C_LABEL(cachectl1) | do it 378 lea %sp@(16),%sp | pop args 379 jra _ASM_LABEL(rei) | all done 380 381/* 382 * Trace (single-step) trap. Kernel-mode is special. 383 * User mode traps are simply passed on to trap(). 384 */ 385GLOBAL(trace) 386 clrl %sp@- | stack adjust count 387 moveml #0xFFFF,%sp@- 388 moveq #T_TRACE,%d0 389 390 | Check PSW and see what happen. 391 | T=0 S=0 (should not happen) 392 | T=1 S=0 trace trap from user mode 393 | T=0 S=1 trace trap on a trap instruction 394 | T=1 S=1 trace trap from system mode (kernel breakpoint) 395 396 movw %sp@(FR_HW),%d1 | get PSW 397 notw %d1 | XXX no support for T0 on 680[234]0 398 andw #PSL_TS,%d1 | from system mode (T=1, S=1)? 399 jeq _ASM_LABEL(kbrkpt) | yes, kernel brkpt 400 jra _ASM_LABEL(fault) | no, user-mode fault 401 402/* 403 * Trap 15 is used for: 404 * - GDB breakpoints (in user programs) 405 * - KGDB breakpoints (in the kernel) 406 * - trace traps for SUN binaries (not fully supported yet) 407 * User mode traps are simply passed to trap(). 408 */ 409GLOBAL(trap15) 410 clrl %sp@- | stack adjust count 411 moveml #0xFFFF,%sp@- 412 moveq #T_TRAP15,%d0 413 btst #5,%sp@(FR_HW) | was supervisor mode? 414 jne _ASM_LABEL(kbrkpt) | yes, kernel brkpt 415 jra _ASM_LABEL(fault) | no, user-mode fault 416 417ASLOCAL(kbrkpt) 418 | Kernel-mode breakpoint or trace trap. (%d0=trap_type) 419 | Save the system sp rather than the user sp. 420 movw #PSL_HIGHIPL,%sr | lock out interrupts 421 lea %sp@(FR_SIZE),%a6 | Save stack pointer 422 movl %a6,%sp@(FR_SP) | from before trap 423 424 | If we are not on tmpstk switch to it. 
425 | (so debugger can change the stack pointer) 426 movl %a6,%d1 427 cmpl #_ASM_LABEL(tmpstk),%d1 428 jls Lbrkpt2 | already on tmpstk 429 | Copy frame to the temporary stack 430 movl %sp,%a0 | %a0=src 431 lea _ASM_LABEL(tmpstk)-96,%a1 | %a1=dst 432 movl %a1,%sp | sp=new frame 433 moveq #FR_SIZE,%d1 434Lbrkpt1: 435 movl %a0@+,%a1@+ 436 subql #4,%d1 437 bgt Lbrkpt1 438 439Lbrkpt2: 440 | Call the trap handler for the kernel debugger. 441 | Do not call trap() to handle it, so that we can 442 | set breakpoints in trap() if we want. We know 443 | the trap type is either T_TRACE or T_BREAKPOINT. 444 movl %d0,%sp@- | push trap type 445 jbsr _C_LABEL(trap_kdebug) 446 addql #4,%sp | pop args 447 448 | The stack pointer may have been modified, or 449 | data below it modified (by kgdb push call), 450 | so push the hardware frame at the current sp 451 | before restoring registers and returning. 452 movl %sp@(FR_SP),%a0 | modified sp 453 lea %sp@(FR_SIZE),%a1 | end of our frame 454 movl %a1@-,%a0@- | copy 2 longs with 455 movl %a1@-,%a0@- | ... predecrement 456 movl %a0,%sp@(FR_SP) | sp = h/w frame 457 moveml %sp@+,#0x7FFF | restore all but sp 458 movl %sp@,%sp | ... and sp 459 rte | all done 460 461/* Use common m68k sigreturn */ 462#include <m68k/m68k/sigreturn.s> 463 464/* 465 * Interrupt handlers. Most are auto-vectored, 466 * and hard-wired the same way on all sun3 models. 467 * Format in the stack is: 468 * %d0,%d1,%a0,%a1, sr, pc, vo 469 */ 470 471#define INTERRUPT_SAVEREG \ 472 moveml #0xC0C0,%sp@- 473 474#define INTERRUPT_RESTORE \ 475 moveml %sp@+,#0x0303 476 477/* 478 * This is the common auto-vector interrupt handler, 479 * for which the CPU provides the vector=0x18+level. 480 * These are installed in the interrupt vector table. 
481 */ 482#ifdef __ELF__ 483 .align 4 484#else 485 .align 2 486#endif 487GLOBAL(_isr_autovec) 488 INTERRUPT_SAVEREG 489 jbsr _C_LABEL(isr_autovec) 490 INTERRUPT_RESTORE 491 jra _ASM_LABEL(rei) 492 493/* clock: see clock.c */ 494#ifdef __ELF__ 495 .align 4 496#else 497 .align 2 498#endif 499GLOBAL(_isr_clock) 500 INTERRUPT_SAVEREG 501 jbsr _C_LABEL(clock_intr) 502 INTERRUPT_RESTORE 503 jra _ASM_LABEL(rei) 504 505| Handler for all vectored interrupts (i.e. VME interrupts) 506#ifdef __ELF__ 507 .align 4 508#else 509 .align 2 510#endif 511GLOBAL(_isr_vectored) 512 INTERRUPT_SAVEREG 513 jbsr _C_LABEL(isr_vectored) 514 INTERRUPT_RESTORE 515 jra _ASM_LABEL(rei) 516 517#undef INTERRUPT_SAVEREG 518#undef INTERRUPT_RESTORE 519 520/* interrupt counters (needed by vmstat) */ 521GLOBAL(intrnames) 522 .asciz "spur" | 0 523 .asciz "lev1" | 1 524 .asciz "lev2" | 2 525 .asciz "lev3" | 3 526 .asciz "lev4" | 4 527 .asciz "clock" | 5 528 .asciz "lev6" | 6 529 .asciz "nmi" | 7 530GLOBAL(eintrnames) 531 532 .data 533 .even 534GLOBAL(intrcnt) 535 .long 0,0,0,0,0,0,0,0,0,0 536GLOBAL(eintrcnt) 537 .text 538 539/* 540 * Emulation of VAX REI instruction. 541 * 542 * This code is (mostly) un-altered from the hp300 code, 543 * except that sun machines do not need a simulated SIR 544 * because they have a real software interrupt register. 545 * 546 * This code deals with checking for and servicing ASTs 547 * (profiling, scheduling) and software interrupts (network, softclock). 548 * We check for ASTs first, just like the VAX. To avoid excess overhead 549 * the T_ASTFLT handling code will also check for software interrupts so we 550 * do not have to do it here. After identifying that we need an AST we 551 * drop the IPL to allow device interrupts. 552 * 553 * This code is complicated by the fact that sendsig may have been called 554 * necessitating a stack cleanup. 555 */ 556 557ASGLOBAL(rei) 558#ifdef DIAGNOSTIC 559 tstl _C_LABEL(panicstr) | have we paniced? 
560 jne Ldorte | yes, do not make matters worse 561#endif 562 tstl _C_LABEL(astpending) | AST pending? 563 jeq Ldorte | no, done 564Lrei1: 565 btst #5,%sp@ | yes, are we returning to user mode? 566 jne Ldorte | no, done 567 movw #PSL_LOWIPL,%sr | lower SPL 568 clrl %sp@- | stack adjust 569 moveml #0xFFFF,%sp@- | save all registers 570 movl %usp,%a1 | including 571 movl %a1,%sp@(FR_SP) | the users SP 572 clrl %sp@- | VA == none 573 clrl %sp@- | code == none 574 movl #T_ASTFLT,%sp@- | type == async system trap 575 jbsr _C_LABEL(trap) | go handle it 576 lea %sp@(12),%sp | pop value args 577 movl %sp@(FR_SP),%a0 | restore user SP 578 movl %a0,%usp | from save area 579 movw %sp@(FR_ADJ),%d0 | need to adjust stack? 580 jne Laststkadj | yes, go to it 581 moveml %sp@+,#0x7FFF | no, restore most user regs 582 addql #8,%sp | toss SP and stack adjust 583 rte | and do real RTE 584Laststkadj: 585 lea %sp@(FR_HW),%a1 | pointer to HW frame 586 addql #8,%a1 | source pointer 587 movl %a1,%a0 | source 588 addw %d0,%a0 | + hole size = dest pointer 589 movl %a1@-,%a0@- | copy 590 movl %a1@-,%a0@- | 8 bytes 591 movl %a0,%sp@(FR_SP) | new SSP 592 moveml %sp@+,#0x7FFF | restore user registers 593 movl %sp@,%sp | and our SP 594Ldorte: 595 rte | real return 596 597/* 598 * Initialization is at the beginning of this file, because the 599 * kernel entry point needs to be at zero for compatibility with 600 * the Sun boot loader. This works on Sun machines because the 601 * interrupt vector table for reset is NOT at address zero. 602 * (The MMU has a "boot" bit that forces access to the PROM) 603 */ 604 605/* 606 * Use common m68k sigcode. 607 */ 608#include <m68k/m68k/sigcode.s> 609#ifdef COMPAT_SUNOS 610#include <m68k/m68k/sunos_sigcode.s> 611#endif 612#ifdef COMPAT_SVR4 613#include <m68k/m68k/svr4_sigcode.s> 614#endif 615 616 .text 617 618/* 619 * Primitives 620 */ 621 622/* 623 * Use common m68k support routines. 
624 */ 625#include <m68k/m68k/support.s> 626 627/* 628 * Use common m68k process/lwp switch and context save subroutines. 629 */ 630#define FPCOPROC /* XXX: Temp. Reqd. */ 631#include <m68k/m68k/switch_subr.s> 632 633 634/* suline() */ 635 636#ifdef DEBUG 637 .data 638ASGLOBAL(fulltflush) 639 .long 0 640ASGLOBAL(fullcflush) 641 .long 0 642 .text 643#endif 644 645ENTRY(ecacheon) 646 rts 647 648ENTRY(ecacheoff) 649 rts 650 651/* 652 * Get callers current SP value. 653 * Note that simply taking the address of a local variable in a C function 654 * doesn't work because callee saved registers may be outside the stack frame 655 * defined by A6 (e.g. GCC generated code). 656 * 657 * [I don't think the ENTRY() macro will do the right thing with this -- glass] 658 */ 659GLOBAL(getsp) 660 movl %sp,%d0 | get current SP 661 addql #4,%d0 | compensate for return address 662 movl %d0,%a0 663 rts 664 665ENTRY(getsfc) 666 movc %sfc,%d0 667 movl %d0,%a0 668 rts 669 670ENTRY(getdfc) 671 movc %dfc,%d0 672 movl %d0,%a0 673 rts 674 675ENTRY(getvbr) 676 movc %vbr,%d0 677 movl %d0,%a0 678 rts 679 680ENTRY(setvbr) 681 movl %sp@(4),%d0 682 movc %d0,%vbr 683 rts 684 685/* 686 * Load a new CPU Root Pointer (CRP) into the MMU. 687 * void loadcrp(struct mmu_rootptr *); 688 */ 689ENTRY(loadcrp) 690 movl %sp@(4),%a0 | arg1: &CRP 691 movl #CACHE_CLR,%d0 692 movc %d0,%cacr | invalidate cache(s) 693 pflusha | flush entire TLB 694 pmove %a0@,%crp | load new user root pointer 695 rts 696 697ENTRY(getcrp) 698 movl %sp@(4),%a0 | arg1: &crp 699 pmove %crp,%a0@ | *crpp = %crp 700 rts 701 702/* 703 * Get the physical address of the PTE for a given VA. 704 */ 705ENTRY(ptest_addr) 706 movl %sp@(4),%a1 | VA 707 ptestr #5,%a1@,#7,%a0 | %a0 = addr of PTE 708 movl %a0,%d0 | Result in %d0 (not a pointer return) 709 rts 710 711/* 712 * Set processor priority level calls. Most are implemented with 713 * inline asm expansions. 
However, we need one instantiation here 714 * in case some non-optimized code makes external references. 715 * Most places will use the inlined functions param.h supplies. 716 */ 717 718ENTRY(_getsr) 719 clrl %d0 720 movw %sr,%d0 721 movl %a1,%d0 722 rts 723 724ENTRY(_spl) 725 clrl %d0 726 movw %sr,%d0 727 movl %sp@(4),%d1 728 movw %d1,%sr 729 rts 730 731ENTRY(_splraise) 732 clrl %d0 733 movw %sr,%d0 734 movl %d0,%d1 735 andl #PSL_HIGHIPL,%d1 | old &= PSL_HIGHIPL 736 cmpl %sp@(4),%d1 | (old - new) 737 bge Lsplr 738 movl %sp@(4),%d1 739 movw %d1,%sr 740Lsplr: 741 rts 742 743/* 744 * Save and restore 68881 state. 745 */ 746ENTRY(m68881_save) 747 movl %sp@(4),%a0 | save area pointer 748 fsave %a0@ | save state 749 tstb %a0@ | null state frame? 750 jeq Lm68881sdone | yes, all done 751 fmovem %fp0-%fp7,%a0@(FPF_REGS) | save FP general regs 752 fmovem %fpcr/%fpsr/%fpi,%a0@(FPF_FPCR) | save FP control regs 753Lm68881sdone: 754 rts 755 756ENTRY(m68881_restore) 757 movl %sp@(4),%a0 | save area pointer 758 tstb %a0@ | null state frame? 759 jeq Lm68881rdone | yes, easy 760 fmovem %a0@(FPF_FPCR),%fpcr/%fpsr/%fpi | restore FP control regs 761 fmovem %a0@(FPF_REGS),%fp0-%fp7 | restore FP general regs 762Lm68881rdone: 763 frestore %a0@ | restore state 764 rts 765 766/* 767 * _delay(unsigned N) 768 * Delay for at least (N/256) microseconds. 769 * This routine depends on the variable: delay_divisor 770 * which should be set based on the CPU clock rate. 771 * XXX: Currently this is set based on the CPU model, 772 * XXX: but this should be determined at run time... 773 */ 774GLOBAL(_delay) 775 | %d0 = arg = (usecs << 8) 776 movl %sp@(4),%d0 777 | %d1 = delay_divisor; 778 movl _C_LABEL(delay_divisor),%d1 779 jra L_delay /* Jump into the loop! */ 780 781 /* 782 * Align the branch target of the loop to a half-line (8-byte) 783 * boundary to minimize cache effects. 
This guarantees both 784 * that there will be no prefetch stalls due to cache line burst 785 * operations and that the loop will run from a single cache 786 * half-line. 787 */ 788#ifdef __ELF__ 789 .align 8 790#else 791 .align 3 792#endif 793L_delay: 794 subl %d1,%d0 795 jgt L_delay 796 rts 797 798| Define some addresses, mostly so DDB can print useful info. 799| Not using _C_LABEL() here because these symbols are never 800| referenced by any C code, and if the leading underscore 801| ever goes away, these lines turn into syntax errors... 802 .set _KERNBASE,KERNBASE 803 .set _MONSTART,SUN3X_MONSTART 804 .set _PROM_BASE,SUN3X_PROM_BASE 805 .set _MONEND,SUN3X_MONEND 806 807|The end! 808