1/* arch/sparc/kernel/entry.S: Sparc trap low-level entry points. 2 * 3 * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net) 4 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) 5 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) 6 * Copyright (C) 1996-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz) 7 * Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au) 8 */ 9 10#include <linux/errno.h> 11 12#include <asm/head.h> 13#include <asm/asi.h> 14#include <asm/smp.h> 15#include <asm/contregs.h> 16#include <asm/ptrace.h> 17#include <asm/asm-offsets.h> 18#include <asm/psr.h> 19#include <asm/vaddrs.h> 20#include <asm/memreg.h> 21#include <asm/page.h> 22#include <asm/pgtable.h> 23#include <asm/pgtsun4c.h> 24#include <asm/winmacro.h> 25#include <asm/signal.h> 26#include <asm/obio.h> 27#include <asm/mxcc.h> 28#include <asm/thread_info.h> 29#include <asm/param.h> 30#include <asm/unistd.h> 31 32#include <asm/asmmacro.h> 33 34#define curptr g6 35 36/* These are just handy. */ 37#define _SV save %sp, -STACKFRAME_SZ, %sp 38#define _RS restore 39 40#define FLUSH_ALL_KERNEL_WINDOWS \ 41 _SV; _SV; _SV; _SV; _SV; _SV; _SV; \ 42 _RS; _RS; _RS; _RS; _RS; _RS; _RS; 43 44 .text 45 46#ifdef CONFIG_KGDB 47 .align 4 48 .globl arch_kgdb_breakpoint 49 .type arch_kgdb_breakpoint,#function 50arch_kgdb_breakpoint: 51 ta 0x7d 52 retl 53 nop 54 .size arch_kgdb_breakpoint,.-arch_kgdb_breakpoint 55#endif 56 57#if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE) 58 .align 4 59 .globl floppy_hardint 60floppy_hardint: 61 /* 62 * This code cannot touch registers %l0 %l1 and %l2 63 * because SAVE_ALL depends on their values. It depends 64 * on %l3 also, but we regenerate it before a call. 65 * Other registers are: 66 * %l3 -- base address of fdc registers 67 * %l4 -- pdma_vaddr 68 * %l5 -- scratch for ld/st address 69 * %l6 -- pdma_size 70 * %l7 -- scratch [floppy byte, ld/st address, aux. data] 71 */ 72 73 /* Do we have work to do? 
*/ 74 sethi %hi(doing_pdma), %l7 75 ld [%l7 + %lo(doing_pdma)], %l7 76 cmp %l7, 0 77 be floppy_dosoftint 78 nop 79 80 /* Load fdc register base */ 81 sethi %hi(fdc_status), %l3 82 ld [%l3 + %lo(fdc_status)], %l3 83 84 /* Setup register addresses */ 85 sethi %hi(pdma_vaddr), %l5 ! transfer buffer 86 ld [%l5 + %lo(pdma_vaddr)], %l4 87 sethi %hi(pdma_size), %l5 ! bytes to go 88 ld [%l5 + %lo(pdma_size)], %l6 89next_byte: 90 ldub [%l3], %l7 91 92 andcc %l7, 0x80, %g0 ! Does fifo still have data 93 bz floppy_fifo_emptied ! fifo has been emptied... 94 andcc %l7, 0x20, %g0 ! in non-dma mode still? 95 bz floppy_overrun ! nope, overrun 96 andcc %l7, 0x40, %g0 ! 0=write 1=read 97 bz floppy_write 98 sub %l6, 0x1, %l6 99 100 /* Ok, actually read this byte */ 101 ldub [%l3 + 1], %l7 102 orcc %g0, %l6, %g0 103 stb %l7, [%l4] 104 bne next_byte 105 add %l4, 0x1, %l4 106 107 b floppy_tdone 108 nop 109 110floppy_write: 111 /* Ok, actually write this byte */ 112 ldub [%l4], %l7 113 orcc %g0, %l6, %g0 114 stb %l7, [%l3 + 1] 115 bne next_byte 116 add %l4, 0x1, %l4 117 118 /* fall through... */ 119floppy_tdone: 120 sethi %hi(pdma_vaddr), %l5 121 st %l4, [%l5 + %lo(pdma_vaddr)] 122 sethi %hi(pdma_size), %l5 123 st %l6, [%l5 + %lo(pdma_size)] 124 /* Flip terminal count pin */ 125 set auxio_register, %l7 126 ld [%l7], %l7 127 128 set sparc_cpu_model, %l5 129 ld [%l5], %l5 130 subcc %l5, 1, %g0 /* enum { sun4c = 1 }; */ 131 be 1f 132 ldub [%l7], %l5 133 134 or %l5, 0xc2, %l5 135 stb %l5, [%l7] 136 andn %l5, 0x02, %l5 137 b 2f 138 nop 139 1401: 141 or %l5, 0xf4, %l5 142 stb %l5, [%l7] 143 andn %l5, 0x04, %l5 144 1452: 146 /* Kill some time so the bits set */ 147 WRITE_PAUSE 148 WRITE_PAUSE 149 150 stb %l5, [%l7] 151 152 /* Prevent recursion */ 153 sethi %hi(doing_pdma), %l7 154 b floppy_dosoftint 155 st %g0, [%l7 + %lo(doing_pdma)] 156 157 /* We emptied the FIFO, but we haven't read everything 158 * as of yet. 
Store the current transfer address and 159 * bytes left to read so we can continue when the next 160 * fast IRQ comes in. 161 */ 162floppy_fifo_emptied: 163 sethi %hi(pdma_vaddr), %l5 164 st %l4, [%l5 + %lo(pdma_vaddr)] 165 sethi %hi(pdma_size), %l7 166 st %l6, [%l7 + %lo(pdma_size)] 167 168 /* Restore condition codes */ 169 wr %l0, 0x0, %psr 170 WRITE_PAUSE 171 172 jmp %l1 173 rett %l2 174 175floppy_overrun: 176 sethi %hi(pdma_vaddr), %l5 177 st %l4, [%l5 + %lo(pdma_vaddr)] 178 sethi %hi(pdma_size), %l5 179 st %l6, [%l5 + %lo(pdma_size)] 180 /* Prevent recursion */ 181 sethi %hi(doing_pdma), %l7 182 st %g0, [%l7 + %lo(doing_pdma)] 183 184 /* fall through... */ 185floppy_dosoftint: 186 rd %wim, %l3 187 SAVE_ALL 188 189 /* Set all IRQs off. */ 190 or %l0, PSR_PIL, %l4 191 wr %l4, 0x0, %psr 192 WRITE_PAUSE 193 wr %l4, PSR_ET, %psr 194 WRITE_PAUSE 195 196 mov 11, %o0 ! floppy irq level (unused anyway) 197 mov %g0, %o1 ! devid is not used in fast interrupts 198 call sparc_floppy_irq 199 add %sp, STACKFRAME_SZ, %o2 ! struct pt_regs *regs 200 201 RESTORE_ALL 202 203#endif /* (CONFIG_BLK_DEV_FD) */ 204 205 /* Bad trap handler */ 206 .globl bad_trap_handler 207bad_trap_handler: 208 SAVE_ALL 209 210 wr %l0, PSR_ET, %psr 211 WRITE_PAUSE 212 213 add %sp, STACKFRAME_SZ, %o0 ! pt_regs 214 call do_hw_interrupt 215 mov %l7, %o1 ! trap number 216 217 RESTORE_ALL 218 219/* For now all IRQ's not registered get sent here. handler_irq() will 220 * see if a routine is registered to handle this interrupt and if not 221 * it will say so on the console. 222 */ 223 224 .align 4 225 .globl real_irq_entry, patch_handler_irq 226real_irq_entry: 227 SAVE_ALL 228 229#ifdef CONFIG_SMP 230 .globl patchme_maybe_smp_msg 231 232 cmp %l7, 12 233patchme_maybe_smp_msg: 234 bgu maybe_smp4m_msg 235 nop 236#endif 237 238real_irq_continue: 239 or %l0, PSR_PIL, %g2 240 wr %g2, 0x0, %psr 241 WRITE_PAUSE 242 wr %g2, PSR_ET, %psr 243 WRITE_PAUSE 244 mov %l7, %o0 ! 
irq level 245patch_handler_irq: 246 call handler_irq 247 add %sp, STACKFRAME_SZ, %o1 ! pt_regs ptr 248 or %l0, PSR_PIL, %g2 ! restore PIL after handler_irq 249 wr %g2, PSR_ET, %psr ! keep ET up 250 WRITE_PAUSE 251 252 RESTORE_ALL 253 254#ifdef CONFIG_SMP 255 /* SMP per-cpu ticker interrupts are handled specially. */ 256smp4m_ticker: 257 bne real_irq_continue+4 258 or %l0, PSR_PIL, %g2 259 wr %g2, 0x0, %psr 260 WRITE_PAUSE 261 wr %g2, PSR_ET, %psr 262 WRITE_PAUSE 263 call smp4m_percpu_timer_interrupt 264 add %sp, STACKFRAME_SZ, %o0 265 wr %l0, PSR_ET, %psr 266 WRITE_PAUSE 267 RESTORE_ALL 268 269 /* Here is where we check for possible SMP IPI passed to us 270 * on some level other than 15 which is the NMI and only used 271 * for cross calls. That has a separate entry point below. 272 */ 273maybe_smp4m_msg: 274 GET_PROCESSOR4M_ID(o3) 275 sethi %hi(sun4m_irq_percpu), %l5 276 sll %o3, 2, %o3 277 or %l5, %lo(sun4m_irq_percpu), %o5 278 sethi %hi(0x40000000), %o2 279 ld [%o5 + %o3], %o1 280 ld [%o1 + 0x00], %o3 ! sun4m_irq_percpu[cpu]->pending 281 andcc %o3, %o2, %g0 282 be,a smp4m_ticker 283 cmp %l7, 14 284 st %o2, [%o1 + 0x04] ! sun4m_irq_percpu[cpu]->clear=0x40000000 285 WRITE_PAUSE 286 ld [%o1 + 0x00], %g0 ! sun4m_irq_percpu[cpu]->pending 287 WRITE_PAUSE 288 or %l0, PSR_PIL, %l4 289 wr %l4, 0x0, %psr 290 WRITE_PAUSE 291 wr %l4, PSR_ET, %psr 292 WRITE_PAUSE 293 call smp_reschedule_irq 294 nop 295 296 RESTORE_ALL 297 298 .align 4 299 .globl linux_trap_ipi15_sun4m 300linux_trap_ipi15_sun4m: 301 SAVE_ALL 302 sethi %hi(0x80000000), %o2 303 GET_PROCESSOR4M_ID(o0) 304 sethi %hi(sun4m_irq_percpu), %l5 305 or %l5, %lo(sun4m_irq_percpu), %o5 306 sll %o0, 2, %o0 307 ld [%o5 + %o0], %o5 308 ld [%o5 + 0x00], %o3 ! sun4m_irq_percpu[cpu]->pending 309 andcc %o3, %o2, %g0 310 be 1f ! Must be an NMI async memory error 311 st %o2, [%o5 + 0x04] ! sun4m_irq_percpu[cpu]->clear=0x80000000 312 WRITE_PAUSE 313 ld [%o5 + 0x00], %g0 ! 
sun4m_irq_percpu[cpu]->pending 314 WRITE_PAUSE 315 or %l0, PSR_PIL, %l4 316 wr %l4, 0x0, %psr 317 WRITE_PAUSE 318 wr %l4, PSR_ET, %psr 319 WRITE_PAUSE 320 call smp4m_cross_call_irq 321 nop 322 b ret_trap_lockless_ipi 323 clr %l6 3241: 325 /* NMI async memory error handling. */ 326 sethi %hi(0x80000000), %l4 327 sethi %hi(sun4m_irq_global), %o5 328 ld [%o5 + %lo(sun4m_irq_global)], %l5 329 st %l4, [%l5 + 0x0c] ! sun4m_irq_global->mask_set=0x80000000 330 WRITE_PAUSE 331 ld [%l5 + 0x00], %g0 ! sun4m_irq_global->pending 332 WRITE_PAUSE 333 or %l0, PSR_PIL, %l4 334 wr %l4, 0x0, %psr 335 WRITE_PAUSE 336 wr %l4, PSR_ET, %psr 337 WRITE_PAUSE 338 call sun4m_nmi 339 nop 340 st %l4, [%l5 + 0x08] ! sun4m_irq_global->mask_clear=0x80000000 341 WRITE_PAUSE 342 ld [%l5 + 0x00], %g0 ! sun4m_irq_global->pending 343 WRITE_PAUSE 344 RESTORE_ALL 345 346 .globl smp4d_ticker 347 /* SMP per-cpu ticker interrupts are handled specially. */ 348smp4d_ticker: 349 SAVE_ALL 350 or %l0, PSR_PIL, %g2 351 sethi %hi(CC_ICLR), %o0 352 sethi %hi(1 << 14), %o1 353 or %o0, %lo(CC_ICLR), %o0 354 stha %o1, [%o0] ASI_M_MXCC /* Clear PIL 14 in MXCC's ICLR */ 355 wr %g2, 0x0, %psr 356 WRITE_PAUSE 357 wr %g2, PSR_ET, %psr 358 WRITE_PAUSE 359 call smp4d_percpu_timer_interrupt 360 add %sp, STACKFRAME_SZ, %o0 361 wr %l0, PSR_ET, %psr 362 WRITE_PAUSE 363 RESTORE_ALL 364 365 .align 4 366 .globl linux_trap_ipi15_sun4d 367linux_trap_ipi15_sun4d: 368 SAVE_ALL 369 sethi %hi(CC_BASE), %o4 370 sethi %hi(MXCC_ERR_ME|MXCC_ERR_PEW|MXCC_ERR_ASE|MXCC_ERR_PEE), %o2 371 or %o4, (CC_EREG - CC_BASE), %o0 372 ldda [%o0] ASI_M_MXCC, %o0 373 andcc %o0, %o2, %g0 374 bne 1f 375 sethi %hi(BB_STAT2), %o2 376 lduba [%o2] ASI_M_CTL, %o2 377 andcc %o2, BB_STAT2_MASK, %g0 378 bne 2f 379 or %o4, (CC_ICLR - CC_BASE), %o0 380 sethi %hi(1 << 15), %o1 381 stha %o1, [%o0] ASI_M_MXCC /* Clear PIL 15 in MXCC's ICLR */ 382 or %l0, PSR_PIL, %l4 383 wr %l4, 0x0, %psr 384 WRITE_PAUSE 385 wr %l4, PSR_ET, %psr 386 WRITE_PAUSE 387 call 
smp4d_cross_call_irq 388 nop 389 b ret_trap_lockless_ipi 390 clr %l6 391 3921: /* MXCC error */ 3932: /* BB error */ 394 /* Disable PIL 15 */ 395 set CC_IMSK, %l4 396 lduha [%l4] ASI_M_MXCC, %l5 397 sethi %hi(1 << 15), %l7 398 or %l5, %l7, %l5 399 stha %l5, [%l4] ASI_M_MXCC 4001: b,a 1b 401 402#ifdef CONFIG_SPARC_LEON 403 404 .globl smpleon_ticker 405 /* SMP per-cpu ticker interrupts are handled specially. */ 406smpleon_ticker: 407 SAVE_ALL 408 or %l0, PSR_PIL, %g2 409 wr %g2, 0x0, %psr 410 WRITE_PAUSE 411 wr %g2, PSR_ET, %psr 412 WRITE_PAUSE 413 call leon_percpu_timer_interrupt 414 add %sp, STACKFRAME_SZ, %o0 415 wr %l0, PSR_ET, %psr 416 WRITE_PAUSE 417 RESTORE_ALL 418 419 .align 4 420 .globl linux_trap_ipi15_leon 421linux_trap_ipi15_leon: 422 SAVE_ALL 423 or %l0, PSR_PIL, %l4 424 wr %l4, 0x0, %psr 425 WRITE_PAUSE 426 wr %l4, PSR_ET, %psr 427 WRITE_PAUSE 428 call leon_cross_call_irq 429 nop 430 b ret_trap_lockless_ipi 431 clr %l6 432 433#endif /* CONFIG_SPARC_LEON */ 434 435#endif /* CONFIG_SMP */ 436 437 /* This routine handles illegal instructions and privileged 438 * instruction attempts from user code. 439 */ 440 .align 4 441 .globl bad_instruction 442bad_instruction: 443 sethi %hi(0xc1f80000), %l4 444 ld [%l1], %l5 445 sethi %hi(0x81d80000), %l7 446 and %l5, %l4, %l5 447 cmp %l5, %l7 448 be 1f 449 SAVE_ALL 450 451 wr %l0, PSR_ET, %psr ! re-enable traps 452 WRITE_PAUSE 453 454 add %sp, STACKFRAME_SZ, %o0 455 mov %l1, %o1 456 mov %l2, %o2 457 call do_illegal_instruction 458 mov %l0, %o3 459 460 RESTORE_ALL 461 4621: /* unimplemented flush - just skip */ 463 jmpl %l2, %g0 464 rett %l2 + 4 465 466 .align 4 467 .globl priv_instruction 468priv_instruction: 469 SAVE_ALL 470 471 wr %l0, PSR_ET, %psr 472 WRITE_PAUSE 473 474 add %sp, STACKFRAME_SZ, %o0 475 mov %l1, %o1 476 mov %l2, %o2 477 call do_priv_instruction 478 mov %l0, %o3 479 480 RESTORE_ALL 481 482 /* This routine handles unaligned data accesses. 
*/
	/* Memory-not-aligned trap entry.
	 * Trap-time locals, as set up by the trap table (same convention
	 * documented at sun4c_fault below): %l0 = %psr, %l1 = trap pc,
	 * %l2 = npc.  Kernel- and user-mode faults take separate C handlers.
	 */
	.align	4
	.globl	mna_handler
mna_handler:
	andcc	%l0, PSR_PS, %g0	! trapped from supervisor mode?
	be	mna_fromuser		! no -> handle as a user fault
	 nop

	SAVE_ALL

	wr	%l0, PSR_ET, %psr	! re-enable traps
	WRITE_PAUSE

	ld	[%l1], %o1		! arg2: the faulting instruction word
	call	kernel_unaligned_trap
	 add	%sp, STACKFRAME_SZ, %o0	! arg1: pt_regs ptr (delay slot)

	RESTORE_ALL

mna_fromuser:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr	! re-enable traps
	WRITE_PAUSE

	ld	[%l1], %o1		! arg2: the faulting instruction word
	call	user_unaligned_trap
	 add	%sp, STACKFRAME_SZ, %o0	! arg1: pt_regs ptr (delay slot)

	RESTORE_ALL

	/* This routine handles floating point disabled traps. */
	.align	4
	.globl	fpd_trap_handler
fpd_trap_handler:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr	! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0	! arg1: pt_regs ptr
	mov	%l1, %o1		! arg2: trap pc
	mov	%l2, %o2		! arg3: trap npc
	call	do_fpd_trap
	 mov	%l0, %o3		! arg4: trap-time %psr (delay slot)

	RESTORE_ALL

	/* This routine handles Floating Point Exceptions. */
	/* Special case: if the trap pc (%l1) is one of the two fsr-store
	 * instructions inside fpsave that are known to trap (fpsave_magic,
	 * or fpsave's first insn), do not call C code at all -- restore the
	 * trap-time %psr and rett directly into the matching recovery stub
	 * (fpsave_catch / fpsave_catch2; see the deadlock note near fpsave).
	 * Otherwise fall through to the normal do_fpe_trap path at 2:.
	 */
	.align	4
	.globl	fpe_trap_handler
fpe_trap_handler:
	set	fpsave_magic, %l5
	cmp	%l1, %l5		! trapped on fpsave_magic's st %fsr?
	be	1f
	 sethi	%hi(fpsave), %l5
	or	%l5, %lo(fpsave), %l5
	cmp	%l1, %l5		! trapped on fpsave's first st %fsr?
	bne	2f			! no -> ordinary FP exception
	 sethi	%hi(fpsave_catch2), %l5
	or	%l5, %lo(fpsave_catch2), %l5
	wr	%l0, 0x0, %psr		! restore trap-time %psr
	WRITE_PAUSE
	jmp	%l5			! resume inside fpsave_catch2
	 rett	%l5 + 4
1:
	sethi	%hi(fpsave_catch), %l5
	or	%l5, %lo(fpsave_catch), %l5
	wr	%l0, 0x0, %psr		! restore trap-time %psr
	WRITE_PAUSE
	jmp	%l5			! resume inside fpsave_catch
	 rett	%l5 + 4

2:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr	! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0	! arg1: pt_regs ptr
	mov	%l1, %o1		! arg2: trap pc
	mov	%l2, %o2		! arg3: trap npc
	call	do_fpe_trap
	 mov	%l0, %o3		! arg4: trap-time %psr (delay slot)

	RESTORE_ALL

	/* This routine handles Tag Overflow Exceptions. */
	.align	4
	.globl	do_tag_overflow
do_tag_overflow:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr	! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0	! arg1: pt_regs ptr
	mov	%l1, %o1		! arg2: trap pc
	mov	%l2, %o2		! arg3: trap npc
	call	handle_tag_overflow
	 mov	%l0, %o3		! arg4: trap-time %psr (delay slot)

	RESTORE_ALL

	/* This routine handles Watchpoint Exceptions.
*/ 587 .align 4 588 .globl do_watchpoint 589do_watchpoint: 590 SAVE_ALL 591 592 wr %l0, PSR_ET, %psr ! re-enable traps 593 WRITE_PAUSE 594 595 add %sp, STACKFRAME_SZ, %o0 596 mov %l1, %o1 597 mov %l2, %o2 598 call handle_watchpoint 599 mov %l0, %o3 600 601 RESTORE_ALL 602 603 /* This routine handles Register Access Exceptions. */ 604 .align 4 605 .globl do_reg_access 606do_reg_access: 607 SAVE_ALL 608 609 wr %l0, PSR_ET, %psr ! re-enable traps 610 WRITE_PAUSE 611 612 add %sp, STACKFRAME_SZ, %o0 613 mov %l1, %o1 614 mov %l2, %o2 615 call handle_reg_access 616 mov %l0, %o3 617 618 RESTORE_ALL 619 620 /* This routine handles Co-Processor Disabled Exceptions. */ 621 .align 4 622 .globl do_cp_disabled 623do_cp_disabled: 624 SAVE_ALL 625 626 wr %l0, PSR_ET, %psr ! re-enable traps 627 WRITE_PAUSE 628 629 add %sp, STACKFRAME_SZ, %o0 630 mov %l1, %o1 631 mov %l2, %o2 632 call handle_cp_disabled 633 mov %l0, %o3 634 635 RESTORE_ALL 636 637 /* This routine handles Co-Processor Exceptions. */ 638 .align 4 639 .globl do_cp_exception 640do_cp_exception: 641 SAVE_ALL 642 643 wr %l0, PSR_ET, %psr ! re-enable traps 644 WRITE_PAUSE 645 646 add %sp, STACKFRAME_SZ, %o0 647 mov %l1, %o1 648 mov %l2, %o2 649 call handle_cp_exception 650 mov %l0, %o3 651 652 RESTORE_ALL 653 654 /* This routine handles Hardware Divide By Zero Exceptions. */ 655 .align 4 656 .globl do_hw_divzero 657do_hw_divzero: 658 SAVE_ALL 659 660 wr %l0, PSR_ET, %psr ! re-enable traps 661 WRITE_PAUSE 662 663 add %sp, STACKFRAME_SZ, %o0 664 mov %l1, %o1 665 mov %l2, %o2 666 call handle_hw_divzero 667 mov %l0, %o3 668 669 RESTORE_ALL 670 671 .align 4 672 .globl do_flush_windows 673do_flush_windows: 674 SAVE_ALL 675 676 wr %l0, PSR_ET, %psr 677 WRITE_PAUSE 678 679 andcc %l0, PSR_PS, %g0 680 bne dfw_kernel 681 nop 682 683 call flush_user_windows 684 nop 685 686 /* Advance over the trap instruction. 
*/ 687 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 688 add %l1, 0x4, %l2 689 st %l1, [%sp + STACKFRAME_SZ + PT_PC] 690 st %l2, [%sp + STACKFRAME_SZ + PT_NPC] 691 692 RESTORE_ALL 693 694 .globl flush_patch_one 695 696 /* We get these for debugging routines using __builtin_return_address() */ 697dfw_kernel: 698flush_patch_one: 699 FLUSH_ALL_KERNEL_WINDOWS 700 701 /* Advance over the trap instruction. */ 702 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 703 add %l1, 0x4, %l2 704 st %l1, [%sp + STACKFRAME_SZ + PT_PC] 705 st %l2, [%sp + STACKFRAME_SZ + PT_NPC] 706 707 RESTORE_ALL 708 709 /* The getcc software trap. The user wants the condition codes from 710 * the %psr in register %g1. 711 */ 712 713 .align 4 714 .globl getcc_trap_handler 715getcc_trap_handler: 716 srl %l0, 20, %g1 ! give user 717 and %g1, 0xf, %g1 ! only ICC bits in %psr 718 jmp %l2 ! advance over trap instruction 719 rett %l2 + 0x4 ! like this... 720 721 /* The setcc software trap. The user has condition codes in %g1 722 * that it would like placed in the %psr. Be careful not to flip 723 * any unintentional bits! 724 */ 725 726 .align 4 727 .globl setcc_trap_handler 728setcc_trap_handler: 729 sll %g1, 0x14, %l4 730 set PSR_ICC, %l5 731 andn %l0, %l5, %l0 ! clear ICC bits in %psr 732 and %l4, %l5, %l4 ! clear non-ICC bits in user value 733 or %l4, %l0, %l4 ! or them in... mix mix mix 734 735 wr %l4, 0x0, %psr ! set new %psr 736 WRITE_PAUSE ! TI scumbags... 737 738 jmp %l2 ! advance over trap instruction 739 rett %l2 + 0x4 ! like this... 740 741 .align 4 742 .globl linux_trap_nmi_sun4c 743linux_trap_nmi_sun4c: 744 SAVE_ALL 745 746 /* Ugh, we need to clear the IRQ line. This is now 747 * a very sun4c specific trap handler... 748 */ 749 sethi %hi(interrupt_enable), %l5 750 ld [%l5 + %lo(interrupt_enable)], %l5 751 ldub [%l5], %l6 752 andn %l6, INTS_ENAB, %l6 753 stb %l6, [%l5] 754 755 /* Now it is safe to re-enable traps without recursion. 
*/ 756 or %l0, PSR_PIL, %l0 757 wr %l0, PSR_ET, %psr 758 WRITE_PAUSE 759 760 /* Now call the c-code with the pt_regs frame ptr and the 761 * memory error registers as arguments. The ordering chosen 762 * here is due to unlatching semantics. 763 */ 764 sethi %hi(AC_SYNC_ERR), %o0 765 add %o0, 0x4, %o0 766 lda [%o0] ASI_CONTROL, %o2 ! sync vaddr 767 sub %o0, 0x4, %o0 768 lda [%o0] ASI_CONTROL, %o1 ! sync error 769 add %o0, 0xc, %o0 770 lda [%o0] ASI_CONTROL, %o4 ! async vaddr 771 sub %o0, 0x4, %o0 772 lda [%o0] ASI_CONTROL, %o3 ! async error 773 call sparc_lvl15_nmi 774 add %sp, STACKFRAME_SZ, %o0 775 776 RESTORE_ALL 777 778 .align 4 779 .globl invalid_segment_patch1_ff 780 .globl invalid_segment_patch2_ff 781invalid_segment_patch1_ff: cmp %l4, 0xff 782invalid_segment_patch2_ff: mov 0xff, %l3 783 784 .align 4 785 .globl invalid_segment_patch1_1ff 786 .globl invalid_segment_patch2_1ff 787invalid_segment_patch1_1ff: cmp %l4, 0x1ff 788invalid_segment_patch2_1ff: mov 0x1ff, %l3 789 790 .align 4 791 .globl num_context_patch1_16, num_context_patch2_16 792num_context_patch1_16: mov 0x10, %l7 793num_context_patch2_16: mov 0x10, %l7 794 795 .align 4 796 .globl vac_linesize_patch_32 797vac_linesize_patch_32: subcc %l7, 32, %l7 798 799 .align 4 800 .globl vac_hwflush_patch1_on, vac_hwflush_patch2_on 801 802/* 803 * Ugly, but we cant use hardware flushing on the sun4 and we'd require 804 * two instructions (Anton) 805 */ 806vac_hwflush_patch1_on: addcc %l7, -PAGE_SIZE, %l7 807 808vac_hwflush_patch2_on: sta %g0, [%l3 + %l7] ASI_HWFLUSHSEG 809 810 .globl invalid_segment_patch1, invalid_segment_patch2 811 .globl num_context_patch1 812 .globl vac_linesize_patch, vac_hwflush_patch1 813 .globl vac_hwflush_patch2 814 815 .align 4 816 .globl sun4c_fault 817 818! %l0 = %psr 819! %l1 = %pc 820! %l2 = %npc 821! %l3 = %wim 822! %l7 = 1 for textfault 823! We want error in %l5, vaddr in %l6 824sun4c_fault: 825 sethi %hi(AC_SYNC_ERR), %l4 826 add %l4, 0x4, %l6 ! 
AC_SYNC_VA in %l6 827 lda [%l6] ASI_CONTROL, %l5 ! Address 828 lda [%l4] ASI_CONTROL, %l6 ! Error, retained for a bit 829 830 andn %l5, 0xfff, %l5 ! Encode all info into l7 831 srl %l6, 14, %l4 832 833 and %l4, 2, %l4 834 or %l5, %l4, %l4 835 836 or %l4, %l7, %l7 ! l7 = [addr,write,txtfault] 837 838 andcc %l0, PSR_PS, %g0 839 be sun4c_fault_fromuser 840 andcc %l7, 1, %g0 ! Text fault? 841 842 be 1f 843 sethi %hi(KERNBASE), %l4 844 845 mov %l1, %l5 ! PC 846 8471: 848 cmp %l5, %l4 849 blu sun4c_fault_fromuser 850 sethi %hi(~((1 << SUN4C_REAL_PGDIR_SHIFT) - 1)), %l4 851 852 /* If the kernel references a bum kernel pointer, or a pte which 853 * points to a non existant page in ram, we will run this code 854 * _forever_ and lock up the machine!!!!! So we must check for 855 * this condition, the AC_SYNC_ERR bits are what we must examine. 856 * Also a parity error would make this happen as well. So we just 857 * check that we are in fact servicing a tlb miss and not some 858 * other type of fault for the kernel. 859 */ 860 andcc %l6, 0x80, %g0 861 be sun4c_fault_fromuser 862 and %l5, %l4, %l5 863 864 /* Test for NULL pte_t * in vmalloc area. */ 865 sethi %hi(VMALLOC_START), %l4 866 cmp %l5, %l4 867 blu,a invalid_segment_patch1 868 lduXa [%l5] ASI_SEGMAP, %l4 869 870 sethi %hi(swapper_pg_dir), %l4 871 srl %l5, SUN4C_PGDIR_SHIFT, %l6 872 or %l4, %lo(swapper_pg_dir), %l4 873 sll %l6, 2, %l6 874 ld [%l4 + %l6], %l4 875 andcc %l4, PAGE_MASK, %g0 876 be sun4c_fault_fromuser 877 lduXa [%l5] ASI_SEGMAP, %l4 878 879invalid_segment_patch1: 880 cmp %l4, 0x7f 881 bne 1f 882 sethi %hi(sun4c_kfree_ring), %l4 883 or %l4, %lo(sun4c_kfree_ring), %l4 884 ld [%l4 + 0x18], %l3 885 deccc %l3 ! do we have a free entry? 886 bcs,a 2f ! no, unmap one. 887 sethi %hi(sun4c_kernel_ring), %l4 888 889 st %l3, [%l4 + 0x18] ! sun4c_kfree_ring.num_entries-- 890 891 ld [%l4 + 0x00], %l6 ! entry = sun4c_kfree_ring.ringhd.next 892 st %l5, [%l6 + 0x08] ! entry->vaddr = address 893 894 ld [%l6 + 0x00], %l3 ! 
next = entry->next 895 ld [%l6 + 0x04], %l7 ! entry->prev 896 897 st %l7, [%l3 + 0x04] ! next->prev = entry->prev 898 st %l3, [%l7 + 0x00] ! entry->prev->next = next 899 900 sethi %hi(sun4c_kernel_ring), %l4 901 or %l4, %lo(sun4c_kernel_ring), %l4 902 ! head = &sun4c_kernel_ring.ringhd 903 904 ld [%l4 + 0x00], %l7 ! head->next 905 906 st %l4, [%l6 + 0x04] ! entry->prev = head 907 st %l7, [%l6 + 0x00] ! entry->next = head->next 908 st %l6, [%l7 + 0x04] ! head->next->prev = entry 909 910 st %l6, [%l4 + 0x00] ! head->next = entry 911 912 ld [%l4 + 0x18], %l3 913 inc %l3 ! sun4c_kernel_ring.num_entries++ 914 st %l3, [%l4 + 0x18] 915 b 4f 916 ld [%l6 + 0x08], %l5 917 9182: 919 or %l4, %lo(sun4c_kernel_ring), %l4 920 ! head = &sun4c_kernel_ring.ringhd 921 922 ld [%l4 + 0x04], %l6 ! entry = head->prev 923 924 ld [%l6 + 0x08], %l3 ! tmp = entry->vaddr 925 926 ! Flush segment from the cache. 927 sethi %hi((64 * 1024)), %l7 9289: 929vac_hwflush_patch1: 930vac_linesize_patch: 931 subcc %l7, 16, %l7 932 bne 9b 933vac_hwflush_patch2: 934 sta %g0, [%l3 + %l7] ASI_FLUSHSEG 935 936 st %l5, [%l6 + 0x08] ! entry->vaddr = address 937 938 ld [%l6 + 0x00], %l5 ! next = entry->next 939 ld [%l6 + 0x04], %l7 ! entry->prev 940 941 st %l7, [%l5 + 0x04] ! next->prev = entry->prev 942 st %l5, [%l7 + 0x00] ! entry->prev->next = next 943 st %l4, [%l6 + 0x04] ! entry->prev = head 944 945 ld [%l4 + 0x00], %l7 ! head->next 946 947 st %l7, [%l6 + 0x00] ! entry->next = head->next 948 st %l6, [%l7 + 0x04] ! head->next->prev = entry 949 st %l6, [%l4 + 0x00] ! head->next = entry 950 951 mov %l3, %l5 ! address = tmp 952 9534: 954num_context_patch1: 955 mov 0x08, %l7 956 957 ld [%l6 + 0x08], %l4 958 ldub [%l6 + 0x0c], %l3 959 or %l4, %l3, %l4 ! encode new vaddr/pseg into l4 960 961 sethi %hi(AC_CONTEXT), %l3 962 lduba [%l3] ASI_CONTROL, %l6 963 964 /* Invalidate old mapping, instantiate new mapping, 965 * for each context. Registers l6/l7 are live across 966 * this loop. 
967 */ 9683: deccc %l7 969 sethi %hi(AC_CONTEXT), %l3 970 stba %l7, [%l3] ASI_CONTROL 971invalid_segment_patch2: 972 mov 0x7f, %l3 973 stXa %l3, [%l5] ASI_SEGMAP 974 andn %l4, 0x1ff, %l3 975 bne 3b 976 stXa %l4, [%l3] ASI_SEGMAP 977 978 sethi %hi(AC_CONTEXT), %l3 979 stba %l6, [%l3] ASI_CONTROL 980 981 andn %l4, 0x1ff, %l5 982 9831: 984 sethi %hi(VMALLOC_START), %l4 985 cmp %l5, %l4 986 987 bgeu 1f 988 mov 1 << (SUN4C_REAL_PGDIR_SHIFT - PAGE_SHIFT), %l7 989 990 sethi %hi(KERNBASE), %l6 991 992 sub %l5, %l6, %l4 993 srl %l4, PAGE_SHIFT, %l4 994 sethi %hi((SUN4C_PAGE_KERNEL & 0xf4000000)), %l3 995 or %l3, %l4, %l3 996 997 sethi %hi(PAGE_SIZE), %l4 998 9992: 1000 sta %l3, [%l5] ASI_PTE 1001 deccc %l7 1002 inc %l3 1003 bne 2b 1004 add %l5, %l4, %l5 1005 1006 b 7f 1007 sethi %hi(sun4c_kernel_faults), %l4 1008 10091: 1010 srl %l5, SUN4C_PGDIR_SHIFT, %l3 1011 sethi %hi(swapper_pg_dir), %l4 1012 or %l4, %lo(swapper_pg_dir), %l4 1013 sll %l3, 2, %l3 1014 ld [%l4 + %l3], %l4 1015 and %l4, PAGE_MASK, %l4 1016 1017 srl %l5, (PAGE_SHIFT - 2), %l6 1018 and %l6, ((SUN4C_PTRS_PER_PTE - 1) << 2), %l6 1019 add %l6, %l4, %l6 1020 1021 sethi %hi(PAGE_SIZE), %l4 1022 10232: 1024 ld [%l6], %l3 1025 deccc %l7 1026 sta %l3, [%l5] ASI_PTE 1027 add %l6, 0x4, %l6 1028 bne 2b 1029 add %l5, %l4, %l5 1030 1031 sethi %hi(sun4c_kernel_faults), %l4 10327: 1033 ld [%l4 + %lo(sun4c_kernel_faults)], %l3 1034 inc %l3 1035 st %l3, [%l4 + %lo(sun4c_kernel_faults)] 1036 1037 /* Restore condition codes */ 1038 wr %l0, 0x0, %psr 1039 WRITE_PAUSE 1040 jmp %l1 1041 rett %l2 1042 1043sun4c_fault_fromuser: 1044 SAVE_ALL 1045 nop 1046 1047 mov %l7, %o1 ! Decode the info from %l7 1048 mov %l7, %o2 1049 and %o1, 1, %o1 ! arg2 = text_faultp 1050 mov %l7, %o3 1051 and %o2, 2, %o2 ! arg3 = writep 1052 andn %o3, 0xfff, %o3 ! arg4 = faulting address 1053 1054 wr %l0, PSR_ET, %psr 1055 WRITE_PAUSE 1056 1057 call do_sun4c_fault 1058 add %sp, STACKFRAME_SZ, %o0 ! 
arg1 = pt_regs ptr 1059 1060 RESTORE_ALL 1061 1062 .align 4 1063 .globl srmmu_fault 1064srmmu_fault: 1065 mov 0x400, %l5 1066 mov 0x300, %l4 1067 1068 lda [%l5] ASI_M_MMUREGS, %l6 ! read sfar first 1069 lda [%l4] ASI_M_MMUREGS, %l5 ! read sfsr last 1070 1071 andn %l6, 0xfff, %l6 1072 srl %l5, 6, %l5 ! and encode all info into l7 1073 1074 and %l5, 2, %l5 1075 or %l5, %l6, %l6 1076 1077 or %l6, %l7, %l7 ! l7 = [addr,write,txtfault] 1078 1079 SAVE_ALL 1080 1081 mov %l7, %o1 1082 mov %l7, %o2 1083 and %o1, 1, %o1 ! arg2 = text_faultp 1084 mov %l7, %o3 1085 and %o2, 2, %o2 ! arg3 = writep 1086 andn %o3, 0xfff, %o3 ! arg4 = faulting address 1087 1088 wr %l0, PSR_ET, %psr 1089 WRITE_PAUSE 1090 1091 call do_sparc_fault 1092 add %sp, STACKFRAME_SZ, %o0 ! arg1 = pt_regs ptr 1093 1094 RESTORE_ALL 1095 1096 .align 4 1097 .globl sys_nis_syscall 1098sys_nis_syscall: 1099 mov %o7, %l5 1100 add %sp, STACKFRAME_SZ, %o0 ! pt_regs *regs arg 1101 call c_sys_nis_syscall 1102 mov %l5, %o7 1103 1104 .align 4 1105 .globl sys_execve 1106sys_execve: 1107 mov %o7, %l5 1108 add %sp, STACKFRAME_SZ, %o0 ! pt_regs *regs arg 1109 call sparc_execve 1110 mov %l5, %o7 1111 1112 .globl sunos_execv 1113sunos_execv: 1114 st %g0, [%sp + STACKFRAME_SZ + PT_I2] 1115 1116 call sparc_execve 1117 add %sp, STACKFRAME_SZ, %o0 1118 1119 b ret_sys_call 1120 ld [%sp + STACKFRAME_SZ + PT_I0], %o0 1121 1122 .align 4 1123 .globl sys_sparc_pipe 1124sys_sparc_pipe: 1125 mov %o7, %l5 1126 add %sp, STACKFRAME_SZ, %o0 ! 
pt_regs *regs arg 1127 call sparc_pipe 1128 mov %l5, %o7 1129 1130 .align 4 1131 .globl sys_sigaltstack 1132sys_sigaltstack: 1133 mov %o7, %l5 1134 mov %fp, %o2 1135 call do_sigaltstack 1136 mov %l5, %o7 1137 1138 .align 4 1139 .globl sys_sigstack 1140sys_sigstack: 1141 mov %o7, %l5 1142 mov %fp, %o2 1143 call do_sys_sigstack 1144 mov %l5, %o7 1145 1146 .align 4 1147 .globl sys_sigreturn 1148sys_sigreturn: 1149 call do_sigreturn 1150 add %sp, STACKFRAME_SZ, %o0 1151 1152 ld [%curptr + TI_FLAGS], %l5 1153 andcc %l5, _TIF_SYSCALL_TRACE, %g0 1154 be 1f 1155 nop 1156 1157 call syscall_trace 1158 nop 1159 11601: 1161 /* We don't want to muck with user registers like a 1162 * normal syscall, just return. 1163 */ 1164 RESTORE_ALL 1165 1166 .align 4 1167 .globl sys_rt_sigreturn 1168sys_rt_sigreturn: 1169 call do_rt_sigreturn 1170 add %sp, STACKFRAME_SZ, %o0 1171 1172 ld [%curptr + TI_FLAGS], %l5 1173 andcc %l5, _TIF_SYSCALL_TRACE, %g0 1174 be 1f 1175 nop 1176 1177 add %sp, STACKFRAME_SZ, %o0 1178 call syscall_trace 1179 mov 1, %o1 1180 11811: 1182 /* We are returning to a signal handler. */ 1183 RESTORE_ALL 1184 1185 .align 4 1186 .globl sys_fork, flush_patch_two 1187sys_fork: 1188 mov %o7, %l5 1189flush_patch_two: 1190 FLUSH_ALL_KERNEL_WINDOWS; 1191 ld [%curptr + TI_TASK], %o4 1192 rd %psr, %g4 1193 WRITE_PAUSE 1194 mov SIGCHLD, %o0 ! arg0: clone flags 1195 rd %wim, %g5 1196 WRITE_PAUSE 1197 mov %fp, %o1 ! arg1: usp 1198 std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr] 1199 add %sp, STACKFRAME_SZ, %o2 ! arg2: pt_regs ptr 1200 mov 0, %o3 1201 call sparc_do_fork 1202 mov %l5, %o7 1203 1204 /* Whee, kernel threads! */ 1205 .globl sys_clone, flush_patch_three 1206sys_clone: 1207 mov %o7, %l5 1208flush_patch_three: 1209 FLUSH_ALL_KERNEL_WINDOWS; 1210 ld [%curptr + TI_TASK], %o4 1211 rd %psr, %g4 1212 WRITE_PAUSE 1213 1214 /* arg0,1: flags,usp -- loaded already */ 1215 cmp %o1, 0x0 ! Is new_usp NULL? 1216 rd %wim, %g5 1217 WRITE_PAUSE 1218 be,a 1f 1219 mov %fp, %o1 ! 
yes, use callers usp 1220 andn %o1, 7, %o1 ! no, align to 8 bytes 12211: 1222 std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr] 1223 add %sp, STACKFRAME_SZ, %o2 ! arg2: pt_regs ptr 1224 mov 0, %o3 1225 call sparc_do_fork 1226 mov %l5, %o7 1227 1228 /* Whee, real vfork! */ 1229 .globl sys_vfork, flush_patch_four 1230sys_vfork: 1231flush_patch_four: 1232 FLUSH_ALL_KERNEL_WINDOWS; 1233 ld [%curptr + TI_TASK], %o4 1234 rd %psr, %g4 1235 WRITE_PAUSE 1236 rd %wim, %g5 1237 WRITE_PAUSE 1238 std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr] 1239 sethi %hi(0x4000 | 0x0100 | SIGCHLD), %o0 1240 mov %fp, %o1 1241 or %o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0 1242 sethi %hi(sparc_do_fork), %l1 1243 mov 0, %o3 1244 jmpl %l1 + %lo(sparc_do_fork), %g0 1245 add %sp, STACKFRAME_SZ, %o2 1246 1247 .align 4 1248linux_sparc_ni_syscall: 1249 sethi %hi(sys_ni_syscall), %l7 1250 b syscall_is_too_hard 1251 or %l7, %lo(sys_ni_syscall), %l7 1252 1253linux_fast_syscall: 1254 andn %l7, 3, %l7 1255 mov %i0, %o0 1256 mov %i1, %o1 1257 mov %i2, %o2 1258 jmpl %l7 + %g0, %g0 1259 mov %i3, %o3 1260 1261linux_syscall_trace: 1262 add %sp, STACKFRAME_SZ, %o0 1263 call syscall_trace 1264 mov 0, %o1 1265 cmp %o0, 0 1266 bne 3f 1267 mov -ENOSYS, %o0 1268 mov %i0, %o0 1269 mov %i1, %o1 1270 mov %i2, %o2 1271 mov %i3, %o3 1272 b 2f 1273 mov %i4, %o4 1274 1275 .globl ret_from_fork 1276ret_from_fork: 1277 call schedule_tail 1278 mov %g3, %o0 1279 b ret_sys_call 1280 ld [%sp + STACKFRAME_SZ + PT_I0], %o0 1281 1282 /* Linux native system calls enter here... */ 1283 .align 4 1284 .globl linux_sparc_syscall 1285linux_sparc_syscall: 1286 sethi %hi(PSR_SYSCALL), %l4 1287 or %l0, %l4, %l0 1288 /* Direct access to user regs, must faster. 
*/ 1289 cmp %g1, NR_syscalls 1290 bgeu linux_sparc_ni_syscall 1291 sll %g1, 2, %l4 1292 ld [%l7 + %l4], %l7 1293 andcc %l7, 1, %g0 1294 bne linux_fast_syscall 1295 /* Just do first insn from SAVE_ALL in the delay slot */ 1296 1297syscall_is_too_hard: 1298 SAVE_ALL_HEAD 1299 rd %wim, %l3 1300 1301 wr %l0, PSR_ET, %psr 1302 mov %i0, %o0 1303 mov %i1, %o1 1304 mov %i2, %o2 1305 1306 ld [%curptr + TI_FLAGS], %l5 1307 mov %i3, %o3 1308 andcc %l5, _TIF_SYSCALL_TRACE, %g0 1309 mov %i4, %o4 1310 bne linux_syscall_trace 1311 mov %i0, %l5 13122: 1313 call %l7 1314 mov %i5, %o5 1315 13163: 1317 st %o0, [%sp + STACKFRAME_SZ + PT_I0] 1318 1319ret_sys_call: 1320 ld [%curptr + TI_FLAGS], %l6 1321 cmp %o0, -ERESTART_RESTARTBLOCK 1322 ld [%sp + STACKFRAME_SZ + PT_PSR], %g3 1323 set PSR_C, %g2 1324 bgeu 1f 1325 andcc %l6, _TIF_SYSCALL_TRACE, %g0 1326 1327 /* System call success, clear Carry condition code. */ 1328 andn %g3, %g2, %g3 1329 clr %l6 1330 st %g3, [%sp + STACKFRAME_SZ + PT_PSR] 1331 bne linux_syscall_trace2 1332 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */ 1333 add %l1, 0x4, %l2 /* npc = npc+4 */ 1334 st %l1, [%sp + STACKFRAME_SZ + PT_PC] 1335 b ret_trap_entry 1336 st %l2, [%sp + STACKFRAME_SZ + PT_NPC] 13371: 1338 /* System call failure, set Carry condition code. 1339 * Also, get abs(errno) to return to the process. 
1340 */ 1341 sub %g0, %o0, %o0 1342 or %g3, %g2, %g3 1343 st %o0, [%sp + STACKFRAME_SZ + PT_I0] 1344 mov 1, %l6 1345 st %g3, [%sp + STACKFRAME_SZ + PT_PSR] 1346 bne linux_syscall_trace2 1347 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */ 1348 add %l1, 0x4, %l2 /* npc = npc+4 */ 1349 st %l1, [%sp + STACKFRAME_SZ + PT_PC] 1350 b ret_trap_entry 1351 st %l2, [%sp + STACKFRAME_SZ + PT_NPC] 1352 1353linux_syscall_trace2: 1354 add %sp, STACKFRAME_SZ, %o0 1355 mov 1, %o1 1356 call syscall_trace 1357 add %l1, 0x4, %l2 /* npc = npc+4 */ 1358 st %l1, [%sp + STACKFRAME_SZ + PT_PC] 1359 b ret_trap_entry 1360 st %l2, [%sp + STACKFRAME_SZ + PT_NPC] 1361 1362 1363/* Saving and restoring the FPU state is best done from lowlevel code. 1364 * 1365 * void fpsave(unsigned long *fpregs, unsigned long *fsr, 1366 * void *fpqueue, unsigned long *fpqdepth) 1367 */ 1368 1369 .globl fpsave 1370fpsave: 1371 st %fsr, [%o1] ! this can trap on us if fpu is in bogon state 1372 ld [%o1], %g1 1373 set 0x2000, %g4 1374 andcc %g1, %g4, %g0 1375 be 2f 1376 mov 0, %g2 1377 1378 /* We have an fpqueue to save. 
	 */
1:
	std	%fq, [%o2]			! pop one 8-byte queue entry
fpsave_magic:
	st	%fsr, [%o1]			! refetch FSR (can trap: see fpsave_catch)
	ld	[%o1], %g3
	andcc	%g3, %g4, %g0			! queue still non-empty?
	add	%g2, 1, %g2			! depth++
	bne	1b
	 add	%o2, 8, %o2			! delay: advance queue cursor

2:
	st	%g2, [%o3]			! *fpqdepth = entries saved

	/* Dump %f0-%f31 as sixteen double-word stores into *fpregs. */
	std	%f0, [%o0 + 0x00]
	std	%f2, [%o0 + 0x08]
	std	%f4, [%o0 + 0x10]
	std	%f6, [%o0 + 0x18]
	std	%f8, [%o0 + 0x20]
	std	%f10, [%o0 + 0x28]
	std	%f12, [%o0 + 0x30]
	std	%f14, [%o0 + 0x38]
	std	%f16, [%o0 + 0x40]
	std	%f18, [%o0 + 0x48]
	std	%f20, [%o0 + 0x50]
	std	%f22, [%o0 + 0x58]
	std	%f24, [%o0 + 0x60]
	std	%f26, [%o0 + 0x68]
	std	%f28, [%o0 + 0x70]
	retl
	 std	%f30, [%o0 + 0x78]		! delay: last store rides the return

	/* Thanks for Theo Deraadt and the authors of the Sprite/netbsd/openbsd
	 * code for pointing out this possible deadlock, while we save state
	 * above we could trap on the fsr store so our low level fpu trap
	 * code has to know how to deal with this.
	 *
	 * These entry points resume fpsave just past the trapping %fsr
	 * store (hence the "+ 4" targets), with the store redone here.
	 */
fpsave_catch:
	b	fpsave_magic + 4
	 st	%fsr, [%o1]

fpsave_catch2:
	b	fpsave + 4
	 st	%fsr, [%o1]

	/* void fpload(unsigned long *fpregs, unsigned long *fsr); */

	.globl	fpload
fpload:
	ldd	[%o0 + 0x00], %f0
	ldd	[%o0 + 0x08], %f2
	ldd	[%o0 + 0x10], %f4
	ldd	[%o0 + 0x18], %f6
	ldd	[%o0 + 0x20], %f8
	ldd	[%o0 + 0x28], %f10
	ldd	[%o0 + 0x30], %f12
	ldd	[%o0 + 0x38], %f14
	ldd	[%o0 + 0x40], %f16
	ldd	[%o0 + 0x48], %f18
	ldd	[%o0 + 0x50], %f20
	ldd	[%o0 + 0x58], %f22
	ldd	[%o0 + 0x60], %f24
	ldd	[%o0 + 0x68], %f26
	ldd	[%o0 + 0x70], %f28
	ldd	[%o0 + 0x78], %f30
	ld	[%o1], %fsr			! restore FSR last
	retl
	 nop

	/* __ndelay and __udelay take two arguments:
	 * 0 - nsecs or usecs to delay
	 * 1 - per_cpu udelay_val (loops per jiffy)
	 *
	 * Note that ndelay gives HZ times higher resolution but has a 10ms
	 * limit.  udelay can handle up to 1s.
	 */
	.globl	__ndelay
__ndelay:
	save	%sp, -STACKFRAME_SZ, %sp
	mov	%i0, %o0
	call	.umul			! round multiplier up so large ns ok
	 mov	0x1ae, %o1		! 2**32 / (1 000 000 000 / HZ)
	call	.umul
	 mov	%i1, %o1		! udelay_val
	ba	delay_continue
	 mov	%o1, %o0		! >>32 later for better resolution

	.globl	__udelay
__udelay:
	save	%sp, -STACKFRAME_SZ, %sp
	mov	%i0, %o0
	sethi	%hi(0x10c7), %o1	! round multiplier up so large us ok
	call	.umul
	 or	%o1, %lo(0x10c7), %o1	! 2**32 / 1 000 000
	call	.umul
	 mov	%i1, %o1		! udelay_val
	sethi	%hi(0x028f4b62), %l0	! Add in rounding constant * 2**32,
	or	%g0, %lo(0x028f4b62), %l0
	addcc	%o0, %l0, %o0		! 2**32 * 0.009 999
	bcs,a	3f			! annulled: only executes delay slot on carry
	 add	%o1, 0x01, %o1
3:
	call	.umul
	 mov	HZ, %o0			! >>32 earlier for wider range

	/* Busy-spin %o0 iterations down to zero, then return. */
delay_continue:
	cmp	%o0, 0x0
1:
	bne	1b
	 subcc	%o0, 1, %o0

	ret
	restore

	/* Handle a software breakpoint */
	/* We have to inform parent that child has stopped */
	.align 4
	.globl breakpoint_trap
breakpoint_trap:
	rd	%wim,%l3
	SAVE_ALL
	wr 	%l0, PSR_ET, %psr		! re-enable traps
	WRITE_PAUSE

	st	%i0, [%sp + STACKFRAME_SZ + PT_G0] ! for restarting syscalls
	call	sparc_breakpoint
	 add	%sp, STACKFRAME_SZ, %o0		! delay: %o0 = pt_regs pointer

	RESTORE_ALL

#ifdef CONFIG_KGDB
	/* KGDB breakpoint entry: build pt_regs, enable traps, hand the
	 * frame to the C-level kgdb_trap() handler.
	 */
	.align 4
	.globl kgdb_trap_low
	.type kgdb_trap_low,#function
kgdb_trap_low:
	rd	%wim,%l3
	SAVE_ALL
	wr 	%l0, PSR_ET, %psr
	WRITE_PAUSE

	call	kgdb_trap
	 add	%sp, STACKFRAME_SZ, %o0

	RESTORE_ALL
	.size	kgdb_trap_low,.-kgdb_trap_low
#endif

	.align	4
	.globl	flush_patch_exception
flush_patch_exception:
	FLUSH_ALL_KERNEL_WINDOWS;
	ldd	[%o0], %o6
	jmpl	%o7 + 0xc, %g0			! see asm-sparc/processor.h
	 mov	1, %g1				!
	! signal EFAULT condition

	.align	4
	.globl	kill_user_windows, kuw_patch1_7win
	.globl	kuw_patch1
kuw_patch1_7win:	sll	%o3, 6, %o3	! shift-by-6 variant; presumably patched
						! over kuw_patch1 on 7-window cpus -- confirm

	/* No matter how much overhead this routine has in the worst
	 * case scenerio, it is several times better than taking the
	 * traps with the old method of just doing flush_user_windows().
	 */
kill_user_windows:
	ld	[%g6 + TI_UWINMASK], %o0	! get current umask
	orcc	%g0, %o0, %g0			! if no bits set, we are done
	be	3f				! nothing to do
	 rd	%psr, %o5			! must clear interrupts
	or	%o5, PSR_PIL, %o4		! or else that could change
	wr	%o4, 0x0, %psr			! the uwinmask state
	WRITE_PAUSE				! burn them cycles
1:
	ld	[%g6 + TI_UWINMASK], %o0	! get consistent state
	orcc	%g0, %o0, %g0			! did an interrupt come in?
	be	4f				! yep, we are done
	 rd	%wim, %o3			! get current wim
	srl	%o3, 1, %o4			! simulate a save
kuw_patch1:
	sll	%o3, 7, %o3			! compute next wim
	or	%o4, %o3, %o3			! result
	andncc	%o0, %o3, %o0			! clean this bit in umask
	bne	kuw_patch1			! not done yet
	 srl	%o3, 1, %o4			! begin another save simulation
	wr	%o3, 0x0, %wim			! set the new wim
	st	%g0, [%g6 + TI_UWINMASK]	! clear uwinmask
4:
	wr	%o5, 0x0, %psr			! re-enable interrupts
	WRITE_PAUSE				! burn baby burn
3:
	retl					! return
	 st	%g0, [%g6 + TI_W_SAVED]		! no windows saved

	/* Reload the current thread pointer into %g6 (clobbers %o0). */
	.align	4
	.globl	restore_current
restore_current:
	LOAD_CURRENT(g6, o0)
	retl
	 nop

#ifdef CONFIG_PCI
#include <asm/pcic.h>

	.align	4
	.globl	linux_trap_ipi15_pcic
linux_trap_ipi15_pcic:
	rd	%wim, %l3
	SAVE_ALL

	/*
	 * First deactivate NMI
	 * or we cannot drop ET, cannot get window spill traps.
	 * The busy loop is necessary because the PIO error
	 * sometimes does not go away quickly and we trap again.
	 */
	sethi	%hi(pcic_regs), %o1
	ld	[%o1 + %lo(pcic_regs)], %o2

	! Get pending status for printouts later.
	ld	[%o2 + PCI_SYS_INT_PENDING], %o0

	mov	PCI_SYS_INT_PENDING_CLEAR_ALL, %o1
	stb	%o1, [%o2 + PCI_SYS_INT_PENDING_CLEAR]
1:
	ld	[%o2 + PCI_SYS_INT_PENDING], %o1
	andcc	%o1, ((PCI_SYS_INT_PENDING_PIO|PCI_SYS_INT_PENDING_PCI)>>24), %g0
	bne	1b				! spin until PIO/PCI pending bits drop
	 nop

	or	%l0, PSR_PIL, %l4		! raise PIL to mask interrupts...
	wr	%l4, 0x0, %psr
	WRITE_PAUSE
	wr	%l4, PSR_ET, %psr		! ...then re-enable traps
	WRITE_PAUSE

	call	pcic_nmi			! %o0 still holds pending status
	 add	%sp, STACKFRAME_SZ, %o1	! struct pt_regs *regs
	RESTORE_ALL

	/* NOTE(review): looks like the instruction sequence that gets
	 * patched into the level-15 trap vector to reach the handler
	 * above -- confirm against the pcic setup code.
	 */
	.globl	pcic_nmi_trap_patch
pcic_nmi_trap_patch:
	sethi	%hi(linux_trap_ipi15_pcic), %l3
	jmpl	%l3 + %lo(linux_trap_ipi15_pcic), %g0
	 rd	%psr, %l0
	.word	0

#endif /* CONFIG_PCI */

	/* Flush all register windows to the stack by walking down the
	 * window chain with seven saves and unwinding with restores.
	 */
	.globl	flushw_all
flushw_all:
	save	%sp, -0x40, %sp
	save	%sp, -0x40, %sp
	save	%sp, -0x40, %sp
	save	%sp, -0x40, %sp
	save	%sp, -0x40, %sp
	save	%sp, -0x40, %sp
	save	%sp, -0x40, %sp
	restore
	restore
	restore
	restore
	restore
	restore
	ret
	 restore

/* End of entry.S */