/*
 * ultra.S: Don't expand these all over the place...
 *
 * Copyright (C) 1997, 2000, 2008 David S. Miller (davem@davemloft.net)
 */

#include <asm/asi.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/pil.h>
#include <asm/head.h>
#include <asm/thread_info.h>
#include <asm/cacheflush.h>
#include <asm/hypervisor.h>
#include <asm/cpudata.h>

	/* Basically, most of the Spitfire vs. Cheetah madness
	 * has to do with the fact that Cheetah does not support
	 * IMMU flushes out of the secondary context.  Someone needs
	 * to throw a south lake birthday party for the folks
	 * in Microelectronics who refused to fix this shit.
	 */

	/* This file is meant to be read efficiently by the CPU, not humans.
	 * Try hard not to break it for anybody...
	 *
	 * NOTE(review): the "NN insns" annotations below are load-bearing.
	 * tlb_patch_one() copies exactly that many instructions when the
	 * Cheetah/hypervisor variants are patched over the generic ones at
	 * boot, so a routine must never grow past its annotated size.
	 */
	.text
	.align		32
	.globl		__flush_tlb_mm
__flush_tlb_mm:		/* 18 insns */
	/* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT
	 *
	 * Spitfire version: flush all TLB entries for context %o0.
	 * Fast path requires %o0 to already be the current secondary
	 * context; otherwise fall into the slow path, which installs
	 * it temporarily.
	 */
	ldxa		[%o1] ASI_DMMU, %g2	! %g2 = current secondary ctx
	cmp		%g2, %o0
	bne,pn		%icc, __spitfire_flush_tlb_mm_slow
	 mov		0x50, %g3		! demap VA: ctx-op | secondary (delay slot)
	stxa		%g0, [%g3] ASI_DMMU_DEMAP	! demap D-TLB context
	stxa		%g0, [%g3] ASI_IMMU_DEMAP	! demap I-TLB context
	sethi		%hi(KERNBASE), %g3
	flush		%g3			! flush synchronizes the MMU stores
	retl
	 nop
	nop					! pad to 18 insns for boot patching
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	.align		32
	.globl		__flush_tlb_pending
__flush_tlb_pending:	/* 26 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[]
	 *
	 * Demap %o1 individual pages of context %o0, addresses taken from
	 * the vaddrs[] array.  Bit 0 of each vaddr flags "also flush the
	 * I-TLB".  Interrupts are disabled while SECONDARY_CONTEXT is
	 * temporarily switched to the target context.
	 */
	rdpr		%pstate, %g7		! save pstate for the epilogue
	sllx		%o1, 3, %o1		! nr -> byte offset past array end
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, %pstate		! disable interrupts
	mov		SECONDARY_CONTEXT, %o4
	ldxa		[%o4] ASI_DMMU, %g2	! save current secondary ctx
	stxa		%o0, [%o4] ASI_DMMU	! install target ctx
1:	sub		%o1, (1 << 3), %o1	! step back one 8-byte slot
	ldx		[%o2 + %o1], %o3
	andcc		%o3, 1, %g0		! bit 0: I-TLB flush wanted?
	andn		%o3, 1, %o3		! strip the flag bit
	be,pn		%icc, 2f
	 or		%o3, 0x10, %o3		! demap VA: page-op | secondary
	stxa		%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
	membar		#Sync
	brnz,pt		%o1, 1b
	 nop
	stxa		%g2, [%o4] ASI_DMMU	! restore original secondary ctx
	sethi		%hi(KERNBASE), %o4
	flush		%o4
	retl
	 wrpr		%g7, 0x0, %pstate	! re-enable interrupts (delay slot)
	nop					! pad to 26 insns for boot patching
	nop
	nop
	nop
	nop

	.align		32
	.globl		__flush_tlb_kernel_range
__flush_tlb_kernel_range:	/* 16 insns */
	/* %o0=start, %o1=end
	 *
	 * Demap every kernel (nucleus context) page in [start, end).
	 * Assumes start/end are page aligned; the offset %o3 counts down
	 * from (end - start - PAGE_SIZE) to 0 inclusive.
	 */
	cmp		%o0, %o1
	be,pn		%xcc, 2f		! empty range: nothing to do
	 sethi		%hi(PAGE_SIZE), %o4
	sub		%o1, %o0, %o3
	sub		%o3, %o4, %o3		! %o3 = offset of last page
	or		%o0, 0x20, %o0		! Nucleus
1:	stxa		%g0, [%o0 + %o3] ASI_DMMU_DEMAP
	stxa		%g0, [%o0 + %o3] ASI_IMMU_DEMAP
	membar		#Sync
	brnz,pt		%o3, 1b
	 sub		%o3, %o4, %o3		! next lower page (delay slot)
2:	sethi		%hi(KERNBASE), %o3
	flush		%o3
	retl
	 nop
	nop

__spitfire_flush_tlb_mm_slow:
	/* Slow path of __flush_tlb_mm: %o0=ctx, %o1=SECONDARY_CONTEXT,
	 * %g2=previous secondary ctx, %g3=0x50 (ctx-demap, secondary).
	 * Swap the target ctx in, demap it, and restore, with interrupts
	 * disabled for the duration.
	 */
	rdpr		%pstate, %g1
	wrpr		%g1, PSTATE_IE, %pstate	! toggle IE off
	stxa		%o0, [%o1] ASI_DMMU	! install target ctx
	stxa		%g0, [%g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	flush		%g6			! synchronize (NOTE(review): %g6 is
						! presumably the thread pointer, used
						! only as a flushable address — confirm)
	stxa		%g2, [%o1] ASI_DMMU	! restore previous ctx
	sethi		%hi(KERNBASE), %o1
	flush		%o1
	retl
	 wrpr		%g1, 0, %pstate		! restore pstate (delay slot)

/*
 * The following code flushes one page_size worth.
 */
	.section .kprobes.text, "ax"
	.align		32
	.globl		__flush_icache_page
__flush_icache_page:	/* %o0 = phys_page */
	/* Flush the I-cache for one page.  Build the kernel linear-map
	 * address of the page and issue a flush insn per 32-byte line.
	 */
	srlx		%o0, PAGE_SHIFT, %o0	! drop any in-page offset bits
	sethi		%uhi(PAGE_OFFSET), %g1
	sllx		%o0, PAGE_SHIFT, %o0
	sethi		%hi(PAGE_SIZE), %g2
	sllx		%g1, 32, %g1		! %g1 = PAGE_OFFSET
	add		%o0, %g1, %o0		! phys -> kernel virtual
1:	subcc		%g2, 32, %g2		! one 32-byte line per iteration
	bne,pt		%icc, 1b
	 flush		%o0 + %g2		! (delay slot)
	retl
	 nop

#ifdef DCACHE_ALIASING_POSSIBLE

#if (PAGE_SHIFT != 13)
#error only page shift of 13 is supported by dcache flush
#endif

#define DTAG_MASK 0x3

	/* This routine is Spitfire specific so the hardcoded
	 * D-cache size and line-size are OK.
	 */
	.align		64
	.globl		__flush_dcache_page
__flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
	/* Walk every D-cache tag; invalidate lines whose tag matches this
	 * page's physical address.  Optionally tail-call into
	 * __flush_icache_page afterwards (%o1 != 0).
	 */
	sethi		%uhi(PAGE_OFFSET), %g1
	sllx		%g1, 32, %g1
	sub		%o0, %g1, %o0			! physical address
	srlx		%o0, 11, %o0			! make D-cache TAG
	sethi		%hi(1 << 14), %o2		! D-cache size
	sub		%o2, (1 << 5), %o2		! D-cache line size
1:	ldxa		[%o2] ASI_DCACHE_TAG, %o3	! load D-cache TAG
	andcc		%o3, DTAG_MASK, %g0		! Valid?
	be,pn		%xcc, 2f			! Nope, branch
	 andn		%o3, DTAG_MASK, %o3		! Clear valid bits
	cmp		%o3, %o0			! TAG match?
	bne,pt		%xcc, 2f			! Nope, branch
	 nop
	stxa		%g0, [%o2] ASI_DCACHE_TAG	! Invalidate TAG
	membar		#Sync
2:	brnz,pt		%o2, 1b
	 sub		%o2, (1 << 5), %o2		! D-cache line size

	/* The I-cache does not snoop local stores so we
	 * better flush that too when necessary.
	 */
	brnz,pt		%o1, __flush_icache_page
	 sllx		%o0, 11, %o0		! rebuild phys addr for callee
	retl
	 nop

#endif /* DCACHE_ALIASING_POSSIBLE */

	.previous

	/* Cheetah specific versions, patched at boot time. */
__cheetah_flush_tlb_mm:	/* 19 insns */
	/* Cheetah cannot demap out of the secondary context (see header
	 * comment), so borrow PRIMARY_CONTEXT instead, at TL=1 so our own
	 * accesses run in the nucleus context meanwhile.  0x40 is the
	 * "demap context, primary" demap address.
	 */
	rdpr		%pstate, %g7
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, 0x0, %pstate	! disable interrupts
	wrpr		%g0, 1, %tl		! raise to trap level 1
	mov		PRIMARY_CONTEXT, %o2
	mov		0x40, %g3		! demap VA: ctx-op | primary
	ldxa		[%o2] ASI_DMMU, %g2	! save current primary ctx
	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o1
	sllx		%o1, CTX_PGSZ1_NUC_SHIFT, %o1
	or		%o0, %o1, %o0	/* Preserve nucleus page size fields */
	stxa		%o0, [%o2] ASI_DMMU
	stxa		%g0, [%g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	stxa		%g2, [%o2] ASI_DMMU	! restore primary ctx
	sethi		%hi(KERNBASE), %o2
	flush		%o2
	wrpr		%g0, 0, %tl		! back to trap level 0
	retl
	 wrpr		%g7, 0x0, %pstate	! re-enable interrupts

__cheetah_flush_tlb_pending:	/* 27 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[]
	 *
	 * Cheetah version of __flush_tlb_pending: same per-page demap loop
	 * but via PRIMARY_CONTEXT at TL=1 (see __cheetah_flush_tlb_mm),
	 * preserving the nucleus page-size fields of the context register.
	 */
	rdpr		%pstate, %g7
	sllx		%o1, 3, %o1		! nr -> byte offset past array end
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, 0x0, %pstate	! disable interrupts
	wrpr		%g0, 1, %tl		! raise to trap level 1
	mov		PRIMARY_CONTEXT, %o4
	ldxa		[%o4] ASI_DMMU, %g2	! save current primary ctx
	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o3
	sllx		%o3, CTX_PGSZ1_NUC_SHIFT, %o3
	or		%o0, %o3, %o0	/* Preserve nucleus page size fields */
	stxa		%o0, [%o4] ASI_DMMU
1:	sub		%o1, (1 << 3), %o1
	ldx		[%o2 + %o1], %o3
	andcc		%o3, 1, %g0		! bit 0: I-TLB flush wanted?
	be,pn		%icc, 2f
	 andn		%o3, 1, %o3		! strip flag bit (delay slot)
	stxa		%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
	membar		#Sync
	brnz,pt		%o1, 1b
	 nop
	stxa		%g2, [%o4] ASI_DMMU	! restore primary ctx
	sethi		%hi(KERNBASE), %o4
	flush		%o4
	wrpr		%g0, 0, %tl		! back to trap level 0
	retl
	 wrpr		%g7, 0x0, %pstate	! re-enable interrupts

#ifdef DCACHE_ALIASING_POSSIBLE
__cheetah_flush_dcache_page: /* 11 insns */
	/* %o0 = kernel virtual address of the page.
	 * Cheetah supports displacement-free invalidation by physical
	 * address via ASI_DCACHE_INVALIDATE, one 32-byte line at a time.
	 */
	sethi		%uhi(PAGE_OFFSET), %g1
	sllx		%g1, 32, %g1
	sub		%o0, %g1, %o0		! kernel virtual -> physical
	sethi		%hi(PAGE_SIZE), %o4
1:	subcc		%o4, (1 << 5), %o4	! one cache line per iteration
	stxa		%g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
	membar		#Sync
	bne,pt		%icc, 1b
	 nop
	retl		/* I-cache flush never needed on Cheetah, see callers. */
	 nop
#endif /* DCACHE_ALIASING_POSSIBLE */

	/* Hypervisor specific versions, patched at boot time. */
__hypervisor_tlb_tl0_error:
	/* TL0 error path: report a failed hypervisor TLB operation.
	 * %o0 = HV error code, %o1 = which TLB op failed.
	 */
	save		%sp, -192, %sp
	mov		%i0, %o0
	call		hypervisor_tlbop_error
	 mov		%i1, %o1
	ret
	 restore

__hypervisor_flush_tlb_mm: /* 10 insns */
	/* %o0 = mmu context to flush.  One MMU_DEMAP_CTX fast trap
	 * flushes both TLBs for the whole context.
	 */
	mov		%o0, %o2	/* ARG2: mmu context */
	mov		0, %o0		/* ARG0: CPU lists unimplemented */
	mov		0, %o1		/* ARG1: CPU lists unimplemented */
	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
	mov		HV_FAST_MMU_DEMAP_CTX, %o5
	ta		HV_FAST_TRAP
	brnz,pn		%o0, __hypervisor_tlb_tl0_error
	 mov		HV_FAST_MMU_DEMAP_CTX, %o1	! tell error path which op
	retl
	 nop

__hypervisor_flush_tlb_pending: /* 16 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[]
	 *
	 * One MMU_UNMAP_ADDR trap per page.  The low bits of each vaddr
	 * (the I/D flag used by the Spitfire version) are masked off by
	 * the PAGE_SHIFT round-down; HV_MMU_ALL unmaps both I and D.
	 */
	sllx		%o1, 3, %g1		! nr -> byte offset past array end
	mov		%o2, %g2		! keep args in globals across traps
	mov		%o0, %g3
1:	sub		%g1, (1 << 3), %g1
	ldx		[%g2 + %g1], %o0     /* ARG0: vaddr + IMMU-bit */
	mov		%g3, %o1	     /* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2	     /* ARG2: flags */
	srlx		%o0, PAGE_SHIFT, %o0
	sllx		%o0, PAGE_SHIFT, %o0	! page-align, strip flag bits
	ta		HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn		%o0, __hypervisor_tlb_tl0_error
	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
	brnz,pt		%g1, 1b
	 nop
	retl
	 nop

__hypervisor_flush_tlb_kernel_range: /* 16 insns */
	/* %o0=start, %o1=end
	 *
	 * Unmap each kernel page in [start, end) via MMU_UNMAP_ADDR in
	 * context 0.  %g2 counts down from (end - start - PAGE_SIZE).
	 */
	cmp		%o0, %o1
	be,pn		%xcc, 2f		! empty range: done
	 sethi		%hi(PAGE_SIZE), %g3
	mov		%o0, %g1		! %g1 = start (survives the trap)
	sub		%o1, %g1, %g2
	sub		%g2, %g3, %g2		! %g2 = offset of last page
1:	add		%g1, %g2, %o0	/* ARG0: virtual address */
	mov		0, %o1		/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
	ta		HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn		%o0, __hypervisor_tlb_tl0_error
	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
	brnz,pt		%g2, 1b
	 sub		%g2, %g3, %g2		! next lower page (delay slot)
2:	retl
	 nop

#ifdef DCACHE_ALIASING_POSSIBLE
	/* XXX Niagara and friends have an 8K cache, so no aliasing is
	 * XXX possible, but nothing explicit in the Hypervisor API
	 * XXX guarantees this.  (NOTE(review): the D-cache is coherent on
	 * sun4v as far as the callers here care, hence the empty stub.)
	 */
__hypervisor_flush_dcache_page:	/* 2 insns */
	retl
	 nop
#endif

tlb_patch_one:
	/* Copy %o2 instructions (32-bit words) from %o1 to %o0, flushing
	 * the I-cache for each patched word.  Used by the boot-time
	 * patchers below; the counts they pass MUST match the "NN insns"
	 * annotations on the replacement routines.
	 */
1:	lduw		[%o1], %g1
	stw		%g1, [%o0]
	flush		%o0			! keep I-cache coherent with the store
	subcc		%o2, 1, %o2
	add		%o1, 4, %o1
	bne,pt		%icc, 1b
	 add		%o0, 4, %o0
	retl
	 nop

	.globl		cheetah_patch_cachetlbops
cheetah_patch_cachetlbops:
	/* Overwrite the generic Spitfire routines with the Cheetah
	 * versions.  Instruction counts match the annotations above.
	 */
	save		%sp, -128, %sp

	sethi		%hi(__flush_tlb_mm), %o0
	or		%o0, %lo(__flush_tlb_mm), %o0
	sethi		%hi(__cheetah_flush_tlb_mm), %o1
	or		%o1, %lo(__cheetah_flush_tlb_mm), %o1
	call		tlb_patch_one
	 mov		19, %o2

	sethi		%hi(__flush_tlb_pending), %o0
	or		%o0, %lo(__flush_tlb_pending), %o0
	sethi		%hi(__cheetah_flush_tlb_pending), %o1
	or		%o1, %lo(__cheetah_flush_tlb_pending), %o1
	call		tlb_patch_one
	 mov		27, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi		%hi(__flush_dcache_page), %o0
	or		%o0, %lo(__flush_dcache_page), %o0
	sethi		%hi(__cheetah_flush_dcache_page), %o1
	or		%o1, %lo(__cheetah_flush_dcache_page), %o1
	call		tlb_patch_one
	 mov		11, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

	ret
	 restore

#ifdef CONFIG_SMP
	/* These are all called by the slaves of a cross call, at
	 * trap level 1, with interrupts fully disabled.
	 *
	 * Register usage:
	 * %g5	mm->context	(all tlb flushes)
	 * %g1	address arg 1	(tlb page and range flushes)
	 * %g7	address arg 2	(tlb range flush only)
	 *
	 * %g6	scratch 1
	 * %g2	scratch 2
	 * %g3	scratch 3
	 * %g4	scratch 4
	 */
	.align		32
	.globl		xcall_flush_tlb_mm
xcall_flush_tlb_mm:	/* 21 insns */
	/* Cross-call slave: demap context %g5 via PRIMARY_CONTEXT,
	 * preserving the nucleus page-size fields.  We already run at
	 * TL=1 with interrupts off, so no pstate/tl juggling is needed.
	 */
	mov		PRIMARY_CONTEXT, %g2
	ldxa		[%g2] ASI_DMMU, %g3	! save current primary ctx
	srlx		%g3, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx		%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or		%g5, %g4, %g5	/* Preserve nucleus page size fields */
	stxa		%g5, [%g2] ASI_DMMU
	mov		0x40, %g4		! demap VA: ctx-op | primary
	stxa		%g0, [%g4] ASI_DMMU_DEMAP
	stxa		%g0, [%g4] ASI_IMMU_DEMAP
	stxa		%g3, [%g2] ASI_DMMU	! restore primary ctx
	retry
	nop					! pad to 21 insns for boot patching
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	.globl		xcall_flush_tlb_pending
xcall_flush_tlb_pending:	/* 21 insns */
	/* %g5=context, %g1=nr, %g7=vaddrs[]
	 *
	 * Cross-call slave version of the per-page demap loop; bit 0 of
	 * each vaddr again means "flush the I-TLB too".
	 */
	sllx		%g1, 3, %g1		! nr -> byte offset past array end
	mov		PRIMARY_CONTEXT, %g4
	ldxa		[%g4] ASI_DMMU, %g2	! save current primary ctx
	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx		%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or		%g5, %g4, %g5		! preserve nucleus page-size fields
	mov		PRIMARY_CONTEXT, %g4
	stxa		%g5, [%g4] ASI_DMMU
1:	sub		%g1, (1 << 3), %g1
	ldx		[%g7 + %g1], %g5
	andcc		%g5, 0x1, %g0		! I-TLB flush wanted?
	be,pn		%icc, 2f

	 andn		%g5, 0x1, %g5		! strip flag bit (delay slot)
	stxa		%g0, [%g5] ASI_IMMU_DEMAP
2:	stxa		%g0, [%g5] ASI_DMMU_DEMAP
	membar		#Sync
	brnz,pt		%g1, 1b
	 nop
	stxa		%g2, [%g4] ASI_DMMU	! restore primary ctx
	retry
	nop					! pad to 21 insns

	.globl		xcall_flush_tlb_kernel_range
xcall_flush_tlb_kernel_range:	/* 25 insns */
	/* %g1=start, %g7=end: demap each nucleus-context page in the
	 * (page-aligned) range, counting %g3 down from the last page.
	 */
	sethi		%hi(PAGE_SIZE - 1), %g2
	or		%g2, %lo(PAGE_SIZE - 1), %g2
	andn		%g1, %g2, %g1		! page-align start
	andn		%g7, %g2, %g7		! page-align end
	sub		%g7, %g1, %g3
	add		%g2, 1, %g2		! %g2 = PAGE_SIZE
	sub		%g3, %g2, %g3		! %g3 = offset of last page
	or		%g1, 0x20, %g1		! Nucleus
1:	stxa		%g0, [%g1 + %g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g1 + %g3] ASI_IMMU_DEMAP
	membar		#Sync
	brnz,pt		%g3, 1b
	 sub		%g3, %g2, %g3		! next lower page (delay slot)
	retry
	nop					! pad to 25 insns for boot patching
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	/* This runs in a very controlled environment, so we do
	 * not need to worry about BH races etc.
	 */
	.globl		xcall_sync_tick
xcall_sync_tick:

	/* The 661 label marks the two instructions the boot-time sun4v
	 * patcher replaces (with the nops recorded in .sun4v_2insn_patch):
	 * sun4v has no PSTATE_IG/AG alternate-global sets to leave.
	 */
661:	rdpr		%pstate, %g2
	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	rdpr		%pil, %g2
	wrpr		%g0, PIL_NORMAL_MAX, %pil
	sethi		%hi(109f), %g7
	b,pt		%xcc, etrap_irq		! build a full trap frame
109:	 or		%g7, %lo(109b), %g7	! return point for etrap (delay slot)
#ifdef CONFIG_TRACE_IRQFLAGS
	call		trace_hardirqs_off
	 nop
#endif
	call		smp_synchronize_tick_client
	 nop
	b		rtrap_xcall
	 ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1

	.globl		xcall_fetch_glob_regs
xcall_fetch_glob_regs:
	/* Snapshot this CPU's trap state into its 64-byte slot of
	 * global_reg_snapshot[] (hence the << 6): %tstate, %tpc, %tnpc,
	 * %o7, %i7, the caller's %i7 from the previous register window,
	 * and the thread pointer from the trap_block.
	 */
	sethi		%hi(global_reg_snapshot), %g1
	or		%g1, %lo(global_reg_snapshot), %g1
	__GET_CPUID(%g2)
	sllx		%g2, 6, %g3		! cpu * sizeof(snapshot slot)
	add		%g1, %g3, %g1
	rdpr		%tstate, %g7
	stx		%g7, [%g1 + GR_SNAP_TSTATE]
	rdpr		%tpc, %g7
	stx		%g7, [%g1 + GR_SNAP_TPC]
	rdpr		%tnpc, %g7
	stx		%g7, [%g1 + GR_SNAP_TNPC]
	stx		%o7, [%g1 + GR_SNAP_O7]
	stx		%i7, [%g1 + GR_SNAP_I7]
	/* Don't try this at home kids... */
	rdpr		%cwp, %g2
	sub		%g2, 1, %g7
	wrpr		%g7, %cwp		! rotate to the previous window
	mov		%i7, %g7		! grab caller's return address
	wrpr		%g2, %cwp		! rotate back
	stx		%g7, [%g1 + GR_SNAP_RPC]
	sethi		%hi(trap_block), %g7
	or		%g7, %lo(trap_block), %g7
	sllx		%g2, TRAP_BLOCK_SZ_SHIFT, %g2
	add		%g7, %g2, %g7
	ldx		[%g7 + TRAP_PER_CPU_THREAD], %g3
	stx		%g3, [%g1 + GR_SNAP_THREAD]
	retry

#ifdef DCACHE_ALIASING_POSSIBLE
	.align		32
	.globl		xcall_flush_dcache_page_cheetah
xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
	/* Cross-call slave: invalidate the page by physical address,
	 * one 32-byte D-cache line per iteration.
	 */
	sethi		%hi(PAGE_SIZE), %g3
1:	subcc		%g3, (1 << 5), %g3
	stxa		%g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
	membar		#Sync
	bne,pt		%icc, 1b
	 nop
	retry
	nop
#endif /* DCACHE_ALIASING_POSSIBLE */

	.globl		xcall_flush_dcache_page_spitfire
xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
				     %g7 == kernel page virtual address
				     %g5 == (page->mapping != NULL)  */
	/* Cross-call slave: walk the D-cache tags and invalidate lines
	 * matching the page; if the page is mapped (%g5 != 0), also flush
	 * the I-cache for it via the flush insn loop at the end.
	 */
#ifdef DCACHE_ALIASING_POSSIBLE
	srlx		%g1, (13 - 2), %g1	! Form tag comparitor
	sethi		%hi(L1DCACHE_SIZE), %g3	! D$ size == 16K
	sub		%g3, (1 << 5), %g3	! D$ linesize == 32
1:	ldxa		[%g3] ASI_DCACHE_TAG, %g2
	andcc		%g2, 0x3, %g0		! line valid?
	be,pn		%xcc, 2f
	 andn		%g2, 0x3, %g2		! strip valid bits (delay slot)
	cmp		%g2, %g1		! tag match?

	bne,pt		%xcc, 2f
	 nop
	stxa		%g0, [%g3] ASI_DCACHE_TAG	! invalidate the line
	membar		#Sync
2:	cmp		%g3, 0
	bne,pt		%xcc, 1b
	 sub		%g3, (1 << 5), %g3

	brz,pn		%g5, 2f			! not mapped: skip I-cache flush
#endif /* DCACHE_ALIASING_POSSIBLE */
	 sethi		%hi(PAGE_SIZE), %g3

1:	flush		%g7			! flush one I-cache line
	subcc		%g3, (1 << 5), %g3
	bne,pt		%icc, 1b
	 add		%g7, (1 << 5), %g7

2:	retry
	nop
	nop

	/* %g5:	error
	 * %g6:	tlb op
	 */
__hypervisor_tlb_xcall_error:
	/* Error path for hypervisor xcall TLB ops: build a trap frame via
	 * etrap and report the failure.  NOTE(review): the %l4/%l5 reads
	 * after etrap presumably pick up the pre-trap %g4/%g5 values saved
	 * by etrap — confirm against etrap.S.
	 */
	mov		%g5, %g4
	mov		%g6, %g5
	ba,pt		%xcc, etrap
	 rd		%pc, %g7
	mov		%l4, %o0
	call		hypervisor_tlbop_error_xcall
	 mov		%l5, %o1
	ba,a,pt		%xcc, rtrap

	.globl		__hypervisor_xcall_flush_tlb_mm
__hypervisor_xcall_flush_tlb_mm: /* 21 insns */
	/* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable
	 *
	 * Demap context %g5 via MMU_DEMAP_CTX.  The interrupted context's
	 * %o registers must survive, so stash them in globals around the
	 * fast trap and restore them afterwards.
	 */
	mov		%o0, %g2
	mov		%o1, %g3
	mov		%o2, %g4
	mov		%o3, %g1
	mov		%o5, %g7
	clr		%o0		/* ARG0: CPU lists unimplemented */
	clr		%o1		/* ARG1: CPU lists unimplemented */
	mov		%g5, %o2	/* ARG2: mmu context */
	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
	mov		HV_FAST_MMU_DEMAP_CTX, %o5
	ta		HV_FAST_TRAP
	mov		HV_FAST_MMU_DEMAP_CTX, %g6	! op code for error path
	brnz,pn		%o0, __hypervisor_tlb_xcall_error
	 mov		%o0, %g5			! error code for error path
	mov		%g2, %o0	/* restore the interrupted %o regs */
	mov		%g3, %o1
	mov		%g4, %o2
	mov		%g1, %o3
	mov		%g7, %o5
	membar		#Sync
	retry

	.globl		__hypervisor_xcall_flush_tlb_pending
__hypervisor_xcall_flush_tlb_pending: /* 21 insns */
	/* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch
	 *
	 * One MMU_UNMAP_ADDR trap per page, preserving the interrupted
	 * context's %o0-%o2 in globals across the loop.
	 */
	sllx		%g1, 3, %g1		! nr -> byte offset past array end
	mov		%o0, %g2
	mov		%o1, %g3
	mov		%o2, %g4
1:	sub		%g1, (1 << 3), %g1
	ldx		[%g7 + %g1], %o0	/* ARG0: virtual address */
	mov		%g5, %o1		/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2		/* ARG2: flags */
	srlx		%o0, PAGE_SHIFT, %o0
	sllx		%o0, PAGE_SHIFT, %o0	! page-align, strip flag bits
	ta		HV_MMU_UNMAP_ADDR_TRAP
	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6	! op code for error path
	brnz,a,pn	%o0, __hypervisor_tlb_xcall_error
	 mov		%o0, %g5		! error code (annulled if no error)
	brnz,pt		%g1, 1b
	 nop
	mov		%g2, %o0	/* restore the interrupted %o regs */
	mov		%g3, %o1
	mov		%g4, %o2
	membar		#Sync
	retry

	.globl		__hypervisor_xcall_flush_tlb_kernel_range
__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */
	/* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch
	 *
	 * Unmap each kernel page in the (page-aligned) range in context 0.
	 * %g3 counts down; %g7 is reused as scratch once the range length
	 * has been computed.
	 */
	sethi		%hi(PAGE_SIZE - 1), %g2
	or		%g2, %lo(PAGE_SIZE - 1), %g2
	andn		%g1, %g2, %g1		! page-align start
	andn		%g7, %g2, %g7		! page-align end
	sub		%g7, %g1, %g3
	add		%g2, 1, %g2		! %g2 = PAGE_SIZE (briefly)
	sub		%g3, %g2, %g3		! %g3 = offset of last page
	mov		%o0, %g2	/* stash interrupted %o regs */
	mov		%o1, %g4
	mov		%o2, %g7
1:	add		%g1, %g3, %o0	/* ARG0: virtual address */
	mov		0, %o1		/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
	ta		HV_MMU_UNMAP_ADDR_TRAP
	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6	! op code for error path
	brnz,pn		%o0, __hypervisor_tlb_xcall_error
	 mov		%o0, %g5		! error code for error path
	sethi		%hi(PAGE_SIZE), %o2	! reload; %o2 was clobbered above
	brnz,pt		%g3, 1b
	 sub		%g3, %o2, %g3		! next lower page (delay slot)
	mov		%g2, %o0	/* restore the interrupted %o regs */
	mov		%g4, %o1
	mov		%g7, %o2
	membar		#Sync
	retry

	/* These just get rescheduled to PIL vectors.
	 */
	.globl		xcall_call_function
xcall_call_function:
	/* Raise the smp_call_function softint on this CPU. */
	wr		%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
	retry

	.globl		xcall_call_function_single
xcall_call_function_single:
	/* Raise the single-target smp_call_function softint. */
	wr		%g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint
	retry

	.globl		xcall_receive_signal
xcall_receive_signal:
	/* Raise the reschedule/signal softint. */
	wr		%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
	retry

	.globl		xcall_capture
xcall_capture:
	/* Raise the CPU-capture softint. */
	wr		%g0, (1 << PIL_SMP_CAPTURE), %set_softint
	retry

	.globl		xcall_new_mmu_context_version
xcall_new_mmu_context_version:
	/* Raise the new-MMU-context-version softint. */
	wr		%g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
	retry

#ifdef CONFIG_KGDB
	.globl		xcall_kgdb_capture
xcall_kgdb_capture:
	/* Raise the KGDB capture softint. */
	wr		%g0, (1 << PIL_KGDB_CAPTURE), %set_softint
	retry
#endif

#endif /* CONFIG_SMP */


	.globl		hypervisor_patch_cachetlbops
hypervisor_patch_cachetlbops:
	/* Overwrite the generic (and, under CONFIG_SMP, the xcall)
	 * cache/TLB flush routines with the sun4v hypervisor versions.
	 * Each count passed to tlb_patch_one MUST match the "NN insns"
	 * annotation on the corresponding replacement routine.
	 */
	save		%sp, -128, %sp

	sethi		%hi(__flush_tlb_mm), %o0
	or		%o0, %lo(__flush_tlb_mm), %o0
	sethi		%hi(__hypervisor_flush_tlb_mm), %o1
	or		%o1, %lo(__hypervisor_flush_tlb_mm), %o1
	call		tlb_patch_one
	 mov		10, %o2

	sethi		%hi(__flush_tlb_pending), %o0
	or		%o0, %lo(__flush_tlb_pending), %o0
	sethi		%hi(__hypervisor_flush_tlb_pending), %o1
	or		%o1, %lo(__hypervisor_flush_tlb_pending), %o1
	call		tlb_patch_one
	 mov		16, %o2

	sethi		%hi(__flush_tlb_kernel_range), %o0
	or		%o0, %lo(__flush_tlb_kernel_range), %o0
	sethi		%hi(__hypervisor_flush_tlb_kernel_range), %o1
	or		%o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
	call		tlb_patch_one
	 mov		16, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi		%hi(__flush_dcache_page), %o0
	or		%o0, %lo(__flush_dcache_page), %o0
	sethi		%hi(__hypervisor_flush_dcache_page), %o1
	or		%o1, %lo(__hypervisor_flush_dcache_page), %o1
	call		tlb_patch_one
	 mov		2, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

#ifdef CONFIG_SMP
	sethi		%hi(xcall_flush_tlb_mm), %o0
	or		%o0, %lo(xcall_flush_tlb_mm), %o0
	sethi		%hi(__hypervisor_xcall_flush_tlb_mm), %o1
	or		%o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
	call		tlb_patch_one
	 mov		21, %o2

	sethi		%hi(xcall_flush_tlb_pending), %o0
	or		%o0, %lo(xcall_flush_tlb_pending), %o0
	sethi		%hi(__hypervisor_xcall_flush_tlb_pending), %o1
	or		%o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
	call		tlb_patch_one
	 mov		21, %o2

	sethi		%hi(xcall_flush_tlb_kernel_range), %o0
	or		%o0, %lo(xcall_flush_tlb_kernel_range), %o0
	sethi		%hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	or		%o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	call		tlb_patch_one
	 mov		25, %o2
#endif /* CONFIG_SMP */

	ret
	 restore