/*
 * rtrap.S: Preparing for return from trap on Sparc V9.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/spitfire.h>
#include <asm/head.h>
#include <asm/visasm.h>
#include <asm/processor.h>

#define RTRAP_PSTATE		(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
#define RTRAP_PSTATE_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
#define RTRAP_PSTATE_AG_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)

	.text
	.align	32
__handle_softirq:
	call	do_softirq
	 nop
	ba,a,pt	%xcc, __handle_softirq_continue
	 nop
__handle_preemption:
	call	schedule
	 wrpr	%g0, RTRAP_PSTATE, %pstate
	ba,pt	%xcc, __handle_preemption_continue
	 wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate

__handle_user_windows:
	call	fault_in_user_windows
	 wrpr	%g0, RTRAP_PSTATE, %pstate
	wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate
	/* Redo sched+sig checks */
	ldx	[%g6 + TI_FLAGS], %l0
	andcc	%l0, _TIF_NEED_RESCHED, %g0

	be,pt	%xcc, 1f
	 nop
	call	schedule
	 wrpr	%g0, RTRAP_PSTATE, %pstate
	wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate
	ldx	[%g6 + TI_FLAGS], %l0

1:	andcc	%l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0
	be,pt	%xcc, __handle_user_windows_continue
	 nop
	mov	%l5, %o1
	add	%sp, PTREGS_OFF, %o0
	mov	%l0, %o2

	call	do_notify_resume
	 wrpr	%g0, RTRAP_PSTATE, %pstate
	wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate
	/* Signal delivery can modify pt_regs tstate, so we must
	 * reload it.
	 */
	ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
	sethi	%hi(0xf << 20), %l4
	and	%l1, %l4, %l4
	ba,pt	%xcc, __handle_user_windows_continue

	 andn	%l1, %l4, %l1
__handle_userfpu:
	rd	%fprs, %l5
	andcc	%l5, FPRS_FEF, %g0
	sethi	%hi(TSTATE_PEF), %o0
	be,a,pn	%icc, __handle_userfpu_continue
	 andn	%l1, %o0, %l1
	ba,a,pt	%xcc, __handle_userfpu_continue

__handle_signal:
	mov	%l5, %o1
	add	%sp, PTREGS_OFF, %o0
	mov	%l0, %o2
	call	do_notify_resume
	 wrpr	%g0, RTRAP_PSTATE, %pstate
	wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate

	/* Signal delivery can modify pt_regs tstate, so we must
	 * reload it.
	 */
	ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
	sethi	%hi(0xf << 20), %l4
	and	%l1, %l4, %l4
	ba,pt	%xcc, __handle_signal_continue
	 andn	%l1, %l4, %l1

	/* When returning from an NMI (%pil==15) interrupt we want to
	 * avoid running softirqs, doing IRQ tracing, preempting, etc.
	 */
	.globl	rtrap_nmi
rtrap_nmi:	ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
	sethi	%hi(0xf << 20), %l4
	and	%l1, %l4, %l4
	andn	%l1, %l4, %l1
	srl	%l4, 20, %l4
	ba,pt	%xcc, rtrap_no_irq_enable
	 wrpr	%l4, %pil

	.align	64
	.globl	rtrap_irq, rtrap, irqsz_patchme, rtrap_xcall
rtrap_irq:
rtrap:
#ifndef CONFIG_SMP
	sethi	%hi(__cpu_data), %l0
	lduw	[%l0 + %lo(__cpu_data)], %l1
#else
	sethi	%hi(__cpu_data), %l0
	or	%l0, %lo(__cpu_data), %l0
	lduw	[%l0 + %g5], %l1
#endif
	cmp	%l1, 0

	/* mm/ultra.S:xcall_report_regs KNOWS about this load. */
	bne,pn	%icc, __handle_softirq
	 ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
__handle_softirq_continue:
rtrap_xcall:
	sethi	%hi(0xf << 20), %l4
	and	%l1, %l4, %l4
	andn	%l1, %l4, %l1
	srl	%l4, 20, %l4
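	/* Descriptive note (added): %l1 now holds the saved %tstate
	 * with its PIL field masked off, and %l4 holds that PIL value
	 * (tstate bits 23:20), which is written back to %pil further
	 * down the return path.
	 */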
#ifdef CONFIG_TRACE_IRQFLAGS
	brnz,pn	%l4, rtrap_no_irq_enable
	 nop
	call	trace_hardirqs_on
	 nop
	/* Do not actually set the %pil here.  We will do that
	 * below after we clear PSTATE_IE in the %pstate register.
	 * If we re-enable interrupts here, we can recurse down
	 * the hardirq stack potentially endlessly, causing a
	 * stack overflow.
	 *
	 * It is tempting to put this test and trace_hardirqs_on
	 * call at the 'rt_continue' label, but that will not work
	 * as that path hits unconditionally and we do not want to
	 * execute this in NMI return paths, for example.
	 */
#endif
rtrap_no_irq_enable:
	andcc	%l1, TSTATE_PRIV, %l3
	bne,pn	%icc, to_kernel
	 nop

	/* We must hold IRQs off and atomically test schedule+signal
	 * state, then hold them off all the way back to userspace.
	 * If we are returning to kernel, none of this matters.  Note
	 * that we are disabling interrupts via PSTATE_IE, not using
	 * %pil.
	 *
	 * If we do not do this, there is a window where we would do
	 * the tests, later the signal/resched event arrives but we do
	 * not process it since we are still in kernel mode.  It would
	 * take until the next local IRQ before the signal/resched
	 * event would be handled.
	 *
	 * This also means that if we have to deal with user
	 * windows, we have to redo all of these sched+signal checks
	 * with IRQs disabled.
	 */
to_user:	wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate
	wrpr	0, %pil
__handle_preemption_continue:
	ldx	[%g6 + TI_FLAGS], %l0
	sethi	%hi(_TIF_USER_WORK_MASK), %o0
	or	%o0, %lo(_TIF_USER_WORK_MASK), %o0
	andcc	%l0, %o0, %g0
	sethi	%hi(TSTATE_PEF), %o0
	be,pt	%xcc, user_nowork
	 andcc	%l1, %o0, %g0
	andcc	%l0, _TIF_NEED_RESCHED, %g0
	bne,pn	%xcc, __handle_preemption
	 andcc	%l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0
	bne,pn	%xcc, __handle_signal
__handle_signal_continue:
	 ldub	[%g6 + TI_WSAVED], %o2
	brnz,pn	%o2, __handle_user_windows
	 nop
__handle_user_windows_continue:
	sethi	%hi(TSTATE_PEF), %o0
	andcc	%l1, %o0, %g0

	/* This fpdepth clear is necessary for non-syscall rtraps only */
user_nowork:
	bne,pn	%xcc, __handle_userfpu
	 stb	%g0, [%g6 + TI_FPDEPTH]
__handle_userfpu_continue:

rt_continue:	ldx	[%sp + PTREGS_OFF + PT_V9_G1], %g1
	ldx	[%sp + PTREGS_OFF + PT_V9_G2], %g2

	ldx	[%sp + PTREGS_OFF + PT_V9_G3], %g3
	ldx	[%sp + PTREGS_OFF + PT_V9_G4], %g4
	ldx	[%sp + PTREGS_OFF + PT_V9_G5], %g5
	brz,pt	%l3, 1f
	 mov	%g6, %l2

	/* Must do this before thread reg is clobbered below. */
	LOAD_PER_CPU_BASE(%g5, %g6, %i0, %i1, %i2)
1:
	ldx	[%sp + PTREGS_OFF + PT_V9_G6], %g6
	ldx	[%sp + PTREGS_OFF + PT_V9_G7], %g7

	/* Normal globals are restored, go to trap globals. */
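	/* Descriptive note (added): on sun4v the two instructions at
	 * the 661: label below are replaced at boot via the
	 * .sun4v_2insn_patch table with the pair following the .word,
	 * since sun4v selects trap globals with SET_GL() rather than
	 * PSTATE_AG.
	 */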
661:	wrpr	%g0, RTRAP_PSTATE_AG_IRQOFF, %pstate
	nop
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate
	SET_GL(1)
	.previous

	mov	%l2, %g6

	ldx	[%sp + PTREGS_OFF + PT_V9_I0], %i0
	ldx	[%sp + PTREGS_OFF + PT_V9_I1], %i1

	ldx	[%sp + PTREGS_OFF + PT_V9_I2], %i2
	ldx	[%sp + PTREGS_OFF + PT_V9_I3], %i3
	ldx	[%sp + PTREGS_OFF + PT_V9_I4], %i4
	ldx	[%sp + PTREGS_OFF + PT_V9_I5], %i5
	ldx	[%sp + PTREGS_OFF + PT_V9_I6], %i6
	ldx	[%sp + PTREGS_OFF + PT_V9_I7], %i7
	ldx	[%sp + PTREGS_OFF + PT_V9_TPC], %l2
	ldx	[%sp + PTREGS_OFF + PT_V9_TNPC], %o2

	ld	[%sp + PTREGS_OFF + PT_V9_Y], %o3
	wr	%o3, %g0, %y
	wrpr	%l4, 0x0, %pil
	wrpr	%g0, 0x1, %tl
	andn	%l1, TSTATE_SYSCALL, %l1
	wrpr	%l1, %g0, %tstate
	wrpr	%l2, %g0, %tpc
	wrpr	%o2, %g0, %tnpc

	brnz,pn	%l3, kern_rtt
	 mov	PRIMARY_CONTEXT, %l7

661:	ldxa	[%l7 + %l7] ASI_DMMU, %l0
	.section	.sun4v_1insn_patch, "ax"
	.word	661b
	ldxa	[%l7 + %l7] ASI_MMU, %l0
	.previous

	sethi	%hi(sparc64_kern_pri_nuc_bits), %l1
	ldx	[%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1
	or	%l0, %l1, %l0

661:	stxa	%l0, [%l7] ASI_DMMU
	.section	.sun4v_1insn_patch, "ax"
	.word	661b
	stxa	%l0, [%l7] ASI_MMU
	.previous

	sethi	%hi(KERNBASE), %l7
	flush	%l7
	rdpr	%wstate, %l1
	rdpr	%otherwin, %l2
	srl	%l1, 3, %l1

	wrpr	%l2, %g0, %canrestore
	wrpr	%l1, %g0, %wstate
	brnz,pt	%l2, user_rtt_restore
	 wrpr	%g0, %g0, %otherwin

	ldx	[%g6 + TI_FLAGS], %g3
	wr	%g0, ASI_AIUP, %asi
	rdpr	%cwp, %g1
	andcc	%g3, _TIF_32BIT, %g0
	sub	%g1, 1, %g1
	bne,pt	%xcc, user_rtt_fill_32bit
	 wrpr	%g1, %cwp
	ba,a,pt	%xcc, user_rtt_fill_64bit

user_rtt_fill_fixup:
	rdpr	%cwp, %g1
	add	%g1, 1, %g1
	wrpr	%g1, 0x0, %cwp

	rdpr	%wstate, %g2
	sll	%g2, 3, %g2
	wrpr	%g2, 0x0, %wstate

	/* We know %canrestore and %otherwin are both zero. */
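	/* Descriptive note (added): the window fill from the user stack
	 * faulted.  Switch the MMU back to the kernel primary context,
	 * record FAULT_CODE_WINFIXUP and the fault address (%g5) in
	 * thread_info, drop to trap level 0, call do_sparc64_fault(),
	 * and then retry the return to userspace through rtrap.
	 */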
	sethi	%hi(sparc64_kern_pri_context), %g2
	ldx	[%g2 + %lo(sparc64_kern_pri_context)], %g2
	mov	PRIMARY_CONTEXT, %g1

661:	stxa	%g2, [%g1] ASI_DMMU
	.section	.sun4v_1insn_patch, "ax"
	.word	661b
	stxa	%g2, [%g1] ASI_MMU
	.previous

	sethi	%hi(KERNBASE), %g1
	flush	%g1

	or	%g4, FAULT_CODE_WINFIXUP, %g4
	stb	%g4, [%g6 + TI_FAULT_CODE]
	stx	%g5, [%g6 + TI_FAULT_ADDR]

	mov	%g6, %l1
	wrpr	%g0, 0x0, %tl

661:	nop
	.section	.sun4v_1insn_patch, "ax"
	.word	661b
	SET_GL(0)
	.previous

	wrpr	%g0, RTRAP_PSTATE, %pstate

	mov	%l1, %g6
	ldx	[%g6 + TI_TASK], %g4
	LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
	call	do_sparc64_fault
	 add	%sp, PTREGS_OFF, %o0
	ba,pt	%xcc, rtrap
	 nop

user_rtt_pre_restore:
	add	%g1, 1, %g1
	wrpr	%g1, 0x0, %cwp

user_rtt_restore:
	restore
	rdpr	%canrestore, %g1
	wrpr	%g1, 0x0, %cleanwin
	retry
	nop

kern_rtt:	rdpr	%canrestore, %g1
	brz,pn	%g1, kern_rtt_fill
	 nop
kern_rtt_restore:
	stw	%g0, [%sp + PTREGS_OFF + PT_V9_MAGIC]
	restore
	retry

to_kernel:
#ifdef CONFIG_PREEMPT
	ldsw	[%g6 + TI_PRE_COUNT], %l5
	brnz	%l5, kern_fpucheck
	 ldx	[%g6 + TI_FLAGS], %l5
	andcc	%l5, _TIF_NEED_RESCHED, %g0
	be,pt	%xcc, kern_fpucheck
	 nop
	cmp	%l4, 0
	bne,pn	%xcc, kern_fpucheck
	 sethi	%hi(PREEMPT_ACTIVE), %l6
	stw	%l6, [%g6 + TI_PRE_COUNT]
	call	schedule
	 nop
	ba,pt	%xcc, rtrap
	 stw	%g0, [%g6 + TI_PRE_COUNT]
#endif
kern_fpucheck:	ldub	[%g6 + TI_FPDEPTH], %l5
	brz,pt	%l5, rt_continue
	 srl	%l5, 1, %o0
	add	%g6, TI_FPSAVED, %l6
	ldub	[%l6 + %o0], %l2
	sub	%l5, 2, %l5

	add	%g6, TI_GSR, %o1
	andcc	%l2, (FPRS_FEF|FPRS_DU), %g0
	be,pt	%icc, 2f
	 and	%l2, FPRS_DL, %l6
	andcc	%l2, FPRS_FEF, %g0
	be,pn	%icc, 5f
	 sll	%o0, 3, %o5
	rd	%fprs, %g1

	wr	%g1, FPRS_FEF, %fprs
	ldx	[%o1 + %o5], %g1
	add	%g6, TI_XFSR, %o1
	sll	%o0, 8, %o2
	add	%g6, TI_FPREGS, %o3
	brz,pn	%l6, 1f
	 add	%g6, TI_FPREGS+0x40, %o4

	membar	#Sync
	ldda	[%o3 + %o2] ASI_BLK_P, %f0
	ldda	[%o4 + %o2] ASI_BLK_P, %f16
	membar	#Sync
1:	andcc	%l2, FPRS_DU, %g0
	be,pn	%icc, 1f
	 wr	%g1, 0, %gsr
	add	%o2, 0x80, %o2
	membar	#Sync
	ldda	[%o3 + %o2] ASI_BLK_P, %f32
	ldda	[%o4 + %o2] ASI_BLK_P, %f48
1:	membar	#Sync
	ldx	[%o1 + %o5], %fsr
2:	stb	%l5, [%g6 + TI_FPDEPTH]
	ba,pt	%xcc, rt_continue
	 nop
5:	wr	%g0, FPRS_FEF, %fprs
	sll	%o0, 8, %o2

	add	%g6, TI_FPREGS+0x80, %o3
	add	%g6, TI_FPREGS+0xc0, %o4
	membar	#Sync
	ldda	[%o3 + %o2] ASI_BLK_P, %f32
	ldda	[%o4 + %o2] ASI_BLK_P, %f48
	membar	#Sync
	wr	%g0, FPRS_DU, %fprs
	ba,pt	%xcc, rt_continue
	 stb	%l5, [%g6 + TI_FPDEPTH]