/* $Id: trampoline.S,v 1.1.1.1 2008/10/15 03:26:19 james26_jang Exp $
 * trampoline.S: Jump start slave processors on sparc64.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/lsu.h>
#include <asm/dcr.h>
#include <asm/dcu.h>
#include <asm/pstate.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/spitfire.h>
#include <asm/processor.h>
#include <asm/asm_offsets.h>

	/* NUL-terminated OBP (OpenBoot PROM) service/method name strings
	 * passed by address in the PROM argument buffer below.
	 */
	.data
	.align	8
call_method:
	.asciz	"call-method"
	.align	8
itlb_load:
	.asciz	"SUNW,itlb-load"
	.align	8
dtlb_load:
	.asciz	"SUNW,dtlb-load"

	.text
	.align	8
	.globl	sparc64_cpu_startup, sparc64_cpu_startup_end
sparc64_cpu_startup:
	flushw

	/* Dispatch on CPU type: Cheetah (UltraSPARC-III), Cheetah+ and
	 * followons, or Spitfire-class; each needs different cache/MMU
	 * control setup before the common path.
	 */
	BRANCH_IF_CHEETAH_BASE(g1,g5,cheetah_startup)
	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g5,cheetah_plus_startup)

	ba,pt	%xcc, spitfire_startup
	 nop

cheetah_plus_startup:
	/* Preserve OBP choosen DCU and DCR register settings.  */
	ba,pt	%xcc, cheetah_generic_startup
	 nop

cheetah_startup:
	/* Program the dispatch control register (%asr18). */
	mov	DCR_BPE | DCR_RPE | DCR_SI | DCR_IFPOE | DCR_MS, %g1
	wr	%g1, %asr18

	/* Build the 64-bit DCU control value in %g5 (high half via
	 * %uhi/%ulo then shifted up, low half OR-ed in) and write it.
	 */
	sethi	%uhi(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
	or	%g5, %ulo(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
	sllx	%g5, 32, %g5
	or	%g5, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g5
	stxa	%g5, [%g0] ASI_DCU_CONTROL_REG
	membar	#Sync

cheetah_generic_startup:
	/* Clear all TSB extension registers in both MMUs. */
	mov	TSB_EXTENSION_P, %g3
	stxa	%g0, [%g3] ASI_DMMU
	stxa	%g0, [%g3] ASI_IMMU
	membar	#Sync

	mov	TSB_EXTENSION_S, %g3
	stxa	%g0, [%g3] ASI_DMMU
	membar	#Sync

	mov	TSB_EXTENSION_N, %g3
	stxa	%g0, [%g3] ASI_DMMU
	stxa	%g0, [%g3] ASI_IMMU
	membar	#Sync

	/* Disable STICK_INT interrupts.  */
	sethi	%hi(0x80000000), %g5
	sllx	%g5, 32, %g5
	wr	%g5, %asr25

	ba,pt	%xcc, startup_continue
	 nop

spitfire_startup:
	/* Enable I/D caches and I/D MMUs via the LSU control register. */
	mov	(LSU_CONTROL_IC | LSU_CONTROL_DC | LSU_CONTROL_IM | LSU_CONTROL_DM), %g1
	stxa	%g1, [%g0] ASI_LSU_CONTROL
	membar	#Sync

startup_continue:
	wrpr	%g0, 15, %pil
	wr	%g0, 0, %tick_cmpr

	/* Call OBP by hand to lock KERNBASE into i/d tlbs. */
	mov	%o0, %l0		/* preserve incoming arg across PROM calls */

	/* Spin on prom_entry_lock with ldstub until we own it; only one
	 * CPU may be inside the PROM at a time.
	 */
	sethi	%hi(prom_entry_lock), %g2
1:	ldstub	[%g2 + %lo(prom_entry_lock)], %g1
	brnz,pn	%g1, 1b
	 membar	#StoreLoad | #StoreStore

	/* Switch to the PROM stack (p1275buf + 0x10), saving ours in %l1. */
	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x10], %l2
	mov	%sp, %l1
	add	%l2, -(192 + 128), %sp
	flushw

	/* Build the OBP "call-method" argument array on the stack:
	 *   +0x00 service name ("call-method")
	 *   +0x08 num input args (5)
	 *   +0x10 num return values (1)
	 *   +0x18 method name ("SUNW,itlb-load")
	 *   +0x20 MMU ihandle
	 *   +0x28 virtual address (KERNBASE)
	 *   +0x30 TTE data for the locked kernel mapping
	 *   +0x38 TLB entry index (set below)
	 */
	sethi	%hi(call_method), %g2
	or	%g2, %lo(call_method), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x00]
	mov	5, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x08]
	mov	1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x10]
	sethi	%hi(itlb_load), %g2
	or	%g2, %lo(itlb_load), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x18]
	sethi	%hi(mmu_ihandle_cache), %g2
	lduw	[%g2 + %lo(mmu_ihandle_cache)], %g2
	stx	%g2, [%sp + 2047 + 128 + 0x20]
	sethi	%hi(KERNBASE), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x28]
	sethi	%hi(kern_locked_tte_data), %g2
	ldx	[%g2 + %lo(kern_locked_tte_data)], %g2
	stx	%g2, [%sp + 2047 + 128 + 0x30]

	/* TLB lock slot: 15 on Cheetah, 63 otherwise. */
	mov	15, %g2
	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)

	mov	63, %g2
1:
	stx	%g2, [%sp + 2047 + 128 + 0x38]

	/* Invoke the PROM entry point (p1275buf + 0x08) with the
	 * argument array address in %o0 (delay slot).
	 */
	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x08], %o1
	call	%o1
	 add	%sp, (2047 + 128), %o0

	/* Same call-method sequence again, this time "SUNW,dtlb-load"
	 * to lock the kernel mapping into the D-TLB.
	 */
	sethi	%hi(call_method), %g2
	or	%g2, %lo(call_method), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x00]
	mov	5, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x08]
	mov	1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x10]
	sethi	%hi(dtlb_load), %g2
	or	%g2, %lo(dtlb_load), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x18]
	sethi	%hi(mmu_ihandle_cache), %g2
	lduw	[%g2 + %lo(mmu_ihandle_cache)], %g2
	stx	%g2, [%sp + 2047 + 128 + 0x20]
	sethi	%hi(KERNBASE), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x28]
	sethi	%hi(kern_locked_tte_data), %g2
	ldx	[%g2 + %lo(kern_locked_tte_data)], %g2
	stx	%g2, [%sp + 2047 + 128 + 0x30]

	mov	15, %g2
	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)

	mov	63, %g2
1:

	stx	%g2, [%sp + 2047 + 128 + 0x38]
	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x08], %o1
	call	%o1
	 add	%sp, (2047 + 128), %o0

	/* Release the PROM entry lock. */
	sethi	%hi(prom_entry_lock), %g2
	stb	%g0, [%g2 + %lo(prom_entry_lock)]
	membar	#StoreStore | #StoreLoad

	/* Restore our own stack and the original argument. */
	mov	%l1, %sp
	flushw

	mov	%l0, %o0

	wrpr	%g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
	wr	%g0, 0, %fprs

	sethi	%uhi(PAGE_OFFSET), %g4
	sllx	%g4, 32, %g4

	/* %o0 points at our task struct pointer; load it into %g6. */
	srl	%o0, 0, %o0
	ldx	[%o0], %g6

	wr	%g0, ASI_P, %asi

	/* Zero primary and secondary MMU contexts. */
	mov	PRIMARY_CONTEXT, %g7
	stxa	%g0, [%g7] ASI_DMMU
	membar	#Sync
	mov	SECONDARY_CONTEXT, %g7
	stxa	%g0, [%g7] ASI_DMMU
	membar	#Sync

	/* Point %sp at the top of this task's kernel stack. */
	mov	1, %g5
	sllx	%g5, THREAD_SHIFT, %g5
	sub	%g5, (REGWIN_SZ + STACK_BIAS), %g5
	add	%g6, %g5, %sp
	mov	0, %fp

	wrpr	%g0, 0, %wstate
	wrpr	%g0, 0, %tl

	/* Setup the trap globals, then we can resurface.  */
	rdpr	%pstate, %o1
	mov	%g6, %o2
	wrpr	%o1, PSTATE_AG, %pstate
	sethi	%hi(sparc64_ttable_tl0), %g5
	wrpr	%g5, %tba
	mov	%o2, %g6

	/* MMU globals: build the kernel 4MB TTE template and the VPTE base. */
	wrpr	%o1, PSTATE_MG, %pstate
#define KERN_HIGHBITS	((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000)
#define KERN_LOWBITS	(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)

	mov	TSB_REG, %g1
	stxa	%g0, [%g1] ASI_DMMU
	membar	#Sync
	mov	TLB_SFSR, %g1
	sethi	%uhi(KERN_HIGHBITS), %g2
	or	%g2, %ulo(KERN_HIGHBITS), %g2
	sllx	%g2, 32, %g2
	or	%g2, KERN_LOWBITS, %g2

	BRANCH_IF_ANY_CHEETAH(g3,g7,9f)

	ba,pt	%xcc, 1f
	 nop

9:
	sethi	%uhi(VPTE_BASE_CHEETAH), %g3
	or	%g3, %ulo(VPTE_BASE_CHEETAH), %g3
	ba,pt	%xcc, 2f
	 sllx	%g3, 32, %g3
1:
	sethi	%uhi(VPTE_BASE_SPITFIRE), %g3
	or	%g3, %ulo(VPTE_BASE_SPITFIRE), %g3
	sllx	%g3, 32, %g3

2:
	clr	%g7
#undef KERN_HIGHBITS
#undef KERN_LOWBITS

	/* Setup interrupt globals, we are always SMP.  */
	wrpr	%o1, PSTATE_IG, %pstate

	/* Get our UPA MID.  */
	lduw	[%o2 + AOFF_task_processor], %g1
	sethi	%hi(cpu_data), %g5
	or	%g5, %lo(cpu_data), %g5

	/* In theory this is: &(cpu_data[this_upamid].irq_worklists[0]) */
	sllx	%g1, 7, %g1		/* cpu_data entries are 128 bytes */
	add	%g5, %g1, %g1
	add	%g1, 64, %g6

	/* Back to normal globals; enable interrupts in %pstate. */
	wrpr	%g0, 0, %wstate
	or	%o1, PSTATE_IE, %o1
	wrpr	%o1, 0, %pstate

	call	prom_set_trap_table
	 sethi	%hi(sparc64_ttable_tl0), %o0

	/* Hand off to the generic SMP slave path; cpu_idle should never
	 * return — panic if it does.
	 */
	call	smp_callin
	 nop
	call	cpu_idle
	 mov	0, %o0
	call	cpu_panic
	 nop
1:	b,a,pt	%xcc, 1b

	.align	8
sparc64_cpu_startup_end: