mp_locore.S revision 182916
/*-
 * Copyright (c) 2002 Jake Burkholder.
 * Copyright (c) 2008 Marius Strobl <marius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <machine/asm.h>
__FBSDID("$FreeBSD: head/sys/sparc64/sparc64/mp_locore.S 182916 2008-09-10 20:07:08Z marius $");

#include <machine/asi.h>
#include <machine/asmacros.h>
#include <machine/ktr.h>
#include <machine/pstate.h>
#include <machine/smp.h>
#include <machine/upa.h>
#include <machine/ver.h>

#include "assym.s"

	.register %g2, #ignore
	.register %g3, #ignore

	.text
	_ALIGN_TEXT
	/*
	 * MP trampoline, running in position-independent fashion on a
	 * freshly started application processor (AP).  The code between
	 * labels 1: and 11: is exported via the mp_tramp_* symbols below;
	 * all data references are PC-relative (rd %pc plus label-difference
	 * offsets), so it works wherever the block is placed.  The words at
	 * 9f (TLB slot count) and 10f (function to jump to) are presumably
	 * filled in by the boot processor before the AP is started -- see
	 * mp_tramp_tlb_slots/mp_tramp_func; TODO confirm against the C side.
	 *
	 * Register usage within the trampoline:
	 *	%l7 = CPU implementation number (from %ver)
	 *	%l6 = PC at label 3 (base for PC-relative data access)
	 *	%l1 = number of TTEs to lock into the TLBs
	 *	%l2 = pointer to the current TTE, %l3 = loop counter
	 *	%l4/%l5 = current TTE's VPN/data
	 */

	/*
	 * Initialize misc. state to known values: interrupts disabled,
	 * normal globals, no clean windows, PIL 0, and floating point
	 * disabled.
	 */
1:	wrpr	%g0, PSTATE_NORMAL, %pstate
	wrpr	%g0, 0, %cleanwin
	wrpr	%g0, 0, %pil
	wr	%g0, 0, %fprs

	/* Extract the CPU implementation from the version register. */
	rdpr	%ver, %l7
	srlx	%l7, VER_IMPL_SHIFT, %l7
	sll	%l7, VER_IMPL_SIZE, %l7
	srl	%l7, VER_IMPL_SIZE, %l7
	cmp	%l7, CPU_IMPL_ULTRASPARCIIIp
	bne	%icc, 3f
	 nop

	/*
	 * Relocate the locked entry in it16 slot 0 (if existent)
	 * as part of working around Cheetah+ erratum 34.
	 */

	setx	TD_V | TD_L, %l1, %l0
	/*
	 * We read ASI_ITLB_DATA_ACCESS_REG twice in order to work
	 * around errata of USIII and beyond.
	 * (The first read's result is discarded into %g0.)
	 */
	ldxa	[%g0] ASI_ITLB_DATA_ACCESS_REG, %g0
	ldxa	[%g0] ASI_ITLB_DATA_ACCESS_REG, %l6
	/* Skip the workaround unless slot 0 holds a valid, locked entry. */
	and	%l6, %l0, %l1
	cmp	%l0, %l1
	bne	%xcc, 3f
	 nop

	/* Flush the mapping of slot 0. */
	ldxa	[%g0] ASI_ITLB_TAG_READ_REG, %l5
	srlx	%l5, TAR_VPN_SHIFT, %l0
	sllx	%l0, TAR_VPN_SHIFT, %l0
	or	%l0, TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE, %l0
	stxa	%g0, [%l0] ASI_IMMU_DEMAP
	/* The USIII-family ignores the address. */
	flush	%g0

	/*
	 * Search a replacement slot != 0 and enter the data and tag
	 * that formerly were in slot 0 (%l6 = data, %l5 = tag).
	 */
	mov	(1 << TLB_DAR_SLOT_SHIFT), %l4
	setx	TD_V, %l1, %l0
	/*
	 * We read ASI_ITLB_DATA_ACCESS_REG twice in order to work
	 * around errata of USIII and beyond.
	 */
2:	ldxa	[%l4] ASI_ITLB_DATA_ACCESS_REG, %g0
	ldxa	[%l4] ASI_ITLB_DATA_ACCESS_REG, %l1
	/* Keep scanning while the current slot is valid (in use). */
	and	%l1, %l0, %l1
	cmp	%l0, %l1
	be,a	%xcc, 2b
	 add	%l4, (1 << TLB_DAR_SLOT_SHIFT), %l4	/* (annulled delay slot) */
	wr	%g0, ASI_IMMU, %asi
	stxa	%l5, [%g0 + AA_IMMU_TAR] %asi
	stxa	%l6, [%l4] ASI_ITLB_DATA_ACCESS_REG
	/* The USIII-family ignores the address. */
	flush	%g0

	/*
	 * Lock the kernel TTEs stored at 11f into the MMUs; the count
	 * is in the word at 9f.  All addresses are formed PC-relative
	 * so the trampoline is position-independent.
	 */
3:	rd	%pc, %l6
	ldx	[%l6 + (9f-3b)], %l1
	add	%l6, (11f-3b), %l2
	clr	%l3
4:	cmp	%l3, %l1
	be	%xcc, 8f
	 nop
	ldx	[%l2 + TTE_VPN], %l4
	ldx	[%l2 + TTE_DATA], %l5
	srlx	%l4, TV_SIZE_BITS, %l4
	sllx	%l4, PAGE_SHIFT_4M, %l4
	/* Enter the TTE into the data TLB. */
	wr	%g0, ASI_DMMU, %asi
	stxa	%l4, [%g0 + AA_DMMU_TAR] %asi
	stxa	%l5, [%g0] ASI_DTLB_DATA_IN_REG
	membar	#Sync

	cmp	%l7, CPU_IMPL_ULTRASPARCIIIp
	bne	%icc, 6f
	 wr	%g0, ASI_IMMU, %asi	/* (delay slot, executed both ways) */

	/*
	 * Search an unused slot != 0 and explicitly enter the data
	 * and tag there in order to avoid Cheetah+ erratum 34.
	 */
	mov	(1 << TLB_DAR_SLOT_SHIFT), %l0
	setx	TD_V, %o1, %o0
	/*
	 * We read ASI_ITLB_DATA_ACCESS_REG twice in order to work
	 * around errata of USIII and beyond.
	 */
5:	ldxa	[%l0] ASI_ITLB_DATA_ACCESS_REG, %g0
	ldxa	[%l0] ASI_ITLB_DATA_ACCESS_REG, %o1
	and	%o1, %o0, %o1
	cmp	%o0, %o1
	be,a	%xcc, 5b
	 add	%l0, (1 << TLB_DAR_SLOT_SHIFT), %l0	/* (annulled delay slot) */
	sethi	%hi(KERNBASE), %o0
	stxa	%l4, [%g0 + AA_IMMU_TAR] %asi
	stxa	%l5, [%l0] ASI_ITLB_DATA_ACCESS_REG
	flush	%o0
	ba	%xcc, 7f
	 nop

	/* Non-Cheetah+: let the MMU pick the instruction TLB slot. */
6:	sethi	%hi(KERNBASE), %l0
	stxa	%l4, [%g0 + AA_IMMU_TAR] %asi
	stxa	%l5, [%g0] ASI_ITLB_DATA_IN_REG
	flush	%l0
7:	add	%l2, 1 << TTE_SHIFT, %l2
	add	%l3, 1, %l3
	ba	%xcc, 4b
	 nop
	/* All TTEs locked; jump to the function stored at 10f. */
8:	ldx	[%l6 + (10f-3b)], %l1
	jmpl	%l1, %g0
	 nop
	_ALIGN_DATA
	/* Patched in place: 9f = TLB slot count, 10f = target function. */
9:	.xword	0x0
10:	.xword	0x0
11:

/*
 * Exported descriptors of the trampoline: its start address, its length
 * and the offsets of the two data words above relative to its start.
 */
DATA(mp_tramp_code)
	.xword	1b
DATA(mp_tramp_code_len)
	.xword	11b-1b
DATA(mp_tramp_tlb_slots)
	.xword	9b-1b
DATA(mp_tramp_func)
	.xword	10b-1b

/*
 * void mp_startup(void)
 *
 * Per-AP startup: synchronize %tick (and %stick on USIII and later)
 * with the boot processor via the cpu_start_args handshake area
 * (%l0 throughout), wait for the start signal, map the per-CPU pages,
 * switch to the per-CPU panic stack and call cpu_mp_bootstrap().
 * Does not return.
 */
ENTRY(mp_startup)
	SET(cpu_start_args, %l1, %l0)

	/* Announce we are ready for the %tick synchronization value. */
	mov	CPU_TICKSYNC, %l1
	membar	#StoreLoad
	stw	%l1, [%l0 + CSA_STATE]

	/* Spin until the BSP has published a non-zero %tick value. */
1:	ldx	[%l0 + CSA_TICK], %l1
	brz	%l1, 1b
	 nop
	wrpr	%l1, 0, %tick

	rdpr	%ver, %l1
	stx	%l1, [%l0 + CSA_VER]

	/* Extract the CPU implementation; pre-USIII CPUs have no %stick. */
	srlx	%l1, VER_IMPL_SHIFT, %l1
	sll	%l1, VER_IMPL_SIZE, %l1
	srl	%l1, VER_IMPL_SIZE, %l1
	cmp	%l1, CPU_IMPL_ULTRASPARCIII
	bl	%icc, 3f
	 nop
	/* Same handshake for %stick (%asr24). */
	mov	CPU_STICKSYNC, %l1
	membar	#StoreLoad
	stw	%l1, [%l0 + CSA_STATE]

2:	ldx	[%l0 + CSA_STICK], %l1
	brz	%l1, 2b
	 nop
	wr	%l1, 0, %asr24

3:	UPA_GET_MID(%o0)		/* %o0 = our module (CPU) ID */

#if KTR_COMPILE & KTR_SMP
	CATR(KTR_SMP, "mp_start: CPU %d entered kernel"
	    , %g1, %g2, %g3, 7, 8, 9)
	stx	%o0, [%g1 + KTR_PARM1]
9:
#endif

	/*
	 * Inform the boot processor we have inited.
	 */
	mov	CPU_INIT, %l1
	membar	#LoadStore
	stw	%l1, [%l0 + CSA_STATE]

	/*
	 * Wait till it's our turn to bootstrap (CSA_MID matches our MID).
	 */
4:	lduw	[%l0 + CSA_MID], %l1
	cmp	%l1, %o0
	bne	%xcc, 4b
	 nop

#if KTR_COMPILE & KTR_SMP
	CATR(KTR_SMP, "_mp_start: CPU %d got start signal"
	    , %g1, %g2, %g3, 7, 8, 9)
	stx	%o0, [%g1 + KTR_PARM1]
9:
#endif

	add	%l0, CSA_TTES, %l1	/* %l1 = TTE array, %l2 = index */
	clr	%l2

	/*
	 * Map the per-CPU pages.
	 */
5:	sllx	%l2, TTE_SHIFT, %l3
	add	%l1, %l3, %l3

	ldx	[%l3 + TTE_VPN], %l4
	ldx	[%l3 + TTE_DATA], %l5

	wr	%g0, ASI_DMMU, %asi
	srlx	%l4, TV_SIZE_BITS, %l4
	sllx	%l4, PAGE_SHIFT_8K, %l4
	stxa	%l4, [%g0 + AA_DMMU_TAR] %asi
	stxa	%l5, [%g0] ASI_DTLB_DATA_IN_REG
	membar	#Sync

	add	%l2, 1, %l2
	cmp	%l2, PCPU_PAGES
	bne	%xcc, 5b
	 nop

	/*
	 * Get onto our per-CPU panic stack, which precedes the struct pcpu
	 * in the per-CPU page.  %l1 = our struct pcpu from here on.
	 */
	ldx	[%l0 + CSA_PCPU], %l1
	set	PCPU_PAGES * PAGE_SIZE - PC_SIZEOF, %l2
	add	%l1, %l2, %l1
	sub	%l1, SPOFF + CCFSZ, %sp

	/*
	 * Enable interrupts.
	 */
	wrpr	%g0, PSTATE_KERNEL, %pstate

#if KTR_COMPILE & KTR_SMP
	CATR(KTR_SMP,
	    "_mp_start: bootstrap cpuid=%d mid=%d pcpu=%#lx data=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	lduw	[%l1 + PC_CPUID], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	lduw	[%l1 + PC_MID], %g2
	stx	%g2, [%g1 + KTR_PARM2]
	stx	%l1, [%g1 + KTR_PARM3]
	/* NOTE(review): KTR_PARM4 (data=%#lx) is never stored here. */
	stx	%sp, [%g1 + KTR_PARM5]
9:
#endif

	/*
	 * And away we go.  This doesn't return.
	 */
	call	cpu_mp_bootstrap
	 mov	%l1, %o0	/* (delay slot) arg0 = struct pcpu */
	sir
	! NOTREACHED
END(mp_startup)