/*-
 * Copyright (C) 2010 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24217044Snwhitehorn * 25217044Snwhitehorn * $FreeBSD$ 26217044Snwhitehorn */ 27217044Snwhitehorn 28233666Snwhitehorn#define LOCORE 29233666Snwhitehorn 30265998Sian#include <machine/trap.h> 31217044Snwhitehorn 32217044Snwhitehorn/* 33217044Snwhitehorn * KBoot and simulators will start this program from the _start symbol, with 34217044Snwhitehorn * r3 pointing to a flattened device tree (kexec), r4 the physical address 35217044Snwhitehorn * at which we were loaded, and r5 0 (kexec) or a pointer to Open Firmware 36217044Snwhitehorn * (simulator). If r4 is non-zero, the first order of business is relocating 37217044Snwhitehorn * ourselves to 0. In the kboot case, the PPE secondary thread will enter 38217044Snwhitehorn * at 0x60. 39217044Snwhitehorn * 40217044Snwhitehorn * If started directly by the LV1 hypervisor, we are loaded to address 0 41217044Snwhitehorn * and execution on both threads begins at 0x100 (EXC_RST). 42217044Snwhitehorn */ 43217044Snwhitehorn 44217044Snwhitehorn#define CACHELINE_SIZE 128 45217044Snwhitehorn#define SPR_CTRL 136 46217044Snwhitehorn 47217044Snwhitehorn/* KBoot thread 0 entry -- do relocation, then jump to main */ 48217044Snwhitehorn.global _start 49217044Snwhitehorn_start: 50217044Snwhitehorn mfmsr %r31 51217044Snwhitehorn clrldi %r31,%r31,1 52217044Snwhitehorn mtmsrd %r31 53217044Snwhitehorn isync 54217044Snwhitehorn cmpwi %r4,0 55217044Snwhitehorn bne relocate_self 56217044Snwhitehornrelocated_start: 57217044Snwhitehorn lis %r1,0x100 58217044Snwhitehorn bl main 59217044Snwhitehorn 60217044Snwhitehorn. = 0x40 61217044Snwhitehorn.global secondary_spin_sem 62217044Snwhitehornsecondary_spin_sem: 63217044Snwhitehorn .long 0 64217044Snwhitehorn 65217044Snwhitehorn. 
= 0x60 66217044Snwhitehornthread1_start_kboot: 67217044Snwhitehorn mfmsr %r31 68217044Snwhitehorn clrldi %r31,%r31,1 69217044Snwhitehorn mtmsrd %r31 70217044Snwhitehorn isync 71217044Snwhitehorn 72217044Snwhitehorn ba thread1_start /* kboot copies the first 256 bytes to 73217044Snwhitehorn * address 0, so we are safe to jump 74217044Snwhitehorn * (and stay) there */ 75217044Snwhitehorn 76217044Snwhitehornthread1_start: 77217044Snwhitehorn li %r3,secondary_spin_sem@l 78217044Snwhitehorn1: lwz %r1,0(%r3) /* Spin on SECONDARY_SPIN_SEM_ADDRESS */ 79217044Snwhitehorn cmpwi %r1,0 80217044Snwhitehorn beq 1b /* If the semaphore is still zero, spin again */ 81217044Snwhitehorn 82217044Snwhitehorn /* We have been woken up by thread 0 */ 83217044Snwhitehorn li %r0,0x100 /* Invalidate reset vector cache line */ 84217044Snwhitehorn icbi 0,%r0 85217044Snwhitehorn isync 86217044Snwhitehorn sync 87217044Snwhitehorn ba 0x100 /* Jump to the reset vector */ 88217044Snwhitehorn 89217044Snwhitehorn. = EXC_RST 90217044Snwhitehornexc_rst: 91217044Snwhitehorn mfmsr %r31 92217044Snwhitehorn clrldi %r31,%r31,1 93217044Snwhitehorn mtmsrd %r31 94217044Snwhitehorn isync 95217044Snwhitehorn 96217044Snwhitehorn mfspr %r3,SPR_CTRL 97217044Snwhitehorn /* The first two bits of r0 are 01 (thread 1) or 10 (thread 0) */ 98217044Snwhitehorn cntlzw %r3,%r3 /* Now 0 for thread 0, 1 for thread 1 */ 99217044Snwhitehorn 100217044Snwhitehorn cmpwi %r3,0 101217044Snwhitehorn bne thread1_start /* Send thread 1 to wait */ 102217044Snwhitehorn 103217044Snwhitehorn b relocated_start /* Main entry point for thread 0 */ 104217044Snwhitehorn 105217044Snwhitehorn#define EXCEPTION_HANDLER(exc) \ 106217044Snwhitehorn. 
= exc; \ 107217044Snwhitehorn li %r3, exc; \ 108217044Snwhitehorn mfsrr0 %r4; \ 109217044Snwhitehorn mfmsr %r5; \ 110217044Snwhitehorn clrldi %r6,%r5,1; \ 111217044Snwhitehorn mtmsrd %r6; \ 112217044Snwhitehorn isync; \ 113217044Snwhitehorn lis %r1,0x100; \ 114217044Snwhitehorn bl ppc_exception 115217044Snwhitehorn 116217044SnwhitehornEXCEPTION_HANDLER(EXC_MCHK) 117217044SnwhitehornEXCEPTION_HANDLER(EXC_DSI) 118217044SnwhitehornEXCEPTION_HANDLER(EXC_DSE) 119217044SnwhitehornEXCEPTION_HANDLER(EXC_ISI) 120217044SnwhitehornEXCEPTION_HANDLER(EXC_ISE) 121217044SnwhitehornEXCEPTION_HANDLER(EXC_EXI) 122217044SnwhitehornEXCEPTION_HANDLER(EXC_ALI) 123217044SnwhitehornEXCEPTION_HANDLER(EXC_PGM) 124217044SnwhitehornEXCEPTION_HANDLER(EXC_FPU) 125217044SnwhitehornEXCEPTION_HANDLER(EXC_DECR) 126217044SnwhitehornEXCEPTION_HANDLER(EXC_SC) 127217044Snwhitehorn 128217044Snwhitehornrelocate_self: 129217044Snwhitehorn /* We enter this with r4 the physical offset for our relocation */ 130217044Snwhitehorn lis %r8,_end@ha /* r8: copy length */ 131217044Snwhitehorn addi %r8,%r8,_end@l 132217044Snwhitehorn li %r5,0x100 /* r5: dest address */ 133217044Snwhitehorn1: add %r6,%r4,%r5 /* r6: source address */ 134217044Snwhitehorn ld %r7,0(%r6) 135217044Snwhitehorn std %r7,0(%r5) 136217044Snwhitehorn addi %r5,%r5,8 137217044Snwhitehorn cmpw %r5,%r8 138217044Snwhitehorn blt 1b 139217044Snwhitehorn 140217044Snwhitehorn /* 141217044Snwhitehorn * Now invalidate the cacheline with the second half of relocate_self, 142217044Snwhitehorn * and do an absolute branch there in case we overwrote part of 143217044Snwhitehorn * ourselves. 
144217044Snwhitehorn */ 145217044Snwhitehorn 146217044Snwhitehorn lis %r9,relocate_self_cache@ha 147217044Snwhitehorn addi %r9,%r9,relocate_self_cache@l 148217044Snwhitehorn dcbst 0,%r9 149217044Snwhitehorn sync 150217044Snwhitehorn icbi 0,%r9 151217044Snwhitehorn sync 152217044Snwhitehorn isync 153217044Snwhitehorn ba relocate_self_cache 154217044Snwhitehorn 155217044Snwhitehornrelocate_self_cache: 156217044Snwhitehorn /* Now invalidate the icache */ 157217044Snwhitehorn li %r5,0x100 158217044Snwhitehorn2: dcbst 0,%r5 159217044Snwhitehorn sync 160217044Snwhitehorn icbi 0,%r5 161217044Snwhitehorn sync 162217044Snwhitehorn isync 163217044Snwhitehorn cmpw %r5,%r8 164217044Snwhitehorn addi %r5,%r5,CACHELINE_SIZE 165217044Snwhitehorn blt 2b 166217044Snwhitehorn 167217044Snwhitehorn /* All done: absolute jump to relocated entry point */ 168217044Snwhitehorn ba relocated_start 169217044Snwhitehorn 170