/*-
 * Copyright (c) 2001 Takanori Watanabe <takawata@jp.freebsd.org>
 * Copyright (c) 2001 Mitsuru IWASAKI <iwasaki@jp.freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/acpica/acpi_wakecode.S 145397 2005-04-22 09:53:04Z iedowse $
28 */ 29 30#define LOCORE 31 32#include <machine/asmacros.h>
| 28 */ 29 30#define LOCORE 31 32#include <machine/asmacros.h>
|
33#include <machine/param.h>
| |
34#include <machine/specialreg.h> 35
| 33#include <machine/specialreg.h> 34
|
| 35#include "assym.s" 36
|
36 .align 4 37 .code16 38wakeup_16: 39 nop 40 cli 41 42 /* 43 * Set up segment registers for real mode and a small stack for 44 * any calls we make. 45 */ 46 movw %cs,%ax 47 movw %ax,%ds 48 movw %ax,%ss 49 movw $PAGE_SIZE,%sp 50 51 /* Re-initialize video BIOS if the reset_video tunable is set. */ 52 cmp $0,reset_video 53 je wakeup_16_gdt 54 lcall $0xc000,$3 55 56 /* 57 * Set up segment registers for real mode again in case the 58 * previous BIOS call clobbers them. 59 */ 60 movw %cs,%ax 61 movw %ax,%ds 62 movw %ax,%ss 63 64wakeup_16_gdt: 65 /* Load GDT for real mode */ 66 lgdt physical_gdt 67 68 /* Restore CR2, CR3 and CR4 */ 69 mov previous_cr2,%eax 70 mov %eax,%cr2 71 mov previous_cr3,%eax 72 mov %eax,%cr3 73 mov previous_cr4,%eax 74 mov %eax,%cr4 75 76 /* Transfer some values to protected mode */ 77#define NVALUES 9 78#define TRANSFER_STACK32(val, idx) \ 79 mov val,%eax; \ 80 mov %eax,wakeup_32stack+(idx+1)+(idx*4); 81 82 TRANSFER_STACK32(previous_ss, (NVALUES - 9)) 83 TRANSFER_STACK32(previous_fs, (NVALUES - 8)) 84 TRANSFER_STACK32(previous_ds, (NVALUES - 7)) 85 TRANSFER_STACK32(physical_gdt+2, (NVALUES - 6)) 86 TRANSFER_STACK32(where_to_recover, (NVALUES - 5)) 87 TRANSFER_STACK32(previous_idt+2, (NVALUES - 4)) 88 TRANSFER_STACK32(previous_ldt, (NVALUES - 3)) 89 TRANSFER_STACK32(previous_gdt+2, (NVALUES - 2)) 90 TRANSFER_STACK32(previous_tr, (NVALUES - 1)) 91 TRANSFER_STACK32(previous_cr0, (NVALUES - 0)) 92 93 mov physical_esp,%esi /* to be used in 32bit code */ 94 95 /* Enable protected mode */ 96 mov %cr0,%eax 97 orl $(CR0_PE),%eax 98 mov %eax,%cr0 99 100wakeup_sw32: 101 /* Switch to protected mode by intersegmental jump */
| 37 .align 4 38 .code16 39wakeup_16: 40 nop 41 cli 42 43 /* 44 * Set up segment registers for real mode and a small stack for 45 * any calls we make. 46 */ 47 movw %cs,%ax 48 movw %ax,%ds 49 movw %ax,%ss 50 movw $PAGE_SIZE,%sp 51 52 /* Re-initialize video BIOS if the reset_video tunable is set. */ 53 cmp $0,reset_video 54 je wakeup_16_gdt 55 lcall $0xc000,$3 56 57 /* 58 * Set up segment registers for real mode again in case the 59 * previous BIOS call clobbers them. 60 */ 61 movw %cs,%ax 62 movw %ax,%ds 63 movw %ax,%ss 64 65wakeup_16_gdt: 66 /* Load GDT for real mode */ 67 lgdt physical_gdt 68 69 /* Restore CR2, CR3 and CR4 */ 70 mov previous_cr2,%eax 71 mov %eax,%cr2 72 mov previous_cr3,%eax 73 mov %eax,%cr3 74 mov previous_cr4,%eax 75 mov %eax,%cr4 76 77 /* Transfer some values to protected mode */ 78#define NVALUES 9 79#define TRANSFER_STACK32(val, idx) \ 80 mov val,%eax; \ 81 mov %eax,wakeup_32stack+(idx+1)+(idx*4); 82 83 TRANSFER_STACK32(previous_ss, (NVALUES - 9)) 84 TRANSFER_STACK32(previous_fs, (NVALUES - 8)) 85 TRANSFER_STACK32(previous_ds, (NVALUES - 7)) 86 TRANSFER_STACK32(physical_gdt+2, (NVALUES - 6)) 87 TRANSFER_STACK32(where_to_recover, (NVALUES - 5)) 88 TRANSFER_STACK32(previous_idt+2, (NVALUES - 4)) 89 TRANSFER_STACK32(previous_ldt, (NVALUES - 3)) 90 TRANSFER_STACK32(previous_gdt+2, (NVALUES - 2)) 91 TRANSFER_STACK32(previous_tr, (NVALUES - 1)) 92 TRANSFER_STACK32(previous_cr0, (NVALUES - 0)) 93 94 mov physical_esp,%esi /* to be used in 32bit code */ 95 96 /* Enable protected mode */ 97 mov %cr0,%eax 98 orl $(CR0_PE),%eax 99 mov %eax,%cr0 100 101wakeup_sw32: 102 /* Switch to protected mode by intersegmental jump */
|
102 ljmpl $0x8,$0x12345678 /* Code location, to be replaced */
| 103 ljmpl $KCSEL,$0x12345678 /* Code location, to be replaced */
|
103 104 .code32 105wakeup_32: 106 /* 107 * Switched to protected mode w/o paging 108 * %esi: KERNEL stack pointer (physical address) 109 */ 110 111 nop 112 113 /* Set up segment registers for protected mode */
| 104 105 .code32 106wakeup_32: 107 /* 108 * Switched to protected mode w/o paging 109 * %esi: KERNEL stack pointer (physical address) 110 */ 111 112 nop 113 114 /* Set up segment registers for protected mode */
|
114 movw $0x10,%ax /* KDSEL to segment registers */
| 115 movw $KDSEL,%ax /* KDSEL to segment registers */
|
115 movw %ax,%ds 116 movw %ax,%es 117 movw %ax,%gs 118 movw %ax,%ss
| 116 movw %ax,%ds 117 movw %ax,%es 118 movw %ax,%gs 119 movw %ax,%ss
|
119 movw $0x18,%ax /* KPSEL to %fs */
| 120 movw $KPSEL,%ax /* KPSEL to %fs */
|
120 movw %ax,%fs 121 movl %esi,%esp /* physical address stack pointer */ 122 123wakeup_32stack: 124 /* Operands are overwritten in 16bit code */ 125 pushl $0xabcdef09 /* ss + dummy */ 126 pushl $0xabcdef08 /* fs + gs */ 127 pushl $0xabcdef07 /* ds + es */ 128 pushl $0xabcdef06 /* gdt:base (physical address) */ 129 pushl $0xabcdef05 /* recover address */ 130 pushl $0xabcdef04 /* idt:base */ 131 pushl $0xabcdef03 /* ldt + idt:limit */ 132 pushl $0xabcdef02 /* gdt:base */ 133 pushl $0xabcdef01 /* TR + gdt:limit */ 134 pushl $0xabcdef00 /* CR0 */ 135 136 movl %esp,%ebp 137#define CR0_REGISTER 0(%ebp) 138#define TASK_REGISTER 4(%ebp) 139#define PREVIOUS_GDT 6(%ebp) 140#define PREVIOUS_LDT 12(%ebp) 141#define PREVIOUS_IDT 14(%ebp) 142#define RECOVER_ADDR 20(%ebp) 143#define PHYSICAL_GDT_BASE 24(%ebp) 144#define PREVIOUS_DS 28(%ebp) 145#define PREVIOUS_ES 30(%ebp) 146#define PREVIOUS_FS 32(%ebp) 147#define PREVIOUS_GS 34(%ebp) 148#define PREVIOUS_SS 36(%ebp) 149 150 /* Fixup TSS type field */ 151#define TSS_TYPEFIX_MASK 0xf9 152 xorl %esi,%esi 153 movl PHYSICAL_GDT_BASE,%ebx 154 movw TASK_REGISTER,%si 155 leal (%ebx,%esi),%eax /* get TSS segment descriptor */ 156 andb $TSS_TYPEFIX_MASK,5(%eax) 157 158 /* Prepare to return to sleep/wakeup code point */ 159 lgdt PREVIOUS_GDT 160 lidt PREVIOUS_IDT 161 162 xorl %eax,%eax 163 movl %eax,%ebx 164 movl %eax,%ecx 165 movl %eax,%edx 166 movl %eax,%esi 167 movl %eax,%edi 168 movl PREVIOUS_DS,%ebx 169 movl PREVIOUS_FS,%ecx 170 movl PREVIOUS_SS,%edx 171 movw TASK_REGISTER,%si 172 shll $16,%esi 173 movw PREVIOUS_LDT,%si 174 movl RECOVER_ADDR,%edi 175 176 /* Enable paging and etc. 
*/ 177 movl CR0_REGISTER,%eax 178 movl %eax,%cr0 179 180 /* Flush the prefetch queue */ 181 jmp 1f 1821: jmp 1f 1831: 184 /* 185 * Now that we are in kernel virtual memory addressing 186 * %ebx: ds + es 187 * %ecx: fs + gs 188 * %edx: ss + dummy 189 * %esi: LDTR + TR 190 * %edi: recover address 191 */ 192 193 nop 194 195 movl %esi,%eax /* LDTR + TR */ 196 lldt %ax /* load LDT register */ 197 shrl $16,%eax 198 ltr %ax /* load task register */ 199 200 /* Restore segment registers */ 201 movl %ebx,%eax /* ds + es */ 202 movw %ax,%ds 203 shrl $16,%eax 204 movw %ax,%es 205 movl %ecx,%eax /* fs + gs */ 206 movw %ax,%fs 207 shrl $16,%eax 208 movw %ax,%gs 209 movl %edx,%eax /* ss */ 210 movw %ax,%ss 211 212 /* Jump to acpi_restorecpu() */ 213 jmp *%edi 214 215/* used in real mode */ 216physical_gdt: .word 0 217 .long 0 218physical_esp: .long 0 219previous_cr2: .long 0 220previous_cr3: .long 0 221previous_cr4: .long 0 222reset_video: .long 0 223 224/* transfer from real mode to protected mode */ 225previous_cr0: .long 0 226previous_tr: .word 0 227previous_gdt: .word 0 228 .long 0 229previous_ldt: .word 0 230previous_idt: .word 0 231 .long 0 232where_to_recover: .long 0 233previous_ds: .word 0 234previous_es: .word 0 235previous_fs: .word 0 236previous_gs: .word 0 237previous_ss: .word 0 238dummy: .word 0
| 121 movw %ax,%fs 122 movl %esi,%esp /* physical address stack pointer */ 123 124wakeup_32stack: 125 /* Operands are overwritten in 16bit code */ 126 pushl $0xabcdef09 /* ss + dummy */ 127 pushl $0xabcdef08 /* fs + gs */ 128 pushl $0xabcdef07 /* ds + es */ 129 pushl $0xabcdef06 /* gdt:base (physical address) */ 130 pushl $0xabcdef05 /* recover address */ 131 pushl $0xabcdef04 /* idt:base */ 132 pushl $0xabcdef03 /* ldt + idt:limit */ 133 pushl $0xabcdef02 /* gdt:base */ 134 pushl $0xabcdef01 /* TR + gdt:limit */ 135 pushl $0xabcdef00 /* CR0 */ 136 137 movl %esp,%ebp 138#define CR0_REGISTER 0(%ebp) 139#define TASK_REGISTER 4(%ebp) 140#define PREVIOUS_GDT 6(%ebp) 141#define PREVIOUS_LDT 12(%ebp) 142#define PREVIOUS_IDT 14(%ebp) 143#define RECOVER_ADDR 20(%ebp) 144#define PHYSICAL_GDT_BASE 24(%ebp) 145#define PREVIOUS_DS 28(%ebp) 146#define PREVIOUS_ES 30(%ebp) 147#define PREVIOUS_FS 32(%ebp) 148#define PREVIOUS_GS 34(%ebp) 149#define PREVIOUS_SS 36(%ebp) 150 151 /* Fixup TSS type field */ 152#define TSS_TYPEFIX_MASK 0xf9 153 xorl %esi,%esi 154 movl PHYSICAL_GDT_BASE,%ebx 155 movw TASK_REGISTER,%si 156 leal (%ebx,%esi),%eax /* get TSS segment descriptor */ 157 andb $TSS_TYPEFIX_MASK,5(%eax) 158 159 /* Prepare to return to sleep/wakeup code point */ 160 lgdt PREVIOUS_GDT 161 lidt PREVIOUS_IDT 162 163 xorl %eax,%eax 164 movl %eax,%ebx 165 movl %eax,%ecx 166 movl %eax,%edx 167 movl %eax,%esi 168 movl %eax,%edi 169 movl PREVIOUS_DS,%ebx 170 movl PREVIOUS_FS,%ecx 171 movl PREVIOUS_SS,%edx 172 movw TASK_REGISTER,%si 173 shll $16,%esi 174 movw PREVIOUS_LDT,%si 175 movl RECOVER_ADDR,%edi 176 177 /* Enable paging and etc. 
*/ 178 movl CR0_REGISTER,%eax 179 movl %eax,%cr0 180 181 /* Flush the prefetch queue */ 182 jmp 1f 1831: jmp 1f 1841: 185 /* 186 * Now that we are in kernel virtual memory addressing 187 * %ebx: ds + es 188 * %ecx: fs + gs 189 * %edx: ss + dummy 190 * %esi: LDTR + TR 191 * %edi: recover address 192 */ 193 194 nop 195 196 movl %esi,%eax /* LDTR + TR */ 197 lldt %ax /* load LDT register */ 198 shrl $16,%eax 199 ltr %ax /* load task register */ 200 201 /* Restore segment registers */ 202 movl %ebx,%eax /* ds + es */ 203 movw %ax,%ds 204 shrl $16,%eax 205 movw %ax,%es 206 movl %ecx,%eax /* fs + gs */ 207 movw %ax,%fs 208 shrl $16,%eax 209 movw %ax,%gs 210 movl %edx,%eax /* ss */ 211 movw %ax,%ss 212 213 /* Jump to acpi_restorecpu() */ 214 jmp *%edi 215 216/* used in real mode */ 217physical_gdt: .word 0 218 .long 0 219physical_esp: .long 0 220previous_cr2: .long 0 221previous_cr3: .long 0 222previous_cr4: .long 0 223reset_video: .long 0 224 225/* transfer from real mode to protected mode */ 226previous_cr0: .long 0 227previous_tr: .word 0 228previous_gdt: .word 0 229 .long 0 230previous_ldt: .word 0 231previous_idt: .word 0 232 .long 0 233where_to_recover: .long 0 234previous_ds: .word 0 235previous_es: .word 0 236previous_fs: .word 0 237previous_gs: .word 0 238previous_ss: .word 0 239dummy: .word 0
|