/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * acpi_wakeup.s — i386 CPU context save/restore across ACPI S3 sleep.
 *
 * Syntax: AT&T/GAS, 32-bit protected-mode kernel code.
 *
 * acpi_sleep_cpu() snapshots the CPU state (flags, GPRs, control registers,
 * segment registers, descriptor tables, task register) into the __SLEEP data
 * area below, then invokes a caller-supplied platform sleep callback.  On a
 * successful sleep that callback never returns; the firmware/real-mode wake
 * handler later re-enters protected mode and jumps to the address stored in
 * saved_eip (wake_prot), which rebuilds paging and the descriptor state and
 * finally resumes at wake_restore as if acpi_sleep_cpu() had returned.
 */

#include <i386/asm.h>
#include <i386/proc_reg.h>
#include <i386/postcode.h>
#include <i386/acpi.h>
#include <assym.s>

	.file "acpi_wakeup.s"

	.text
	.align	12		/* Page align for single bcopy_phys() */

/*
 * PA() marks references that are reached with paging disabled, i.e. that
 * must resolve to physical addresses.  Identity-mapped here, so it is a
 * documentation aid rather than a translation.
 */
#define PA(addr)	(addr)

#if CONFIG_SLEEP
/*
 * acpi_wake_prot — first protected-mode instruction after wake.
 * Entered from the real-mode wake handler: protected mode is on, paging is
 * still OFF.  Loads flat data selectors, then jumps (physically) to the
 * address saved in saved_eip (i.e. wake_prot below).
 */
ENTRY(acpi_wake_prot)

	/* protected mode, paging disabled */

	/*
	 * setup the protected mode segment registers
	 * Selector 0x10 — presumably the flat data descriptor in the GDT
	 * installed by the wake trampoline; TODO confirm against that GDT.
	 */
	mov	$0x10, %eax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %ss
	movw	%ax, %fs
	movw	%ax, %gs

	/* jump back to the sleep function in the kernel */
	movl	PA(saved_eip), %eax
	jmp	*%eax

/*
 * acpi_sleep_cpu(acpi_sleep_callback func, void * refcon)
 *
 * Save CPU state before platform sleep. Restore CPU state
 * following wake up.
 */

ENTRY(acpi_sleep_cpu)
	pushl	%ebp
	movl	%esp, %ebp

	/* save flags */
	pushfl

	/*
	 * save general purpose registers
	 * saved_esp is captured AFTER pushal so that wake_restore can simply
	 * reload %esp and popal/popfl to recover the full register file.
	 */
	pushal
	movl	%esp, saved_esp

	/* make sure tlb is flushed (reloading CR3 invalidates non-global TLB entries) */
	movl	%cr3,%eax
	movl	%eax,%cr3

	/* save control registers */
	movl	%cr0, %eax
	movl	%eax, saved_cr0
	movl	%cr2, %eax
	movl	%eax, saved_cr2
	movl	%cr3, %eax
	movl	%eax, saved_cr3
	movl	%cr4, %eax
	movl	%eax, saved_cr4

	/* save segment registers */
	movw	%es, saved_es
	movw	%fs, saved_fs
	movw	%gs, saved_gs
	movw	%ss, saved_ss

	/* save descriptor table registers */
	sgdt	saved_gdt
	sldt	saved_ldt
	sidt	saved_idt
	str	saved_tr

	/*
	 * When system wakes up, the real mode wake handler will revert to
	 * protected mode, then jump to the address stored at saved_eip.
	 */
	movl	$(PA(wake_prot)), saved_eip

	/*
	 * Call ACPI function provided by the caller to sleep the platform.
	 * This call will not return on success.
	 */
	pushl	B_ARG1			/* refcon — callback's argument */
	movl	B_ARG0, %edi		/* func — platform sleep callback */
	call	*%edi
	popl	%edi

	/* sleep failed, no cpu context lost */
	jmp	wake_restore

/*
 * wake_prot — resume point after wake (reached via saved_eip).
 * Protected mode, paging disabled.  Rebuilds paging and descriptor state in
 * the architecturally required order: CR3, then CR4 (PAE), then EFER.NXE,
 * then CR0 (paging enable), then a far jump to reload %cs.
 */
wake_prot:
	/* protected mode, paging disabled */
	POSTCODE(ACPI_WAKE_PROT_ENTRY)

	movl	PA(saved_cr3), %ebx
	movl	PA(saved_cr4), %ecx
	/*
	 * restore cr3, PAE and NXE states in an orderly fashion
	 */
	movl	%ebx, %cr3
	movl	%ecx, %cr4

	movl	$(MSR_IA32_EFER), %ecx		/* MSR number in ecx */
	rdmsr					/* MSR value return in edx: eax */
	orl	$(MSR_IA32_EFER_NXE), %eax	/* Set NXE bit in low 32-bits */
	wrmsr					/* Update Extended Feature Enable reg */

	/* restore kernel GDT */
	lgdt	PA(saved_gdt)

	movl	PA(saved_cr2), %eax
	movl	%eax, %cr2

	/* restore CR0, paging enabled */
	movl	PA(saved_cr0), %eax
	movl	%eax, %cr0

	/* switch to kernel code segment (far jump also serializes and reloads %cs) */
	ljmpl	$(KERNEL32_CS), $wake_paged

wake_paged:

	/* protected mode, paging enabled */
	POSTCODE(ACPI_WAKE_PAGED_ENTRY)

	/* switch to kernel data segment */
	movw	$(KERNEL_DS), %ax
	movw	%ax, %ds

	/* restore local and interrupt descriptor tables */
	lldt	saved_ldt
	lidt	saved_idt

	/* restore segment registers */
	movw	saved_es, %es
	movw	saved_fs, %fs
	movw	saved_gs, %gs
	movw	saved_ss, %ss

	/*
	 * Restore task register. Before doing this, clear the busy flag
	 * in the TSS descriptor set by the CPU.
	 * (ltr faults on a descriptor already marked busy, so the access
	 * byte — offset 5 within the 8-byte descriptor — is rewritten to
	 * the non-busy K_TSS type first.)
	 */
	movl	$saved_gdt, %eax
	movl	2(%eax), %edx		/* GDT base, skip limit word */
	movl	$(KERNEL_TSS), %eax	/* TSS segment selector */
	movb	$(K_TSS), 5(%edx, %eax)	/* clear busy flag */
	ltr	saved_tr		/* restore TR */

/*
 * wake_restore — common exit for both the failed-sleep path and the full
 * wake path.  Reloads the stack captured in acpi_sleep_cpu and unwinds it,
 * returning to acpi_sleep_cpu's caller.
 */
wake_restore:

	/* restore general purpose registers */
	movl	saved_esp, %esp
	popal

	/* restore flags */
	popfl

	leave
	ret


	.section __HIB, __text
	.align 2

/*
 * acpi_wake_prot_entry — alternate wake entry placed in the hibernation
 * (__HIB) segment.  Entered in protected mode with paging ENABLED; it
 * temporarily disables paging to splice IdlePTD's four page-table pages
 * into IdlePDPT, re-enables paging, then performs the same descriptor/
 * register restore sequence as wake_prot/wake_paged, interleaved with
 * POSTCODE progress markers.
 */
	.globl EXT(acpi_wake_prot_entry)
ENTRY(acpi_wake_prot_entry)
	/* turn paging off so the PDPT can be edited via physical addresses */
	mov	%cr0, %eax
	and	$(~CR0_PG), %eax
	mov	%eax, %cr0

	/*
	 * Point all four PDPT entries at consecutive pages of IdlePTD,
	 * marking each valid.  Entries are 8 bytes (PAE format); %edx = 0
	 * supplies the high 32 bits of each entry.
	 */
	mov	$EXT(IdlePDPT), %eax
	mov	EXT(IdlePTD), %ecx
	or	$(INTEL_PTE_VALID), %ecx
	mov	$0x0, %edx
	mov	%ecx, (0*8+0)(%eax)
	mov	%edx, (0*8+4)(%eax)
	add	$(PAGE_SIZE), %ecx
	mov	%ecx, (1*8+0)(%eax)
	mov	%edx, (1*8+4)(%eax)
	add	$(PAGE_SIZE), %ecx
	mov	%ecx, (2*8+0)(%eax)
	mov	%edx, (2*8+4)(%eax)
	add	$(PAGE_SIZE), %ecx
	mov	%ecx, (3*8+0)(%eax)
	mov	%edx, (3*8+4)(%eax)

	/* install the PDPT and re-enable paging */
	mov	%eax, %cr3
	mov	%cr0, %eax
	or	$(CR0_PG), %eax
	mov	%eax, %cr0

	/* protected mode, paging enabled */
	POSTCODE(ACPI_WAKE_PAGED_ENTRY)

	/* restore kernel GDT */
	lgdt	saved_gdt

	POSTCODE(0x40)

	/* restore control registers */

	movl	saved_cr0, %eax
	movl	%eax, %cr0

	movl	saved_cr2, %eax
	movl	%eax, %cr2

	POSTCODE(0x3E)

	/*
	 * restore real PDE base
	 * CR3 is written both before and after CR4: the second write flushes
	 * the TLB under the finally-restored PAE/paging configuration.
	 */
	movl	saved_cr3, %eax
	movl	saved_cr4, %edx
	movl	%eax, %cr3
	movl	%edx, %cr4
	movl	%eax, %cr3

	/* switch to kernel data segment */
	movw	$(KERNEL_DS), %ax
	movw	%ax, %ds

	POSTCODE(0x3C)
	/* restore local and interrupt descriptor tables */
	lldt	saved_ldt
	lidt	saved_idt

	POSTCODE(0x3B)
	/* restore segment registers */
	movw	saved_es, %es
	movw	saved_fs, %fs
	movw	saved_gs, %gs
	movw	saved_ss, %ss

	POSTCODE(0x3A)
	/*
	 * Restore task register. Before doing this, clear the busy flag
	 * in the TSS descriptor set by the CPU.
	 * (Same busy-bit dance as in wake_paged above: ltr would #GP on a
	 * descriptor still marked busy.)
	 */
	movl	$saved_gdt, %eax
	movl	2(%eax), %edx		/* GDT base, skip limit word */
	movl	$(KERNEL_TSS), %eax	/* TSS segment selector */
	movb	$(K_TSS), 5(%edx, %eax)	/* clear busy flag */
	ltr	saved_tr		/* restore TR */

	/* restore general purpose registers */
	movl	saved_esp, %esp
	popal

	/* restore flags */
	popfl

	/* make sure interrupts are disabled */
	cli

	/*
	 * Return value 2 — presumably flags this hibernate-wake entry path
	 * to acpi_sleep_cpu's caller; TODO confirm against that caller.
	 */
	movl	$2, %eax

	leave

	ret
#endif /* CONFIG_SLEEP */

.data
.section __SLEEP, __data
.align 2

/*
 * CPU registers saved across sleep/wake.
 * Written by acpi_sleep_cpu, consumed by the wake paths above.  Lives in
 * the __SLEEP segment so it survives (and is reachable from) the wake
 * handler's physical-address accesses.
 */

saved_esp:	.long 0		/* %esp after pushfl+pushal */
saved_es:	.word 0
saved_fs:	.word 0
saved_gs:	.word 0
saved_ss:	.word 0
saved_cr0:	.long 0
saved_cr2:	.long 0		/* last page-fault linear address */
saved_cr3:	.long 0		/* page directory base */
saved_cr4:	.long 0
saved_gdt:	.word 0		/* 6-byte GDTR image: 16-bit limit ... */
		.long 0		/* ... + 32-bit base */
saved_idt:	.word 0		/* 6-byte IDTR image: 16-bit limit ... */
		.long 0		/* ... + 32-bit base */
saved_ldt:	.word 0		/* LDT selector */
saved_tr:	.word 0		/* task register selector */
saved_eip:	.long 0		/* protected-mode resume address (wake_prot) */