/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <i386/asm.h>
#include <i386/proc_reg.h>

#include <i386/postcode.h>
#include <assym.s>

/*
 * This code is linked into the kernel but placed in the "__HIB" section, which
 * means it is used by code running in the special context of restoring the
 * kernel text and data from the hibernation image read by the booter.
 * hibernate_kernel_entrypoint() and everything it calls or references
 * (e.g. hibernate_restore_phys_page()) must be careful to touch only memory
 * that is also in the "__HIB" section.
 */
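
/*
 * For illustration only (a hedged sketch, not code from this file): a C
 * routine that must remain usable during restore could be pinned into the
 * same segment with section attributes, assuming the compiler's Mach-O
 * "segment,section" attribute syntax.  hib_helper and hib_scratch are
 * hypothetical names.
 *
 *	static uint32_t hib_scratch __attribute__((section("__HIB,__data")));
 *
 *	void __attribute__((section("__HIB,__text")))
 *	hib_helper(void)
 *	{
 *		hib_scratch++;		// touches only __HIB memory
 *	}
 *
 * Anything such a routine calls or references needs the same treatment.
 */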

/*
 * GAS won't handle an intersegment jump with a relocatable offset.
 */
#define	LJMP(segment,address)	\
	.byte	0xea		;\
	.long	address		;\
	.word	segment
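
/*
 * The bytes emitted above hand-assemble "jmp ptr16:32": the 0xea opcode,
 * a 32-bit offset, then a 16-bit segment selector.
 */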

/* Location of temporary page tables */
#define HPTD        (0x13000)
#define HPDPT       (0x17000)

#define LAST_PAGE	(0xFFE00000)	/* 4GB - 2MB */
#define LAST_PAGE_PDE   (0x7ff)		/* LAST_PAGE >> PDESHIFT (2MB pages) */

/*
 * fillpse
 *	eax = physical page address
 *	ebx = index into page table
 *	ecx = how many 2MB pages to map
 *	base = base address of page dir/table
 *	prot = protection bits
 */
#define	fillpse(base, prot)		  \
	shll	$3,%ebx			; \
	addl	base,%ebx		; \
	orl	$(PTE_V|PTE_PS|0x60), %eax   ; /* valid, 2MB page, accessed, dirty */ \
	orl	prot,%eax		; \
	xorl	%edx, %edx		; \
1:	movl	%eax,(%ebx)		; /* low 32b */ \
	addl	$4,%ebx			; \
	movl	%edx,(%ebx)		; /* high 32b */ \
	addl	$(1 << PDESHIFT),%eax	; /* increment physical address 2MB */ \
	addl	$4,%ebx			; /* next entry */ \
	loop	1b
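
/*
 * For reference, a rough C equivalent of fillpse (a sketch only; fillpse_c is
 * a hypothetical name, and the PTE_ and PDESHIFT constants are the ones used
 * above):
 *
 *	static void
 *	fillpse_c(uint64_t *base, uint32_t index, uint32_t count,
 *	    uint64_t phys, uint64_t prot)
 *	{
 *		uint32_t i;
 *
 *		for (i = 0; i < count; i++) {
 *			// 64-bit PDE: physical address plus valid, 2MB-page,
 *			// accessed and dirty bits, plus the caller's protection
 *			base[index + i] = phys | PTE_V | PTE_PS | 0x60 | prot;
 *			phys += 1ULL << PDESHIFT;	// next 2MB page
 *		}
 *	}
 */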



/*  Segment Descriptor
 *
 * 31          24         19   16                 7           0
 * ------------------------------------------------------------
 * |             | |B| |A|       | |   |1|0|E|W|A|            |
 * | BASE 31..24 |G|/|0|V| LIMIT |P|DPL|  TYPE   | BASE 23:16 |
 * |             | |D| |L| 19..16| |   |1|1|C|R|A|            |
 * ------------------------------------------------------------
 * |                             |                            |
 * |        BASE 15..0           |       LIMIT 15..0          |
 * |                             |                            |
 * ------------------------------------------------------------
 */
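
/*
 * Decoding the descriptors below against this layout: access byte 0x9e is
 * present, DPL 0, readable code; 0x92 is present, DPL 0, writable data.
 * The flags byte 0xcf gives limit[19:16] = 0xf with 4KB granularity and a
 * 32-bit default operand size, so both segments are flat: base 0, limit 4GB.
 */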

	.align	ALIGN
ENTRY(hib_gdt)
	.word	0, 0		/* 0x0  : null */
	.byte	0, 0, 0, 0

	.word	0xffff, 0x0000	/* 0x8  : code */
	.byte	0, 0x9e, 0xcf, 0

	.word	0xffff, 0x0000	/* 0x10 : data */
	.byte	0, 0x92, 0xcf, 0

ENTRY(hib_gdtr)
	.word	24		/* limit (8*3 segs) */
	.long	EXT(hib_gdt)

/*
 * Hibernation code restarts here.  Steal some pages from 0x10000
 * to 0x90000 for page tables, directories, etc., to temporarily
 * map the hibernation code (put at 0x100000 (phys) by the booter
 * and linked to 0xC0100000 by the linker) to 0xC0100000 so it can
 * execute.  It's self-contained and won't make any references outside
 * of itself.
 *
 * On the way down it has to save IdlePTD (and, if PAE, also IdlePDPT),
 * and after it runs it has to restore those and load IdlePTD (or
 * IdlePDPT if PAE) into %cr3 to re-establish the original mappings.
 */
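
/*
 * Layout of the temporary PAE paging structures built below:
 *
 *	HPDPT (0x17000): 4 page-directory-pointer entries, one per 1GB of
 *		virtual space, each pointing at one of the four 4KB page
 *		directories that make up HPTD.
 *	HPTD  (0x13000): 4 x 512 PDEs filled with 2MB (PSE) mappings that
 *		identity-map physical memory from 0 up to LAST_PAGE.  The
 *		final PDE (index LAST_PAGE_PDE, virtual LAST_PAGE) is left
 *		unused here and is later programmed as a scratch window by
 *		hibernate_restore_phys_page().
 */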

	.align	ALIGN
	.globl	EXT(hibernate_machine_entrypoint)
LEXT(hibernate_machine_entrypoint)
	cli

        mov     %eax, %edi              /* save pointer to the hibernate image header */

	POSTCODE(0x1)

	/* Map physical memory from zero to LAST_PAGE */
        xorl    %eax, %eax
        xorl    %ebx, %ebx
        movl    $(LAST_PAGE_PDE), %ecx
        fillpse( $(HPTD), $(PTE_W) )

	/* Point the four PDPT entries at the four page directory pages
	   within HPTD; each entry maps 1GB of virtual space */
	movl	$(HPDPT), %ebx
        movl    $(HPTD), %eax
	orl	$(PTE_V), %eax

        xorl    %edx, %edx

	movl	%eax,(%ebx)		/* low 32b */
	addl	$4,%ebx
	movl	%edx,(%ebx)		/* high 32b */
	addl	$4,%ebx
	addl	$(1 << 12),%eax		/* next 4KB page directory (next 1GB of VA) */

	movl	%eax,(%ebx)		/* low 32b */
	addl	$4,%ebx
	movl	%edx,(%ebx)		/* high 32b */
	addl	$4,%ebx
	addl	$(1 << 12),%eax		/* next 4KB page directory (next 1GB of VA) */

	movl	%eax,(%ebx)		/* low 32b */
	addl	$4,%ebx
	movl	%edx,(%ebx)		/* high 32b */
	addl	$4,%ebx
	addl	$(1 << 12),%eax		/* next 4KB page directory (next 1GB of VA) */

	movl	%eax,(%ebx)		/* low 32b */
	addl	$4,%ebx
	movl	%edx,(%ebx)		/* high 32b */
	addl	$4,%ebx
	addl	$(1 << 12),%eax		/* next 4KB page directory (not used further) */

	/* set the page directory pointer table address */
	movl	$(HPDPT), %eax
	movl	%eax, %cr3

        POSTCODE(0x3)

	movl    %cr4,%eax
        orl     $(CR4_PAE),%eax
        movl    %eax,%cr4               /* enable PAE */

	movl	$(MSR_IA32_EFER), %ecx			/* MSR number in ecx */
	rdmsr						/* MSR value returned in edx:eax */
	orl	$(MSR_IA32_EFER_NXE), %eax		/* Set NXE bit in low 32-bits */
	wrmsr						/* Update Extended Feature Enable reg */

	movl	%cr0, %eax
	orl	$(CR0_PG|CR0_WP|CR0_PE), %eax
	movl	%eax, %cr0	/* enable paging */

        POSTCODE(0x4)

	lgdt	EXT(gdtptr)		/* load GDT */
	lidt	EXT(idtptr)		/* load IDT */

        POSTCODE(0x5)

        LJMP	(KERNEL_CS,EXT(hstart))  /* paging on and go to correct vaddr */

/* Hib restart code now running with correct addresses */
LEXT(hstart)
	POSTCODE(0x6)

	mov	$(KERNEL_DS),%ax	/* set kernel data segment */
	mov	%ax,%ds
	mov	%ax,%es
	mov	%ax,%ss

	mov	$0,%ax			/* fs must be zeroed; */
	mov	%ax,%fs			/* some bootstrappers don't do this */
	mov	%ax,%gs

	lea	EXT(gIOHibernateRestoreStackEnd),%esp	/* switch to the bootup stack */

        POSTCODE(0x7)

        xorl    %eax, %eax              /* Video memory - N/A */
        pushl   %eax
        pushl   %eax
        pushl   %eax
        mov     %edi, %eax              /* Pointer to hibernate header */
        pushl   %eax
        call    EXT(hibernate_kernel_entrypoint)
        /* NOTREACHED */
        hlt

/*
void
hibernate_restore_phys_page(uint64_t src, uint64_t dst, uint32_t len, uint32_t procFlags);
*/
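
/*
 * A rough C rendering of the routine below (a sketch only; the real code
 * must live entirely in __HIB and does the copy with rep movsl/movsb):
 *
 *	void
 *	hibernate_restore_phys_page(uint64_t src, uint64_t dst,
 *	    uint32_t len, uint32_t procFlags)
 *	{
 *		const uint8_t	*s = (const uint8_t *)(uint32_t)src;
 *		uint8_t		*d = (uint8_t *)(uint32_t)dst;
 *		uint64_t	*pde = (uint64_t *)(HPTD + (LAST_PAGE_PDE * 8));
 *
 *		if ((uint32_t)src == 0)
 *			return;			// nothing to copy
 *
 *		if (dst >= LAST_PAGE) {
 *			// retarget the scratch 2MB PDE at dst's 2MB page and
 *			// copy through the LAST_PAGE window instead
 *			*pde = (dst & ~((1ULL << PDESHIFT) - 1))
 *			    | PTE_V | PTE_PS | PTE_W;
 *			d = (uint8_t *)(LAST_PAGE |
 *			    ((uint32_t)dst & ((1 << PDESHIFT) - 1)));
 *			__asm__ volatile ("invlpg (%0)" : : "r" (d) : "memory");
 *		}
 *		while (len--)
 *			*d++ = *s++;
 *	}
 */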

	.align	5
	.globl	EXT(hibernate_restore_phys_page)

	/* XXX can only deal with exactly one page */
LEXT(hibernate_restore_phys_page)
	pushl	%edi
	pushl	%esi

	movl	8+ 4(%esp),%esi		/* source virtual address */
        addl    $0, %esi
        jz      3f                      /* If source == 0, nothing to do */

	movl    8+ 16(%esp),%eax        /* destination physical address, high 32 bits */
	movl    8+ 12(%esp),%edi        /* destination physical address, low 32 bits */
        addl    $0, %eax
        jne     1f                      /* need to map, above LAST_PAGE */

        cmpl    $(LAST_PAGE), %edi
        jb      2f                      /* no need to map, below LAST_PAGE */
1:
        /* Map physical address %eax:%edi to virt. address LAST_PAGE (4GB - 2MB) */
        movl    %eax, (HPTD + (LAST_PAGE_PDE * 8) + 4)
        movl    %edi, %eax              /* destination physical address */
        andl    $(LAST_PAGE), %eax
        orl     $(PTE_V | PTE_PS | PTE_W), %eax
        movl    %eax, (HPTD + (LAST_PAGE_PDE * 8))
        orl     $(LAST_PAGE), %edi
        invlpg  (%edi)

2:
	movl	8+ 20(%esp),%edx	/* number of bytes */
	cld
	/* move longs */
	movl	%edx,%ecx
	shrl	$2,%ecx
	rep
	movsl
	/* move bytes */
	movl	%edx,%ecx
	andl	$3,%ecx
	rep
	movsb
3:
	popl	%esi
	popl	%edi
	ret