/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

#include <debug.h>

#include <i386/asm.h>
#include <i386/proc_reg.h>
#include <i386/postcode.h>
#include <assym.s>

#include <i386/mp.h>
#include <i386/cpuid.h>
#include <i386/acpi.h>

.code32


/*
 * Interrupt and bootup stack for initial processor.
 * Note: we switch to a dynamically allocated interrupt stack once VM is up.
 */

/* in the __HIB section since the hibernate restore code uses this stack. */
	.section __HIB, __data
	.align	12

	.globl	EXT(low_intstack)
EXT(low_intstack):
	.globl  EXT(gIOHibernateRestoreStack)
EXT(gIOHibernateRestoreStack):

	.space	INTSTACK_SIZE

	.globl	EXT(low_eintstack)
EXT(low_eintstack):
	.globl  EXT(gIOHibernateRestoreStackEnd)
EXT(gIOHibernateRestoreStackEnd):

	/* back to the regular __DATA section. */

	.section __DATA, __data

/*
 * Stack for machine-check handler.
 */
	.align	12
	.globl	EXT(mc_task_stack)
EXT(mc_task_stack):
	.space	INTSTACK_SIZE
	.globl	EXT(mc_task_stack_end)
EXT(mc_task_stack_end):

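/*
 * The macro below performs the 32-bit to 64-bit hand-off: it enables PAE in
 * CR4, sets LME and NXE in the IA32_EFER MSR, loads CR3 with the statically
 * built BootPML4, enables paging (and write protection) in CR0, and far-jumps
 * into KERNEL64_CS. The trailing .code64 tells the assembler that everything
 * following an expansion of this macro is 64-bit code.
 */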
	/* Must not clobber EDI */
#define SWITCH_TO_64BIT_MODE					 \
	movl	$(CR4_PAE),%eax		/* enable PAE */	;\
	movl	%eax,%cr4					;\
	movl    $MSR_IA32_EFER,%ecx				;\
	rdmsr							;\
	/* enable long mode, NX */				;\
	orl	$(MSR_IA32_EFER_LME | MSR_IA32_EFER_NXE),%eax	;\
	wrmsr							;\
	movl	$EXT(BootPML4),%eax				;\
	movl	%eax,%cr3					;\
	movl	%cr0,%eax					;\
	orl	$(CR0_PG|CR0_WP),%eax	/* enable paging */	;\
	movl	%eax,%cr0					;\
	ljmpl	$KERNEL64_CS,$64f				;\
64:								;\
	.code64

/*
 * BSP CPU starts here.
 *	eax points to kernbootstruct
 *
 * Environment:
 *	protected mode, no paging, flat 32-bit address space.
 *	(Code/data/stack segments have base == 0, limit == 4G)
 */

.code32
	.text
	.section __HIB, __text
	.align	ALIGN
	.globl	EXT(_start)
	.globl	EXT(pstart)
LEXT(_start)
LEXT(pstart)

/*
 * Here we do the minimal setup to switch from 32 bit mode to 64 bit long mode.
 *
 * Initial memory layout:
 *
 *	-------------------------
 *	|			|
 *	| Kernel text/data	|
 *	|			|
 *	|-----------------------| Kernel text base addr - 2MB-aligned
 *	| padding		|
 *	|-----------------------|
 *	| __HIB section		|
 *	|-----------------------| Page-aligned
 *	|			|
 *	| padding		|
 *	|			|
 *	------------------------- 0
 *
 */
	mov	%eax, %edi	/* save kernbootstruct */

	/* Use low 32-bits of address as 32-bit stack */
	movl	$EXT(low_eintstack), %esp

	POSTCODE(PSTART_ENTRY)

	/*
	 * Set up segmentation
	 */
	movl	$EXT(protected_mode_gdtr), %eax
	lgdtl	(%eax)

	/*
	 * Rebase Boot page tables to kernel base address.
	 */
	movl	$EXT(BootPML4), %eax			// Level 4:
	add	%eax, 0*8+0(%eax)			//  - 1:1
	add	%eax, KERNEL_PML4_INDEX*8+0(%eax)	//  - kernel space

	movl	$EXT(BootPDPT), %edx			// Level 3:
	add	%eax, 0*8+0(%edx)
	add	%eax, 1*8+0(%edx)
	add	%eax, 2*8+0(%edx)
	add	%eax, 3*8+0(%edx)
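	/*
	 * Each add above folds the 32-bit address of BootPML4 into an entry
	 * of the statically built boot tables, rebasing the link-time values
	 * into pointers the MMU can follow once CR3 is loaded.
	 */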

	POSTCODE(PSTART_REBASE)

/* the following code is shared by the master CPU and all slave CPUs */
L_pstart_common:
	/*
	 * switch to 64 bit mode
	 */
	SWITCH_TO_64BIT_MODE

	/* Flush data segment selectors */
	xor	%eax, %eax
	mov	%ax, %ss
	mov	%ax, %ds
	mov	%ax, %es
	mov	%ax, %fs
	mov	%ax, %gs

	test	%edi, %edi /* Populate stack canary on BSP */
	jz	Lvstartshim

	mov	$1, %eax
	cpuid
	test	$(1 << 30), %ecx
	jz	Lnon_rdrand
	rdrand	%rax		/* RAX := 64 bits of DRBG entropy */
	jnc	Lnon_rdrand	/* TODO: complain if DRBG fails at this stage */

Lstore_random_guard:
	xor	%ah, %ah	/* Security: zero second byte of stack canary */
	movq	%rax, ___stack_chk_guard(%rip)
	/* %edi = boot_args_start if BSP */
Lvstartshim:

	POSTCODE(PSTART_VSTART)

	/* %edi = boot_args_start */

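	/*
	 * Hop onto the kernel's high virtual alias before entering C: OR the
	 * high kernel base (0xffffff8000000000) into both the stack pointer
	 * and the address of _vstart, so the indirect call below lands on the
	 * high mapping established by the boot page tables.
	 */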
	leaq	_vstart(%rip), %rcx
	movq	$0xffffff8000000000, %rax	/* adjust pointer up high */
	or	%rax, %rsp			/* and stack pointer up there */
	or	%rcx, %rax
	andq	$0xfffffffffffffff0, %rsp	/* align stack */
	xorq	%rbp, %rbp			/* zero frame pointer */
	callq	*%rax

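/*
 * Fallback stack-canary entropy for CPUs without RDRAND (or when RDRAND
 * reports failure): mix the TSC with a few address bits via shifts, XORs
 * and a data-dependent rotate. Weak, but used only to seed the boot-time
 * stack guard when no hardware DRBG is available.
 */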
Lnon_rdrand:
	rdtsc /* EDX:EAX := TSC */
	/* Distribute low order bits */
	mov	%eax, %ecx
	xor	%al, %ah
	shl	$16, %rcx
	xor	%rcx, %rax
	xor	%eax, %edx

	/* Incorporate ASLR entropy, if any */
	lea	(%rip), %rcx
	shr	$21, %rcx
	movzbl	%cl, %ecx
	shl	$16, %ecx
	xor	%ecx, %edx

	mov	%ah, %cl
	ror	%cl, %edx /* Right rotate EDX (TSC&0xFF ^ (TSC>>8 & 0xFF))&1F */
	shl	$32, %rdx
	xor	%rdx, %rax
	mov	%cl, %al
	jmp	Lstore_random_guard
/*
 * AP (slave) CPUs enter here.
 *
 * Environment:
 *	protected mode, no paging, flat 32-bit address space.
 *	(Code/data/stack segments have base == 0, limit == 4G)
 */
	.align	ALIGN
	.globl	EXT(slave_pstart)
LEXT(slave_pstart)
	.code32
	cli				/* disable interrupts, so we don't */
					/* need an IDT for a while */
	POSTCODE(SLAVE_PSTART)

	movl	$EXT(mp_slave_stack) + PAGE_SIZE, %esp

	xor 	%edi, %edi		/* AP, no "kernbootstruct" */

	jmp	L_pstart_common		/* hop a ride to vstart() */


/* BEGIN HIBERNATE CODE */

.section __HIB, __text
/*
 * This code is linked into the kernel but part of the "__HIB" section,
 * which means it's used by code running in the special context of restoring
 * the kernel text and data from the hibernation image read by the booter.
 * hibernate_kernel_entrypoint() and everything it calls or references
 * (i.e. hibernate_restore_phys_page()) needs to be careful to touch only
 * memory that is also in the "__HIB" section.
 */

	.align	ALIGN
	.globl	EXT(hibernate_machine_entrypoint)
.code32
LEXT(hibernate_machine_entrypoint)
	movl    %eax, %edi /* regparm(1) calling convention */

	/* Use low 32-bits of address as 32-bit stack */
	movl	$EXT(low_eintstack), %esp

	/*
	 * Set up GDT
	 */
	movl	$EXT(master_gdtr), %eax
	lgdtl	(%eax)

	/* Switch to 64-bit on the Boot PTs */
	SWITCH_TO_64BIT_MODE

	leaq	EXT(hibernate_kernel_entrypoint)(%rip),%rcx

	/* adjust the pointers to be up high */
	movq	$0xffffff8000000000, %rax
	orq	%rax, %rsp
	orq	%rcx, %rax
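	/* %rax now holds the high-alias address of hibernate_kernel_entrypoint,
	 * and %rsp has been moved onto the same high mapping. */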

	/* %edi is already filled with header pointer */
	xorl	%esi, %esi			/* zero 2nd arg */
	xorl	%edx, %edx			/* zero 3rd arg */
	xorl	%ecx, %ecx			/* zero 4th arg */
	andq	$0xfffffffffffffff0, %rsp	/* align stack */

	/* call instead of jmp to keep the required stack alignment */
	xorq	%rbp, %rbp			/* zero frame pointer */
	call	*%rax

	/* NOTREACHED */
	hlt

/* END HIBERNATE CODE */

#if CONFIG_SLEEP
/* BEGIN ACPI WAKEUP CODE */

#include <i386/acpi.h>


/*
 * acpi_wake_start
 */

.section __TEXT,__text
.code64

/*
 * acpi_sleep_cpu(acpi_sleep_callback func, void * refcon)
 *
 * Save CPU state before platform sleep. Restore CPU state
 * following wake up.
 */

ENTRY(acpi_sleep_cpu)
	push	%rbp
	mov	%rsp, %rbp

	/* save flags */
	pushf

	/* save general purpose registers */
	push %rax
	push %rbx
	push %rcx
	push %rdx
	push %rbp
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	push %r12
	push %r13
	push %r14
	push %r15

	mov	%rsp, saved_rsp(%rip)

	/* make sure tlb is flushed */
	mov	%cr3,%rax
	mov	%rax,%cr3

	/* save control registers */
	mov	%cr0, %rax
	mov	%rax, saved_cr0(%rip)
	mov	%cr2, %rax
	mov	%rax, saved_cr2(%rip)
	mov	%cr3, %rax
	mov	%rax, saved_cr3(%rip)
	mov	%cr4, %rax
	mov	%rax, saved_cr4(%rip)

	/* save segment registers */
	movw	%es, saved_es(%rip)
	movw	%fs, saved_fs(%rip)
	movw	%gs, saved_gs(%rip)
	movw	%ss, saved_ss(%rip)

	/* save the 64bit user and kernel gs base */
	/* note: user's is currently swapped into the kernel base MSR */
	mov	$MSR_IA32_KERNEL_GS_BASE, %rcx
	rdmsr
	movl	%eax, saved_ugs_base(%rip)
	movl	%edx, saved_ugs_base+4(%rip)
	swapgs
	rdmsr
	movl	%eax, saved_kgs_base(%rip)
	movl	%edx, saved_kgs_base+4(%rip)
	swapgs

	/* save descriptor table registers */
	sgdt	saved_gdt(%rip)
	sldt	saved_ldt(%rip)
	sidt	saved_idt(%rip)
	str	saved_tr(%rip)

	/*
	 * Call ACPI function provided by the caller to sleep the platform.
	 * This call will not return on success.
	 */

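	/* %rdi/%rsi arrive as (func, refcon); swap them so the callback is
	 * reached through %rsi with refcon as its first argument. */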
	xchgq	%rdi, %rsi
	call	*%rsi

	/* sleep failed, no cpu context lost */
	jmp	wake_restore

.section __HIB, __text
.code32
.globl EXT(acpi_wake_prot)
EXT(acpi_wake_prot):
	/* protected mode, paging disabled */
	movl	$EXT(low_eintstack), %esp

	SWITCH_TO_64BIT_MODE

	jmp	Lwake_64

.section __TEXT,__text
.code64

.globl EXT(acpi_wake_prot_entry)
EXT(acpi_wake_prot_entry):
	POSTCODE(ACPI_WAKE_PROT_ENTRY)
	/* Return from hibernate code in iokit/Kernel/IOHibernateRestoreKernel.c
	 */
Lwake_64:
	/*
	 * restore cr4, PAE and NXE states in an orderly fashion
	 */
	mov	saved_cr4(%rip), %rcx
	mov	%rcx, %cr4

	mov	$(MSR_IA32_EFER), %ecx		/* MSR number in ecx */
	rdmsr					/* MSR value in edx:eax */
	or	$(MSR_IA32_EFER_NXE), %eax	/* Set NXE bit in low 32-bits */
	wrmsr					/* Update */

	movq	saved_cr2(%rip), %rax
	mov	%rax, %cr2

	/* restore CR0, paging enabled */
	mov	saved_cr0(%rip), %rax
	mov	%rax, %cr0

	/* restore the page tables */
	mov	saved_cr3(%rip), %rax
	mov	%rax, %cr3

	/* protected mode, paging enabled */
	POSTCODE(ACPI_WAKE_PAGED_ENTRY)

	/* load null segment selectors */
	xor	%eax, %eax
	movw	%ax, %ss
	movw	%ax, %ds

	/* restore descriptor tables */
	lgdt	saved_gdt(%rip)
	lldt	saved_ldt(%rip)
	lidt	saved_idt(%rip)

	/* restore segment registers */
	movw	saved_es(%rip), %es
	movw	saved_fs(%rip), %fs
	movw	saved_gs(%rip), %gs
	movw	saved_ss(%rip), %ss

	/* restore the 64bit kernel and user gs base */
	mov	$MSR_IA32_KERNEL_GS_BASE, %rcx
	movl	saved_kgs_base(%rip),   %eax
	movl	saved_kgs_base+4(%rip), %edx
	wrmsr
	swapgs
	movl	saved_ugs_base(%rip),   %eax
	movl	saved_ugs_base+4(%rip), %edx
	wrmsr

	/*
	 * Restore task register. Before doing this, clear the busy flag
	 * in the TSS descriptor set by the CPU.
	 */
	lea	saved_gdt(%rip), %rax
	movq	2(%rax), %rdx			/* GDT base, skip limit word */
	movl	$(KERNEL_TSS), %eax		/* TSS segment selector */
	movb	$(K_TSS), 5(%rdx, %rax)		/* clear busy flag */
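	/* Byte 5 of a descriptor is its access/type byte; rewriting it with
	 * K_TSS marks the TSS available again, since ltr faults on a
	 * descriptor whose busy bit is still set from before sleep. */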

	ltr	saved_tr(%rip)			/* restore TR */

wake_restore:
	mov	saved_rsp(%rip), %rsp

	/* restore general purpose registers */
	pop %r15
	pop %r14
	pop %r13
	pop %r12
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rbp
	pop %rdx
	pop %rcx
	pop %rbx
	pop %rax

	/* restore flags */
	popf

	leave
	ret

/* END ACPI WAKEUP CODE */
#endif /* CONFIG_SLEEP */

/* Code to get from real mode to protected mode */

#define	operand_size_prefix	.byte 0x66
#define	address_size_prefix	.byte 0x67
#define	cs_base_prefix		.byte 0x2e

#define	LJMP(segment,address)			\
	operand_size_prefix			;\
	.byte	0xea				;\
	.long	address-EXT(real_mode_bootstrap_base)	;\
	.word	segment

#define	LGDT(address)				\
	cs_base_prefix				;\
	address_size_prefix			;\
	operand_size_prefix			;\
	.word	0x010f				;\
	.byte	0x15				;\
	.long	address-EXT(real_mode_bootstrap_base)
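
/*
 * LJMP and LGDT hand-assemble their instructions because the bootstrap code
 * below is entered in real mode (see the section comment above) even though
 * it is assembled with .code32: LJMP emits a far jmp ptr16:32 (opcode 0xEA)
 * and LGDT emits lgdt with a 32-bit memory operand (0x0F 0x01 /2), both with
 * size-override prefixes, and both take addresses as offsets from
 * real_mode_bootstrap_base so they keep working after this page is copied.
 */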

.section __HIB, __text
.align	12	/* Page align for single bcopy_phys() */
.code32
Entry(real_mode_bootstrap_base)
	cli

	LGDT(EXT(protected_mode_gdtr))

	/* set the PE bit of CR0 */
	mov	%cr0, %eax
	inc	%eax
	mov	%eax, %cr0

	/* reload CS register */
	LJMP(KERNEL32_CS, 1f + REAL_MODE_BOOTSTRAP_OFFSET)
1:

	/* we are in protected mode now */
	/* set up the segment registers */
	mov	$KERNEL_DS, %eax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %ss
	xor	%eax,%eax
	movw	%ax, %fs
	movw	%ax, %gs

	POSTCODE(SLAVE_STARTPROG_ENTRY);

	mov	PROT_MODE_START+REAL_MODE_BOOTSTRAP_OFFSET, %ecx
	jmp 	*%ecx

Entry(protected_mode_gdtr)
	.short	160		/* limit (8*20 segs) */
	.quad	EXT(master_gdt)

Entry(real_mode_bootstrap_end)

/* Save area used across sleep/wake */
.section __HIB, __data
.align	2

/* gdtr for real address of master_gdt in HIB (not the aliased address) */
Entry(master_gdtr)
		.word 160		/* limit (8*20 segs) */
		.quad EXT(master_gdt)

saved_gdt:	.word 0
		.quad 0
saved_rsp:	.quad 0
saved_es:	.word 0
saved_fs:	.word 0
saved_gs:	.word 0
saved_ss:	.word 0
saved_cr0:	.quad 0
saved_cr2:	.quad 0
saved_cr3:	.quad 0
saved_cr4:	.quad 0
saved_idt:	.word 0
		.quad 0
saved_ldt:	.word 0
saved_tr:	.word 0
saved_kgs_base:	.quad 0
saved_ugs_base:	.quad 0
