/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <i386/pmap.h>
#include <i386/proc_reg.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>
#include <i386/mp.h>
#include <i386/cpu_data.h>
#if CONFIG_MTRR
#include <i386/mtrr.h>
#endif
#if CONFIG_VMX
#include <i386/vmx/vmx_cpu.h>
#endif
#include <i386/ucode.h>
#include <i386/acpi.h>
#include <i386/fpu.h>
#include <i386/lapic.h>
#include <i386/mp.h>
#include <i386/mp_desc.h>
#include <i386/serial_io.h>
#if CONFIG_MCA
#include <i386/machine_check.h>
#endif
#include <i386/pmCPU.h>

#include <i386/tsc.h>

#include <kern/cpu_data.h>
#include <kern/machine.h>
#include <kern/timer_queue.h>
#include <console/serial_protos.h>
#include <machine/pal_routines.h>
#include <vm/vm_page.h>

#if HIBERNATION
#include <IOKit/IOHibernatePrivate.h>
#endif
#include <IOKit/IOPlatformExpert.h>
#include <sys/kdebug.h>

#if CONFIG_SLEEP
extern void	acpi_sleep_cpu(acpi_sleep_callback, void * refcon);
extern void	acpi_wake_prot(void);
#endif
extern kern_return_t IOCPURunPlatformQuiesceActions(void);
extern kern_return_t IOCPURunPlatformActiveActions(void);
extern kern_return_t IOCPURunPlatformHaltRestartActions(uint32_t message);

extern void 	fpinit(void);

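/*
 * Install the real-mode wake bootstrap and return the low-memory offset
 * at which it was placed (0 when sleep support is not configured).
 */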
vm_offset_t
acpi_install_wake_handler(void)
{
#if CONFIG_SLEEP
	install_real_mode_bootstrap(acpi_wake_prot);
	return REAL_MODE_BOOTSTRAP_OFFSET;
#else
	return 0;
#endif
}

#if HIBERNATION
struct acpi_hibernate_callback_data {
	acpi_sleep_callback func;
	void *refcon;
};
typedef struct acpi_hibernate_callback_data acpi_hibernate_callback_data_t;

unsigned int		save_kdebug_enable = 0;
static uint64_t		acpi_sleep_abstime;
static uint64_t		acpi_idle_abstime;
static uint64_t		acpi_wake_abstime, acpi_wake_postrebase_abstime;
boolean_t		deep_idle_rebase = TRUE;

#if CONFIG_SLEEP
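/*
 * Sleep-path callback wrapped around the caller's function when
 * hibernation may be active: write the hibernate image, then power off,
 * restart, or fall through to sleep before invoking the original callback.
 */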
static void
acpi_hibernate(void *refcon)
{
	uint32_t mode;

	acpi_hibernate_callback_data_t *data =
		(acpi_hibernate_callback_data_t *)refcon;

	if (current_cpu_datap()->cpu_hibernate)
	{
		mode = hibernate_write_image();

		if( mode == kIOHibernatePostWriteHalt )
		{
			// off
			HIBLOG("power off\n");
			IOCPURunPlatformHaltRestartActions(kPEHaltCPU);
			if (PE_halt_restart) (*PE_halt_restart)(kPEHaltCPU);
		}
		else if( mode == kIOHibernatePostWriteRestart )
		{
			// restart
			HIBLOG("restart\n");
			IOCPURunPlatformHaltRestartActions(kPERestartCPU);
			if (PE_halt_restart) (*PE_halt_restart)(kPERestartCPU);
		}
		else
		{
			// sleep
			HIBLOG("sleep\n");

			// In case we come back via a regular wake, clear the hibernate state in memory.
			cpu_datap(0)->cpu_hibernate = 0;
		}

	}
	kdebug_enable = 0;

	IOCPURunPlatformQuiesceActions();

	acpi_sleep_abstime = mach_absolute_time();

	(data->func)(data->refcon);

	/* should never get here! */
}
#endif /* CONFIG_SLEEP */
#endif /* HIBERNATION */

extern void			slave_pstart(void);
extern void			hibernate_rebuild_vm_structs(void);

extern	unsigned int		wake_nkdbufs;

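/*
 * Called with a platform sleep callback: offline all other CPUs, save
 * processor and platform state, enter sleep via 'func' (optionally
 * writing a hibernation image first), and restore state on wake before
 * returning to the caller.
 */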
void
acpi_sleep_kernel(acpi_sleep_callback func, void *refcon)
{
#if HIBERNATION
	acpi_hibernate_callback_data_t data;
#endif
	boolean_t did_hibernate;
	unsigned int	cpu;
	kern_return_t	rc;
	unsigned int	my_cpu;
	uint64_t	start;
	uint64_t	elapsed = 0;
	uint64_t	elapsed_trace_start = 0;

	kprintf("acpi_sleep_kernel hib=%d, cpu=%d\n",
			current_cpu_datap()->cpu_hibernate, cpu_number());

	/* Get all CPUs to be in the "off" state */
	my_cpu = cpu_number();
	for (cpu = 0; cpu < real_ncpus; cpu += 1) {
		if (cpu == my_cpu)
			continue;
		rc = pmCPUExitHaltToOff(cpu);
		if (rc != KERN_SUCCESS)
			panic("Error %d trying to transition CPU %d to OFF",
			      rc, cpu);
	}

	/* shutdown local APIC before passing control to firmware */
	lapic_shutdown();

#if HIBERNATION
	data.func = func;
	data.refcon = refcon;
#endif

	/* Save power management timer state */
	pmTimerSave();

#if CONFIG_VMX
	/*
	 * Turn off VT, otherwise switching to legacy mode will fail
	 */
	vmx_suspend();
#endif

	/*
	 * Enable FPU/SIMD unit for potential hibernate acceleration
	 */
	clear_ts();

	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_START, 0, 0, 0, 0, 0);

	save_kdebug_enable = kdebug_enable;
	kdebug_enable = 0;

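	/* Record the time we went to sleep so the clock can be rebased on wake */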
	acpi_sleep_abstime = mach_absolute_time();

#if CONFIG_SLEEP
	/*
	 * Save master CPU state and sleep the platform.
	 * This call does not return until the platform wakes up
	 * (or returns early if the sleep attempt fails).
	 */
	uint64_t old_cr3 = x86_64_pre_sleep();
#if HIBERNATION
	acpi_sleep_cpu(acpi_hibernate, &data);
#else
	acpi_sleep_cpu(func, refcon);
#endif

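	/* Execution resumes here on wake (or immediately, if the sleep attempt failed) */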
	start = mach_absolute_time();

	x86_64_post_sleep(old_cr3);

#endif /* CONFIG_SLEEP */

	/* Reset the UART if kprintf is enabled.
	 * However, kprintf should not be used before rtc_sleep_wakeup(),
	 * for compatibility with FireWire kprintf.
	 */

	if (FALSE == disable_serial_output)
		pal_serial_init();

#if HIBERNATION
	if (current_cpu_datap()->cpu_hibernate) {
		did_hibernate = TRUE;

	} else
#endif
	{
		did_hibernate = FALSE;
	}

	/* Re-enable mode (including 64-bit if applicable) */
	cpu_mode_init(current_cpu_datap());

#if CONFIG_MCA
	/* Re-enable machine check handling */
	mca_cpu_init();
#endif

#if CONFIG_MTRR
	/* restore MTRR settings */
	mtrr_update_cpu();
#endif

	/* update CPU microcode */
	ucode_update_wake();

#if CONFIG_VMX
	/*
	 * Restore VT mode
	 */
	vmx_resume();
#endif

#if CONFIG_MTRR
	/* set up PAT following boot processor power up */
	pat_init();
#endif

	/*
	 * Go through all of the CPUs and mark them as requiring
	 * a full restart.
	 */
	pmMarkAllCPUsOff();


	/* re-enable and re-init local apic (prior to starting timers) */
	if (lapic_probe())
		lapic_configure();

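	/* Give the VM layer a chance to rebuild structures torn down for hibernation */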
	hibernate_rebuild_vm_structs();

	elapsed += mach_absolute_time() - start;
	acpi_wake_abstime = mach_absolute_time();

	/* let the realtime clock reset */
	rtc_sleep_wakeup(acpi_sleep_abstime);
	acpi_wake_postrebase_abstime = mach_absolute_time();
	assert(mach_absolute_time() >= acpi_sleep_abstime);

	kdebug_enable = save_kdebug_enable;

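	/* If tracing was disabled before sleep, optionally restart it now
	 * (sized by wake_nkdbufs) so that early wake activity is captured. */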
	if (kdebug_enable == 0) {
		if (wake_nkdbufs) {
			start = mach_absolute_time();
			start_kern_tracing(wake_nkdbufs, TRUE);
			elapsed_trace_start += mach_absolute_time() - start;
		}
	}
	start = mach_absolute_time();

	/* Reconfigure FP/SIMD unit */
	init_fpu();
	clear_ts();

	IOCPURunPlatformActiveActions();

	if (did_hibernate) {
		elapsed += mach_absolute_time() - start;

		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_START, elapsed, elapsed_trace_start, 0, 0, 0);
		hibernate_machine_init();
		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_END, 0, 0, 0, 0, 0);

		current_cpu_datap()->cpu_hibernate = 0;

		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, 0, 0, 0, 0, 0);
	} else
		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, 0, 0, 0, 0, 0);

	/* Restore power management register state */
	pmCPUMarkRunning(current_cpu_datap());

	/* Restore power management timer state */
	pmTimerRestore();

	/* Restart timer interrupts */
	rtc_timer_start();

#if HIBERNATION

	kprintf("ret from acpi_sleep_cpu hib=%d\n", did_hibernate);
#endif

#if CONFIG_SLEEP
	/* Because we don't save the bootstrap page, and it is shared
	 * between sleep and MP slave init, we need to recreate it
	 * after coming back from sleep or hibernate */
	install_real_mode_bootstrap(slave_pstart);
#endif
}

/*
 * acpi_idle_kernel is called by the ACPI Platform kext to request the kernel
 * to idle the boot processor in the deepest C-state for S0 sleep. All slave
 * processors are expected already to have been offlined in the deepest C-state.
 *
 * The contract with ACPI is that although the kernel is called with interrupts
 * disabled, interrupts may need to be re-enabled to dismiss any pending timer
 * interrupt. However, the callback function will be called once this has
 * occurred and interrupts are guaranteed to be disabled at that time,
 * and to remain disabled during C-state entry, exit (wake) and return
 * from acpi_idle_kernel.
 */
void
acpi_idle_kernel(acpi_sleep_callback func, void *refcon)
{
	boolean_t	istate = ml_get_interrupts_enabled();

	kprintf("acpi_idle_kernel, cpu=%d, interrupts %s\n",
		cpu_number(), istate ? "enabled" : "disabled");

	assert(cpu_number() == master_cpu);

	/*
	 * Effectively set the boot cpu offline.
	 * This will stop further deadlines being set.
	 */
	cpu_datap(master_cpu)->cpu_running = FALSE;

	/* Cancel any pending deadline */
	setPop(0);
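	/* Dismiss any pending local APIC timer interrupt, briefly re-enabling
	 * interrupts as described in the contract above. */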
	while (lapic_is_interrupting(LAPIC_TIMER_VECTOR)) {
		(void) ml_set_interrupts_enabled(TRUE);
		setPop(0);
		ml_set_interrupts_enabled(FALSE);
	}

	/*
	 * Call back to caller to indicate that interrupts will remain
	 * disabled while we deep idle, wake and return.
	 */
	func(refcon);

	acpi_idle_abstime = mach_absolute_time();

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_START,
		acpi_idle_abstime, deep_idle_rebase, 0, 0, 0);

	/*
	 * Disable tracing during S0-sleep
	 * unless overridden by sysctl -w tsc.deep_idle_rebase=0
	 */
	if (deep_idle_rebase) {
		save_kdebug_enable = kdebug_enable;
		kdebug_enable = 0;
	}

	/*
	 * Call into power-management to enter the lowest C-state.
	 * Note when called on the boot processor this routine will
	 * return directly when awoken.
	 */
	pmCPUHalt(PM_HALT_SLEEP);

	/*
	 * Get the wakeup time relative to the TSC, which has continued to advance.
	 * Then rebase nanotime to reflect time not progressing over sleep
	 * - unless overridden so that tracing can occur during deep_idle.
	 */
	acpi_wake_abstime = mach_absolute_time();
	if (deep_idle_rebase) {
		rtc_sleep_wakeup(acpi_idle_abstime);
		kdebug_enable = save_kdebug_enable;
	}
	acpi_wake_postrebase_abstime = mach_absolute_time();
	assert(mach_absolute_time() >= acpi_idle_abstime);
	cpu_datap(master_cpu)->cpu_running = TRUE;

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_END,
		acpi_wake_abstime, acpi_wake_abstime - acpi_idle_abstime, 0, 0, 0);

	/* Like S3 sleep, turn on tracing if trace_wake boot-arg is present */
	if (kdebug_enable == 0) {
		if (wake_nkdbufs)
			start_kern_tracing(wake_nkdbufs, TRUE);
	}

	IOCPURunPlatformActiveActions();

	/* Restart timer interrupts */
	rtc_timer_start();
}

extern char real_mode_bootstrap_end[];
extern char real_mode_bootstrap_base[];

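/*
 * Install the real-mode bootstrap (shared by the ACPI wake path and MP
 * slave startup), patching in the protected-mode entry point 'prot_entry'.
 */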
void
install_real_mode_bootstrap(void *prot_entry)
{
	/*
	 * Copy the boot entry code to the real-mode vector area REAL_MODE_BOOTSTRAP_OFFSET.
	 * This is in page 1 which has been reserved for this purpose by
	 * machine_startup() from the boot processor.
	 * The slave boot code is responsible for switching to protected
	 * mode and then jumping to the common startup, _start().
	 */
	bcopy_phys(kvtophys((vm_offset_t) real_mode_bootstrap_base),
		   (addr64_t) REAL_MODE_BOOTSTRAP_OFFSET,
		   real_mode_bootstrap_end-real_mode_bootstrap_base);

	/*
	 * Set the location at the base of the stack to point to the
	 * common startup entry.
	 */
	ml_phys_write_word(
		PROT_MODE_START+REAL_MODE_BOOTSTRAP_OFFSET,
		(unsigned int)kvtophys((vm_offset_t)prot_entry));

	/* Flush caches */
	__asm__("wbinvd");
}

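/*
 * Report whether the system woke from sleep or deep idle within the
 * last 5 seconds.
 */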
boolean_t
ml_recent_wake(void) {
	uint64_t ctime = mach_absolute_time();
	assert(ctime > acpi_wake_postrebase_abstime);
	return ((ctime - acpi_wake_postrebase_abstime) < 5 * NSEC_PER_SEC);
}
