1/*
2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 *
31 */
32
33#ifndef	I386_CPU_DATA
34#define I386_CPU_DATA
35
36#include <mach_assert.h>
37
38#include <kern/assert.h>
39#include <kern/kern_types.h>
40#include <kern/queue.h>
41#include <kern/processor.h>
42#include <kern/pms.h>
43#include <pexpert/pexpert.h>
44#include <mach/i386/thread_status.h>
45#include <mach/i386/vm_param.h>
46#include <i386/rtclock_protos.h>
47#include <i386/pmCPU.h>
48#include <i386/cpu_topology.h>
49
50#if CONFIG_VMX
51#include <i386/vmx/vmx_cpu.h>
52#endif
53
54#include <machine/pal_routines.h>
55
56/*
57 * Data structures referenced (anonymously) from per-cpu data:
58 */
59struct cpu_cons_buffer;
60struct cpu_desc_table;
61struct mca_state;
62
63/*
64 * Data structures embedded in per-cpu data:
65 */
typedef struct rtclock_timer {
	mpqueue_head_t		queue;		/* queue of pending timer entries */
	uint64_t		deadline;	/* next timer deadline */
	uint64_t		when_set;	/* time at which deadline was last set */
	boolean_t		has_expired;	/* deadline has passed */
} rtclock_timer_t;
72
73
#if defined(__i386__)

/*
 * Per-cpu descriptor-table bookkeeping ("cdi" = cpu descriptor index):
 * pointers to this cpu's TSS, GDT, IDT and LDT.  The packed size/ptr
 * pairs match the pseudo-descriptor operand format consumed by
 * lgdt/lidt -- NOTE(review): assumed from the layout; confirm.
 */
typedef struct {
	struct i386_tss         *cdi_ktss;
	struct __attribute__((packed)) {
		uint16_t size;
		struct fake_descriptor *ptr;
	} cdi_gdt, cdi_idt;
	struct fake_descriptor	*cdi_ldt;
	vm_offset_t				cdi_sstk;
} cpu_desc_index_t;

typedef enum {
	TASK_MAP_32BIT,			/* 32-bit, compatibility mode */
	TASK_MAP_64BIT,			/* 64-bit, separate address space */
	TASK_MAP_64BIT_SHARED		/* 64-bit, kernel-shared addr space */
} task_map_t;

#elif defined(__x86_64__)


/* x86_64 variant of the descriptor-table index (64-bit TSS, void* bases). */
typedef struct {
	struct x86_64_tss		*cdi_ktss;
	struct __attribute__((packed)) {
		uint16_t size;
		void *ptr;
	} cdi_gdt, cdi_idt;
	struct fake_descriptor	*cdi_ldt;
	vm_offset_t				cdi_sstk;
} cpu_desc_index_t;

typedef enum {
	TASK_MAP_32BIT,			/* 32-bit user, compatibility mode */
	TASK_MAP_64BIT,			/* 64-bit user thread, shared space */
} task_map_t;

#else
#error Unsupported architecture
#endif
113
114/*
115 * This structure is used on entry into the (uber-)kernel on syscall from
116 * a 64-bit user. It contains the address of the machine state save area
117 * for the current thread and a temporary place to save the user's rsp
118 * before loading this address into rsp.
119 */
typedef struct {
	addr64_t	cu_isf;		/* thread->pcb->iss.isf */
	uint64_t	cu_tmp;		/* temporary scratch */
	addr64_t	cu_user_gs_base;	/* user %gs base (assumed from name — confirm) */
} cpu_uber_t;
125
typedef	uint16_t	pcid_t;		/* process-context identifier (x86 PCID) */
typedef	uint8_t		pcid_ref_t;	/* PCID reference count (see cpu_pcid_refcounts) */

#define CPU_RTIME_BINS (12)
#define CPU_ITIME_BINS (CPU_RTIME_BINS)
131
132/*
133 * Per-cpu data.
134 *
135 * Each processor has a per-cpu data area which is dereferenced through the
136 * current_cpu_datap() macro. For speed, the %gs segment is based here, and
 * using this, inlines provide single-instruction access to frequently used
138 * members - such as get_cpu_number()/cpu_number(), and get_active_thread()/
139 * current_thread().
140 *
141 * Cpu data owned by another processor can be accessed using the
142 * cpu_datap(cpu_number) macro which uses the cpu_data_ptr[] array of per-cpu
143 * pointers.
144 */
typedef struct cpu_data
{
	struct pal_cpu_data	cpu_pal_data;		/* PAL-specific data */
#define				cpu_pd cpu_pal_data	/* convenience alias */
	struct cpu_data		*cpu_this;		/* pointer to myself */
	thread_t		cpu_active_thread;	/* thread running on this cpu
							 * (read by current_thread()) */
	int			cpu_preemption_level;	/* preemption-disable nesting count */
	int			cpu_number;		/* Logical CPU */
	void			*cpu_int_state;		/* interrupt state */
	vm_offset_t		cpu_active_stack;	/* kernel stack base */
	vm_offset_t		cpu_kernel_stack;	/* kernel stack top */
	vm_offset_t		cpu_int_stack_top;	/* top of interrupt stack */
	int			cpu_interrupt_level;	/* interrupt nesting depth */
	int			cpu_phys_number;	/* Physical CPU */
	cpu_id_t		cpu_id;			/* Platform Expert */
	int			cpu_signals;		/* IPI events */
	int			cpu_prior_signals;	/* Last set of events,
							 * debugging
							 */
	ast_t			cpu_pending_ast;	/* pending ASTs for this cpu */
	volatile int		cpu_running;		/* nonzero while cpu is up;
							 * volatile: examined cross-cpu */
	boolean_t		cpu_fixed_pmcs_enabled;
	rtclock_timer_t		rtclock_timer;		/* per-cpu timer queue state */
	boolean_t		cpu_is64bit;		/* read by get_is64bit() */
	volatile addr64_t	cpu_active_cr3 __attribute((aligned(64)));
	union {
		/* whole-word view and per-scope halves of the TLB shootdown flag */
		volatile uint32_t cpu_tlb_invalid;
		struct {
			volatile uint16_t cpu_tlb_invalid_local;
			volatile uint16_t cpu_tlb_invalid_global;
		};
	};
	volatile task_map_t	cpu_task_map;
	volatile addr64_t	cpu_task_cr3;
	addr64_t		cpu_kernel_cr3;
	cpu_uber_t		cpu_uber;		/* 64-bit syscall entry state */
	void			*cpu_chud;
	void			*cpu_console_buf;
	struct x86_lcpu		lcpu;
	struct processor	*cpu_processor;
#if NCOPY_WINDOWS > 0
	struct cpu_pmap		*cpu_pmap;
#endif
	struct cpu_desc_table	*cpu_desc_tablep;
	struct fake_descriptor	*cpu_ldtp;
	cpu_desc_index_t	cpu_desc_index;
	int			cpu_ldt;
#if NCOPY_WINDOWS > 0
	vm_offset_t		cpu_copywindow_base;
	uint64_t		*cpu_copywindow_pdp;

	vm_offset_t		cpu_physwindow_base;
	uint64_t		*cpu_physwindow_ptep;
#endif

#define HWINTCNT_SIZE 256
	uint32_t		cpu_hwIntCnt[HWINTCNT_SIZE];	/* Interrupt counts */
 	uint64_t		cpu_hwIntpexits[HWINTCNT_SIZE];
	uint64_t		cpu_hwIntcexits[HWINTCNT_SIZE];
	uint64_t		cpu_dr7; /* debug control register */
	uint64_t		cpu_int_event_time;	/* intr entry/exit time */
	uint64_t		cpu_uber_arg_store;	/* Double mapped address
							 * of current thread's
							 * uu_arg array.
							 */
	uint64_t		cpu_uber_arg_store_valid; /* Double mapped
							   * address of pcb
							   * arg store
							   * validity flag.
							   */
	pal_rtc_nanotime_t	*cpu_nanotime;		/* Nanotime info */
#if	CONFIG_COUNTERS
	thread_t		csw_old_thread;
	thread_t		csw_new_thread;
#endif /* CONFIG COUNTERS */
#if	defined(__x86_64__)
	/* PCID (process-context identifier) management state */
	uint32_t		cpu_pmap_pcid_enabled;
	pcid_t			cpu_active_pcid;
	pcid_t			cpu_last_pcid;
	volatile pcid_ref_t	*cpu_pmap_pcid_coherentp;
	volatile pcid_ref_t	*cpu_pmap_pcid_coherentp_kernel;
#define	PMAP_PCID_MAX_PCID      (0x1000)
	pcid_t			cpu_pcid_free_hint;
	pcid_ref_t		cpu_pcid_refcounts[PMAP_PCID_MAX_PCID];
	pmap_t			cpu_pcid_last_pmap_dispatched[PMAP_PCID_MAX_PCID];
#ifdef	PCID_STATS
	uint64_t		cpu_pmap_pcid_flushes;
	uint64_t		cpu_pmap_pcid_preserves;
#endif
#endif /* x86_64 */
	/* Power/performance counters — NOTE(review): semantics assumed from
	 * names (APERF/MPERF and C-state residency MSR snapshots); confirm. */
	uint64_t		cpu_aperf;
	uint64_t		cpu_mperf;
	uint64_t		cpu_c3res;
	uint64_t		cpu_c6res;
	uint64_t		cpu_c7res;
	uint64_t		cpu_itime_total;
	uint64_t		cpu_rtime_total;
	uint64_t		cpu_ixtime;
	uint64_t                cpu_idle_exits;
 	uint64_t		cpu_rtimes[CPU_RTIME_BINS];
 	uint64_t		cpu_itimes[CPU_ITIME_BINS];
 	uint64_t		cpu_cur_insns;
 	uint64_t		cpu_cur_ucc;
 	uint64_t		cpu_cur_urc;
	uint64_t                cpu_max_observed_int_latency;
	int                     cpu_max_observed_int_latency_vector;
	uint64_t		debugger_entry_time;
	uint64_t		debugger_ipi_time;
	volatile boolean_t	cpu_NMI_acknowledged;
	/* A separate nested interrupt stack flag, to account
	 * for non-nested interrupts arriving while on the interrupt stack
	 * Currently only occurs when AICPM enables interrupts on the
	 * interrupt stack during processor offlining.
	 */
	uint32_t		cpu_nested_istack;
	uint32_t		cpu_nested_istack_events;
	x86_saved_state64_t	*cpu_fatal_trap_state;
	x86_saved_state64_t	*cpu_post_fatal_trap_state;
#if CONFIG_VMX
	vmx_cpu_t		cpu_vmx;		/* wonderful world of virtualization */
#endif
#if CONFIG_MCA
	struct mca_state	*cpu_mca_state;		/* State at MC fault */
#endif
 	int			cpu_type;
 	int			cpu_subtype;
 	int			cpu_threadtype;
 	boolean_t		cpu_iflag;
 	boolean_t		cpu_boot_complete;
 	int			cpu_hibernate;
} cpu_data_t;
276
/* Per-cpu data pointers, indexed by logical cpu number (see cpu_datap()). */
extern cpu_data_t	*cpu_data_ptr[];
/* Statically-allocated cpu data, presumably for the boot cpu — confirm. */
extern cpu_data_t	cpu_data_master;
279
/* Macro to generate inline bodies to retrieve per-cpu data fields. */
#ifndef offsetof
/*
 * Fallback when <stddef.h> has not supplied offsetof.  Use the compiler
 * builtin rather than the traditional null-pointer cast
 * ((size_t)&((TYPE *)0)->MEMBER), which is undefined behavior; both GCC
 * and clang provide __builtin_offsetof and evaluate it as an integer
 * constant expression, as required by the "i" asm constraints below.
 */
#define offsetof(TYPE,MEMBER) __builtin_offsetof(TYPE, MEMBER)
#endif /* offsetof */
/*
 * Accessor macro bodies for per-cpu data fields.  The %gs segment base
 * is set to this cpu's cpu_data_t (see the comment on struct cpu_data),
 * so a %gs-relative move at the member's compile-time offset reaches the
 * current cpu's copy in a single instruction.
 */
#define CPU_DATA_GET(member,type)					\
	type ret;							\
	__asm__ volatile ("mov %%gs:%P1,%0"				\
		: "=r" (ret)						\
		: "i" (offsetof(cpu_data_t,member)));			\
	return ret;

/* As above, but for an array member: offset is computed at runtime
 * and supplied in a register ("r" constraint) rather than immediate. */
#define CPU_DATA_GET_INDEX(member,index,type)	\
	type ret;							\
	__asm__ volatile ("mov %%gs:(%1),%0"				\
		: "=r" (ret)						\
		: "r" (offsetof(cpu_data_t,member[index])));			\
	return ret;

/* Store 'value' into the current cpu's copy of 'member'. */
#define CPU_DATA_SET(member,value)					\
	__asm__ volatile ("mov %0,%%gs:%P1"				\
		:							\
		: "r" (value), "i" (offsetof(cpu_data_t,member)));
/* Exchange 'value' with the current cpu's 'member' via xchg and
 * return the previous contents. */
#define CPU_DATA_XCHG(member,value,type)				\
	type ret;							\
	__asm__ volatile ("xchg %0,%%gs:%P1"				\
		: "=r" (ret)						\
		: "i" (offsetof(cpu_data_t,member)), "0" (value));	\
	return ret;
308
309/*
310 * Everyone within the osfmk part of the kernel can use the fast
 * inline versions of these routines.  Everyone outside must call
 * the real thing.
313 */
/* Return the thread running on this cpu (single %gs-relative load). */
static inline thread_t
get_active_thread(void)
{
	CPU_DATA_GET(cpu_active_thread,thread_t)
}
/* Fast in-osfmk spellings of current_thread(). */
#define current_thread_fast()		get_active_thread()
#define current_thread()		current_thread_fast()
321
/* Return this cpu's cpu_is64bit flag. */
static inline boolean_t
get_is64bit(void)
{
	CPU_DATA_GET(cpu_is64bit, boolean_t)
}
/* On CONFIG_YONAH builds (which support 32-bit-only CPUs — NOTE(review):
 * assumed) 64-bit mode must be tested at runtime; otherwise it is a
 * compile-time constant TRUE. */
#if CONFIG_YONAH
#define cpu_mode_is64bit()		get_is64bit()
#else
#define cpu_mode_is64bit()		TRUE
#endif
332
/* Return this cpu's preemption-disable nesting count. */
static inline int
get_preemption_level(void)
{
	CPU_DATA_GET(cpu_preemption_level,int)
}
/* Return this cpu's interrupt nesting level. */
static inline int
get_interrupt_level(void)
{
	CPU_DATA_GET(cpu_interrupt_level,int)
}
/* Return this cpu's logical cpu number. */
static inline int
get_cpu_number(void)
{
	CPU_DATA_GET(cpu_number,int)
}
/* Return this cpu's physical cpu number. */
static inline int
get_cpu_phys_number(void)
{
	CPU_DATA_GET(cpu_phys_number,int)
}
353
354
/*
 * Disable preemption by bumping this cpu's preemption level with a
 * single %gs-relative incl; being per-cpu, no lock prefix is needed.
 */
static inline void
disable_preemption(void)
{
	__asm__ volatile ("incl %%gs:%P0"
			:
			: "i" (offsetof(cpu_data_t, cpu_preemption_level)));
}
362
/*
 * Drop one level of preemption disablement.  If the count reaches zero
 * (decl sets ZF), fall through to call _kernel_preempt_check to take any
 * preemption that became pending meanwhile; otherwise jne skips it.  The
 * call may clobber the caller-saved registers, hence eax/ecx/edx in the
 * clobber list.
 */
static inline void
enable_preemption(void)
{
	assert(get_preemption_level() > 0);

	__asm__ volatile ("decl %%gs:%P0		\n\t"
			  "jne 1f			\n\t"
			  "call _kernel_preempt_check	\n\t"
			  "1:"
			: /* no outputs */
			: "i" (offsetof(cpu_data_t, cpu_preemption_level))
			: "eax", "ecx", "edx", "cc", "memory");
}
376
/*
 * As enable_preemption(), but without checking for pending preemption
 * when the count reaches zero: just a %gs-relative decl.
 */
static inline void
enable_preemption_no_check(void)
{
	assert(get_preemption_level() > 0);

	__asm__ volatile ("decl %%gs:%P0"
			: /* no outputs */
			: "i" (offsetof(cpu_data_t, cpu_preemption_level))
			: "cc", "memory");
}
387
/*
 * mp_* variants: on i386 these are plain wrappers around the preemption
 * inlines above — NOTE(review): apparently retained as compatibility
 * names for callers elsewhere.
 */
static inline void
mp_disable_preemption(void)
{
	disable_preemption();
}

static inline void
mp_enable_preemption(void)
{
	enable_preemption();
}

static inline void
mp_enable_preemption_no_check(void)
{
	enable_preemption_no_check();
}
405
/* Return this cpu's own cpu_data_t via the cpu_this self-pointer. */
static inline cpu_data_t *
current_cpu_datap(void)
{
	CPU_DATA_GET(cpu_this, cpu_data_t *);
}
411
412static inline cpu_data_t *
413cpu_datap(int cpu)
414{
415	return cpu_data_ptr[cpu];
416}
417
/* Allocation entry points, implemented outside this header.
 * NOTE(review): semantics of is_boot_cpu assumed from its name — confirm. */
extern cpu_data_t *cpu_data_alloc(boolean_t is_boot_cpu);
extern void cpu_data_realloc(void);
420
421#endif	/* I386_CPU_DATA */
422