/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <platforms.h>
#include <mach_ldebug.h>

/*
 * Pass field offsets to assembly code.
 */
#include <kern/ast.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/lock.h>
#include <kern/locks.h>
#include <kern/host.h>
#include <kern/misc_protos.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_pset.h>
#include <vm/vm_map.h>
#include <i386/pmap.h>
#include <i386/Diagnostics.h>
#include <i386/mp_desc.h>
#include <i386/seg.h>
#include <i386/thread.h>
#include <i386/cpu_data.h>
#include <i386/tss.h>
#include <i386/cpu_capabilities.h>
#include <i386/cpuid.h>
#include <i386/pmCPU.h>
#include <mach/i386/vm_param.h>
#include <mach/i386/thread_status.h>
#include <machine/commpage.h>
#include <pexpert/i386/boot.h>

#if	CONFIG_DTRACE
#define NEED_DTRACE_DEFS
#include <../bsd/sys/lockstat.h>
#endif
/*
 * genassym.c is used to produce an assembly file which, intermingled
 * with otherwise useless assembly code, has all the necessary
 * definitions emitted.  This assembly file is then postprocessed with
 * sed to extract only these definitions, producing the final assym.s.
 *
 * This convoluted process is necessary because structure alignment and
 * packing may differ between the host machine and the target, so we are
 * forced to use the cross compiler to generate the values even though
 * we cannot run anything on the target machine.
 */

#undef	offsetof
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE)0)->MEMBER)
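/*
 * Note that, unlike the standard offsetof(), TYPE here is expected to
 * already be a pointer type (e.g. thread_t, lck_mtx_t *, cpu_data_t *),
 * so 0 is cast directly to TYPE rather than to TYPE *.
 */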

#if  0
#define DECLARE(SYM,VAL) \
	__asm("#DEFINITION#\t.set\t" SYM ",\t%0" : : "n" ((u_int)(VAL)))
#else
#define DECLARE(SYM,VAL) \
	__asm("#DEFINITION##define " SYM "\t%0" : : "n" ((u_int)(VAL)))
#endif
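
/*
 * For illustration (a sketch; the exact operand text depends on the
 * compiler and assembler syntax): a call such as
 *
 *	DECLARE("AST_URGENT", AST_URGENT);
 *
 * compiles into an inline-asm statement whose output contains a line
 * resembling
 *
 *	#DEFINITION##define AST_URGENT	$<value>
 *
 * The sed postprocessing described above keys on the #DEFINITION#
 * marker to pull just these lines out of the otherwise useless
 * assembly output and turn them into plain #defines in assym.s.
 */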

int	main(
		int		argc,
		char		** argv);

int
main(
	int	argc,
	char	**argv)
{

	DECLARE("AST_URGENT",		AST_URGENT);
	DECLARE("AST_BSD",		AST_BSD);

	DECLARE("MAX_CPUS",		MAX_CPUS);

	/* Simple Lock structure */
	DECLARE("SLOCK_ILK",	offsetof(usimple_lock_t, interlock));
#if	MACH_LDEBUG
	DECLARE("SLOCK_TYPE",	offsetof(usimple_lock_t, lock_type));
	DECLARE("SLOCK_PC",	offsetof(usimple_lock_t, debug.lock_pc));
	DECLARE("SLOCK_THREAD",	offsetof(usimple_lock_t, debug.lock_thread));
	DECLARE("SLOCK_DURATIONH",offsetof(usimple_lock_t, debug.duration[0]));
	DECLARE("SLOCK_DURATIONL",offsetof(usimple_lock_t, debug.duration[1]));
	DECLARE("USLOCK_TAG",	USLOCK_TAG);
#endif	/* MACH_LDEBUG */

	/* Mutex structure */
	DECLARE("MUTEX_OWNER", offsetof(lck_mtx_t *, lck_mtx_owner));
	DECLARE("MUTEX_PTR",   offsetof(lck_mtx_t *, lck_mtx_ptr));
	DECLARE("MUTEX_STATE", offsetof(lck_mtx_t *, lck_mtx_state));
#ifdef __i386__
	DECLARE("MUTEX_TYPE",	offsetof(lck_mtx_ext_t *, lck_mtx_deb.type));
	DECLARE("MUTEX_PC",	offsetof(lck_mtx_ext_t *, lck_mtx_deb.pc));
	DECLARE("MUTEX_ATTR",	offsetof(lck_mtx_ext_t *, lck_mtx_attr));
	DECLARE("MUTEX_ATTR_DEBUG", LCK_MTX_ATTR_DEBUG);
	DECLARE("MUTEX_ATTR_DEBUGb", LCK_MTX_ATTR_DEBUGb);
	DECLARE("MUTEX_ATTR_STAT", LCK_MTX_ATTR_STAT);
	DECLARE("MUTEX_ATTR_STATb", LCK_MTX_ATTR_STATb);
	DECLARE("MUTEX_TAG",	MUTEX_TAG);
#endif
	DECLARE("MUTEX_IND",	LCK_MTX_TAG_INDIRECT);
	DECLARE("MUTEX_PTR",	offsetof(lck_mtx_t *, lck_mtx_ptr));
	DECLARE("MUTEX_ASSERT_OWNED",	LCK_MTX_ASSERT_OWNED);
	DECLARE("MUTEX_ASSERT_NOTOWNED",LCK_MTX_ASSERT_NOTOWNED);
	DECLARE("GRP_MTX_STAT_UTIL",	offsetof(lck_grp_t *, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_util_cnt));
	DECLARE("GRP_MTX_STAT_MISS",	offsetof(lck_grp_t *, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_miss_cnt));
	DECLARE("GRP_MTX_STAT_WAIT",	offsetof(lck_grp_t *, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cnt));

	/* x86 only */
	DECLARE("MUTEX_DESTROYED", LCK_MTX_TAG_DESTROYED);

	/* Per-mutex statistic element */
	DECLARE("MTX_ACQ_TSC",	offsetof(lck_mtx_ext_t *, lck_mtx_stat));

	/* Mutex group statistics elements */
	DECLARE("MUTEX_GRP",	offsetof(lck_mtx_ext_t *, lck_mtx_grp));

	/*
	 * Note: the assembly alias GRP_MTX_STAT_DIRECT_WAIT maps onto
	 * lck_grp_mtx_held_cnt, so the use of this field is somewhat at
	 * variance with its name.
	 */
	DECLARE("GRP_MTX_STAT_DIRECT_WAIT",	offsetof(lck_grp_t *, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cnt));

	DECLARE("GRP_MTX_STAT_HELD_MAX",	offsetof(lck_grp_t *, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_max));
	/* Reader writer lock types */
	DECLARE("RW_SHARED",    LCK_RW_TYPE_SHARED);
	DECLARE("RW_EXCL",      LCK_RW_TYPE_EXCLUSIVE);

	DECLARE("TH_RECOVER",		offsetof(thread_t, recover));
	DECLARE("TH_CONTINUATION",	offsetof(thread_t, continuation));
	DECLARE("TH_KERNEL_STACK",	offsetof(thread_t, kernel_stack));
	DECLARE("TH_MUTEX_COUNT",	offsetof(thread_t, mutex_count));
	DECLARE("TH_WAS_PROMOTED_ON_WAKEUP", offsetof(thread_t, was_promoted_on_wakeup));

	DECLARE("TH_SYSCALLS_MACH",	offsetof(thread_t, syscalls_mach));
	DECLARE("TH_SYSCALLS_UNIX",	offsetof(thread_t, syscalls_unix));

	DECLARE("TASK_VTIMERS",		offsetof(struct task *, vtimers));

	/* These fields are being added on demand */
	DECLARE("TH_TASK",	offsetof(thread_t, task));
	DECLARE("TH_AST",	offsetof(thread_t, ast));
	DECLARE("TH_MAP",	offsetof(thread_t, map));
	DECLARE("TH_SPF",	offsetof(thread_t, machine.specFlags));
	DECLARE("TH_PCB_ISS",	offsetof(thread_t, machine.iss));
	DECLARE("TH_PCB_IDS",	offsetof(thread_t, machine.ids));
	DECLARE("TH_PCB_FPS",	offsetof(thread_t, machine.ifps));
#if NCOPY_WINDOWS > 0
	DECLARE("TH_COPYIO_STATE", offsetof(thread_t, machine.copyio_state));
	DECLARE("WINDOWS_CLEAN", WINDOWS_CLEAN);
#endif

	DECLARE("MAP_PMAP",	offsetof(vm_map_t, pmap));

#define IEL_SIZE		(sizeof(struct i386_exception_link *))
	DECLARE("IKS_SIZE",	sizeof(struct x86_kernel_state));

	/*
	 * KSS_* are offsets from the top of the kernel stack (cpu_kernel_stack)
	 */
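	/*
	 * (Illustration only: assembly code that holds a pointer to the
	 * x86_kernel_state save area in a register, say %rdx, could then
	 * refer to the saved fields as KSS_RBP(%rdx), KSS_RIP(%rdx), etc.)
	 */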
#if defined(__i386__)
	DECLARE("KSS_EBX",	offsetof(struct x86_kernel_state *, k_ebx));
	DECLARE("KSS_ESP",	offsetof(struct x86_kernel_state *, k_esp));
	DECLARE("KSS_EBP",	offsetof(struct x86_kernel_state *, k_ebp));
	DECLARE("KSS_EDI",	offsetof(struct x86_kernel_state *, k_edi));
	DECLARE("KSS_ESI",	offsetof(struct x86_kernel_state *, k_esi));
	DECLARE("KSS_EIP",	offsetof(struct x86_kernel_state *, k_eip));
#elif defined(__x86_64__)
	DECLARE("KSS_RBX",	offsetof(struct x86_kernel_state *, k_rbx));
	DECLARE("KSS_RSP",	offsetof(struct x86_kernel_state *, k_rsp));
	DECLARE("KSS_RBP",	offsetof(struct x86_kernel_state *, k_rbp));
	DECLARE("KSS_R12",	offsetof(struct x86_kernel_state *, k_r12));
	DECLARE("KSS_R13",	offsetof(struct x86_kernel_state *, k_r13));
	DECLARE("KSS_R14",	offsetof(struct x86_kernel_state *, k_r14));
	DECLARE("KSS_R15",	offsetof(struct x86_kernel_state *, k_r15));
	DECLARE("KSS_RIP",	offsetof(struct x86_kernel_state *, k_rip));
#else
#error Unsupported architecture
#endif

	DECLARE("DS_DR0",	offsetof(struct x86_debug_state32 *, dr0));
	DECLARE("DS_DR1",	offsetof(struct x86_debug_state32 *, dr1));
	DECLARE("DS_DR2",	offsetof(struct x86_debug_state32 *, dr2));
	DECLARE("DS_DR3",	offsetof(struct x86_debug_state32 *, dr3));
	DECLARE("DS_DR4",	offsetof(struct x86_debug_state32 *, dr4));
	DECLARE("DS_DR5",	offsetof(struct x86_debug_state32 *, dr5));
	DECLARE("DS_DR6",	offsetof(struct x86_debug_state32 *, dr6));
	DECLARE("DS_DR7",	offsetof(struct x86_debug_state32 *, dr7));

	DECLARE("DS64_DR0",	offsetof(struct x86_debug_state64 *, dr0));
	DECLARE("DS64_DR1",	offsetof(struct x86_debug_state64 *, dr1));
	DECLARE("DS64_DR2",	offsetof(struct x86_debug_state64 *, dr2));
	DECLARE("DS64_DR3",	offsetof(struct x86_debug_state64 *, dr3));
	DECLARE("DS64_DR4",	offsetof(struct x86_debug_state64 *, dr4));
	DECLARE("DS64_DR5",	offsetof(struct x86_debug_state64 *, dr5));
	DECLARE("DS64_DR6",	offsetof(struct x86_debug_state64 *, dr6));
	DECLARE("DS64_DR7",	offsetof(struct x86_debug_state64 *, dr7));

	DECLARE("FP_VALID",	offsetof(struct x86_fx_thread_state *,fp_valid));

	DECLARE("SS_FLAVOR",	offsetof(x86_saved_state_t *, flavor));
	DECLARE("SS_32",	x86_SAVED_STATE32);
	DECLARE("SS_64",	x86_SAVED_STATE64);

#define R_(x)  offsetof(x86_saved_state_t *, ss_32.x)
	DECLARE("R32_CS",	R_(cs));
	DECLARE("R32_SS",	R_(ss));
	DECLARE("R32_DS",	R_(ds));
	DECLARE("R32_ES",	R_(es));
	DECLARE("R32_FS",	R_(fs));
	DECLARE("R32_GS",	R_(gs));
	DECLARE("R32_UESP",	R_(uesp));
	DECLARE("R32_EBP",	R_(ebp));
	DECLARE("R32_EAX",	R_(eax));
	DECLARE("R32_EBX",	R_(ebx));
	DECLARE("R32_ECX",	R_(ecx));
	DECLARE("R32_EDX",	R_(edx));
	DECLARE("R32_ESI",	R_(esi));
	DECLARE("R32_EDI",	R_(edi));
	DECLARE("R32_TRAPNO",	R_(trapno));
	DECLARE("R32_ERR",	R_(err));
	DECLARE("R32_EFLAGS",	R_(efl));
	DECLARE("R32_EIP",	R_(eip));
	DECLARE("R32_CR2",	R_(cr2));
	DECLARE("ISS32_SIZE",	sizeof (x86_saved_state32_t));

#define R64_(x)  offsetof(x86_saved_state_t *, ss_64.x)
	DECLARE("R64_FS",	R64_(fs));
	DECLARE("R64_GS",	R64_(gs));
	DECLARE("R64_R8",	R64_(r8));
	DECLARE("R64_R9",	R64_(r9));
	DECLARE("R64_R10",	R64_(r10));
	DECLARE("R64_R11",	R64_(r11));
	DECLARE("R64_R12",	R64_(r12));
	DECLARE("R64_R13",	R64_(r13));
	DECLARE("R64_R14",	R64_(r14));
	DECLARE("R64_R15",	R64_(r15));
	DECLARE("R64_RBP",	R64_(rbp));
	DECLARE("R64_RAX",	R64_(rax));
	DECLARE("R64_RBX",	R64_(rbx));
	DECLARE("R64_RCX",	R64_(rcx));
	DECLARE("R64_RDX",	R64_(rdx));
	DECLARE("R64_RSI",	R64_(rsi));
	DECLARE("R64_RDI",	R64_(rdi));
	DECLARE("R64_V_ARG6",	R64_(v_arg6));
	DECLARE("R64_V_ARG7",	R64_(v_arg7));
	DECLARE("R64_V_ARG8",	R64_(v_arg8));
	DECLARE("R64_CS",	R64_(isf.cs));
	DECLARE("R64_SS",	R64_(isf.ss));
	DECLARE("R64_RSP",	R64_(isf.rsp));
	DECLARE("R64_TRAPNO",	R64_(isf.trapno));
	DECLARE("R64_TRAPFN",	R64_(isf.trapfn));
	DECLARE("R64_ERR",	R64_(isf.err));
	DECLARE("R64_RFLAGS",	R64_(isf.rflags));
	DECLARE("R64_RIP",	R64_(isf.rip));
	DECLARE("R64_CR2",	R64_(cr2));
	DECLARE("ISS64_OFFSET",	R64_(isf));
	DECLARE("ISS64_SIZE",	sizeof (x86_saved_state64_t));

#define ISF64_(x)  offsetof(x86_64_intr_stack_frame_t *, x)
	DECLARE("ISF64_TRAPNO",	ISF64_(trapno));
	DECLARE("ISF64_TRAPFN",	ISF64_(trapfn));
	DECLARE("ISF64_ERR",	ISF64_(err));
	DECLARE("ISF64_RIP",	ISF64_(rip));
	DECLARE("ISF64_CS",	ISF64_(cs));
	DECLARE("ISF64_RFLAGS",	ISF64_(rflags));
	DECLARE("ISF64_RSP",	ISF64_(rsp));
	DECLARE("ISF64_SS",	ISF64_(ss));
	DECLARE("ISF64_SIZE",	sizeof(x86_64_intr_stack_frame_t));

	DECLARE("ISC32_OFFSET",	offsetof(x86_saved_state_compat32_t *, isf64));
#define ISC32_(x)  offsetof(x86_saved_state_compat32_t *, isf64.x)
	DECLARE("ISC32_TRAPNO", ISC32_(trapno));
	DECLARE("ISC32_TRAPFN",	ISC32_(trapfn));
	DECLARE("ISC32_ERR",	ISC32_(err));
	DECLARE("ISC32_RIP",	ISC32_(rip));
	DECLARE("ISC32_CS",	ISC32_(cs));
	DECLARE("ISC32_RFLAGS",	ISC32_(rflags));
	DECLARE("ISC32_RSP",	ISC32_(rsp));
	DECLARE("ISC32_SS",	ISC32_(ss));

	DECLARE("NBPG",			I386_PGBYTES);
	DECLARE("PAGE_SIZE",            I386_PGBYTES);
	DECLARE("PAGE_MASK",            I386_PGBYTES-1);
	DECLARE("PAGE_SHIFT",           12);
	DECLARE("NKPT",                 NKPT);
#ifdef __i386__
	DECLARE("KPTDI",                KPTDI);
#endif
	DECLARE("VM_MIN_ADDRESS",	VM_MIN_ADDRESS);
	DECLARE("VM_MAX_ADDRESS",	VM_MAX_ADDRESS);
	DECLARE("KERNELBASE",		VM_MIN_KERNEL_ADDRESS);
	DECLARE("LINEAR_KERNELBASE",	LINEAR_KERNEL_ADDRESS);
	DECLARE("KERNEL_STACK_SIZE",	KERNEL_STACK_SIZE);
#ifdef __i386__
	DECLARE("KERNEL_UBER_BASE_HI32", KERNEL_UBER_BASE_HI32);
#endif

	DECLARE("ASM_COMM_PAGE32_BASE_ADDRESS",  _COMM_PAGE32_BASE_ADDRESS);
	DECLARE("ASM_COMM_PAGE32_START_ADDRESS",  _COMM_PAGE32_START_ADDRESS);
	DECLARE("ASM_COMM_PAGE_SCHED_GEN",  _COMM_PAGE_SCHED_GEN);

	DECLARE("PDESHIFT",	PDESHIFT);
	DECLARE("PTEMASK",	PTEMASK);
	DECLARE("PTEINDX",      PTEINDX);
	DECLARE("INTEL_PTE_PFN",	INTEL_PTE_PFN);
	DECLARE("INTEL_PTE_VALID",	INTEL_PTE_VALID);
	DECLARE("INTEL_PTE_WRITE",	INTEL_PTE_WRITE);
	DECLARE("INTEL_PTE_PS",       INTEL_PTE_PS);
	DECLARE("INTEL_PTE_USER",        INTEL_PTE_USER);
	DECLARE("INTEL_PTE_INVALID",	INTEL_PTE_INVALID);
	DECLARE("NPGPTD", NPGPTD);
#if defined(__x86_64__)
	DECLARE("KERNEL_PML4_INDEX",KERNEL_PML4_INDEX);
#endif
	DECLARE("IDTSZ",	IDTSZ);
	DECLARE("GDTSZ",	GDTSZ);
	DECLARE("LDTSZ",	LDTSZ);

	DECLARE("KERNEL_DS",	KERNEL_DS);
	DECLARE("USER_CS",	USER_CS);
	DECLARE("USER_DS",	USER_DS);
	DECLARE("KERNEL32_CS",	KERNEL32_CS);
	DECLARE("KERNEL64_CS",  KERNEL64_CS);
	DECLARE("USER64_CS",	USER64_CS);
	DECLARE("KERNEL_TSS",	KERNEL_TSS);
	DECLARE("KERNEL_LDT",	KERNEL_LDT);
#ifdef __i386__
	DECLARE("DF_TSS",	DF_TSS);
	DECLARE("MC_TSS",	MC_TSS);
	DECLARE("CPU_DATA_GS",	CPU_DATA_GS);
#endif /* __i386__ */
	DECLARE("SYSENTER_CS",	SYSENTER_CS);
	DECLARE("SYSENTER_TF_CS",SYSENTER_TF_CS);
	DECLARE("SYSENTER_DS",	SYSENTER_DS);
	DECLARE("SYSCALL_CS",	SYSCALL_CS);
#ifdef __i386__
	DECLARE("USER_WINDOW_SEL",	USER_WINDOW_SEL);
	DECLARE("PHYS_WINDOW_SEL",	PHYS_WINDOW_SEL);
#endif

	DECLARE("CPU_THIS",
		offsetof(cpu_data_t *, cpu_this));
	DECLARE("CPU_ACTIVE_THREAD",
		offsetof(cpu_data_t *, cpu_active_thread));
	DECLARE("CPU_ACTIVE_STACK",
		offsetof(cpu_data_t *, cpu_active_stack));
	DECLARE("CPU_KERNEL_STACK",
		offsetof(cpu_data_t *, cpu_kernel_stack));
	DECLARE("CPU_INT_STACK_TOP",
		offsetof(cpu_data_t *, cpu_int_stack_top));
#if	MACH_RT
	DECLARE("CPU_PREEMPTION_LEVEL",
		offsetof(cpu_data_t *, cpu_preemption_level));
#endif	/* MACH_RT */
	DECLARE("CPU_HIBERNATE",
		offsetof(cpu_data_t *, cpu_hibernate));
	DECLARE("CPU_INTERRUPT_LEVEL",
		offsetof(cpu_data_t *, cpu_interrupt_level));
	DECLARE("CPU_NESTED_ISTACK",
		offsetof(cpu_data_t *, cpu_nested_istack));
	DECLARE("CPU_NUMBER_GS",
		offsetof(cpu_data_t *,cpu_number));
	DECLARE("CPU_RUNNING",
		offsetof(cpu_data_t *,cpu_running));
	DECLARE("CPU_PENDING_AST",
		offsetof(cpu_data_t *,cpu_pending_ast));
	DECLARE("CPU_DESC_TABLEP",
		offsetof(cpu_data_t *,cpu_desc_tablep));
	DECLARE("CPU_DESC_INDEX",
		offsetof(cpu_data_t *,cpu_desc_index));
	DECLARE("CDI_GDT",
		offsetof(cpu_desc_index_t *,cdi_gdt));
	DECLARE("CDI_IDT",
		offsetof(cpu_desc_index_t *,cdi_idt));
	DECLARE("CPU_PROCESSOR",
		offsetof(cpu_data_t *,cpu_processor));
	DECLARE("CPU_INT_STATE",
		offsetof(cpu_data_t *, cpu_int_state));
	DECLARE("CPU_INT_EVENT_TIME",
		offsetof(cpu_data_t *, cpu_int_event_time));

#ifdef __i386__
	DECLARE("CPU_HI_ISS",
		offsetof(cpu_data_t *, cpu_hi_iss));
#endif
	DECLARE("CPU_TASK_CR3",
		offsetof(cpu_data_t *, cpu_task_cr3));
	DECLARE("CPU_ACTIVE_CR3",
		offsetof(cpu_data_t *, cpu_active_cr3));
	DECLARE("CPU_KERNEL_CR3",
		offsetof(cpu_data_t *, cpu_kernel_cr3));
#ifdef __x86_64__
	DECLARE("CPU_TLB_INVALID",
		offsetof(cpu_data_t *, cpu_tlb_invalid));
#endif

	DECLARE("CPU_IS64BIT",
		offsetof(cpu_data_t *, cpu_is64bit));
	DECLARE("CPU_TASK_MAP",
		offsetof(cpu_data_t *, cpu_task_map));
	DECLARE("TASK_MAP_32BIT",		TASK_MAP_32BIT);
	DECLARE("TASK_MAP_64BIT",		TASK_MAP_64BIT);
#ifdef __i386__
	DECLARE("TASK_MAP_64BIT_SHARED",	TASK_MAP_64BIT_SHARED);
#endif
	DECLARE("CPU_UBER_USER_GS_BASE",
		offsetof(cpu_data_t *, cpu_uber.cu_user_gs_base));
	DECLARE("CPU_UBER_ISF",
		offsetof(cpu_data_t *, cpu_uber.cu_isf));
	DECLARE("CPU_UBER_TMP",
		offsetof(cpu_data_t *, cpu_uber.cu_tmp));
	DECLARE("CPU_UBER_ARG_STORE",
		offsetof(cpu_data_t *, cpu_uber_arg_store));
	DECLARE("CPU_UBER_ARG_STORE_VALID",
		offsetof(cpu_data_t *, cpu_uber_arg_store_valid));

	DECLARE("CPU_NANOTIME",
		offsetof(cpu_data_t *, cpu_nanotime));

	DECLARE("CPU_DR7",
		offsetof(cpu_data_t *, cpu_dr7));

	DECLARE("hwIntCnt",	offsetof(cpu_data_t *,cpu_hwIntCnt));
#if	defined(__x86_64__)
	DECLARE("CPU_ACTIVE_PCID",
		offsetof(cpu_data_t *, cpu_active_pcid));
	DECLARE("CPU_PCID_COHERENTP",
		offsetof(cpu_data_t *, cpu_pmap_pcid_coherentp));
	DECLARE("CPU_PCID_COHERENTP_KERNEL",
		offsetof(cpu_data_t *, cpu_pmap_pcid_coherentp_kernel));
	DECLARE("CPU_PMAP_PCID_ENABLED",
		offsetof(cpu_data_t *, cpu_pmap_pcid_enabled));

#ifdef	PCID_STATS
	DECLARE("CPU_PMAP_USER_RETS",
		offsetof(cpu_data_t *, cpu_pmap_user_rets));
	DECLARE("CPU_PMAP_PCID_PRESERVES",
		offsetof(cpu_data_t *, cpu_pmap_pcid_preserves));
	DECLARE("CPU_PMAP_PCID_FLUSHES",
		offsetof(cpu_data_t *, cpu_pmap_pcid_flushes));
#endif
	DECLARE("CPU_TLB_INVALID",
		offsetof(cpu_data_t *, cpu_tlb_invalid));
	DECLARE("CPU_TLB_INVALID_LOCAL",
		offsetof(cpu_data_t *, cpu_tlb_invalid_local));
	DECLARE("CPU_TLB_INVALID_GLOBAL",
		offsetof(cpu_data_t *, cpu_tlb_invalid_global));
#endif /* x86_64 */
	DECLARE("enaExpTrace",	enaExpTrace);
	DECLARE("enaUsrFCall",	enaUsrFCall);
	DECLARE("enaUsrPhyMp",	enaUsrPhyMp);
	DECLARE("enaDiagSCs",	enaDiagSCs);
	DECLARE("enaDiagEM",	enaDiagEM);
	DECLARE("enaNotifyEM",	enaNotifyEM);
	DECLARE("dgLock",	offsetof(struct diagWork *, dgLock));
	DECLARE("dgFlags",	offsetof(struct diagWork *, dgFlags));
	DECLARE("dgMisc1",	offsetof(struct diagWork *, dgMisc1));
	DECLARE("dgMisc2",	offsetof(struct diagWork *, dgMisc2));
	DECLARE("dgMisc3",	offsetof(struct diagWork *, dgMisc3));
	DECLARE("dgMisc4",	offsetof(struct diagWork *, dgMisc4));
	DECLARE("dgMisc5",	offsetof(struct diagWork *, dgMisc5));

	DECLARE("INTEL_PTE_KERNEL",	INTEL_PTE_VALID|INTEL_PTE_WRITE);
	DECLARE("PDESHIFT",     PDESHIFT);
	DECLARE("PDESIZE",     PDESIZE);
	DECLARE("PTESIZE",     PTESIZE);
#ifdef __i386__
	DECLARE("PTDPTDI",     PTDPTDI);
	DECLARE("APTDPTDI",     APTDPTDI);
	DECLARE("HIGH_MEM_BASE", HIGH_MEM_BASE);
	DECLARE("HIGH_IDT_BASE", pmap_index_to_virt(HIGH_FIXED_IDT));
#endif

	DECLARE("KERNELBASEPDE",
		(LINEAR_KERNEL_ADDRESS >> PDESHIFT) *
		sizeof(pt_entry_t));

	DECLARE("TSS_ESP0",	offsetof(struct i386_tss *, esp0));
	DECLARE("TSS_SS0",	offsetof(struct i386_tss *, ss0));
	DECLARE("TSS_LDT",	offsetof(struct i386_tss *, ldt));
	DECLARE("TSS_PDBR",	offsetof(struct i386_tss *, cr3));
	DECLARE("TSS_LINK",	offsetof(struct i386_tss *, back_link));

	DECLARE("K_TASK_GATE",	ACC_P|ACC_PL_K|ACC_TASK_GATE);
	DECLARE("K_TRAP_GATE",	ACC_P|ACC_PL_K|ACC_TRAP_GATE);
	DECLARE("U_TRAP_GATE",	ACC_P|ACC_PL_U|ACC_TRAP_GATE);
	DECLARE("K_INTR_GATE",	ACC_P|ACC_PL_K|ACC_INTR_GATE);
	DECLARE("U_INTR_GATE",  ACC_P|ACC_PL_U|ACC_INTR_GATE);
	DECLARE("K_TSS",	ACC_P|ACC_PL_K|ACC_TSS);

	/*
	 *	usimple_lock fields
	 */
	DECLARE("USL_INTERLOCK",	offsetof(usimple_lock_t, interlock));

	DECLARE("INTSTACK_SIZE",	INTSTACK_SIZE);
	DECLARE("KADDR", offsetof(struct boot_args *, kaddr));
	DECLARE("KSIZE", offsetof(struct boot_args *, ksize));
	DECLARE("MEMORYMAP", offsetof(struct boot_args *, MemoryMap));
	DECLARE("DEVICETREEP", offsetof(struct boot_args *, deviceTreeP));

	DECLARE("RNT_TSC_BASE",
		offsetof(pal_rtc_nanotime_t *, tsc_base));
	DECLARE("RNT_NS_BASE",
		offsetof(pal_rtc_nanotime_t *, ns_base));
	DECLARE("RNT_SCALE",
		offsetof(pal_rtc_nanotime_t *, scale));
	DECLARE("RNT_SHIFT",
		offsetof(pal_rtc_nanotime_t *, shift));
	DECLARE("RNT_GENERATION",
		offsetof(pal_rtc_nanotime_t *, generation));

	/* values from kern/timer.h */
#ifdef __LP64__
	DECLARE("TIMER_ALL", offsetof(struct timer *, all_bits));
#else
	DECLARE("TIMER_LOW",		offsetof(struct timer *, low_bits));
	DECLARE("TIMER_HIGH",		offsetof(struct timer *, high_bits));
	DECLARE("TIMER_HIGHCHK",	offsetof(struct timer *, high_bits_check));
#endif
	DECLARE("TIMER_TSTAMP",
		offsetof(struct timer *, tstamp));

	DECLARE("THREAD_TIMER",
		offsetof(struct processor *, processor_data.thread_timer));
	DECLARE("KERNEL_TIMER",
		offsetof(struct processor *, processor_data.kernel_timer));
	DECLARE("SYSTEM_TIMER",
		offsetof(struct thread *, system_timer));
	DECLARE("USER_TIMER",
		offsetof(struct thread *, user_timer));
	DECLARE("SYSTEM_STATE",
		offsetof(struct processor *, processor_data.system_state));
	DECLARE("USER_STATE",
		offsetof(struct processor *, processor_data.user_state));
	DECLARE("IDLE_STATE",
		offsetof(struct processor *, processor_data.idle_state));
	DECLARE("CURRENT_STATE",
		offsetof(struct processor *, processor_data.current_state));

	DECLARE("OnProc", OnProc);


#if	CONFIG_DTRACE
	DECLARE("LS_LCK_MTX_LOCK_ACQUIRE", LS_LCK_MTX_LOCK_ACQUIRE);
	DECLARE("LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE", LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE);
	DECLARE("LS_LCK_MTX_UNLOCK_RELEASE", LS_LCK_MTX_UNLOCK_RELEASE);
	DECLARE("LS_LCK_MTX_TRY_LOCK_ACQUIRE", LS_LCK_MTX_TRY_LOCK_ACQUIRE);
	DECLARE("LS_LCK_RW_LOCK_SHARED_ACQUIRE", LS_LCK_RW_LOCK_SHARED_ACQUIRE);
	DECLARE("LS_LCK_RW_DONE_RELEASE", LS_LCK_RW_DONE_RELEASE);
	DECLARE("LS_LCK_MTX_EXT_LOCK_ACQUIRE", LS_LCK_MTX_EXT_LOCK_ACQUIRE);
	DECLARE("LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE", LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE);
	DECLARE("LS_LCK_MTX_EXT_UNLOCK_RELEASE", LS_LCK_MTX_EXT_UNLOCK_RELEASE);
	DECLARE("LS_LCK_RW_LOCK_EXCL_ACQUIRE", LS_LCK_RW_LOCK_EXCL_ACQUIRE);
	DECLARE("LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE", LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE);
	DECLARE("LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE", LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE);
	DECLARE("LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE", LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE);
	DECLARE("LS_LCK_MTX_LOCK_SPIN_ACQUIRE", LS_LCK_MTX_LOCK_SPIN_ACQUIRE);
#endif

	return (0);
}