/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#ifndef	_KERN_KERN_TYPES_H_
#define	_KERN_KERN_TYPES_H_

#include <stdint.h>
#include <mach/mach_types.h>
#include <mach/machine/vm_types.h>

#ifdef	KERNEL_PRIVATE

#ifndef	MACH_KERNEL_PRIVATE

struct zone;

#ifndef __LP64__
struct wait_queue { unsigned int opaque[2]; uintptr_t opaquep[2]; };
#else
struct wait_queue { unsigned char opaque[32]; };
#endif

#endif	/* MACH_KERNEL_PRIVATE */

typedef struct zone			*zone_t;
#define		ZONE_NULL			((zone_t) 0)

typedef struct wait_queue		*wait_queue_t;
#define		WAIT_QUEUE_NULL		((wait_queue_t) 0)
#define		SIZEOF_WAITQUEUE	sizeof(struct wait_queue)

typedef vm_offset_t			ipc_kobject_t;
#define		IKO_NULL			((ipc_kobject_t) 0)

#endif	/* KERNEL_PRIVATE */

typedef	void *event_t;		/* wait event */
#define		NO_EVENT			((event_t) 0)

typedef uint64_t event64_t;		/* 64 bit wait event */
#define		NO_EVENT64		((event64_t) 0)
#define		CAST_EVENT64_T(a_ptr)	((event64_t)((uintptr_t)(a_ptr)))
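
/*
 * Illustrative sketch (not part of the original header): wait events are
 * typically the addresses of kernel objects, and CAST_EVENT64_T widens such
 * a pointer for interfaces that take an event64_t.  The object name below
 * is hypothetical:
 *
 *	struct my_object *obj = ...;
 *	event_t   ev   = (event_t)obj;		// pointer used directly as a wait event
 *	event64_t ev64 = CAST_EVENT64_T(obj);	// pointer -> uintptr_t -> event64_t
 */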

/*
 *	Possible wait_result_t values.
 */
typedef int wait_result_t;
#define THREAD_WAITING		-1		/* thread is waiting */
#define THREAD_AWAKENED		0		/* normal wakeup */
#define THREAD_TIMED_OUT	1		/* timeout expired */
#define THREAD_INTERRUPTED	2		/* aborted/interrupted */
#define THREAD_RESTART		3		/* restart operation entirely */
#define THREAD_NOT_WAITING	10		/* thread didn't need to wait */
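
/*
 * Illustrative sketch (hedged): a typical wait examines these codes after
 * blocking.  assert_wait() and thread_block() are the interfaces declared
 * in <kern/sched_prim.h>; `object' is a hypothetical wait channel:
 *
 *	wait_result_t wr;
 *
 *	wr = assert_wait((event_t)&object, THREAD_INTERRUPTIBLE);
 *	if (wr == THREAD_WAITING)
 *		wr = thread_block(THREAD_CONTINUE_NULL);
 *
 *	switch (wr) {
 *	case THREAD_AWAKENED:		// woken by an explicit wakeup
 *		break;
 *	case THREAD_INTERRUPTED:	// aborted; unwind and report
 *		break;
 *	case THREAD_RESTART:		// retry the whole operation
 *		break;
 *	}
 */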

typedef	void (*thread_continue_t)(void *, wait_result_t);
#define	THREAD_CONTINUE_NULL	((thread_continue_t) 0)
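
/*
 * Illustrative sketch of the continuation style (hedged): when a
 * continuation is supplied, the thread does not return from the block
 * call; instead the continuation runs on wakeup with the wait result.
 * thread_block_parameter() is declared in <kern/sched_prim.h>; the
 * continuation body and `object' are hypothetical:
 *
 *	static void
 *	my_continuation(void *param, wait_result_t wr)
 *	{
 *		// execution resumes here after the wakeup
 *	}
 *
 *	assert_wait((event_t)&object, THREAD_UNINT);
 *	thread_block_parameter(my_continuation, param);
 *	// NOTREACHED when a continuation is supplied
 */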

/*
 * Interruptible flag for waits.
 *
 * THREAD_UNINT: Uninterruptible wait
 *   Wait will only end when someone explicitly wakes up the thread, or if the
 *   wait timeout expires.
 *
 *   Use this state if the system as a whole cannot recover from a thread being
 *   interrupted out of the wait.
 *
 * THREAD_INTERRUPTIBLE:
 *    Wait will end if someone explicitly wakes up the thread, the wait timeout
 *    expires, or the current thread is being terminated.
 *
 *    This value can be used when your operation may not be cleanly restartable
 *    for the current process or thread (i.e. the loss of state would be visible
 *    only to the current client).  Since the thread is exiting anyway, you're
 *    willing to cut the operation short.  The system as a whole must be able
 *    to cleanly deal with the interruption (i.e. remain in a consistent and
 *    recoverable state).
 *
 * THREAD_ABORTSAFE:
 *    Wait will end if someone explicitly wakes up the thread, the wait timeout
 *    expires, the current thread is being terminated, a signal arrives for
 *    the task, or thread_abort_safely() is called on the thread.
 *
 *    Using this value means that you are willing to be interrupted in the face
 *    of any user signal, and safely rewind the thread back to the user/kernel
 *    boundary.  Many syscalls will try to restart the operation they were
 *    performing after the signal has been handled.
 *
 *    You must provide this value for any unbounded wait - otherwise user
 *    signals will be left pending forever.
 *
 * Thread interrupt mask:
 *
 *   The current maximum interruptible state for the thread, as set by
 *   thread_interrupt_level(), will limit the conditions that can cause a wake.
 *   This is useful for code that cannot tolerate being interrupted: it can set
 *   the level before calling code that is unaware of that constraint.
 *
 * Thread termination vs safe abort:
 *
 *    Termination abort: thread_abort(), thread_terminate()
 *
 *    A termination abort is sticky.  Once a thread is marked for termination,
 *    every THREAD_INTERRUPTIBLE wait will return immediately with
 *    THREAD_INTERRUPTED until the thread successfully exits.
 *
 *    Safe abort: thread_abort_safely()
 *
 *    A safe abort is not sticky.  The current wait (or, if the thread is not
 *    currently waiting, the next wait) will be interrupted, but then the abort
 *    condition is cleared.  The next wait will sleep as normal.  Safe aborts
 *    only have a single effect.
 *
 *    The path back to the user/kernel boundary must not make any further
 *    unbounded wait calls.  The waiter should detect the THREAD_INTERRUPTED
 *    return code from an ABORTSAFE wait and return an error code that tells
 *    its caller that the current operation has been interrupted; its caller
 *    should return a similar error code, and so on, until the user/kernel
 *    boundary is reached.  For Mach, the error code is usually KERN_ABORTED;
 *    for BSD it is EINTR.  (A sketch of this pattern follows the definitions
 *    below.)
 *
 *    Debuggers rely on the safe abort mechanism - a signaled thread must
 *    return to the AST at the user/kernel boundary for the debugger to finish
 *    attaching.
 *
 *    No wait/block will ever make a thread disappear out from under the
 *    waiter; the block call will always either return or call the passed-in
 *    continuation.
 */
typedef int wait_interrupt_t;
#define THREAD_UNINT			0		/* not interruptible      */
#define THREAD_INTERRUPTIBLE	1		/* may not be restartable */
#define THREAD_ABORTSAFE		2		/* abortable safely       */
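
/*
 * Illustrative sketch (hedged) of unwinding an interrupted ABORTSAFE wait
 * back toward the user/kernel boundary, per the discussion above.  The
 * wait/block calls are the <kern/sched_prim.h> interfaces; `object' is a
 * hypothetical wait channel:
 *
 *	wait_result_t wr;
 *
 *	wr = assert_wait((event_t)&object, THREAD_ABORTSAFE);
 *	if (wr == THREAD_WAITING)
 *		wr = thread_block(THREAD_CONTINUE_NULL);
 *	if (wr == THREAD_INTERRUPTED)
 *		return KERN_ABORTED;	// caller keeps unwinding to the boundary
 */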

typedef int wait_timeout_urgency_t;
#define TIMEOUT_URGENCY_SYS_NORMAL	0x00		/* use default leeway thresholds for system */
#define TIMEOUT_URGENCY_SYS_CRITICAL	0x01		/* use critical leeway thresholds for system */
#define TIMEOUT_URGENCY_SYS_BACKGROUND	0x02		/* use background leeway thresholds for system */

#define TIMEOUT_URGENCY_USER_MASK	0x10		/* mask to identify user timeout urgency classes */
#define TIMEOUT_URGENCY_USER_NORMAL	0x10		/* use default leeway thresholds for user */
#define TIMEOUT_URGENCY_USER_CRITICAL	0x11		/* use critical leeway thresholds for user */
#define TIMEOUT_URGENCY_USER_BACKGROUND	0x12		/* use background leeway thresholds for user */

#define TIMEOUT_URGENCY_MASK		0x13		/* mask to identify timeout urgency */

#define TIMEOUT_URGENCY_LEEWAY		0x20		/* don't ignore provided leeway value */

#define TIMEOUT_URGENCY_FIRST_AVAIL	0x40		/* first available bit outside of urgency mask/leeway */
#define	TIMEOUT_URGENCY_RATELIMITED	0x80		/* timeout is subject to timer rate-limiting */
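
/*
 * Illustrative sketch (hedged): an urgency class and its flags are combined
 * and passed to a timed-wait interface such as
 * assert_wait_timeout_with_leeway() from <kern/sched_prim.h> (interface
 * assumed here; check its declaration before relying on it).  `object',
 * timeout_ms, and leeway_ms are hypothetical:
 *
 *	// A user-critical timeout that honors the caller's leeway value:
 *	wr = assert_wait_timeout_with_leeway((event_t)&object, THREAD_ABORTSAFE,
 *	    TIMEOUT_URGENCY_USER_CRITICAL | TIMEOUT_URGENCY_LEEWAY,
 *	    timeout_ms, leeway_ms, NSEC_PER_MSEC);
 */
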
#ifdef	KERNEL_PRIVATE

#ifdef	MACH_KERNEL_PRIVATE

#include <kern/misc_protos.h>

typedef struct clock			*clock_t;

typedef struct mig_object		*mig_object_t;
#define MIG_OBJECT_NULL			((mig_object_t) 0)

typedef struct mig_notify		*mig_notify_t;
#define MIG_NOTIFY_NULL			((mig_notify_t) 0)

typedef struct pset_node		*pset_node_t;
#define PSET_NODE_NULL			((pset_node_t) 0)

typedef struct affinity_set		*affinity_set_t;
#define AFFINITY_SET_NULL		((affinity_set_t) 0)

typedef struct run_queue		*run_queue_t;
#define RUN_QUEUE_NULL			((run_queue_t) 0)

typedef struct grrr_run_queue		*grrr_run_queue_t;
#define GRRR_RUN_QUEUE_NULL		((grrr_run_queue_t) 0)

typedef struct grrr_group		*grrr_group_t;
#define GRRR_GROUP_NULL			((grrr_group_t) 0)

#if defined(CONFIG_SCHED_MULTIQ)
typedef struct sched_group		*sched_group_t;
#define SCHED_GROUP_NULL		((sched_group_t) 0)
#endif /* defined(CONFIG_SCHED_MULTIQ) */

#else	/* MACH_KERNEL_PRIVATE */

struct wait_queue_set;
struct _wait_queue_link;

#endif	/* MACH_KERNEL_PRIVATE */

typedef struct wait_queue_set	*wait_queue_set_t;
#define WAIT_QUEUE_SET_NULL	((wait_queue_set_t)0)
#define SIZEOF_WAITQUEUE_SET	wait_queue_set_size()

typedef struct _wait_queue_link	*wait_queue_link_t;
#define WAIT_QUEUE_LINK_NULL	((wait_queue_link_t)0)
#define SIZEOF_WAITQUEUE_LINK	wait_queue_link_size()

/* legacy definitions - going away */
struct wait_queue_sub;
typedef struct wait_queue_sub	*wait_queue_sub_t;
#define WAIT_QUEUE_SUB_NULL	((wait_queue_sub_t)0)
#define SIZEOF_WAITQUEUE_SUB	wait_queue_set_size()

#endif	/* KERNEL_PRIVATE */

#endif	/* _KERN_KERN_TYPES_H_ */