1/*
2 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 *
30 */
31
32#ifndef __IOKIT_IOLOCKS_H
33#define __IOKIT_IOLOCKS_H
34
35#ifndef KERNEL
36#error IOLocks.h is for kernel use only
37#endif
38
39#include <sys/appleapiopts.h>
40
41#include <IOKit/system.h>
42
43#include <IOKit/IOReturn.h>
44#include <IOKit/IOTypes.h>
45
46#ifdef __cplusplus
47extern "C" {
48#endif
49
50#include <libkern/locks.h>
51#include <machine/machine_routines.h>
52
53extern lck_grp_t	*IOLockGroup;
54
55/*
56 * Mutex lock operations
57 */
58
#ifdef	XNU_KERNEL_PRIVATE
/* Inside XNU an IOLock is the Mach mutex itself, so the inline wrappers
 * below can call the lck_mtx_* primitives directly with no indirection. */
typedef lck_mtx_t	IOLock;
#else
/* Outside XNU the type is opaque; all access goes through the IOLock* APIs. */
typedef struct _IOLock	IOLock;
#endif	/* XNU_KERNEL_PRIVATE */
64
65
/*! @function IOLockAlloc
    @abstract Allocates and initializes a mutex.
    @discussion Allocates a mutex in general purpose memory, and initializes it. Mutexes are general purpose blocking mutual exclusion locks, supplied by libkern/locks.h. This function may block and so should not be called from interrupt level or while a spin lock is held.
    @result Pointer to the allocated lock, or zero on failure. */

IOLock * IOLockAlloc( void );

/*! @function IOLockFree
    @abstract Frees a mutex.
    @discussion Frees a lock allocated with IOLockAlloc. Any blocked waiters will not be woken.
    @param lock Pointer to the allocated lock. */

void	IOLockFree( IOLock * lock);

/*! @function IOLockGetMachLock
    @abstract Accessor to a Mach mutex.
    @discussion Returns the underlying lck_mtx_t, for callers that need to use the lower-level Mach lock APIs on an IOLock.
    @param lock Pointer to the allocated lock.
    @result Pointer to the Mach mutex backing the IOLock. */

lck_mtx_t * IOLockGetMachLock( IOLock * lock);
86
87/*! @function IOLockLock
88    @abstract Lock a mutex.
89    @discussion Lock the mutex. If the lock is held by any thread, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a spin lock is held. Locking the mutex recursively from one thread will result in deadlock.
90    @param lock Pointer to the allocated lock. */
91
92#ifdef	XNU_KERNEL_PRIVATE
93#ifndef	IOLOCKS_CPP
94static __inline__
95void	IOLockLock( IOLock * lock)
96{
97    lck_mtx_lock(lock);
98}
99#else
100void	IOLockLock( IOLock * lock);
101#endif	/* !IOLOCKS_CPP */
102#else
103void	IOLockLock( IOLock * lock);
104#endif	/* XNU_KERNEL_PRIVATE */
105
106/*! @function IOLockTryLock
107    @abstract Attempt to lock a mutex.
108    @discussion Lock the mutex if it is currently unlocked, and return true. If the lock is held by any thread, return false.
109    @param lock Pointer to the allocated lock.
110    @result True if the mutex was unlocked and is now locked by the caller, otherwise false. */
111
112#ifdef	XNU_KERNEL_PRIVATE
113#ifndef	IOLOCKS_CPP
114static __inline__
115boolean_t IOLockTryLock( IOLock * lock)
116{
117    return(lck_mtx_try_lock(lock));
118}
119#else
120boolean_t IOLockTryLock( IOLock * lock);
121#endif	/* !IOLOCKS_CPP */
122#else
123boolean_t IOLockTryLock( IOLock * lock);
124#endif	/* XNU_KERNEL_PRIVATE */
125
126/*! @function IOLockUnlock
127    @abstract Unlock a mutex.
128@discussion Unlock the mutex and wake any blocked waiters. Results are undefined if the caller has not locked the mutex. This function may block and so should not be called from interrupt level or while a spin lock is held.
129    @param lock Pointer to the allocated lock. */
130
131#ifdef	XNU_KERNEL_PRIVATE
132#ifndef	IOLOCKS_CPP
133static __inline__
134void	IOLockUnlock( IOLock * lock)
135{
136    lck_mtx_unlock(lock);
137}
138#else
139void	IOLockUnlock( IOLock * lock);
140#endif	/* !IOLOCKS_CPP */
141#else
142void	IOLockUnlock( IOLock * lock);
143#endif	/* XNU_KERNEL_PRIVATE */
144
/*! @function IOLockSleep
    @abstract Sleep with mutex unlock and relock
@discussion Prepare to sleep, unlock the mutex, and re-acquire it on wakeup. Results are undefined if the caller has not locked the mutex. This function may block and so should not be called from interrupt level or while a spin lock is held.
    @param lock Pointer to the locked lock.
    @param event The event to sleep on.
    @param interType How can the sleep be interrupted.
	@result The wait-result value indicating how the thread was awakened.*/
int	IOLockSleep( IOLock * lock, void *event, UInt32 interType);

/*! @function IOLockSleepDeadline
    @abstract Sleep with mutex unlock and relock, bounded by a deadline.
    @discussion As IOLockSleep, but the sleep also ends when the given absolute deadline passes.
    @param lock Pointer to the locked lock.
    @param event The event to sleep on.
    @param deadline Absolute time after which the sleep ends.
    @param interType How can the sleep be interrupted.
	@result The wait-result value indicating how the thread was awakened.*/
int	IOLockSleepDeadline( IOLock * lock, void *event,
				AbsoluteTime deadline, UInt32 interType);

/*! @function IOLockWakeup
    @abstract Wake threads sleeping on an event via IOLockSleep or IOLockSleepDeadline.
    @param lock Pointer to the allocated lock.
    @param event The event the sleepers are waiting on.
    @param oneThread Wake a single waiting thread when true, otherwise all waiters. */
void	IOLockWakeup(IOLock * lock, void *event, bool oneThread);
158
#ifdef __APPLE_API_OBSOLETE

/* The following API is deprecated */

typedef enum {
    kIOLockStateUnlocked	= 0,
    kIOLockStateLocked		= 1
} IOLockState;

void	IOLockInitWithState( IOLock * lock, IOLockState state);
/* No trailing semicolon in the expansion: with one, "IOLockInit(l);" expanded
 * to two statements, which breaks unbraced if/else bodies. Callers that write
 * "IOLockInit(l);" are unaffected by this fix. */
#define	IOLockInit( l )	IOLockInitWithState( l, kIOLockStateUnlocked)

/* Deprecated names; thin wrappers over the IOLock* equivalents above. */
static __inline__ void IOTakeLock( IOLock * lock) { IOLockLock(lock); 	     }
static __inline__ boolean_t IOTryLock(  IOLock * lock) { return(IOLockTryLock(lock)); }
static __inline__ void IOUnlock(   IOLock * lock) { IOLockUnlock(lock);	     }

#endif /* __APPLE_API_OBSOLETE */
176
177/*
178 * Recursive lock operations
179 */
180
181typedef struct _IORecursiveLock IORecursiveLock;
182
183/*! @function IORecursiveLockAlloc
184    @abstract Allocates and initializes an recursive lock.
185    @discussion Allocates a recursive lock in general purpose memory, and initializes it. Recursive locks function identically to mutexes but allow one thread to lock more than once, with balanced unlocks.
186    @result Pointer to the allocated lock, or zero on failure. */
187
188IORecursiveLock * IORecursiveLockAlloc( void );
189
190/*! @function IORecursiveLockFree
191    @abstract Frees a recursive lock.
192    @discussion Frees a lock allocated with IORecursiveLockAlloc. Any blocked waiters will not be woken.
193    @param lock Pointer to the allocated lock. */
194
195void		IORecursiveLockFree( IORecursiveLock * lock);
196
197/*! @function IORecursiveLockGetMachLock
198    @abstract Accessor to a Mach mutex.
199    @discussion Accessor to the Mach mutex.
200    @param lock Pointer to the allocated lock. */
201
202lck_mtx_t * IORecursiveLockGetMachLock( IORecursiveLock * lock);
203
204/*! @function IORecursiveLockLock
205    @abstract Lock a recursive lock.
206    @discussion Lock the recursive lock. If the lock is held by another thread, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a spin lock is held. The lock may be taken recursively by the same thread, with a balanced number of calls to IORecursiveLockUnlock.
207    @param lock Pointer to the allocated lock. */
208
209void		IORecursiveLockLock( IORecursiveLock * lock);
210
211/*! @function IORecursiveLockTryLock
212    @abstract Attempt to lock a recursive lock.
213    @discussion Lock the lock if it is currently unlocked, or held by the calling thread, and return true. If the lock is held by another thread, return false. Successful calls to IORecursiveLockTryLock should be balanced with calls to IORecursiveLockUnlock.
214    @param lock Pointer to the allocated lock.
215    @result True if the lock is now locked by the caller, otherwise false. */
216
217boolean_t	IORecursiveLockTryLock( IORecursiveLock * lock);
218
219/*! @function IORecursiveLockUnlock
220    @abstract Unlock a recursive lock.
221@discussion Undo one call to IORecursiveLockLock, if the lock is now unlocked wake any blocked waiters. Results are undefined if the caller does not balance calls to IORecursiveLockLock with IORecursiveLockUnlock. This function may block and so should not be called from interrupt level or while a spin lock is held.
222    @param lock Pointer to the allocated lock. */
223
224void		IORecursiveLockUnlock( IORecursiveLock * lock);
225
226/*! @function IORecursiveLockHaveLock
227    @abstract Check if a recursive lock is held by the calling thread.
228    @discussion If the lock is held by the calling thread, return true, otherwise the lock is unlocked, or held by another thread and false is returned.
229    @param lock Pointer to the allocated lock.
230    @result True if the calling thread holds the lock otherwise false. */
231
232boolean_t	IORecursiveLockHaveLock( const IORecursiveLock * lock);
233
234extern int	IORecursiveLockSleep( IORecursiveLock *_lock,
235                                      void *event, UInt32 interType);
236extern void	IORecursiveLockWakeup( IORecursiveLock *_lock,
237                                       void *event, bool oneThread);
238
239/*
240 * Complex (read/write) lock operations
241 */
242
243#ifdef	XNU_KERNEL_PRIVATE
244typedef lck_rw_t		IORWLock;
245#else
246typedef struct _IORWLock	IORWLock;
247#endif	/* XNU_KERNEL_PRIVATE */
248
249/*! @function IORWLockAlloc
250    @abstract Allocates and initializes a read/write lock.
251@discussion Allocates and initializes a read/write lock in general purpose memory, and initilizes it. Read/write locks provide for multiple readers, one exclusive writer, and are supplied by libkern/locks.h. This function may block and so should not be called from interrupt level or while a spin lock is held.
252    @result Pointer to the allocated lock, or zero on failure. */
253
254IORWLock * IORWLockAlloc( void );
255
256/*! @function IORWLockFree
257   @abstract Frees a read/write lock.
258   @discussion Frees a lock allocated with IORWLockAlloc. Any blocked waiters will not be woken.
259    @param lock Pointer to the allocated lock. */
260
261void	IORWLockFree( IORWLock * lock);
262
263/*! @function IORWLockGetMachLock
264    @abstract Accessor to a Mach read/write lock.
265    @discussion Accessor to the Mach read/write lock.
266    @param lock Pointer to the allocated lock. */
267
268lck_rw_t * IORWLockGetMachLock( IORWLock * lock);
269
270/*! @function IORWLockRead
271    @abstract Lock a read/write lock for read.
272@discussion Lock the lock for read, allowing multiple readers when there are no writers. If the lock is held for write, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a spin lock is held. Locking the lock recursively from one thread, for read or write, can result in deadlock.
273    @param lock Pointer to the allocated lock. */
274
275#ifdef	XNU_KERNEL_PRIVATE
276#ifndef	IOLOCKS_CPP
277static __inline__
278void	IORWLockRead( IORWLock * lock)
279{
280    lck_rw_lock_shared( lock);
281}
282#else
283void	IORWLockRead( IORWLock * lock);
284#endif	/* !IOLOCKS_CPP */
285#else
286void	IORWLockRead( IORWLock * lock);
287#endif	/* XNU_KERNEL_PRIVATE */
288
289/*! @function IORWLockWrite
290    @abstract Lock a read/write lock for write.
291    @discussion Lock the lock for write, allowing one writer exlusive access. If the lock is held for read or write, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a spin lock is held. Locking the lock recursively from one thread, for read or write, can result in deadlock.
292    @param lock Pointer to the allocated lock. */
293
294#ifdef	XNU_KERNEL_PRIVATE
295#ifndef	IOLOCKS_CPP
296static __inline__
297void	IORWLockWrite( IORWLock * lock)
298{
299    lck_rw_lock_exclusive( lock);
300}
301#else
302void	IORWLockWrite( IORWLock * lock);
303#endif	/* !IOLOCKS_CPP */
304#else
305void	IORWLockWrite( IORWLock * lock);
306#endif	/* XNU_KERNEL_PRIVATE */
307
308/*! @function IORWLockUnlock
309    @abstract Unlock a read/write lock.
310    @discussion Undo one call to IORWLockRead or IORWLockWrite. Results are undefined if the caller has not locked the lock. This function may block and so should not be called from interrupt level or while a spin lock is held.
311    @param lock Pointer to the allocated lock. */
312
313#ifdef	XNU_KERNEL_PRIVATE
314#ifndef	IOLOCKS_CPP
315static __inline__
316void	IORWLockUnlock( IORWLock * lock)
317{
318    lck_rw_done( lock);
319}
320#else
321void	IORWLockUnlock( IORWLock * lock);
322#endif	/* !IOLOCKS_CPP */
323#else
324void	IORWLockUnlock( IORWLock * lock);
325#endif	/* XNU_KERNEL_PRIVATE */
326
#ifdef __APPLE_API_OBSOLETE

/* Deprecated names; thin wrappers over the IORWLock* equivalents above. */

static __inline__ void IOReadLock( IORWLock * lock)
{
    IORWLockRead(lock);
}

static __inline__ void IOWriteLock(  IORWLock * lock)
{
    IORWLockWrite(lock);
}

static __inline__ void IORWUnlock(   IORWLock * lock)
{
    IORWLockUnlock(lock);
}

#endif /* __APPLE_API_OBSOLETE */
336
337
338/*
339 * Simple locks. Cannot block while holding a simple lock.
340 */
341
342#ifdef	KERNEL_PRIVATE
343typedef lck_spin_t		IOSimpleLock;
344#else
345typedef struct _IOSimpleLock	IOSimpleLock;
346#endif	/* XNU_KERNEL_PRIVATE */
347
/*! @function IOSimpleLockAlloc
    @abstract Allocates and initializes a spin lock.
    @discussion Allocates and initializes a spin lock in general purpose memory. Spin locks provide non-blocking mutual exclusion for synchronization between thread context and interrupt context, or for multiprocessor synchronization, and are supplied by libkern/locks.h. This function may block and so should not be called from interrupt level or while a spin lock is held.
    @result Pointer to the allocated lock, or zero on failure. */

IOSimpleLock * IOSimpleLockAlloc( void );

/*! @function IOSimpleLockFree
    @abstract Frees a spin lock.
    @discussion Frees a lock allocated with IOSimpleLockAlloc.
    @param lock Pointer to the lock. */

void IOSimpleLockFree( IOSimpleLock * lock );

/*! @function IOSimpleLockGetMachLock
    @abstract Accessor to a Mach spin lock.
    @discussion Returns the underlying lck_spin_t backing the IOSimpleLock.
    @param lock Pointer to the allocated lock.
    @result Pointer to the Mach spin lock. */

lck_spin_t * IOSimpleLockGetMachLock( IOSimpleLock * lock);

/*! @function IOSimpleLockInit
    @abstract Initialize a spin lock.
    @discussion Initialize an embedded spin lock, to the unlocked state. Use this (rather than IOSimpleLockAlloc) when the lock storage is embedded in another structure.
    @param lock Pointer to the lock. */

void IOSimpleLockInit( IOSimpleLock * lock );
375
376/*! @function IOSimpleLockLock
377    @abstract Lock a spin lock.
378@discussion Lock the spin lock. If the lock is held, spin waiting for its unlock. Spin locks disable preemption, cannot be held across any blocking operation, and should be held for very short periods. When used to synchronize between interrupt context and thread context they should be locked with interrupts disabled - IOSimpleLockLockDisableInterrupt() will do both. Locking the lock recursively from one thread will result in deadlock.
379    @param lock Pointer to the lock. */
380
381#ifdef	XNU_KERNEL_PRIVATE
382#ifndef	IOLOCKS_CPP
383static __inline__
384void IOSimpleLockLock( IOSimpleLock * lock )
385{
386    lck_spin_lock( lock );
387}
388#else
389void IOSimpleLockLock( IOSimpleLock * lock );
390#endif	/* !IOLOCKS_CPP */
391#else
392void IOSimpleLockLock( IOSimpleLock * lock );
393#endif	/* XNU_KERNEL_PRIVATE */
394
395/*! @function IOSimpleLockTryLock
396    @abstract Attempt to lock a spin lock.
397@discussion Lock the spin lock if it is currently unlocked, and return true. If the lock is held, return false. Successful calls to IOSimpleLockTryLock should be balanced with calls to IOSimpleLockUnlock.
398    @param lock Pointer to the lock.
399    @result True if the lock was unlocked and is now locked by the caller, otherwise false. */
400
401#ifdef	XNU_KERNEL_PRIVATE
402#ifndef	IOLOCKS_CPP
403static __inline__
404boolean_t IOSimpleLockTryLock( IOSimpleLock * lock )
405{
406    return( lck_spin_try_lock( lock ) );
407}
408#else
409boolean_t IOSimpleLockTryLock( IOSimpleLock * lock );
410#endif	/* !IOLOCKS_CPP */
411#else
412boolean_t IOSimpleLockTryLock( IOSimpleLock * lock );
413#endif	/* XNU_KERNEL_PRIVATE */
414
415/*! @function IOSimpleLockUnlock
416    @abstract Unlock a spin lock.
417    @discussion Unlock the lock, and restore preemption. Results are undefined if the caller has not locked the lock.
418    @param lock Pointer to the lock. */
419
420#ifdef	XNU_KERNEL_PRIVATE
421#ifndef	IOLOCKS_CPP
422static __inline__
423void IOSimpleLockUnlock( IOSimpleLock * lock )
424{
425    lck_spin_unlock( lock );
426}
427#else
428void IOSimpleLockUnlock( IOSimpleLock * lock );
429#endif	/* !IOLOCKS_CPP */
430#else
431void IOSimpleLockUnlock( IOSimpleLock * lock );
432#endif	/* XNU_KERNEL_PRIVATE */
433
434typedef long int IOInterruptState;
435
436/*! @function IOSimpleLockLockDisableInterrupt
437    @abstract Lock a spin lock.
438    @discussion Lock the spin lock. If the lock is held, spin waiting for its unlock. Simple locks disable preemption, cannot be held across any blocking operation, and should be held for very short periods. When used to synchronize between interrupt context and thread context they should be locked with interrupts disabled - IOSimpleLockLockDisableInterrupt() will do both. Locking the lock recursively from one thread will result in deadlock.
439    @param lock Pointer to the lock. */
440
441static __inline__
442IOInterruptState IOSimpleLockLockDisableInterrupt( IOSimpleLock * lock )
443{
444    IOInterruptState	state = ml_set_interrupts_enabled( false );
445    IOSimpleLockLock( lock );
446    return( state );
447}
448
/*! @function IOSimpleLockUnlockEnableInterrupt
    @abstract Unlock a spin lock, and restore interrupt state.
    @discussion Unlock the lock, and restore preemption and interrupts to the state as they were when the lock was taken. Results are undefined if the caller has not locked the lock.
    @param lock Pointer to the lock.
    @param state The interrupt state returned by IOSimpleLockLockDisableInterrupt() */

static __inline__
void IOSimpleLockUnlockEnableInterrupt( IOSimpleLock * lock,
					IOInterruptState state )
{
    /* Drop the lock first, then put interrupts back the way they were. */
    IOSimpleLockUnlock( lock );
    ml_set_interrupts_enabled( state );
}
462
463#ifdef __cplusplus
464} /* extern "C" */
465#endif
466
467#endif /* !__IOKIT_IOLOCKS_H */
468
469