/*
 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/system.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/assert.h>

#include <IOKit/IOLocksPrivate.h>

extern "C" {
#include <kern/locks.h>

#if defined(__x86_64__)
/* Synthetic event if none is specified, for backwards compatibility only. */
static bool IOLockSleep_NO_EVENT __attribute__((used)) = 0;
#endif

void	IOLockInitWithState( IOLock * lock, IOLockState state)
{
    if( state == kIOLockStateLocked)
        lck_mtx_lock( lock);
}

IOLock * IOLockAlloc( void )
{
    return( lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL) );
}

void	IOLockFree( IOLock * lock)
{
    lck_mtx_free( lock, IOLockGroup);
}

lck_mtx_t * IOLockGetMachLock( IOLock * lock)
{
    return( (lck_mtx_t *)lock);
}
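
/*
 * Usage sketch (illustrative only; 'myLock' is a hypothetical caller-owned
 * lock): IOLockGetMachLock exposes the underlying lck_mtx_t so callers can
 * reach Mach-level APIs that have no IOLock wrapper, e.g. ownership asserts:
 *
 *     lck_mtx_assert(IOLockGetMachLock(myLock), LCK_MTX_ASSERT_OWNED);
 */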

int	IOLockSleep( IOLock * lock, void *event, UInt32 interType)
{
    return (int) lck_mtx_sleep(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);
}

int	IOLockSleepDeadline( IOLock * lock, void *event,
                                AbsoluteTime deadline, UInt32 interType)
{
    return (int) lck_mtx_sleep_deadline(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
                                        (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));
}

void	IOLockWakeup(IOLock * lock, void *event, bool oneThread)
{
    thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
}
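
/*
 * Usage sketch (illustrative only; 'gExampleLock' and 'gExampleReady' are
 * hypothetical caller state): the canonical condition-wait pattern. The
 * waiter holds the lock, re-checks its condition in a loop, and sleeps;
 * IOLockSleep drops the mutex for the wait and re-acquires it before
 * returning. The signaler flips the condition and wakes the waiters.
 *
 *     // waiter
 *     IOLockLock(gExampleLock);
 *     while (!gExampleReady)
 *         (void) IOLockSleep(gExampleLock, &gExampleReady, THREAD_UNINT);
 *     IOLockUnlock(gExampleLock);
 *
 *     // signaler
 *     IOLockLock(gExampleLock);
 *     gExampleReady = true;
 *     IOLockWakeup(gExampleLock, &gExampleReady, false);  // false: wake all waiters
 *     IOLockUnlock(gExampleLock);
 */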

#if defined(__x86_64__)
/*
 * For backwards compatibility, kexts built against pre-Darwin 14 headers will bind at runtime to these
 * functions, which accept a NULL event.
 */
int	IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType) __asm("_IOLockSleep");
int	IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
					   AbsoluteTime deadline, UInt32 interType) __asm("_IOLockSleepDeadline");
void	IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread) __asm("_IOLockWakeup");

int	IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType)
{
    if (event == NULL)
        event = (void *)&IOLockSleep_NO_EVENT;

    return IOLockSleep(lock, event, interType);
}

int	IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
			     AbsoluteTime deadline, UInt32 interType)
{
    if (event == NULL)
        event = (void *)&IOLockSleep_NO_EVENT;

    return IOLockSleepDeadline(lock, event, deadline, interType);
}

void	IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread)
{
    if (event == NULL)
        event = (void *)&IOLockSleep_NO_EVENT;

    IOLockWakeup(lock, event, oneThread);
}
#endif /* defined(__x86_64__) */
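
/*
 * Note on the shims above (behavioral sketch, not additional API): every
 * legacy caller passing a NULL event is redirected to the address of the
 * shared IOLockSleep_NO_EVENT variable, so all NULL-event sleepers share a
 * single wait channel. Since IOLockWakeup ignores its lock argument, a
 * hypothetical pair of legacy calls like
 *
 *     IOLockSleep(lockA, NULL, THREAD_UNINT);  // sleeps on &IOLockSleep_NO_EVENT
 *     IOLockWakeup(lockB, NULL, false);        // wakes it, despite the different lock
 *
 * interact through that one synthetic event.
 */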


struct _IORecursiveLock {
	lck_mtx_t	*mutex;
	lck_grp_t	*group;
	thread_t	thread;
	UInt32		count;
};

IORecursiveLock * IORecursiveLockAllocWithLockGroup( lck_grp_t * lockGroup )
{
    _IORecursiveLock * lock;

    if( lockGroup == 0 )
        return( 0 );

    lock = IONew( _IORecursiveLock, 1 );
    if( !lock )
        return( 0 );

    lock->mutex = lck_mtx_alloc_init( lockGroup, LCK_ATTR_NULL );
    if( lock->mutex ) {
        lock->group = lockGroup;
        lock->thread = 0;
        lock->count  = 0;
    } else {
        IODelete( lock, _IORecursiveLock, 1 );
        lock = 0;
    }

    return( (IORecursiveLock *) lock );
}


IORecursiveLock * IORecursiveLockAlloc( void )
{
    return IORecursiveLockAllocWithLockGroup( IOLockGroup );
}

void IORecursiveLockFree( IORecursiveLock * _lock )
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    lck_mtx_free( lock->mutex, lock->group );
    IODelete( lock, _IORecursiveLock, 1 );
}

lck_mtx_t * IORecursiveLockGetMachLock( IORecursiveLock * lock )
{
    return( lock->mutex );
}

void IORecursiveLockLock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    if( lock->thread == IOThreadSelf())
        lock->count++;
    else {
        lck_mtx_lock( lock->mutex );
        assert( lock->thread == 0 );
        assert( lock->count == 0 );
        lock->thread = IOThreadSelf();
        lock->count = 1;
    }
}

boolean_t IORecursiveLockTryLock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    if( lock->thread == IOThreadSelf()) {
        lock->count++;
        return( true );
    } else {
        if( lck_mtx_try_lock( lock->mutex )) {
            assert( lock->thread == 0 );
            assert( lock->count == 0 );
            lock->thread = IOThreadSelf();
            lock->count = 1;
            return( true );
        }
    }
    return( false );
}

void IORecursiveLockUnlock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    assert( lock->thread == IOThreadSelf() );

    if( 0 == (--lock->count)) {
        lock->thread = 0;
        lck_mtx_unlock( lock->mutex );
    }
}

boolean_t IORecursiveLockHaveLock( const IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    return( lock->thread == IOThreadSelf());
}
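
/*
 * Usage sketch (illustrative only; 'gRecLock' is hypothetical): the owning
 * thread may re-enter freely; the Mach mutex is taken on the first lock and
 * released only when the unlocks balance the locks.
 *
 *     IORecursiveLockLock(gRecLock);      // count 1, takes lock->mutex
 *     IORecursiveLockLock(gRecLock);      // count 2, no mutex operation
 *     assert(IORecursiveLockHaveLock(gRecLock));
 *     IORecursiveLockUnlock(gRecLock);    // count 1, mutex still held
 *     IORecursiveLockUnlock(gRecLock);    // count 0, mutex released
 */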

int IORecursiveLockSleep(IORecursiveLock *_lock, void *event, UInt32 interType)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
    UInt32 count = lock->count;
    int res;

    assert(lock->thread == IOThreadSelf());

    lock->count = 0;
    lock->thread = 0;
    res = lck_mtx_sleep(lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);

    // Must re-establish the recursive lock no matter why we woke up;
    // otherwise we would return to the caller with corrupted lock state.
    assert(lock->thread == 0);
    assert(lock->count == 0);
    lock->thread = IOThreadSelf();
    lock->count = count;
    return res;
}

int	IORecursiveLockSleepDeadline( IORecursiveLock * _lock, void *event,
                                  AbsoluteTime deadline, UInt32 interType)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
    UInt32 count = lock->count;
    int res;

    assert(lock->thread == IOThreadSelf());

    lock->count = 0;
    lock->thread = 0;
    res = lck_mtx_sleep_deadline(lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
                                 (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));

    // Must re-establish the recursive lock no matter why we woke up;
    // otherwise we would return to the caller with corrupted lock state.
    assert(lock->thread == 0);
    assert(lock->count == 0);
    lock->thread = IOThreadSelf();
    lock->count = count;
    return res;
}
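
/*
 * Usage sketch (illustrative only; names are hypothetical): a thread may
 * sleep while holding the lock at any recursion depth. The full depth is
 * dropped for the wait and restored before returning, so the caller's
 * unlock sequence still balances.
 *
 *     IORecursiveLockLock(gRecLock);
 *     IORecursiveLockLock(gRecLock);           // depth 2
 *     while (!gReady)
 *         (void) IORecursiveLockSleep(gRecLock, &gReady, THREAD_UNINT);
 *     // depth is 2 again here, however the wakeup happened
 *     IORecursiveLockUnlock(gRecLock);
 *     IORecursiveLockUnlock(gRecLock);
 */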

void IORecursiveLockWakeup(IORecursiveLock *, void *event, bool oneThread)
{
    thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
}

/*
 * Complex (read/write) lock operations
 */

IORWLock * IORWLockAlloc( void )
{
    return( lck_rw_alloc_init(IOLockGroup, LCK_ATTR_NULL) );
}

void	IORWLockFree( IORWLock * lock)
{
    lck_rw_free( lock, IOLockGroup);
}

lck_rw_t * IORWLockGetMachLock( IORWLock * lock)
{
    return( (lck_rw_t *)lock);
}
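
/*
 * Usage sketch (illustrative only; the read/write entry points are inline
 * wrappers declared in IOLib.h, not defined here; 'gRWLock' is hypothetical):
 * readers may hold the lock concurrently, writers are exclusive.
 *
 *     IORWLockRead(gRWLock);      // shared: runs concurrently with other readers
 *     // ... read shared state ...
 *     IORWLockUnlock(gRWLock);
 *
 *     IORWLockWrite(gRWLock);     // exclusive: waits out readers and writers
 *     // ... mutate shared state ...
 *     IORWLockUnlock(gRWLock);
 */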


/*
 * Spin locks
 */

IOSimpleLock * IOSimpleLockAlloc( void )
{
    return( lck_spin_alloc_init( IOLockGroup, LCK_ATTR_NULL) );
}

void IOSimpleLockInit( IOSimpleLock * lock)
{
    lck_spin_init( lock, IOLockGroup, LCK_ATTR_NULL);
}

void IOSimpleLockFree( IOSimpleLock * lock )
{
    lck_spin_free( lock, IOLockGroup);
}

lck_spin_t * IOSimpleLockGetMachLock( IOSimpleLock * lock)
{
    return( (lck_spin_t *)lock);
}
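
/*
 * Usage sketch (illustrative only; 'gSpinLock' is hypothetical): spin locks
 * guard brief, non-blocking critical sections. Code that can race with a
 * primary interrupt handler should use the interrupt-disabling variants
 * declared in IOLib.h:
 *
 *     IOInterruptState is = IOSimpleLockLockDisableInterrupt(gSpinLock);
 *     // ... short critical section, no blocking or sleeping ...
 *     IOSimpleLockUnlockEnableInterrupt(gSpinLock, is);
 */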

} /* extern "C" */