OsdSynch.c revision 167915
1/*-
2 * Copyright (c) 2000 Michael Smith
3 * Copyright (c) 2000 BSDi
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28/*
29 * 6.1 : Mutual Exclusion and Synchronisation
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD: head/sys/dev/acpica/Osd/OsdSynch.c 167915 2007-03-26 21:56:35Z jkim $");
34
35#include <contrib/dev/acpica/acpi.h>
36
37#include "opt_acpi.h"
38#include <sys/kernel.h>
39#include <sys/malloc.h>
40#include <sys/sysctl.h>
41#include <sys/lock.h>
42#include <sys/mutex.h>
43
44#define _COMPONENT	ACPI_OS_SERVICES
45ACPI_MODULE_NAME("SYNCH")
46
MALLOC_DEFINE(M_ACPISEM, "acpisem", "ACPI semaphore");

/* Convenience wrappers for the per-semaphore mutex. */
#define AS_LOCK(as)	mtx_lock(&(as)->as_mtx)
#define AS_UNLOCK(as)	mtx_unlock(&(as)->as_mtx)
51
52/*
53 * Simple counting semaphore implemented using a mutex.  (Subsequently used
54 * in the OSI code to implement a mutex.  Go figure.)
55 */
struct acpi_semaphore {
    struct mtx	as_mtx;		/* protects all fields below */
    UINT32	as_units;	/* units currently available */
    UINT32	as_maxunits;	/* cap on units (or ACPI_NO_UNIT_LIMIT) */
    UINT32	as_pendings;	/* threads currently blocked in msleep() */
    UINT32	as_resetting;	/* reset flag (only used by #if 0 code) */
    UINT32	as_timeouts;	/* consecutive AE_TIME results */
};
64
/* Default number of maximum pending threads. */
#ifndef ACPI_NO_SEMAPHORES
#ifndef ACPI_SEMAPHORES_MAX_PENDING
#define ACPI_SEMAPHORES_MAX_PENDING	4
#endif

/*
 * Debug knob: set the debug.acpi_semaphore_debug tunable/sysctl to a
 * non-zero value to log semaphore sleep/wakeup/timeout activity.
 */
static int	acpi_semaphore_debug = 0;
TUNABLE_INT("debug.acpi_semaphore_debug", &acpi_semaphore_debug);
SYSCTL_DECL(_debug_acpi);
SYSCTL_INT(_debug_acpi, OID_AUTO, semaphore_debug, CTLFLAG_RW,
	   &acpi_semaphore_debug, 0, "Enable ACPI semaphore debug messages");
#endif /* !ACPI_NO_SEMAPHORES */
77
78ACPI_STATUS
79AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
80    ACPI_SEMAPHORE *OutHandle)
81{
82#ifndef ACPI_NO_SEMAPHORES
83    struct acpi_semaphore	*as;
84
85    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
86
87    if (OutHandle == NULL)
88	return_ACPI_STATUS (AE_BAD_PARAMETER);
89    if (InitialUnits > MaxUnits)
90	return_ACPI_STATUS (AE_BAD_PARAMETER);
91
92    if ((as = malloc(sizeof(*as), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
93	return_ACPI_STATUS (AE_NO_MEMORY);
94
95    mtx_init(&as->as_mtx, "ACPI semaphore", NULL, MTX_DEF);
96    as->as_units = InitialUnits;
97    as->as_maxunits = MaxUnits;
98    as->as_pendings = as->as_resetting = as->as_timeouts = 0;
99
100    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
101	"created semaphore %p max %d, initial %d\n",
102	as, InitialUnits, MaxUnits));
103
104    *OutHandle = (ACPI_HANDLE)as;
105#else
106    *OutHandle = (ACPI_HANDLE)OutHandle;
107#endif /* !ACPI_NO_SEMAPHORES */
108
109    return_ACPI_STATUS (AE_OK);
110}
111
112ACPI_STATUS
113AcpiOsDeleteSemaphore(ACPI_SEMAPHORE Handle)
114{
115#ifndef ACPI_NO_SEMAPHORES
116    struct acpi_semaphore *as = (struct acpi_semaphore *)Handle;
117
118    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
119
120    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "destroyed semaphore %p\n", as));
121    mtx_destroy(&as->as_mtx);
122    free(Handle, M_ACPISEM);
123#endif /* !ACPI_NO_SEMAPHORES */
124
125    return_ACPI_STATUS (AE_OK);
126}
127
/*
 * This implementation has a bug, in that it has to stall for the entire
 * timeout before it will return AE_TIME.  A better implementation would
 * use getmicrotime() to correctly adjust the timeout after being woken up.
 */
/*
 * Wait for Units units on the semaphore, blocking for at most Timeout
 * milliseconds (ACPI_WAIT_FOREVER to block indefinitely).  Returns
 * AE_OK on acquisition, AE_TIME on timeout (or pending-thread overflow),
 * AE_BAD_PARAMETER for a NULL handle.
 */
ACPI_STATUS
AcpiOsWaitSemaphore(ACPI_SEMAPHORE Handle, UINT32 Units, UINT16 Timeout)
{
#ifndef ACPI_NO_SEMAPHORES
    ACPI_STATUS			result;
    struct acpi_semaphore	*as = (struct acpi_semaphore *)Handle;
    int				rv, tmo;
    struct timeval		timeouttv, currenttv, timelefttv;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (as == NULL)
	return_ACPI_STATUS (AE_BAD_PARAMETER);

    /* Cannot sleep during early boot; claim success instead. */
    if (cold)
	return_ACPI_STATUS (AE_OK);

#if 0
    /* Disabled: forcibly reset a semaphore that has timed out too often. */
    if (as->as_units < Units && as->as_timeouts > 10) {
	printf("%s: semaphore %p too many timeouts, resetting\n", __func__, as);
	AS_LOCK(as);
	as->as_units = as->as_maxunits;
	if (as->as_pendings)
	    as->as_resetting = 1;
	as->as_timeouts = 0;
	wakeup(as);
	AS_UNLOCK(as);
	return_ACPI_STATUS (AE_TIME);
    }

    if (as->as_resetting)
	return_ACPI_STATUS (AE_TIME);
#endif

    /* a timeout of ACPI_WAIT_FOREVER means "forever" */
    if (Timeout == ACPI_WAIT_FOREVER) {
	tmo = 0;	/* msleep() with 0 ticks sleeps until woken */
	timeouttv.tv_sec = ((0xffff/1000) + 1);	/* cf. ACPI spec */
	timeouttv.tv_usec = 0;
    } else {
	/* compute timeout using microseconds per tick */
	tmo = (Timeout * 1000) / (1000000 / hz);
	if (tmo <= 0)
	    tmo = 1;	/* round sub-tick timeouts up to one tick */
	timeouttv.tv_sec  = Timeout / 1000;
	timeouttv.tv_usec = (Timeout % 1000) * 1000;
    }

    /* calculate timeout value in timeval (absolute deadline) */
    getmicrotime(&currenttv);
    timevaladd(&timeouttv, &currenttv);

    AS_LOCK(as);
    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	"get %d units from semaphore %p (has %d), timeout %d\n",
	Units, as, as->as_units, Timeout));
    for (;;) {
	/* No unit limit: requests always succeed immediately. */
	if (as->as_maxunits == ACPI_NO_UNIT_LIMIT) {
	    result = AE_OK;
	    break;
	}
	/* Enough units available: take them and return. */
	if (as->as_units >= Units) {
	    as->as_units -= Units;
	    result = AE_OK;
	    break;
	}

	/* limit number of pending threads */
	if (as->as_pendings >= ACPI_SEMAPHORES_MAX_PENDING) {
	    result = AE_TIME;
	    break;
	}

	/* if timeout values of zero is specified, return immediately */
	if (Timeout == 0) {
	    result = AE_TIME;
	    break;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	    "semaphore blocked, calling msleep(%p, %p, %d, \"acsem\", %d)\n",
	    as, &as->as_mtx, PCATCH, tmo));

	as->as_pendings++;

	if (acpi_semaphore_debug) {
	    printf("%s: Sleep %d, pending %d, semaphore %p, thread %d\n",
		__func__, Timeout, as->as_pendings, as, AcpiOsGetThreadId());
	}

	/* Sleep until wakeup() from AcpiOsSignalSemaphore or tick timeout. */
	rv = msleep(as, &as->as_mtx, PCATCH, "acsem", tmo);

	as->as_pendings--;

#if 0
	if (as->as_resetting) {
	    /* semaphore reset, return immediately */
	    if (as->as_pendings == 0) {
		as->as_resetting = 0;
	    }
	    result = AE_TIME;
	    break;
	}
#endif

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "msleep(%d) returned %d\n", tmo, rv));
	if (rv == EWOULDBLOCK) {
	    result = AE_TIME;
	    break;
	}

	/* check if we already awaited enough */
	timelefttv = timeouttv;
	getmicrotime(&currenttv);
	timevalsub(&timelefttv, &currenttv);
	if (timelefttv.tv_sec < 0) {
	    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "await semaphore %p timeout\n",
		as));
	    result = AE_TIME;
	    break;
	}

	/* adjust timeout for the next sleep */
	tmo = (timelefttv.tv_sec * 1000000 + timelefttv.tv_usec) /
	    (1000000 / hz);
	if (tmo <= 0)
	    tmo = 1;

	if (acpi_semaphore_debug) {
	    printf("%s: Wakeup timeleft(%jd, %lu), tmo %u, sem %p, thread %d\n",
		__func__, (intmax_t)timelefttv.tv_sec, timelefttv.tv_usec, tmo, as,
		AcpiOsGetThreadId());
	}
    }

    if (acpi_semaphore_debug) {
	if (result == AE_TIME && Timeout > 0) {
	    printf("%s: Timeout %d, pending %d, semaphore %p\n",
		__func__, Timeout, as->as_pendings, as);
	}
	if (result == AE_OK && (as->as_timeouts > 0 || as->as_pendings > 0)) {
	    printf("%s: Acquire %d, units %d, pending %d, sem %p, thread %d\n",
		__func__, Units, as->as_units, as->as_pendings, as,
		AcpiOsGetThreadId());
	}
    }

    /* Track consecutive timeouts; any success resets the counter. */
    if (result == AE_TIME)
	as->as_timeouts++;
    else
	as->as_timeouts = 0;

    AS_UNLOCK(as);
    return_ACPI_STATUS (result);
#else
    return_ACPI_STATUS (AE_OK);
#endif /* !ACPI_NO_SEMAPHORES */
}
291
292ACPI_STATUS
293AcpiOsSignalSemaphore(ACPI_SEMAPHORE Handle, UINT32 Units)
294{
295#ifndef ACPI_NO_SEMAPHORES
296    struct acpi_semaphore	*as = (struct acpi_semaphore *)Handle;
297
298    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
299
300    if (as == NULL)
301	return_ACPI_STATUS(AE_BAD_PARAMETER);
302
303    AS_LOCK(as);
304    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
305	"return %d units to semaphore %p (has %d)\n",
306	Units, as, as->as_units));
307    if (as->as_maxunits != ACPI_NO_UNIT_LIMIT) {
308	as->as_units += Units;
309	if (as->as_units > as->as_maxunits)
310	    as->as_units = as->as_maxunits;
311    }
312
313    if (acpi_semaphore_debug && (as->as_timeouts > 0 || as->as_pendings > 0)) {
314	printf("%s: Release %d, units %d, pending %d, semaphore %p, thread %d\n",
315	    __func__, Units, as->as_units, as->as_pendings, as, AcpiOsGetThreadId());
316    }
317
318    wakeup(as);
319    AS_UNLOCK(as);
320#endif /* !ACPI_NO_SEMAPHORES */
321
322    return_ACPI_STATUS (AE_OK);
323}
324
/*
 * Combined mutex + mutex name storage, since the name passed to
 * mtx_init() must persist for the lifetime of the mutex.
 */
struct acpi_spinlock {
    struct mtx	lock;		/* the lock itself (a sleep mutex) */
    char	name[32];	/* persistent storage for the mutex name */
};
330
331ACPI_STATUS
332AcpiOsCreateLock (ACPI_SPINLOCK *OutHandle)
333{
334    struct acpi_spinlock *h;
335
336    if (OutHandle == NULL)
337	return (AE_BAD_PARAMETER);
338    h = malloc(sizeof(struct acpi_spinlock), M_ACPISEM, M_NOWAIT | M_ZERO);
339    if (h == NULL)
340	return (AE_NO_MEMORY);
341
342    /* Build a unique name based on the address of the handle. */
343    if (OutHandle == &AcpiGbl_GpeLock)
344	snprintf(h->name, sizeof(h->name), "acpi subsystem GPE lock");
345    if (OutHandle == &AcpiGbl_HardwareLock)
346	snprintf(h->name, sizeof(h->name), "acpi subsystem HW lock");
347    else
348	snprintf(h->name, sizeof(h->name), "acpi subsys %p", OutHandle);
349    mtx_init(&h->lock, h->name, NULL, MTX_DEF);
350    *OutHandle = (ACPI_SPINLOCK)h;
351    return (AE_OK);
352}
353
354void
355AcpiOsDeleteLock (ACPI_SPINLOCK Handle)
356{
357    struct acpi_spinlock *h = (struct acpi_spinlock *)Handle;
358
359    if (Handle == NULL)
360        return;
361    mtx_destroy(&h->lock);
362    free(h, M_ACPISEM);
363}
364
365/*
366 * The Flags parameter seems to state whether or not caller is an ISR
367 * (and thus can't block) but since we have ithreads, we don't worry
368 * about potentially blocking.
369 */
370ACPI_NATIVE_UINT
371AcpiOsAcquireLock (ACPI_SPINLOCK Handle)
372{
373    struct acpi_spinlock *h = (struct acpi_spinlock *)Handle;
374
375    if (Handle == NULL)
376	return (0);
377    mtx_lock(&h->lock);
378    return (0);
379}
380
381void
382AcpiOsReleaseLock (ACPI_SPINLOCK Handle, ACPI_CPU_FLAGS Flags)
383{
384    struct acpi_spinlock *h = (struct acpi_spinlock *)Handle;
385
386    if (Handle == NULL)
387	return;
388    mtx_unlock(&h->lock);
389}
390
/* Section 5.2.9.1:  global lock acquire/release functions */
#define GL_ACQUIRED	(-1)	/* lock was free; caller now owns it */
#define GL_BUSY		0	/* lock held; caller must wait for notify */
#define GL_BIT_PENDING	0x1	/* a waiter is pending on the lock */
#define GL_BIT_OWNED	0x2	/* the lock is currently owned */
#define GL_BIT_MASK	(GL_BIT_PENDING | GL_BIT_OWNED)
397
/*
 * Acquire the global lock.  If busy, set the pending bit.  The caller
 * will wait for notification from the BIOS that the lock is available
 * and then attempt to acquire it again.
 *
 * Returns GL_ACQUIRED if the lock was obtained, GL_BUSY if the caller
 * must wait.
 */
int
acpi_acquire_global_lock(uint32_t *lock)
{
	uint32_t new, old;

	do {
		old = *lock;
		/*
		 * Always set the owned bit; if the lock was already
		 * owned, shift that owned bit (0x2) down into the
		 * pending position (0x1) so the holder knows to notify
		 * us on release.
		 */
		new = ((old & ~GL_BIT_MASK) | GL_BIT_OWNED) |
			((old >> 1) & GL_BIT_PENDING);
	} while (atomic_cmpset_acq_int(lock, old, new) == 0);

	/* Pending bit set (new >= 3) means someone else holds the lock. */
	return ((new < GL_BIT_MASK) ? GL_ACQUIRED : GL_BUSY);
}
416
/*
 * Release the global lock, returning whether there is a waiter pending.
 * If the BIOS set the pending bit, OSPM must notify the BIOS when it
 * releases the lock.
 */
int
acpi_release_global_lock(uint32_t *lock)
{
	uint32_t new, old;

	do {
		old = *lock;
		/* Atomically clear both the owned and pending bits. */
		new = old & ~GL_BIT_MASK;
	} while (atomic_cmpset_rel_int(lock, old, new) == 0);

	/* Nonzero if a waiter set the pending bit while we held the lock. */
	return (old & GL_BIT_PENDING);
}
434