/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$FreeBSD: head/sys/dev/acpica/Osd/OsdSynch.c 128979 2004-05-05 20:04:14Z njl $
 */

/*
 * 6.1 : Mutual Exclusion and Synchronisation
 */

#include "acpi.h"

#include "opt_acpi.h"
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#if __FreeBSD_version >= 500000
#include <sys/lock.h>
#include <sys/mutex.h>
#endif

#define _COMPONENT	ACPI_OS_SERVICES
ACPI_MODULE_NAME("SYNCH")

MALLOC_DEFINE(M_ACPISEM, "acpisem", "ACPI semaphore");

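/*
 * Compatibility shims: on pre-5.0 kernels the semaphore state is protected
 * with splhigh()/splx() and msleep() is mapped to tsleep() (there is no
 * mutex to drop); on 5.x and later a per-semaphore mutex is used instead.
 */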
#if __FreeBSD_version < 500000
# define AS_LOCK(as)		s = splhigh()
# define AS_UNLOCK(as)		splx(s)
# define AS_LOCK_DECL		int s
# define msleep(a, b, c, d, e)	tsleep(a, c, d, e)
#else
# define AS_LOCK(as)		mtx_lock(&(as)->as_mtx)
# define AS_UNLOCK(as)		mtx_unlock(&(as)->as_mtx)
# define AS_LOCK_DECL
#endif

/*
 * Simple counting semaphore implemented using a mutex.  (Subsequently used
 * in the OSI code to implement a mutex.  Go figure.)
 */
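/*
 * as_units is the number of units currently available and as_maxunits the
 * ceiling set at creation time.  as_pendings counts threads sleeping on the
 * semaphore, as_resetting flags a reset in progress (only used by the
 * "#if 0" recovery code below) and as_timeouts counts consecutive timed-out
 * waits.
 */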
struct acpi_semaphore {
#if __FreeBSD_version >= 500000
    struct mtx	as_mtx;
#endif
    UINT32	as_units;
    UINT32	as_maxunits;
    UINT32	as_pendings;
    UINT32	as_resetting;
    UINT32	as_timeouts;
};

#ifndef ACPI_NO_SEMAPHORES
#ifndef ACPI_SEMAPHORES_MAX_PENDING
#define ACPI_SEMAPHORES_MAX_PENDING	4
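/* Waiters beyond this count fail immediately with AE_TIME in AcpiOsWaitSemaphore(). */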
#endif
static int	acpi_semaphore_debug = 0;
TUNABLE_INT("debug.acpi_semaphore_debug", &acpi_semaphore_debug);
SYSCTL_DECL(_debug_acpi);
SYSCTL_INT(_debug_acpi, OID_AUTO, semaphore_debug, CTLFLAG_RW,
	   &acpi_semaphore_debug, 0, "Enable ACPI semaphore debug messages");
#endif /* !ACPI_NO_SEMAPHORES */

ACPI_STATUS
AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
    ACPI_HANDLE *OutHandle)
{
#ifndef ACPI_NO_SEMAPHORES
    struct acpi_semaphore	*as;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (OutHandle == NULL)
	return_ACPI_STATUS (AE_BAD_PARAMETER);
    if (InitialUnits > MaxUnits)
	return_ACPI_STATUS (AE_BAD_PARAMETER);

    if ((as = malloc(sizeof(*as), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
	return_ACPI_STATUS (AE_NO_MEMORY);

#if __FreeBSD_version >= 500000
    mtx_init(&as->as_mtx, "ACPI semaphore", NULL, MTX_DEF);
#endif
    as->as_units = InitialUnits;
    as->as_maxunits = MaxUnits;
    as->as_pendings = as->as_resetting = as->as_timeouts = 0;

    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	"created semaphore %p max %d, initial %d\n",
	as, MaxUnits, InitialUnits));

    *OutHandle = (ACPI_HANDLE)as;
#else
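    /* Semaphores are compiled out; hand back an arbitrary non-NULL handle. */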
    *OutHandle = (ACPI_HANDLE)OutHandle;
#endif /* !ACPI_NO_SEMAPHORES */

    return_ACPI_STATUS (AE_OK);
}

ACPI_STATUS
AcpiOsDeleteSemaphore(ACPI_HANDLE Handle)
{
#ifndef ACPI_NO_SEMAPHORES
    struct acpi_semaphore *as = (struct acpi_semaphore *)Handle;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "destroyed semaphore %p\n", as));
#if __FreeBSD_version >= 500000
    mtx_destroy(&as->as_mtx);
#endif
    free(Handle, M_ACPISEM);
#endif /* !ACPI_NO_SEMAPHORES */

    return_ACPI_STATUS (AE_OK);
}

/*
 * Wait for units from a semaphore.  If we are woken up before the timeout
 * expires without enough units being available, the remaining timeout is
 * recomputed with getmicrotime() before sleeping again.
 */
ACPI_STATUS
AcpiOsWaitSemaphore(ACPI_HANDLE Handle, UINT32 Units, UINT16 Timeout)
{
#ifndef ACPI_NO_SEMAPHORES
    ACPI_STATUS			result;
    struct acpi_semaphore	*as = (struct acpi_semaphore *)Handle;
    int				rv, tmo;
    struct timeval		timeouttv, currenttv, timelefttv;
    AS_LOCK_DECL;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (as == NULL)
	return_ACPI_STATUS (AE_BAD_PARAMETER);

    if (cold)
	return_ACPI_STATUS (AE_OK);

#if 0
    if (as->as_units < Units && as->as_timeouts > 10) {
	printf("%s: semaphore %p too many timeouts, resetting\n", __func__, as);
	AS_LOCK(as);
	as->as_units = as->as_maxunits;
	if (as->as_pendings)
	    as->as_resetting = 1;
	as->as_timeouts = 0;
	wakeup(as);
	AS_UNLOCK(as);
	return_ACPI_STATUS (AE_TIME);
    }

    if (as->as_resetting)
	return_ACPI_STATUS (AE_TIME);
#endif

    /* a timeout of ACPI_WAIT_FOREVER means "forever" */
    if (Timeout == ACPI_WAIT_FOREVER) {
	tmo = 0;
	timeouttv.tv_sec = ((0xffff/1000) + 1);	/* cf. ACPI spec */
	timeouttv.tv_usec = 0;
    } else {
	/* compute timeout using microseconds per tick */
	tmo = (Timeout * 1000) / (1000000 / hz);
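	/* e.g. with hz = 100, a 50ms timeout gives 50000us / 10000us-per-tick = 5 ticks */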
	if (tmo <= 0)
	    tmo = 1;
	timeouttv.tv_sec  = Timeout / 1000;
	timeouttv.tv_usec = (Timeout % 1000) * 1000;
    }

    /* convert the relative timeout into an absolute deadline */
    getmicrotime(&currenttv);
    timevaladd(&timeouttv, &currenttv);

    AS_LOCK(as);
    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	"get %d units from semaphore %p (has %d), timeout %d\n",
	Units, as, as->as_units, Timeout));
    for (;;) {
	if (as->as_maxunits == ACPI_NO_UNIT_LIMIT) {
	    result = AE_OK;
	    break;
	}
	if (as->as_units >= Units) {
	    as->as_units -= Units;
	    result = AE_OK;
	    break;
	}

	/* limit the number of pending threads */
	if (as->as_pendings >= ACPI_SEMAPHORES_MAX_PENDING) {
	    result = AE_TIME;
	    break;
	}

	/* if a timeout of zero is specified, return immediately */
	if (Timeout == 0) {
	    result = AE_TIME;
	    break;
	}

#if __FreeBSD_version >= 500000
	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	    "semaphore blocked, calling msleep(%p, %p, %d, \"acsem\", %d)\n",
	    as, &as->as_mtx, PCATCH, tmo));
#endif

	as->as_pendings++;

	if (acpi_semaphore_debug) {
	    printf("%s: Sleep %d, pending %d, semaphore %p, thread %d\n",
		__func__, Timeout, as->as_pendings, as, AcpiOsGetThreadId());
	}

	rv = msleep(as, &as->as_mtx, PCATCH, "acsem", tmo);

	as->as_pendings--;

#if 0
	if (as->as_resetting) {
	    /* semaphore reset, return immediately */
	    if (as->as_pendings == 0) {
		as->as_resetting = 0;
	    }
	    result = AE_TIME;
	    break;
	}
#endif

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "msleep(%d) returned %d\n", tmo, rv));
	if (rv == EWOULDBLOCK) {
	    result = AE_TIME;
	    break;
	}

	/* check whether we have already waited long enough */
	timelefttv = timeouttv;
	getmicrotime(&currenttv);
	timevalsub(&timelefttv, &currenttv);
	if (timelefttv.tv_sec < 0) {
	    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "await semaphore %p timeout\n",
		as));
	    result = AE_TIME;
	    break;
	}

	/* adjust timeout for the next sleep */
	tmo = (timelefttv.tv_sec * 1000000 + timelefttv.tv_usec) /
	    (1000000 / hz);
	if (tmo <= 0)
	    tmo = 1;

	if (acpi_semaphore_debug) {
	    printf("%s: Wakeup timeleft(%lu, %lu), tmo %u, sem %p, thread %d\n",
		__func__, timelefttv.tv_sec, timelefttv.tv_usec, tmo, as,
		AcpiOsGetThreadId());
	}
    }

    if (acpi_semaphore_debug) {
	if (result == AE_TIME && Timeout > 0) {
	    printf("%s: Timeout %d, pending %d, semaphore %p\n",
		__func__, Timeout, as->as_pendings, as);
	}
	if (result == AE_OK && (as->as_timeouts > 0 || as->as_pendings > 0)) {
	    printf("%s: Acquire %d, units %d, pending %d, sem %p, thread %d\n",
		__func__, Units, as->as_units, as->as_pendings, as,
		AcpiOsGetThreadId());
	}
    }

    if (result == AE_TIME)
	as->as_timeouts++;
    else
	as->as_timeouts = 0;

    AS_UNLOCK(as);
    return_ACPI_STATUS (result);
#else
    return_ACPI_STATUS (AE_OK);
#endif /* !ACPI_NO_SEMAPHORES */
}

ACPI_STATUS
AcpiOsSignalSemaphore(ACPI_HANDLE Handle, UINT32 Units)
{
#ifndef ACPI_NO_SEMAPHORES
    struct acpi_semaphore	*as = (struct acpi_semaphore *)Handle;
    AS_LOCK_DECL;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (as == NULL)
	return_ACPI_STATUS(AE_BAD_PARAMETER);

    AS_LOCK(as);
    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	"return %d units to semaphore %p (has %d)\n",
	Units, as, as->as_units));
    if (as->as_maxunits != ACPI_NO_UNIT_LIMIT) {
	as->as_units += Units;
	if (as->as_units > as->as_maxunits)
	    as->as_units = as->as_maxunits;
    }

    if (acpi_semaphore_debug && (as->as_timeouts > 0 || as->as_pendings > 0)) {
	printf("%s: Release %d, units %d, pending %d, semaphore %p, thread %d\n",
	    __func__, Units, as->as_units, as->as_pendings, as, AcpiOsGetThreadId());
    }

    wakeup(as);
    AS_UNLOCK(as);
#endif /* !ACPI_NO_SEMAPHORES */

    return_ACPI_STATUS (AE_OK);
}

ACPI_STATUS
AcpiOsCreateLock (ACPI_HANDLE *OutHandle)
{
    struct mtx *m;

    if (OutHandle == NULL)
	return (AE_BAD_PARAMETER);
    m = malloc(sizeof(*m), M_ACPISEM, M_NOWAIT | M_ZERO);
    if (m == NULL)
	return (AE_NO_MEMORY);

    mtx_init(m, "acpica subsystem lock", NULL, MTX_DEF);
    *OutHandle = (ACPI_HANDLE)m;
    return (AE_OK);
}

void
AcpiOsDeleteLock (ACPI_HANDLE Handle)
{
    struct mtx *m = (struct mtx *)Handle;

    if (Handle == NULL)
        return;
    mtx_destroy(m);
}

/*
 * The Flags parameter seems to state whether or not the caller is an ISR
 * (and thus can't block) but since we have ithreads, we don't worry
 * about potentially blocking.
 */
void
AcpiOsAcquireLock (ACPI_HANDLE Handle, UINT32 Flags)
{
    struct mtx *m = (struct mtx *)Handle;

    if (Handle == NULL)
	return;
    mtx_lock(m);
}

void
AcpiOsReleaseLock (ACPI_HANDLE Handle, UINT32 Flags)
{
    struct mtx *m = (struct mtx *)Handle;

    if (Handle == NULL)
	return;
    mtx_unlock(m);
}

/* Section 5.2.9.1:  global lock acquire/release functions */
#define GL_ACQUIRED	(-1)
#define GL_BUSY		0
#define GL_BIT_PENDING	0x1
#define GL_BIT_OWNED	0x2
#define GL_BIT_MASK	(GL_BIT_PENDING | GL_BIT_OWNED)

/*
 * Acquire the global lock.  If busy, set the pending bit.  The caller
 * will wait for notification from the BIOS that the lock is available
 * and then attempt to acquire it again.
 */
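/*
 * A sketch of the bit manipulation below (bit 0 = pending, bit 1 = owned,
 * matching the ACPI global lock layout): when the lock is free (old = 0) the
 * new value is just GL_BIT_OWNED and we return GL_ACQUIRED; when it is
 * already owned (old = 2 or 3) the owned bit is shifted down into the
 * pending bit, giving new = 3, and we return GL_BUSY.
 */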
int
acpi_acquire_global_lock(uint32_t *lock)
{
	uint32_t new, old;

	do {
		old = *lock;
		new = (((old & ~GL_BIT_MASK) | GL_BIT_OWNED) |
			((old >> 1) & GL_BIT_PENDING));
	} while (atomic_cmpset_acq_int(lock, old, new) == 0);

	return ((new < GL_BIT_MASK) ? GL_ACQUIRED : GL_BUSY);
}

/*
 * Release the global lock, returning whether there is a waiter pending.
 * If the BIOS set the pending bit, OSPM must notify the BIOS when it
 * releases the lock.
 */
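/*
 * A nonzero return means the pending bit was set while we held the lock; the
 * caller is then expected to signal the release to the firmware (per the
 * ACPI spec, by setting GBL_RLS in the PM1 control register).
 */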
int
acpi_release_global_lock(uint32_t *lock)
{
	uint32_t new, old;

	do {
		old = *lock;
		new = old & ~GL_BIT_MASK;
	} while (atomic_cmpset_rel_int(lock, old, new) == 0);

	return (old & GL_BIT_PENDING);
}
436