/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$FreeBSD: head/sys/dev/acpica/Osd/OsdSynch.c 130695 2004-06-18 17:58:11Z njl $
 */

/*
 * 6.1 : Mutual Exclusion and Synchronisation
 */

#include "acpi.h"

#include "opt_acpi.h"
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#define _COMPONENT	ACPI_OS_SERVICES
ACPI_MODULE_NAME("SYNCH")

MALLOC_DEFINE(M_ACPISEM, "acpisem", "ACPI semaphore");

#define AS_LOCK(as)	mtx_lock(&(as)->as_mtx)
#define AS_UNLOCK(as)	mtx_unlock(&(as)->as_mtx)

/*
 * Simple counting semaphore implemented using a mutex.  (Subsequently used
 * in the OSI code to implement a mutex.  Go figure.)
 */
struct acpi_semaphore {
    struct mtx	as_mtx;
    UINT32	as_units;
    UINT32	as_maxunits;
    UINT32	as_pendings;
    UINT32	as_resetting;
    UINT32	as_timeouts;
};
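
/*
 * Illustrative usage sketch (compiled out): how ACPI CA typically drives
 * the semaphore interface implemented below.  The function and variable
 * names here are hypothetical and the values only demonstrate the calling
 * convention; a binary semaphore (one unit) is created, waited on with a
 * 100 ms timeout, signalled and finally destroyed.
 */
#if 0
static void
acpi_semaphore_usage_sketch(void)
{
    ACPI_HANDLE	sem;

    if (ACPI_FAILURE(AcpiOsCreateSemaphore(1, 1, &sem)))
	return;
    if (AcpiOsWaitSemaphore(sem, 1, 100) == AE_OK) {
	/* The unit is held; use the protected resource here. */
	AcpiOsSignalSemaphore(sem, 1);
    }
    AcpiOsDeleteSemaphore(sem);
}
#endif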

#ifndef ACPI_NO_SEMAPHORES
#ifndef ACPI_SEMAPHORES_MAX_PENDING
#define ACPI_SEMAPHORES_MAX_PENDING	4
#endif
static int	acpi_semaphore_debug = 0;
TUNABLE_INT("debug.acpi_semaphore_debug", &acpi_semaphore_debug);
SYSCTL_DECL(_debug_acpi);
SYSCTL_INT(_debug_acpi, OID_AUTO, semaphore_debug, CTLFLAG_RW,
	   &acpi_semaphore_debug, 0, "Enable ACPI semaphore debug messages");
#endif /* !ACPI_NO_SEMAPHORES */

ACPI_STATUS
AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
    ACPI_HANDLE *OutHandle)
{
#ifndef ACPI_NO_SEMAPHORES
    struct acpi_semaphore	*as;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (OutHandle == NULL)
	return_ACPI_STATUS (AE_BAD_PARAMETER);
    if (InitialUnits > MaxUnits)
	return_ACPI_STATUS (AE_BAD_PARAMETER);

    if ((as = malloc(sizeof(*as), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
	return_ACPI_STATUS (AE_NO_MEMORY);

    mtx_init(&as->as_mtx, "ACPI semaphore", NULL, MTX_DEF);
    as->as_units = InitialUnits;
    as->as_maxunits = MaxUnits;
    as->as_pendings = as->as_resetting = as->as_timeouts = 0;

    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	"created semaphore %p max %d, initial %d\n",
	as, MaxUnits, InitialUnits));

    *OutHandle = (ACPI_HANDLE)as;
#else
    /* Semaphores disabled: hand back a dummy, non-NULL handle. */
    *OutHandle = (ACPI_HANDLE)OutHandle;
#endif /* !ACPI_NO_SEMAPHORES */

    return_ACPI_STATUS (AE_OK);
}

ACPI_STATUS
AcpiOsDeleteSemaphore(ACPI_HANDLE Handle)
{
#ifndef ACPI_NO_SEMAPHORES
    struct acpi_semaphore *as = (struct acpi_semaphore *)Handle;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (as == NULL)
	return_ACPI_STATUS (AE_BAD_PARAMETER);

    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "destroyed semaphore %p\n", as));
    mtx_destroy(&as->as_mtx);
    free(Handle, M_ACPISEM);
#endif /* !ACPI_NO_SEMAPHORES */

    return_ACPI_STATUS (AE_OK);
}

/*
 * This implementation has a bug, in that it has to stall for the entire
 * timeout before it will return AE_TIME.  Note that the remaining time
 * is recomputed with getmicrotime() after each wakeup, so a thread that
 * is woken without obtaining its units does not restart the full timeout.
 */
ACPI_STATUS
AcpiOsWaitSemaphore(ACPI_HANDLE Handle, UINT32 Units, UINT16 Timeout)
{
#ifndef ACPI_NO_SEMAPHORES
    ACPI_STATUS			result;
    struct acpi_semaphore	*as = (struct acpi_semaphore *)Handle;
    int				rv, tmo;
    struct timeval		timeouttv, currenttv, timelefttv;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (as == NULL)
	return_ACPI_STATUS (AE_BAD_PARAMETER);

    if (cold)
	return_ACPI_STATUS (AE_OK);

#if 0
    if (as->as_units < Units && as->as_timeouts > 10) {
	printf("%s: semaphore %p too many timeouts, resetting\n", __func__, as);
	AS_LOCK(as);
	as->as_units = as->as_maxunits;
	if (as->as_pendings)
	    as->as_resetting = 1;
	as->as_timeouts = 0;
	wakeup(as);
	AS_UNLOCK(as);
	return_ACPI_STATUS (AE_TIME);
    }

    if (as->as_resetting)
	return_ACPI_STATUS (AE_TIME);
#endif

    /* a timeout of ACPI_WAIT_FOREVER means "forever" */
    if (Timeout == ACPI_WAIT_FOREVER) {
	tmo = 0;
	timeouttv.tv_sec = ((0xffff/1000) + 1);	/* cf. ACPI spec */
	timeouttv.tv_usec = 0;
    } else {
	/* compute timeout using microseconds per tick */
	tmo = (Timeout * 1000) / (1000000 / hz);
	if (tmo <= 0)
	    tmo = 1;
	timeouttv.tv_sec  = Timeout / 1000;
	timeouttv.tv_usec = (Timeout % 1000) * 1000;
    }
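
    /*
     * Worked example of the conversion above (numbers are illustrative
     * only): with hz = 100 there are 1000000 / hz = 10000 microseconds
     * per tick, so a 100 ms timeout becomes (100 * 1000) / 10000 = 10
     * ticks, and any timeout shorter than one tick is rounded up to a
     * single tick.
     */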

    /* convert the relative timeout into an absolute expiry time */
    getmicrotime(&currenttv);
    timevaladd(&timeouttv, &currenttv);

    AS_LOCK(as);
    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	"get %d units from semaphore %p (has %d), timeout %d\n",
	Units, as, as->as_units, Timeout));
    for (;;) {
	if (as->as_maxunits == ACPI_NO_UNIT_LIMIT) {
	    result = AE_OK;
	    break;
	}
	if (as->as_units >= Units) {
	    as->as_units -= Units;
	    result = AE_OK;
	    break;
	}

	/* limit the number of pending threads */
	if (as->as_pendings >= ACPI_SEMAPHORES_MAX_PENDING) {
	    result = AE_TIME;
	    break;
	}

	/* if a timeout of zero was specified, return immediately */
	if (Timeout == 0) {
	    result = AE_TIME;
	    break;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	    "semaphore blocked, calling msleep(%p, %p, %d, \"acsem\", %d)\n",
	    as, &as->as_mtx, PCATCH, tmo));

	as->as_pendings++;

	if (acpi_semaphore_debug) {
	    printf("%s: Sleep %d, pending %d, semaphore %p, thread %d\n",
		__func__, Timeout, as->as_pendings, as, AcpiOsGetThreadId());
	}

	rv = msleep(as, &as->as_mtx, PCATCH, "acsem", tmo);

	as->as_pendings--;

#if 0
	if (as->as_resetting) {
	    /* semaphore reset, return immediately */
	    if (as->as_pendings == 0) {
		as->as_resetting = 0;
	    }
	    result = AE_TIME;
	    break;
	}
#endif

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "msleep(%d) returned %d\n", tmo, rv));
	if (rv == EWOULDBLOCK) {
	    result = AE_TIME;
	    break;
	}

	/* check whether we have already waited long enough */
	timelefttv = timeouttv;
	getmicrotime(&currenttv);
	timevalsub(&timelefttv, &currenttv);
	if (timelefttv.tv_sec < 0) {
	    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "await semaphore %p timeout\n",
		as));
	    result = AE_TIME;
	    break;
	}

	/* adjust the timeout for the next sleep */
	tmo = (timelefttv.tv_sec * 1000000 + timelefttv.tv_usec) /
	    (1000000 / hz);
	if (tmo <= 0)
	    tmo = 1;

	if (acpi_semaphore_debug) {
	    printf("%s: Wakeup timeleft(%lu, %lu), tmo %u, sem %p, thread %d\n",
		__func__, timelefttv.tv_sec, timelefttv.tv_usec, tmo, as,
		AcpiOsGetThreadId());
	}
    }

    if (acpi_semaphore_debug) {
	if (result == AE_TIME && Timeout > 0) {
	    printf("%s: Timeout %d, pending %d, semaphore %p\n",
		__func__, Timeout, as->as_pendings, as);
	}
	if (result == AE_OK && (as->as_timeouts > 0 || as->as_pendings > 0)) {
	    printf("%s: Acquire %d, units %d, pending %d, sem %p, thread %d\n",
		__func__, Units, as->as_units, as->as_pendings, as,
		AcpiOsGetThreadId());
	}
    }

    if (result == AE_TIME)
	as->as_timeouts++;
    else
	as->as_timeouts = 0;

    AS_UNLOCK(as);
    return_ACPI_STATUS (result);
#else
    return_ACPI_STATUS (AE_OK);
#endif /* !ACPI_NO_SEMAPHORES */
}

ACPI_STATUS
AcpiOsSignalSemaphore(ACPI_HANDLE Handle, UINT32 Units)
{
#ifndef ACPI_NO_SEMAPHORES
    struct acpi_semaphore	*as = (struct acpi_semaphore *)Handle;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (as == NULL)
	return_ACPI_STATUS(AE_BAD_PARAMETER);

    AS_LOCK(as);
    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	"return %d units to semaphore %p (has %d)\n",
	Units, as, as->as_units));
    if (as->as_maxunits != ACPI_NO_UNIT_LIMIT) {
	as->as_units += Units;
	if (as->as_units > as->as_maxunits)
	    as->as_units = as->as_maxunits;
    }

    if (acpi_semaphore_debug && (as->as_timeouts > 0 || as->as_pendings > 0)) {
	printf("%s: Release %d, units %d, pending %d, semaphore %p, thread %d\n",
	    __func__, Units, as->as_units, as->as_pendings, as, AcpiOsGetThreadId());
    }

    wakeup(as);
    AS_UNLOCK(as);
#endif /* !ACPI_NO_SEMAPHORES */

    return_ACPI_STATUS (AE_OK);
}

ACPI_STATUS
AcpiOsCreateLock (ACPI_HANDLE *OutHandle)
{
    struct mtx *m;

    if (OutHandle == NULL)
	return (AE_BAD_PARAMETER);
    m = malloc(sizeof(*m), M_ACPISEM, M_NOWAIT | M_ZERO);
    if (m == NULL)
	return (AE_NO_MEMORY);

    mtx_init(m, "acpica subsystem lock", NULL, MTX_DEF);
    *OutHandle = (ACPI_HANDLE)m;
    return (AE_OK);
}

void
AcpiOsDeleteLock (ACPI_HANDLE Handle)
{
    struct mtx *m = (struct mtx *)Handle;

    if (Handle == NULL)
	return;
    mtx_destroy(m);
    free(m, M_ACPISEM);
}

/*
 * The Flags parameter seems to state whether or not the caller is an ISR
 * (and thus can't block) but since we have ithreads, we don't worry
 * about potentially blocking.
 */
void
AcpiOsAcquireLock (ACPI_HANDLE Handle, UINT32 Flags)
{
    struct mtx *m = (struct mtx *)Handle;

    if (Handle == NULL)
	return;
    mtx_lock(m);
}

void
AcpiOsReleaseLock (ACPI_HANDLE Handle, UINT32 Flags)
{
    struct mtx *m = (struct mtx *)Handle;

    if (Handle == NULL)
	return;
    mtx_unlock(m);
}
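
/*
 * Illustrative usage sketch (compiled out): the expected pairing of the
 * lock interface above.  The function name below is hypothetical; ACPI CA
 * passes the handle returned by AcpiOsCreateLock back into the acquire,
 * release and delete calls, and the Flags argument is ignored by this
 * implementation.
 */
#if 0
static void
acpi_lock_usage_sketch(void)
{
    ACPI_HANDLE	lock;

    if (AcpiOsCreateLock(&lock) != AE_OK)
	return;
    AcpiOsAcquireLock(lock, 0);
    /* Short critical section protected by the mutex. */
    AcpiOsReleaseLock(lock, 0);
    AcpiOsDeleteLock(lock);
}
#endif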

/* Section 5.2.9.1:  global lock acquire/release functions */
#define GL_ACQUIRED	(-1)
#define GL_BUSY		0
#define GL_BIT_PENDING	0x1
#define GL_BIT_OWNED	0x2
#define GL_BIT_MASK	(GL_BIT_PENDING | GL_BIT_OWNED)

/*
 * Acquire the global lock.  If busy, set the pending bit.  The caller
 * will wait for notification from the BIOS that the lock is available
 * and then attempt to acquire it again.
 */
int
acpi_acquire_global_lock(uint32_t *lock)
{
	uint32_t new, old;

	do {
		old = *lock;
		new = ((old & ~GL_BIT_MASK) | GL_BIT_OWNED) |
			((old >> 1) & GL_BIT_PENDING);
	} while (atomic_cmpset_acq_int(lock, old, new) == 0);

	return ((new < GL_BIT_MASK) ? GL_ACQUIRED : GL_BUSY);
}
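
/*
 * Worked example of the transitions above: with the lock free (*lock == 0)
 * the new value becomes GL_BIT_OWNED (2) and GL_ACQUIRED is returned.  With
 * the lock already owned (*lock == GL_BIT_OWNED), the owned bit is shifted
 * down into the pending bit, the new value becomes 3 and GL_BUSY is
 * returned, so the caller must wait for the BIOS to signal that the lock
 * has been released and then retry.
 */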

/*
 * Release the global lock, returning whether there is a waiter pending.
 * If the BIOS set the pending bit, OSPM must notify the BIOS when it
 * releases the lock.
 */
int
acpi_release_global_lock(uint32_t *lock)
{
	uint32_t new, old;

	do {
		old = *lock;
		new = old & ~GL_BIT_MASK;
	} while (atomic_cmpset_rel_int(lock, old, new) == 0);

	return (old & GL_BIT_PENDING);
}
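
/*
 * Worked example of the release path above: if the BIOS set the pending
 * bit while the lock was held (*lock == (GL_BIT_OWNED | GL_BIT_PENDING)),
 * both bits are cleared and GL_BIT_PENDING is returned, telling the caller
 * that the BIOS must be notified (per the ACPI global lock protocol) that
 * the lock has been released; a return of zero means no waiter was pending
 * and no notification is required.
 */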