/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * Copyright (c) 2007-2009 Jung-uk Kim <jkim@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * 6.1 : Mutual Exclusion and Synchronisation
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/acpica/Osd/OsdSynch.c 193750 2009-06-08 20:07:16Z jkim $");

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>

#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>

#define	_COMPONENT	ACPI_OS_SERVICES
ACPI_MODULE_NAME("SYNCH")

MALLOC_DEFINE(M_ACPISEM, "acpisem", "ACPI semaphore");

/*
 * Convert milliseconds to ticks.
 */
static int
timeout2hz(UINT16 Timeout)
{
	struct timeval		tv;

	tv.tv_sec = (time_t)(Timeout / 1000);
	tv.tv_usec = (suseconds_t)(Timeout % 1000) * 1000;

	return (tvtohz(&tv));
}
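
/*
 * For example, with Timeout == 1500 the conversion yields
 * tv = { .tv_sec = 1, .tv_usec = 500000 }; with hz == 1000, tvtohz()
 * then returns roughly 1500 ticks.  tvtohz() rounds up and returns at
 * least 1, so even a 1 ms timeout sleeps for a full tick.
 */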

/*
 * ACPI_SEMAPHORE
 */
struct acpi_sema {
	struct mtx	as_lock;
	char		as_name[32];
	struct cv	as_cv;
	UINT32		as_maxunits;
	UINT32		as_units;
	int		as_waiters;
	int		as_reset;
};

ACPI_STATUS
AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
    ACPI_SEMAPHORE *OutHandle)
{
	struct acpi_sema	*as;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (OutHandle == NULL || MaxUnits == 0 || InitialUnits > MaxUnits)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	if ((as = malloc(sizeof(*as), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
		return_ACPI_STATUS (AE_NO_MEMORY);

	snprintf(as->as_name, sizeof(as->as_name), "ACPI sema (%p)", as);
	mtx_init(&as->as_lock, as->as_name, NULL, MTX_DEF);
	cv_init(&as->as_cv, as->as_name);
	as->as_maxunits = MaxUnits;
	as->as_units = InitialUnits;

	*OutHandle = (ACPI_SEMAPHORE)as;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "created %s, max %u, initial %u\n",
	    as->as_name, MaxUnits, InitialUnits));

	return_ACPI_STATUS (AE_OK);
}

ACPI_STATUS
AcpiOsDeleteSemaphore(ACPI_SEMAPHORE Handle)
{
	struct acpi_sema	*as = (struct acpi_sema *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (as == NULL)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	mtx_lock(&as->as_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "delete %s\n", as->as_name));

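	/*
	 * If anyone is still sleeping on this semaphore, wake them all up,
	 * flag the reset and wait until every waiter has noticed it before
	 * tearing the semaphore down; give up if interrupted while waiting
	 * for them to drain.
	 */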
	if (as->as_waiters > 0) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "reset %s, units %u, waiters %d\n",
		    as->as_name, as->as_units, as->as_waiters));
		as->as_reset = 1;
		cv_broadcast(&as->as_cv);
		while (as->as_waiters > 0) {
			if (mtx_sleep(&as->as_reset, &as->as_lock,
			    PCATCH, "acsrst", hz) == EINTR) {
				ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				    "failed to reset %s, waiters %d\n",
				    as->as_name, as->as_waiters));
				mtx_unlock(&as->as_lock);
				return_ACPI_STATUS (AE_ERROR);
			}
			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			    "wait %s, units %u, waiters %d\n",
			    as->as_name, as->as_units, as->as_waiters));
		}
	}

	mtx_unlock(&as->as_lock);

	mtx_destroy(&as->as_lock);
	cv_destroy(&as->as_cv);
	free(as, M_ACPISEM);

	return_ACPI_STATUS (AE_OK);
}

#define	ACPISEM_AVAIL(s, u)	((s)->as_units >= (u))

ACPI_STATUS
AcpiOsWaitSemaphore(ACPI_SEMAPHORE Handle, UINT32 Units, UINT16 Timeout)
{
	struct acpi_sema	*as = (struct acpi_sema *)Handle;
	int			error, prevtick, slptick, tmo;
	ACPI_STATUS		status = AE_OK;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (as == NULL || Units == 0)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	mtx_lock(&as->as_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	    "get %u unit(s) from %s, units %u, waiters %d, timeout %u\n",
	    Units, as->as_name, as->as_units, as->as_waiters, Timeout));

	if (as->as_maxunits != ACPI_NO_UNIT_LIMIT && as->as_maxunits < Units) {
		mtx_unlock(&as->as_lock);
		return_ACPI_STATUS (AE_LIMIT);
	}

	switch (Timeout) {
	case ACPI_DO_NOT_WAIT:
		if (!ACPISEM_AVAIL(as, Units))
			status = AE_TIME;
		break;
	case ACPI_WAIT_FOREVER:
		while (!ACPISEM_AVAIL(as, Units)) {
			as->as_waiters++;
			error = cv_wait_sig(&as->as_cv, &as->as_lock);
			as->as_waiters--;
			if (error == EINTR || as->as_reset) {
				status = AE_ERROR;
				break;
			}
			if (ACPISEM_AVAIL(as, Units))
				break;
		}
		break;
	default:
		tmo = timeout2hz(Timeout);
		while (!ACPISEM_AVAIL(as, Units)) {
			prevtick = ticks;
			as->as_waiters++;
			error = cv_timedwait_sig(&as->as_cv, &as->as_lock, tmo);
			as->as_waiters--;
			if (error == EINTR || as->as_reset) {
				status = AE_ERROR;
				break;
			}
			if (ACPISEM_AVAIL(as, Units))
				break;
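			/*
			 * Woken up but still short of units: charge the time
			 * actually slept against the caller's timeout and
			 * wait again with the remainder.  A negative
			 * difference (the tick counter wrapped) is treated
			 * as an expired timeout.
			 */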
			slptick = ticks - prevtick;
			if (slptick >= tmo || slptick < 0) {
				status = AE_TIME;
				break;
			}
			tmo -= slptick;
		}
	}
	if (status == AE_OK)
		as->as_units -= Units;

	mtx_unlock(&as->as_lock);

	return_ACPI_STATUS (status);
}

ACPI_STATUS
AcpiOsSignalSemaphore(ACPI_SEMAPHORE Handle, UINT32 Units)
{
	struct acpi_sema	*as = (struct acpi_sema *)Handle;
	UINT32			i;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (as == NULL || Units == 0)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	mtx_lock(&as->as_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	    "return %u units to %s, units %u, waiters %d\n",
	    Units, as->as_name, as->as_units, as->as_waiters));

	if (as->as_maxunits != ACPI_NO_UNIT_LIMIT &&
	    (as->as_maxunits < Units ||
	    as->as_maxunits - Units < as->as_units)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "exceeded max units %u\n", as->as_maxunits));
		mtx_unlock(&as->as_lock);
		return_ACPI_STATUS (AE_LIMIT);
	}

	as->as_units += Units;
	if (as->as_waiters > 0 && ACPISEM_AVAIL(as, Units))
		for (i = 0; i < Units; i++)
			cv_signal(&as->as_cv);

	mtx_unlock(&as->as_lock);

	return_ACPI_STATUS (AE_OK);
}

#undef ACPISEM_AVAIL
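
/*
 * A caller in ACPICA uses this interface roughly as follows (illustrative
 * sketch only, error handling omitted; the 100 ms timeout is arbitrary):
 *
 *	ACPI_SEMAPHORE	sem;
 *
 *	AcpiOsCreateSemaphore(1, 1, &sem);
 *	if (ACPI_SUCCESS(AcpiOsWaitSemaphore(sem, 1, 100))) {
 *		...do work while holding the unit...
 *		AcpiOsSignalSemaphore(sem, 1);
 *	}
 *	AcpiOsDeleteSemaphore(sem);
 */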

/*
 * ACPI_MUTEX
 */
struct acpi_mutex {
	struct mtx	am_lock;
	char		am_name[32];
	struct thread	*am_owner;
	int		am_nested;
	int		am_waiters;
	int		am_reset;
};

ACPI_STATUS
AcpiOsCreateMutex(ACPI_MUTEX *OutHandle)
{
	struct acpi_mutex	*am;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (OutHandle == NULL)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	if ((am = malloc(sizeof(*am), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
		return_ACPI_STATUS (AE_NO_MEMORY);

	snprintf(am->am_name, sizeof(am->am_name), "ACPI mutex (%p)", am);
	mtx_init(&am->am_lock, am->am_name, NULL, MTX_DEF);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "created %s\n", am->am_name));

	*OutHandle = (ACPI_MUTEX)am;

	return_ACPI_STATUS (AE_OK);
}

#define	ACPIMTX_AVAIL(m)	((m)->am_owner == NULL)
#define	ACPIMTX_OWNED(m)	((m)->am_owner == curthread)

void
AcpiOsDeleteMutex(ACPI_MUTEX Handle)
{
	struct acpi_mutex	*am = (struct acpi_mutex *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (am == NULL) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "cannot delete null mutex\n"));
		return_VOID;
	}

	mtx_lock(&am->am_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "delete %s\n", am->am_name));

	if (am->am_waiters > 0) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "reset %s, owner %p\n", am->am_name, am->am_owner));
		am->am_reset = 1;
		wakeup(am);
		while (am->am_waiters > 0) {
			if (mtx_sleep(&am->am_reset, &am->am_lock,
			    PCATCH, "acmrst", hz) == EINTR) {
				ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				    "failed to reset %s, waiters %d\n",
				    am->am_name, am->am_waiters));
				mtx_unlock(&am->am_lock);
				return_VOID;
			}
			if (ACPIMTX_AVAIL(am))
				ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				    "wait %s, waiters %d\n",
				    am->am_name, am->am_waiters));
			else
				ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				    "wait %s, owner %p, waiters %d\n",
				    am->am_name, am->am_owner, am->am_waiters));
		}
	}

	mtx_unlock(&am->am_lock);

	mtx_destroy(&am->am_lock);
	free(am, M_ACPISEM);
}
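
/*
 * The ACPI mutex is recursive for its owner: a thread that already holds
 * the mutex just bumps am_nested in AcpiOsAcquireMutex() below, and the
 * matching AcpiOsReleaseMutex() calls unwind that count before the mutex
 * is actually handed back to other waiters.
 */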

ACPI_STATUS
AcpiOsAcquireMutex(ACPI_MUTEX Handle, UINT16 Timeout)
{
	struct acpi_mutex	*am = (struct acpi_mutex *)Handle;
	int			error, prevtick, slptick, tmo;
	ACPI_STATUS		status = AE_OK;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (am == NULL)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	mtx_lock(&am->am_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "acquire %s\n", am->am_name));

	if (ACPIMTX_OWNED(am)) {
		am->am_nested++;
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "acquire nested %s, depth %d\n",
		    am->am_name, am->am_nested));
		mtx_unlock(&am->am_lock);
		return_ACPI_STATUS (AE_OK);
	}

	switch (Timeout) {
	case ACPI_DO_NOT_WAIT:
		if (!ACPIMTX_AVAIL(am))
			status = AE_TIME;
		break;
	case ACPI_WAIT_FOREVER:
		while (!ACPIMTX_AVAIL(am)) {
			am->am_waiters++;
			error = mtx_sleep(am, &am->am_lock, PCATCH, "acmtx", 0);
			am->am_waiters--;
			if (error == EINTR || am->am_reset) {
				status = AE_ERROR;
				break;
			}
			if (ACPIMTX_AVAIL(am))
				break;
		}
		break;
	default:
		tmo = timeout2hz(Timeout);
		while (!ACPIMTX_AVAIL(am)) {
			prevtick = ticks;
			am->am_waiters++;
			error = mtx_sleep(am, &am->am_lock, PCATCH,
			    "acmtx", tmo);
			am->am_waiters--;
			if (error == EINTR || am->am_reset) {
				status = AE_ERROR;
				break;
			}
			if (ACPIMTX_AVAIL(am))
				break;
			slptick = ticks - prevtick;
			if (slptick >= tmo || slptick < 0) {
				status = AE_TIME;
				break;
			}
			tmo -= slptick;
		}
	}
	if (status == AE_OK)
		am->am_owner = curthread;

	mtx_unlock(&am->am_lock);

	return_ACPI_STATUS (status);
}

void
AcpiOsReleaseMutex(ACPI_MUTEX Handle)
{
	struct acpi_mutex	*am = (struct acpi_mutex *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (am == NULL) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "cannot release null mutex\n"));
		return_VOID;
	}

	mtx_lock(&am->am_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "release %s\n", am->am_name));

	if (ACPIMTX_OWNED(am)) {
		if (am->am_nested > 0) {
			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			    "release nested %s, depth %d\n",
			    am->am_name, am->am_nested));
			am->am_nested--;
		} else
			am->am_owner = NULL;
	} else {
		if (ACPIMTX_AVAIL(am))
			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			    "release already available %s\n", am->am_name));
		else
			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			    "release unowned %s from %p, depth %d\n",
			    am->am_name, am->am_owner, am->am_nested));
	}
	if (am->am_waiters > 0 && ACPIMTX_AVAIL(am))
		wakeup_one(am);

	mtx_unlock(&am->am_lock);
}

#undef ACPIMTX_AVAIL
#undef ACPIMTX_OWNED

/*
 * ACPI_SPINLOCK
 */
struct acpi_spinlock {
	struct mtx	al_lock;
	char		al_name[32];
	int		al_nested;
};

ACPI_STATUS
AcpiOsCreateLock(ACPI_SPINLOCK *OutHandle)
{
	struct acpi_spinlock	*al;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (OutHandle == NULL)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	if ((al = malloc(sizeof(*al), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
		return_ACPI_STATUS (AE_NO_MEMORY);

#ifdef ACPI_DEBUG
	if (OutHandle == &AcpiGbl_GpeLock)
		snprintf(al->al_name, sizeof(al->al_name), "ACPI lock (GPE)");
	else if (OutHandle == &AcpiGbl_HardwareLock)
		snprintf(al->al_name, sizeof(al->al_name), "ACPI lock (HW)");
	else
#endif
	snprintf(al->al_name, sizeof(al->al_name), "ACPI lock (%p)", al);
	mtx_init(&al->al_lock, al->al_name, NULL, MTX_SPIN);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "created %s\n", al->al_name));

	*OutHandle = (ACPI_SPINLOCK)al;

	return_ACPI_STATUS (AE_OK);
}

void
AcpiOsDeleteLock(ACPI_SPINLOCK Handle)
{
	struct acpi_spinlock	*al = (struct acpi_spinlock *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (al == NULL) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "cannot delete null spinlock\n"));
		return_VOID;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "delete %s\n", al->al_name));

	mtx_destroy(&al->al_lock);
	free(al, M_ACPISEM);
}

ACPI_CPU_FLAGS
AcpiOsAcquireLock(ACPI_SPINLOCK Handle)
{
	struct acpi_spinlock	*al = (struct acpi_spinlock *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (al == NULL) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "cannot acquire null spinlock\n"));
		return (0);
	}

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "acquire %s\n", al->al_name));
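
	/*
	 * mtx_owned() is true only when the current thread already holds
	 * this spin mutex, so recursive acquisitions by the owner are
	 * simply counted in al_nested; AcpiOsReleaseLock() unwinds the
	 * count before the lock is actually dropped.
	 */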
	if (mtx_owned(&al->al_lock)) {
		al->al_nested++;
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "acquire nested %s, depth %d\n",
		    al->al_name, al->al_nested));
	} else
		mtx_lock_spin(&al->al_lock);

	return (0);
}

void
AcpiOsReleaseLock(ACPI_SPINLOCK Handle, ACPI_CPU_FLAGS Flags)
{
	struct acpi_spinlock	*al = (struct acpi_spinlock *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (al == NULL) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "cannot release null spinlock\n"));
		return_VOID;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "release %s\n", al->al_name));

	if (mtx_owned(&al->al_lock)) {
		if (al->al_nested > 0) {
			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			    "release nested %s, depth %d\n",
			    al->al_name, al->al_nested));
			al->al_nested--;
		} else
			mtx_unlock_spin(&al->al_lock);
	} else
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "cannot release unowned %s\n", al->al_name));
}

/* Section 5.2.10.1: global lock acquire/release functions */
#define	GL_ACQUIRED	(-1)
#define	GL_BUSY		0
#define	GL_BIT_PENDING	0x01
#define	GL_BIT_OWNED	0x02
#define	GL_BIT_MASK	(GL_BIT_PENDING | GL_BIT_OWNED)

/*
 * Acquire the global lock.  If busy, set the pending bit.  The caller
 * will wait for notification from the BIOS that the lock is available
 * and then attempt to acquire it again.
 */
int
acpi_acquire_global_lock(uint32_t *lock)
{
	uint32_t	new, old;

	do {
		old = *lock;
		new = ((old & ~GL_BIT_MASK) | GL_BIT_OWNED) |
			((old >> 1) & GL_BIT_PENDING);
	} while (atomic_cmpset_acq_int(lock, old, new) == 0);

	return ((new < GL_BIT_MASK) ? GL_ACQUIRED : GL_BUSY);
}
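
/*
 * Worked example of the acquire above:
 *
 *	*lock == 0x00 (free):		new == 0x02 (owned), GL_ACQUIRED
 *	*lock == 0x02 (owned):		new == 0x03 (owned | pending), GL_BUSY
 *	*lock == 0x03 (owned, pending):	new == 0x03, GL_BUSY
 *
 * The ((old >> 1) & GL_BIT_PENDING) term copies the owned bit into the
 * pending bit, so a contender automatically requests the release
 * notification described above.
 */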

/*
 * Release the global lock, returning whether there is a waiter pending.
 * If the BIOS set the pending bit, OSPM must notify the BIOS when it
 * releases the lock.
 */
int
acpi_release_global_lock(uint32_t *lock)
{
	uint32_t	new, old;

	do {
		old = *lock;
		new = old & ~GL_BIT_MASK;
	} while (atomic_cmpset_rel_int(lock, old, new) == 0);

	return (old & GL_BIT_PENDING);
}
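
/*
 * Worked example of the release above:
 *
 *	*lock == 0x03 (owned, pending):	new == 0x00, returns 1, so the
 *					caller must notify the BIOS that
 *					the lock has been released
 *	*lock == 0x02 (owned):		new == 0x00, returns 0
 */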