1/*- 2 * Copyright (c) 2000 Michael Smith 3 * Copyright (c) 2000 BSDi 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28/* 29 * 6.1 : Mutual Exclusion and Synchronisation 30 */ 31 32#include <sys/cdefs.h>
|
71static int acpi_semaphore_debug = 0; 72TUNABLE_INT("debug.acpi_semaphore_debug", &acpi_semaphore_debug); 73SYSCTL_DECL(_debug_acpi); 74SYSCTL_INT(_debug_acpi, OID_AUTO, semaphore_debug, CTLFLAG_RW, 75 &acpi_semaphore_debug, 0, "Enable ACPI semaphore debug messages"); 76#endif /* !ACPI_NO_SEMAPHORES */ 77 78ACPI_STATUS 79AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits, 80 ACPI_HANDLE *OutHandle) 81{ 82#ifndef ACPI_NO_SEMAPHORES 83 struct acpi_semaphore *as; 84 85 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 86 87 if (OutHandle == NULL) 88 return_ACPI_STATUS (AE_BAD_PARAMETER); 89 if (InitialUnits > MaxUnits) 90 return_ACPI_STATUS (AE_BAD_PARAMETER); 91 92 if ((as = malloc(sizeof(*as), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL) 93 return_ACPI_STATUS (AE_NO_MEMORY); 94 95 mtx_init(&as->as_mtx, "ACPI semaphore", NULL, MTX_DEF); 96 as->as_units = InitialUnits; 97 as->as_maxunits = MaxUnits; 98 as->as_pendings = as->as_resetting = as->as_timeouts = 0; 99 100 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, 101 "created semaphore %p max %d, initial %d\n", 102 as, InitialUnits, MaxUnits)); 103 104 *OutHandle = (ACPI_HANDLE)as; 105#else 106 *OutHandle = (ACPI_HANDLE)OutHandle; 107#endif /* !ACPI_NO_SEMAPHORES */ 108 109 return_ACPI_STATUS (AE_OK); 110} 111 112ACPI_STATUS 113AcpiOsDeleteSemaphore(ACPI_HANDLE Handle) 114{ 115#ifndef ACPI_NO_SEMAPHORES 116 struct acpi_semaphore *as = (struct acpi_semaphore *)Handle; 117 118 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 119 120 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "destroyed semaphore %p\n", as)); 121 mtx_destroy(&as->as_mtx); 122 free(Handle, M_ACPISEM); 123#endif /* !ACPI_NO_SEMAPHORES */ 124 125 return_ACPI_STATUS (AE_OK); 126} 127 128/* 129 * This implementation has a bug, in that it has to stall for the entire 130 * timeout before it will return AE_TIME. A better implementation would 131 * use getmicrotime() to correctly adjust the timeout after being woken up. 
 */
ACPI_STATUS
AcpiOsWaitSemaphore(ACPI_HANDLE Handle, UINT32 Units, UINT16 Timeout)
{
#ifndef ACPI_NO_SEMAPHORES
    ACPI_STATUS			result;
    struct acpi_semaphore	*as = (struct acpi_semaphore *)Handle;
    int				rv, tmo;
    struct timeval		timeouttv, currenttv, timelefttv;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (as == NULL)
	return_ACPI_STATUS (AE_BAD_PARAMETER);

    /* Early boot ('cold'): sleeping is not possible yet, so succeed at once. */
    if (cold)
	return_ACPI_STATUS (AE_OK);

#if 0
    /* Disabled: forcibly reset a semaphore that has timed out repeatedly. */
    if (as->as_units < Units && as->as_timeouts > 10) {
	printf("%s: semaphore %p too many timeouts, resetting\n", __func__, as);
	AS_LOCK(as);
	as->as_units = as->as_maxunits;
	if (as->as_pendings)
	    as->as_resetting = 1;
	as->as_timeouts = 0;
	wakeup(as);
	AS_UNLOCK(as);
	return_ACPI_STATUS (AE_TIME);
    }

    if (as->as_resetting)
	return_ACPI_STATUS (AE_TIME);
#endif

    /* a timeout of ACPI_WAIT_FOREVER means "forever" */
    if (Timeout == ACPI_WAIT_FOREVER) {
	/* tmo == 0 makes msleep() below sleep with no timeout. */
	tmo = 0;
	timeouttv.tv_sec = ((0xffff/1000) + 1);	/* cf. ACPI spec */
	timeouttv.tv_usec = 0;
    } else {
	/* compute timeout using microseconds per tick */
	tmo = (Timeout * 1000) / (1000000 / hz);
	if (tmo <= 0)
	    tmo = 1;
	timeouttv.tv_sec = Timeout / 1000;
	timeouttv.tv_usec = (Timeout % 1000) * 1000;
    }

    /* calculate timeout value in timeval */
    getmicrotime(&currenttv);
    timevaladd(&timeouttv, &currenttv);

    AS_LOCK(as);
    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	"get %d units from semaphore %p (has %d), timeout %d\n",
	Units, as, as->as_units, Timeout));
    for (;;) {
	/* Unlimited semaphores always grant the request. */
	if (as->as_maxunits == ACPI_NO_UNIT_LIMIT) {
	    result = AE_OK;
	    break;
	}
	if (as->as_units >= Units) {
	    as->as_units -= Units;
	    result = AE_OK;
	    break;
	}

	/* Too many sleepers already queued on this semaphore; give up. */
	if (as->as_pendings >= ACPI_SEMAPHORES_MAX_PENDING) {
	    result = AE_TIME;
	    break;
	}

	/* if timeout values of zero is specified, return immediately */
	if (Timeout == 0) {
	    result = AE_TIME;
	    break;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	    "semaphore blocked, calling msleep(%p, %p, %d, \"acsem\", %d)\n",
	    as, &as->as_mtx, PCATCH, tmo));

	as->as_pendings++;

	if (acpi_semaphore_debug) {
	    printf("%s: Sleep %d, pending %d, semaphore %p, thread %d\n",
		__func__, Timeout, as->as_pendings, as, AcpiOsGetThreadId());
	}

	/*
	 * Sleep until AcpiOsSignalSemaphore() wakes us or 'tmo' ticks
	 * elapse; as_mtx is atomically dropped and re-taken by msleep().
	 */
	rv = msleep(as, &as->as_mtx, PCATCH, "acsem", tmo);

	as->as_pendings--;

#if 0
	if (as->as_resetting) {
	    /* semaphore reset, return immediately */
	    if (as->as_pendings == 0) {
		as->as_resetting = 0;
	    }
	    result = AE_TIME;
	    break;
	}
#endif

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "msleep(%d) returned %d\n", tmo, rv));
	/* EWOULDBLOCK from msleep() means the tick timeout expired. */
	if (rv == EWOULDBLOCK) {
	    result = AE_TIME;
	    break;
	}

	/* check if we already awaited enough */
	timelefttv = timeouttv;
	getmicrotime(&currenttv);
	timevalsub(&timelefttv, &currenttv);
	if (timelefttv.tv_sec < 0) {
	    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "await semaphore %p timeout\n",
		as));
	    result = AE_TIME;
	    break;
	}

	/* adjust timeout for the next sleep */
	tmo = (timelefttv.tv_sec * 1000000 + timelefttv.tv_usec) /
	    (1000000 / hz);
	if (tmo <= 0)
	    tmo = 1;

	if (acpi_semaphore_debug) {
	    printf("%s: Wakeup timeleft(%jd, %lu), tmo %u, sem %p, thread %d\n",
		__func__, (intmax_t)timelefttv.tv_sec, timelefttv.tv_usec, tmo, as,
		AcpiOsGetThreadId());
	}
    }

    if (acpi_semaphore_debug) {
	if (result == AE_TIME && Timeout > 0) {
	    printf("%s: Timeout %d, pending %d, semaphore %p\n",
		__func__, Timeout, as->as_pendings, as);
	}
	if (result == AE_OK && (as->as_timeouts > 0 || as->as_pendings > 0)) {
	    printf("%s: Acquire %d, units %d, pending %d, sem %p, thread %d\n",
		__func__, Units, as->as_units, as->as_pendings, as,
		AcpiOsGetThreadId());
	}
    }

    /* Track consecutive timeouts; any success resets the count. */
    if (result == AE_TIME)
	as->as_timeouts++;
    else
	as->as_timeouts = 0;

    AS_UNLOCK(as);
    return_ACPI_STATUS (result);
#else
    return_ACPI_STATUS (AE_OK);
#endif /* !ACPI_NO_SEMAPHORES */
}

/*
 * Return 'Units' units to the semaphore and wake every sleeping waiter.
 * Unless the semaphore was created with ACPI_NO_UNIT_LIMIT, the count is
 * clamped to as_maxunits.
 */
ACPI_STATUS
AcpiOsSignalSemaphore(ACPI_HANDLE Handle, UINT32 Units)
{
#ifndef ACPI_NO_SEMAPHORES
    struct acpi_semaphore	*as = (struct acpi_semaphore *)Handle;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (as == NULL)
	return_ACPI_STATUS(AE_BAD_PARAMETER);

    AS_LOCK(as);
    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	"return %d units to semaphore %p (has %d)\n",
	Units, as, as->as_units));
    if (as->as_maxunits != ACPI_NO_UNIT_LIMIT) {
	as->as_units += Units;
	if (as->as_units > as->as_maxunits)
	    as->as_units = as->as_maxunits;
    }

    if (acpi_semaphore_debug && (as->as_timeouts > 0 || as->as_pendings > 0)) {
	printf("%s: Release %d, units %d, pending %d, semaphore %p, thread %d\n",
	    __func__, Units, as->as_units, as->as_pendings, as, AcpiOsGetThreadId());
    }

    /* Wake all waiters; each re-checks unit availability in its wait loop. */
    wakeup(as);
    AS_UNLOCK(as);
#endif /* !ACPI_NO_SEMAPHORES */

    return_ACPI_STATUS (AE_OK);
}

/*
 * Allocate and initialize the mutex backing an ACPICA lock handle.
 */
ACPI_STATUS
AcpiOsCreateLock (ACPI_HANDLE *OutHandle)
{
    struct mtx *m;

    if (OutHandle == NULL)
	return (AE_BAD_PARAMETER);
    /* M_NOWAIT: may be called from a context that must not sleep. */
    m = malloc(sizeof(*m), M_ACPISEM, M_NOWAIT | M_ZERO);
    if (m == NULL)
	return (AE_NO_MEMORY);

    mtx_init(m, "acpica subsystem lock", NULL, MTX_DEF);
    *OutHandle = (ACPI_HANDLE)m;
    return (AE_OK);
}

/*
 * Tear down and free a lock created by AcpiOsCreateLock().  The memory
 * itself is owned by the handle's creator; NULL handles are ignored.
 */
void
AcpiOsDeleteLock (ACPI_HANDLE Handle)
{
    struct mtx *m = (struct mtx *)Handle;

    if (Handle == NULL)
	return;
    mtx_destroy(m);
}

/*
 * The Flags parameter seems to state whether or not caller is an ISR
 * (and thus can't block) but since we have ithreads, we don't worry
 * about potentially blocking.
 */
ACPI_NATIVE_UINT
AcpiOsAcquireLock (ACPI_HANDLE Handle)
{
    struct mtx *m = (struct mtx *)Handle;

    if (Handle == NULL)
	return (0);
    mtx_lock(m);
    /*
     * The return value is the "flags" cookie ACPICA hands back to
     * AcpiOsReleaseLock(); we carry no state, so it is always 0.
     */
    return (0);
}

void
AcpiOsReleaseLock (ACPI_HANDLE Handle, ACPI_NATIVE_UINT Flags)
{
    struct mtx *m = (struct mtx *)Handle;

    /* Flags is the (unused) cookie from AcpiOsAcquireLock(). */
    if (Handle == NULL)
	return;
    mtx_unlock(m);
}

/* Section 5.2.9.1: global lock acquire/release functions */
#define GL_ACQUIRED	(-1)
#define GL_BUSY		0
#define GL_BIT_PENDING	0x1
#define GL_BIT_OWNED	0x2
#define GL_BIT_MASK	(GL_BIT_PENDING | GL_BIT_OWNED)

/*
 * Acquire the global lock.  If busy, set the pending bit.  The caller
 * will wait for notification from the BIOS that the lock is available
 * and then attempt to acquire it again.
 */
int
acpi_acquire_global_lock(uint32_t *lock)
{
    uint32_t new, old;

    do {
	old = *lock;
	/*
	 * Always set the owned bit.  Shifting the old owned bit (0x2)
	 * down into the pending position (0x1) sets "pending" exactly
	 * when the lock was already owned by the firmware side.
	 */
	new = ((old & ~GL_BIT_MASK) | GL_BIT_OWNED) |
	    ((old >> 1) & GL_BIT_PENDING);
    } while (atomic_cmpset_acq_int(lock, old, new) == 0);

    /*
     * After the swap, new is either OWNED (2) or OWNED|PENDING (3);
     * "new < GL_BIT_MASK" is therefore true iff pending is clear, i.e.
     * we obtained the lock outright.
     */
    return ((new < GL_BIT_MASK) ? GL_ACQUIRED : GL_BUSY);
}

/*
 * Release the global lock, returning whether there is a waiter pending.
 * If the BIOS set the pending bit, OSPM must notify the BIOS when it
 * releases the lock.
 */
int
acpi_release_global_lock(uint32_t *lock)
{
    uint32_t new, old;

    do {
	old = *lock;
	/* Clear both the owned and pending bits in one atomic update. */
	new = old & ~GL_BIT_MASK;
    } while (atomic_cmpset_rel_int(lock, old, new) == 0);

    /* Non-zero when the firmware had flagged a waiter before release. */
    return (old & GL_BIT_PENDING);
}
|