/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * Copyright (c) 2007-2009 Jung-uk Kim <jkim@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * 6.3 : Scheduling services
 */
3267760Smsmith
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/taskqueue.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>

#include <dev/acpica/acpivar.h>

#define _COMPONENT	ACPI_OS_SERVICES
ACPI_MODULE_NAME("SCHEDULE")
5471875Smsmith
/*
 * Allow the user to tune the maximum number of tasks we may enqueue.
 * Read-only after boot (CTLFLAG_RDTUN): it sizes the static task array
 * allocated in acpi_task_init().
 */
static int acpi_max_tasks = ACPI_MAX_TASKS;
TUNABLE_INT("debug.acpi.max_tasks", &acpi_max_tasks);
SYSCTL_INT(_debug_acpi, OID_AUTO, max_tasks, CTLFLAG_RDTUN, &acpi_max_tasks,
    0, "Maximum acpi tasks");

/*
 * Allow the user to tune the number of task threads we start.  It seems
 * some systems have problems with increased parallelism.
 */
static int acpi_max_threads = ACPI_MAX_THREADS;
TUNABLE_INT("debug.acpi.max_threads", &acpi_max_threads);
SYSCTL_INT(_debug_acpi, OID_AUTO, max_threads, CTLFLAG_RDTUN, &acpi_max_threads,
    0, "Maximum acpi threads");

/* malloc(9) type for the preallocated task slot array. */
static MALLOC_DEFINE(M_ACPITASK, "acpitask", "ACPI deferred task");
7367760Smsmith
/*
 * One deferred-execution request.  Slots live in the statically sized
 * acpi_tasks[] array; at_flag tracks the slot's lifecycle and is only
 * manipulated with atomic operations since callers may run in
 * interrupt context.
 */
struct acpi_task_ctx {
    struct task			at_task;	/* taskqueue(9) linkage */
    ACPI_OSD_EXEC_CALLBACK	at_function;	/* callback to invoke */
    void 			*at_context;	/* argument for at_function */
    int				at_flag;	/* slot state, see below */
#define	ACPI_TASK_FREE		0	/* slot available for claiming */
#define	ACPI_TASK_USED		1	/* slot claimed, task pending */
#define	ACPI_TASK_ENQUEUED	2	/* task handed to the taskqueue */
};
8367760Smsmith
struct taskqueue		*acpi_taskq;		/* deferred-task queue */
static struct acpi_task_ctx	*acpi_tasks;		/* preallocated slot array */
static int			acpi_task_count;	/* slots currently in use */
static int			acpi_taskq_started;	/* queue threads running */
8888420Siwasaki
89146021Smarks/*
90193963Sjkim * Preallocate some memory for tasks early enough.
91193963Sjkim * malloc(9) cannot be used with spin lock held.
92193963Sjkim */
93193963Sjkimstatic void
94193963Sjkimacpi_task_init(void *arg)
95193963Sjkim{
96193963Sjkim
97193963Sjkim    acpi_tasks = malloc(sizeof(*acpi_tasks) * acpi_max_tasks, M_ACPITASK,
98193963Sjkim	M_WAITOK | M_ZERO);
99193963Sjkim}
100193963Sjkim
101193963SjkimSYSINIT(acpi_tasks, SI_SUB_DRIVERS, SI_ORDER_FIRST, acpi_task_init, NULL);
102193963Sjkim
103193963Sjkim/*
104193963Sjkim * Initialize ACPI task queue.
105193963Sjkim */
106193963Sjkimstatic void
107193963Sjkimacpi_taskq_init(void *arg)
108193963Sjkim{
109193963Sjkim    int i;
110193963Sjkim
111193963Sjkim    acpi_taskq = taskqueue_create_fast("acpi_task", M_NOWAIT,
112193963Sjkim	&taskqueue_thread_enqueue, &acpi_taskq);
113193963Sjkim    taskqueue_start_threads(&acpi_taskq, acpi_max_threads, PWAIT, "acpi_task");
114193963Sjkim    if (acpi_task_count > 0) {
115193963Sjkim	if (bootverbose)
116193963Sjkim	    printf("AcpiOsExecute: enqueue %d pending tasks\n",
117193963Sjkim		acpi_task_count);
118193963Sjkim	for (i = 0; i < acpi_max_tasks; i++)
119194011Sjkim	    if (atomic_cmpset_int(&acpi_tasks[i].at_flag, ACPI_TASK_USED,
120193963Sjkim		ACPI_TASK_USED | ACPI_TASK_ENQUEUED))
121193963Sjkim		taskqueue_enqueue(acpi_taskq, &acpi_tasks[i].at_task);
122193963Sjkim    }
123193963Sjkim    acpi_taskq_started = 1;
124193963Sjkim}
125193963Sjkim
126193963SjkimSYSINIT(acpi_taskq, SI_SUB_CONFIGURE, SI_ORDER_SECOND, acpi_taskq_init, NULL);
127193963Sjkim
128193963Sjkim/*
129146021Smarks * Bounce through this wrapper function since ACPI-CA doesn't understand
130146021Smarks * the pending argument for its callbacks.
131146021Smarks */
13288420Siwasakistatic void
133146021Smarksacpi_task_execute(void *context, int pending)
13488420Siwasaki{
135146021Smarks    struct acpi_task_ctx *at;
13688420Siwasaki
137146021Smarks    at = (struct acpi_task_ctx *)context;
138146021Smarks    at->at_function(at->at_context);
139193963Sjkim    atomic_clear_int(&at->at_flag, ACPI_TASK_USED | ACPI_TASK_ENQUEUED);
140193963Sjkim    acpi_task_count--;
14188420Siwasaki}
14288420Siwasaki
143193963Sjkimstatic ACPI_STATUS
144193963Sjkimacpi_task_enqueue(int priority, ACPI_OSD_EXEC_CALLBACK Function, void *Context)
145193963Sjkim{
146193963Sjkim    struct acpi_task_ctx *at;
147193963Sjkim    int i;
148193963Sjkim
149193963Sjkim    for (at = NULL, i = 0; i < acpi_max_tasks; i++)
150194011Sjkim	if (atomic_cmpset_int(&acpi_tasks[i].at_flag, ACPI_TASK_FREE,
151194011Sjkim	    ACPI_TASK_USED)) {
152193963Sjkim	    at = &acpi_tasks[i];
153193963Sjkim	    acpi_task_count++;
154193963Sjkim	    break;
155193963Sjkim	}
156193963Sjkim    if (at == NULL) {
157193963Sjkim	printf("AcpiOsExecute: failed to enqueue task, consider increasing "
158193963Sjkim	    "the debug.acpi.max_tasks tunable\n");
159193963Sjkim	return (AE_NO_MEMORY);
160193963Sjkim    }
161193963Sjkim
162193963Sjkim    TASK_INIT(&at->at_task, priority, acpi_task_execute, at);
163193963Sjkim    at->at_function = Function;
164193963Sjkim    at->at_context = Context;
165193963Sjkim
166193963Sjkim    /*
167193963Sjkim     * If the task queue is ready, enqueue it now.
168193963Sjkim     */
169193963Sjkim    if (acpi_taskq_started) {
170193963Sjkim	atomic_set_int(&at->at_flag, ACPI_TASK_ENQUEUED);
171193963Sjkim	taskqueue_enqueue(acpi_taskq, &at->at_task);
172193963Sjkim	return (AE_OK);
173193963Sjkim    }
174193963Sjkim    if (bootverbose)
175193963Sjkim	printf("AcpiOsExecute: task queue not started\n");
176193963Sjkim
177193963Sjkim    return (AE_OK);
178193963Sjkim}
179193963Sjkim
180146021Smarks/*
181146021Smarks * This function may be called in interrupt context, i.e. when a GPE fires.
182146021Smarks * We allocate and queue a task for one of our taskqueue threads to process.
183146021Smarks */
18467760SmsmithACPI_STATUS
185167814SjkimAcpiOsExecute(ACPI_EXECUTE_TYPE Type, ACPI_OSD_EXEC_CALLBACK Function,
186128228Snjl    void *Context)
18767760Smsmith{
18885503Sjhb    int pri;
18967760Smsmith
19096926Speter    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
19171875Smsmith
19267760Smsmith    if (Function == NULL)
193128228Snjl	return_ACPI_STATUS (AE_BAD_PARAMETER);
19467760Smsmith
195167814Sjkim    switch (Type) {
196167814Sjkim    case OSL_GPE_HANDLER:
197175256Snjl    case OSL_NOTIFY_HANDLER:
198175256Snjl	/*
199175256Snjl	 * Run GPEs and Notifies at the same priority.  This allows
200175256Snjl	 * Notifies that are generated by running a GPE's method (e.g., _L00)
201175256Snjl	 * to not be pre-empted by a later GPE that arrives during the
202175256Snjl	 * Notify handler execution.
203175256Snjl	 */
204167814Sjkim	pri = 10;
20570236Siwasaki	break;
206167814Sjkim    case OSL_GLOBAL_LOCK_HANDLER:
207167814Sjkim    case OSL_EC_POLL_HANDLER:
208167814Sjkim    case OSL_EC_BURST_HANDLER:
209167814Sjkim	pri = 5;
210167814Sjkim	break;
211167814Sjkim    case OSL_DEBUGGER_THREAD:
212167814Sjkim	pri = 0;
21367760Smsmith	break;
21467760Smsmith    default:
215128228Snjl	return_ACPI_STATUS (AE_BAD_PARAMETER);
21667760Smsmith    }
21767760Smsmith
218193963Sjkim    return_ACPI_STATUS (acpi_task_enqueue(pri, Function, Context));
21967760Smsmith}
22067760Smsmith
22167760Smsmithvoid
222202771SjkimAcpiOsSleep(UINT64 Milliseconds)
22367760Smsmith{
22467760Smsmith    int		timo;
22567760Smsmith
22696926Speter    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
22771875Smsmith
228138300Smarks    timo = Milliseconds * hz / 1000;
229120662Snjl
230146021Smarks    /*
231120662Snjl     * If requested sleep time is less than our hz resolution, use
232120662Snjl     * DELAY instead for better granularity.
233120662Snjl     */
234120662Snjl    if (timo > 0)
235166909Sjhb	pause("acpislp", timo);
236120662Snjl    else
237120662Snjl	DELAY(Milliseconds * 1000);
238120662Snjl
23971875Smsmith    return_VOID;
24067760Smsmith}
24167760Smsmith
/*
 * Return the current time in 100 nanosecond units
 */
UINT64
AcpiOsGetTimer(void)
{
    struct bintime bt;
    UINT64 t;

    /* XXX During early boot there is no (decent) timer available yet. */
    KASSERT(cold == 0, ("acpi: timer op not yet supported during boot"));

    binuptime(&bt);
    /*
     * Convert the 2^64-scaled binary fraction to 100ns ticks: keep the
     * top 32 bits of bt.frac, multiply by 10^7 (100ns units per second),
     * then shift the remaining 2^32 scale factor back out.  Whole
     * seconds contribute 10^7 ticks each.
     */
    t = ((UINT64)10000000 * (uint32_t)(bt.frac >> 32)) >> 32;
    t += bt.sec * 10000000;

    return (t);
}
260138300Smarks
/*
 * Busy-wait for the given number of microseconds; unlike AcpiOsSleep()
 * this does not yield the CPU, giving sub-tick granularity.
 */
void
AcpiOsStall(UINT32 Microseconds)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    DELAY(Microseconds);
    return_VOID;
}
26977432Smsmith
/*
 * Return an identifier for the current thread (the FreeBSD thread ID).
 */
ACPI_THREAD_ID
AcpiOsGetThreadId(void)
{

    /* XXX do not add ACPI_FUNCTION_TRACE here, results in recursive call. */

    /* Returning 0 is not allowed. */
    return (curthread->td_tid);
}
279