/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$FreeBSD: head/sys/dev/acpica/Osd/OsdSchedule.c 120571 2003-09-29 07:29:26Z njl $
 */

/*
 * 6.3 : Scheduling services
 */

#include "acpi.h"

#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/taskqueue.h>
#include <machine/clock.h>
#include <dev/acpica/acpivar.h>

#define _COMPONENT	ACPI_OS_SERVICES
ACPI_MODULE_NAME("SCHEDULE")

/*
 * This is a little complicated because we need to build and then free a
 * 'struct task' for each task we enqueue.
 */

MALLOC_DEFINE(M_ACPITASK, "acpitask", "ACPI deferred task");

static void	AcpiOsExecuteQueue(void *arg, int pending);

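/*
 * An acpi_task embeds the 'struct task' handed to the taskqueue and records
 * the ACPICA callback and argument to run later; acpi_task_queue is the
 * STAILQ wrapper used to pass tasks to the worker threads when
 * ACPI_USE_THREADS is defined.
 */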
struct acpi_task {
    struct task			at_task;
    OSD_EXECUTION_CALLBACK	at_function;
    void			*at_context;
};

struct acpi_task_queue {
    STAILQ_ENTRY(acpi_task_queue) at_q;
    struct acpi_task		*at;
};

#if __FreeBSD_version >= 500000
/*
 * Private task queue definition for ACPI
 */
TASKQUEUE_DECLARE(acpi);
static void	*taskqueue_acpi_ih;

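/*
 * Enqueueing on taskqueue_acpi schedules a software interrupt; the SWI
 * handler registered below ("acpitaskq") then calls taskqueue_acpi_run()
 * to drain the queue in interrupt-thread context.
 */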
static void
taskqueue_acpi_enqueue(void *context)
{
    swi_sched(taskqueue_acpi_ih, 0);
}

static void
taskqueue_acpi_run(void *dummy)
{
    taskqueue_run(taskqueue_acpi);
}

TASKQUEUE_DEFINE(acpi, taskqueue_acpi_enqueue, 0,
		 swi_add(NULL, "acpitaskq", taskqueue_acpi_run, NULL,
		     SWI_TQ, 0, &taskqueue_acpi_ih));

#ifdef ACPI_USE_THREADS
STAILQ_HEAD(, acpi_task_queue) acpi_task_queue;
static struct mtx	acpi_task_mtx;

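/*
 * Worker loop for the kernel threads created in acpi_task_thread_init():
 * sleep until a task is queued, dequeue it under acpi_task_mtx, then run
 * the ACPICA callback with Giant held and free the task and queue entry.
 */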
static void
acpi_task_thread(void *arg)
{
    struct acpi_task_queue	*atq;
    OSD_EXECUTION_CALLBACK	Function;
    void			*Context;

    for (;;) {
	mtx_lock(&acpi_task_mtx);
	if ((atq = STAILQ_FIRST(&acpi_task_queue)) == NULL) {
	    msleep(&acpi_task_queue, &acpi_task_mtx, PCATCH, "actask", 0);
	    mtx_unlock(&acpi_task_mtx);
	    continue;
	}

	STAILQ_REMOVE_HEAD(&acpi_task_queue, at_q);
	mtx_unlock(&acpi_task_mtx);

	Function = (OSD_EXECUTION_CALLBACK)atq->at->at_function;
	Context = atq->at->at_context;

	mtx_lock(&Giant);
	Function(Context);

	free(atq->at, M_ACPITASK);
	free(atq, M_ACPITASK);
	mtx_unlock(&Giant);
    }

    kthread_exit(0);
}

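/*
 * Initialize the task queue and its mutex and start ACPI_MAX_THREADS worker
 * threads.  Returns the kthread_create() error, if any; intended to be
 * called once while the ACPI subsystem is being set up.
 */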
int
acpi_task_thread_init(void)
{
    int		i, err;
    struct proc	*acpi_kthread_proc;

    err = 0;
    STAILQ_INIT(&acpi_task_queue);
    mtx_init(&acpi_task_mtx, "ACPI task", NULL, MTX_DEF);

    for (i = 0; i < ACPI_MAX_THREADS; i++) {
	err = kthread_create(acpi_task_thread, NULL, &acpi_kthread_proc,
			     0, 0, "acpi_task%d", i);
	if (err != 0) {
	    printf("%s: kthread_create failed (%d)\n", __func__, err);
	    break;
	}
    }
    return (err);
}
#endif /* ACPI_USE_THREADS */
#endif /* __FreeBSD_version >= 500000 */

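/*
 * ACPICA entry point for deferring work.  The OSD priority is mapped to a
 * small integer task priority (GPE highest), and the task is enqueued on
 * taskqueue_swi on pre-5.0 kernels or on the private taskqueue_acpi
 * otherwise; the callback itself runs later from AcpiOsExecuteQueue().
 */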
ACPI_STATUS
AcpiOsQueueForExecution(UINT32 Priority, OSD_EXECUTION_CALLBACK Function, void *Context)
{
    struct acpi_task	*at;
    int pri;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (Function == NULL)
	return_ACPI_STATUS(AE_BAD_PARAMETER);

    /* May be called from interrupt context, so don't sleep in malloc(). */
    at = malloc(sizeof(*at), M_ACPITASK, M_NOWAIT);
    if (at == NULL)
	return_ACPI_STATUS(AE_NO_MEMORY);
    bzero(at, sizeof(*at));

    at->at_function = Function;
    at->at_context = Context;
    switch (Priority) {
    case OSD_PRIORITY_GPE:
	pri = 4;
	break;
    case OSD_PRIORITY_HIGH:
	pri = 3;
	break;
    case OSD_PRIORITY_MED:
	pri = 2;
	break;
    case OSD_PRIORITY_LO:
	pri = 1;
	break;
    default:
	free(at, M_ACPITASK);
	return_ACPI_STATUS(AE_BAD_PARAMETER);
    }
    TASK_INIT(&at->at_task, pri, AcpiOsExecuteQueue, at);

    /* at_task is the first member of struct acpi_task, so the cast is safe. */
#if __FreeBSD_version < 500000
    taskqueue_enqueue(taskqueue_swi, (struct task *)at);
#else
    taskqueue_enqueue(taskqueue_acpi, (struct task *)at);
#endif
    return_ACPI_STATUS(AE_OK);
}
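/*
 * Illustrative use only (the handler and argument names below are
 * hypothetical):
 *
 *	status = AcpiOsQueueForExecution(OSD_PRIORITY_GPE,
 *	    my_deferred_handler, sc);
 *	if (ACPI_FAILURE(status))
 *		printf("could not defer ACPI work\n");
 */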
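/*
 * Taskqueue handler for each queued acpi_task.  With ACPI_USE_THREADS the
 * task is handed to the worker threads via acpi_task_queue; otherwise the
 * ACPICA callback is invoked directly here and the task freed.
 */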
static void
AcpiOsExecuteQueue(void *arg, int pending)
{
    struct acpi_task		*at;
    struct acpi_task_queue	*atq;
    OSD_EXECUTION_CALLBACK	Function;
    void			*Context;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    at = (struct acpi_task *)arg;
    atq = NULL;
    Function = NULL;
    Context = NULL;

#ifdef ACPI_USE_THREADS
    atq = malloc(sizeof(*atq), M_ACPITASK, M_NOWAIT);
    if (atq == NULL) {
	printf("%s: no memory\n", __func__);
	/* Don't leak the task if we can't hand it to the worker threads. */
	free(at, M_ACPITASK);
	return_VOID;
    }

    atq->at = at;

    mtx_lock(&acpi_task_mtx);
    STAILQ_INSERT_TAIL(&acpi_task_queue, atq, at_q);
    mtx_unlock(&acpi_task_mtx);
    wakeup_one(&acpi_task_queue);
#else
    Function = (OSD_EXECUTION_CALLBACK)at->at_function;
    Context = at->at_context;

    Function(Context);
    free(at, M_ACPITASK);
#endif

    return_VOID;
}

/*
 * We don't have any sleep granularity better than hz, so
 * make do with that.
 */
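/*
 * For example, with hz = 100, AcpiOsSleep(0, 10) computes
 * timo = 0 * 100 + 10 * 100 / 1000 = 1 tick, i.e. roughly 10 ms.
 */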
void
AcpiOsSleep (UINT32 Seconds, UINT32 Milliseconds)
{
    int		timo;
    static int	dummy;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    timo = (Seconds * hz) + Milliseconds * hz / 1000;

    /* A timeout of 0 means "no timeout" to tsleep(), so wait at least one tick. */
    if (timo == 0)
	timo = 1;
    tsleep(&dummy, 0, "acpislp", timo);
    return_VOID;
}

void
AcpiOsStall (UINT32 Microseconds)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /*
     * Maximum length for stall is 100 us.  If longer, assume the caller
     * really meant "sleep" and convert the microsecond count into the
     * seconds and milliseconds AcpiOsSleep() expects.
     */
    if (Microseconds <= 100)
	DELAY(Microseconds);
    else
	AcpiOsSleep(Microseconds / 1000000, (Microseconds % 1000000) / 1000);

    return_VOID;
}
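/*
 * For example, AcpiOsStall(20) spins for 20 us in DELAY(), while a request
 * for 250 us falls through to AcpiOsSleep(0, 0), which rounds up to a
 * single clock tick.
 */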
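/*
 * Return a non-zero identifier for the current thread; adding 1 to the pid
 * keeps proc0 (pid 0) from producing the forbidden value 0.
 */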
UINT32
AcpiOsGetThreadId (void)
{
    struct proc *p;

    /* XXX do not add FUNCTION_TRACE here, it results in a recursive call. */
    p = curproc;
#if __FreeBSD_version < 500000
    if (p == NULL)
	p = &proc0;
#endif
    KASSERT(p != NULL, ("%s: curproc is NULL!", __func__));
    return (p->p_pid + 1);	/* can't return 0 */
}