--- OsdSchedule.c (88900)
+++ OsdSchedule.c (91128)
/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
- * $FreeBSD: head/sys/dev/acpica/Osd/OsdSchedule.c 88900 2002-01-05 08:47:13Z jhb $
+ * $FreeBSD: head/sys/dev/acpica/Osd/OsdSchedule.c 91128 2002-02-23 05:31:38Z msmith $
 */

/*
 * 6.3 : Scheduling services
 */

#include "acpi.h"

#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/taskqueue.h>
#include <machine/clock.h>

#include <sys/bus.h>

#include <dev/acpica/acpivar.h>

#define _COMPONENT	ACPI_OS_SERVICES
-MODULE_NAME("SCHEDULE")
+ACPI_MODULE_NAME("SCHEDULE")

/*
 * This is a little complicated due to the fact that we need to build and then
 * free a 'struct task' for each task we enqueue.
 */

MALLOC_DEFINE(M_ACPITASK, "acpitask", "ACPI deferred task");

static void	AcpiOsExecuteQueue(void *arg, int pending);

struct acpi_task {
    struct task			at_task;
    OSD_EXECUTION_CALLBACK	at_function;
    void			*at_context;
};

struct acpi_task_queue {
    STAILQ_ENTRY(acpi_task_queue) at_q;
    struct acpi_task		*at;
};

/*
 * Private task queue definition for ACPI
 */
TASKQUEUE_DECLARE(acpi);
static void	*taskqueue_acpi_ih;

static void
taskqueue_acpi_enqueue(void *context)
{
    swi_sched(taskqueue_acpi_ih, 0);
}

static void
taskqueue_acpi_run(void *dummy)
{
    taskqueue_run(taskqueue_acpi);
}

TASKQUEUE_DEFINE(acpi, taskqueue_acpi_enqueue, 0,
    swi_add(NULL, "acpitaskq", taskqueue_acpi_run, NULL,
        SWI_TQ, 0, &taskqueue_acpi_ih));

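/*
 * If ACPI_MAX_THREADS is defined to a value greater than zero (presumably via
 * the kernel option reflected in opt_acpi.h, included above), the code below
 * creates that many dedicated kernel threads and AcpiOsExecuteQueue() only
 * hands queued work off to them; otherwise the callback is invoked directly
 * from the taskqueue software interrupt.
 */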
#if defined(ACPI_MAX_THREADS) && ACPI_MAX_THREADS > 0
#define ACPI_USE_THREADS
#endif

#ifdef ACPI_USE_THREADS
STAILQ_HEAD(, acpi_task_queue) acpi_task_queue;
static struct mtx	acpi_task_mtx;

static void
acpi_task_thread(void *arg)
{
    struct acpi_task_queue	*atq;
    OSD_EXECUTION_CALLBACK	Function;
    void			*Context;

    for (;;) {
        mtx_lock(&acpi_task_mtx);
        if ((atq = STAILQ_FIRST(&acpi_task_queue)) == NULL) {
            msleep(&acpi_task_queue, &acpi_task_mtx, PCATCH, "actask", 0);
            mtx_unlock(&acpi_task_mtx);
            continue;
        }

        STAILQ_REMOVE_HEAD(&acpi_task_queue, at_q);
        mtx_unlock(&acpi_task_mtx);

        Function = (OSD_EXECUTION_CALLBACK)atq->at->at_function;
        Context = atq->at->at_context;

        mtx_lock(&Giant);
        Function(Context);

        free(atq->at, M_ACPITASK);
        free(atq, M_ACPITASK);
        mtx_unlock(&Giant);
    }

    kthread_exit(0);
}

int
acpi_task_thread_init(void)
{
    int		i, err;
    struct proc	*acpi_kthread_proc;

    err = 0;
    STAILQ_INIT(&acpi_task_queue);
    mtx_init(&acpi_task_mtx, "ACPI task", MTX_DEF);

    for (i = 0; i < ACPI_MAX_THREADS; i++) {
        err = kthread_create(acpi_task_thread, NULL, &acpi_kthread_proc,
            0, "acpi_task%d", i);
        if (err != 0) {
            printf("%s: kthread_create failed(%d)\n", __func__, err);
            break;
        }
    }
    return (err);
}
#endif

ACPI_STATUS
AcpiOsQueueForExecution(UINT32 Priority, OSD_EXECUTION_CALLBACK Function, void *Context)
{
    struct acpi_task	*at;
    int			pri;

-    FUNCTION_TRACE(__func__);
+    ACPI_FUNCTION_TRACE(__func__);

    if (Function == NULL)
        return_ACPI_STATUS(AE_BAD_PARAMETER);

    at = malloc(sizeof(*at), M_ACPITASK, M_NOWAIT);	/* Interrupt Context */
    if (at == NULL)
        return_ACPI_STATUS(AE_NO_MEMORY);
    bzero(at, sizeof(*at));

    at->at_function = Function;
    at->at_context = Context;
    switch (Priority) {
    case OSD_PRIORITY_GPE:
        pri = 4;
        break;
    case OSD_PRIORITY_HIGH:
        pri = 3;
        break;
    case OSD_PRIORITY_MED:
        pri = 2;
        break;
    case OSD_PRIORITY_LO:
        pri = 1;
        break;
    default:
        free(at, M_ACPITASK);
        return_ACPI_STATUS(AE_BAD_PARAMETER);
    }
    TASK_INIT(&at->at_task, pri, AcpiOsExecuteQueue, at);

    taskqueue_enqueue(taskqueue_acpi, (struct task *)at);
    return_ACPI_STATUS(AE_OK);
}

static void
AcpiOsExecuteQueue(void *arg, int pending)
{
    struct acpi_task		*at;
    struct acpi_task_queue	*atq;
    OSD_EXECUTION_CALLBACK	Function;
    void			*Context;

-    FUNCTION_TRACE(__func__);
+    ACPI_FUNCTION_TRACE(__func__);

    at = (struct acpi_task *)arg;
    atq = NULL;
    Function = NULL;
    Context = NULL;

#ifdef ACPI_USE_THREADS
    atq = malloc(sizeof(*atq), M_ACPITASK, M_NOWAIT);
    if (atq == NULL) {
        printf("%s: no memory\n", __func__);
        return;
    }

    atq->at = at;

    mtx_lock(&acpi_task_mtx);
    STAILQ_INSERT_TAIL(&acpi_task_queue, atq, at_q);
    mtx_unlock(&acpi_task_mtx);
    wakeup_one(&acpi_task_queue);
#else
    Function = (OSD_EXECUTION_CALLBACK)at->at_function;
    Context = at->at_context;

    Function(Context);
    free(at, M_ACPITASK);
#endif

    return_VOID;
}

/*
 * We don't have any sleep granularity better than hz, so
 * make do with that.
 */
void
AcpiOsSleep (UINT32 Seconds, UINT32 Milliseconds)
{
    int		timo;
    static int	dummy;

-    FUNCTION_TRACE(__func__);
+    ACPI_FUNCTION_TRACE(__func__);

    timo = (Seconds * hz) + Milliseconds * hz / 1000;
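    /*
     * For example, with a typical hz of 100 this maps AcpiOsSleep(1, 500) to
     * timo = 100 + 50 = 150 ticks, while a 1ms request computes 0 and is
     * bumped to the 1-tick minimum below.
     */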
    if (timo == 0)
        timo = 1;
    tsleep(&dummy, 0, "acpislp", timo);
    return_VOID;
}

void
AcpiOsStall (UINT32 Microseconds)
{
-    FUNCTION_TRACE(__func__);
+    ACPI_FUNCTION_TRACE(__func__);

    DELAY(Microseconds);
    return_VOID;
}
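/*
 * Note the contrast with AcpiOsSleep() above: AcpiOsStall() busy-waits in
 * DELAY() rather than sleeping, so it is the variant ACPICA uses for short
 * delays in contexts where blocking is not permitted.
 */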

UINT32
AcpiOsGetThreadId (void)
{
    /* XXX do not add FUNCTION_TRACE here, results in recursive call */

    KASSERT(curproc != NULL, (__func__ ": curproc is NULL!"));
    return (curproc->p_pid + 1);	/* can't return 0 */
}
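
As a usage sketch (not part of this revision): a FreeBSD ACPI driver could defer
work through this interface roughly as follows, assuming the usual <sys/param.h>,
<sys/bus.h> and "acpi.h" includes. acpi_example_task, acpi_example_defer and the
dev argument are hypothetical; only AcpiOsQueueForExecution(), OSD_PRIORITY_LO
and the OSD_EXECUTION_CALLBACK signature come from the file above.

static void
acpi_example_task(void *Context)
{
    device_t dev = (device_t)Context;

    /* Runs later, either in one of the acpi_task kernel threads or directly
     * from the taskqueue SWI when ACPI_MAX_THREADS is not configured. */
    device_printf(dev, "deferred ACPI work\n");
}

static void
acpi_example_defer(device_t dev)
{
    if (AcpiOsQueueForExecution(OSD_PRIORITY_LO, acpi_example_task, dev) != AE_OK)
        device_printf(dev, "could not queue deferred ACPI task\n");
}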