/* subr_taskqueue.c revision 85521 */
1/*- 2 * Copyright (c) 2000 Doug Rabson 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 
 *
 * $FreeBSD: head/sys/kern/subr_taskqueue.c 85521 2001-10-26 06:32:21Z jhb $
 */

/*
 * Task queues: deferred execution of prioritised callbacks.  A task is
 * enqueued on a queue and later run (once) by whoever drains the queue via
 * taskqueue_run(); multiple enqueues of the same task before it runs are
 * collapsed into a single run with a "pending" count.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");

/* Global list of all task queues, for lookup by name in taskqueue_find(). */
static STAILQ_HEAD(taskqueue_list, taskqueue) taskqueue_queues;

/* Software interrupt handle used by the predefined "swi" queue below. */
static void *taskqueue_ih;
/* Protects taskqueue_queues (the global list), not the queues themselves. */
static struct mtx taskqueue_queues_mutex;

struct taskqueue {
	STAILQ_ENTRY(taskqueue)	tq_link;	/* entry on taskqueue_queues */
	STAILQ_HEAD(, task)	tq_queue;	/* pending tasks, highest
						 * ta_priority at the head */
	const char		*tq_name;	/* name (not copied; caller's
						 * string must outlive queue) */
	taskqueue_enqueue_fn	tq_enqueue;	/* "kick" hook called after a
						 * task is queued, may be 0 */
	void			*tq_context;	/* argument for tq_enqueue */
	int			tq_draining;	/* set while being freed;
						 * rejects new enqueues */
	struct mtx		tq_mutex;	/* protects tq_queue and
						 * tq_draining */
};

static void	init_taskqueue_list(void *data);

/*
 * One-time initialisation of the global queue list and its mutex,
 * run from SYSINIT below.
 */
static void
init_taskqueue_list(void *data __unused)
{

	mtx_init(&taskqueue_queues_mutex, "taskqueue list", MTX_DEF);
	STAILQ_INIT(&taskqueue_queues);
}
SYSINIT(taskqueue_list, SI_SUB_INTRINSIC, SI_ORDER_ANY, init_taskqueue_list,
    NULL);

/*
 * Initialise a task: set its callback, callback argument and priority,
 * clear its pending count and create its per-task mutex.  Must be paired
 * with task_destroy() to release the mutex.
 */
void
task_init(struct task *task, int priority, task_fn_t *func, void *context)
{

	KASSERT(task != NULL, ("task == NULL"));

	mtx_init(&task->ta_mutex, "task", MTX_DEF);
	mtx_lock(&task->ta_mutex);
	task->ta_pending = 0;
	task->ta_priority = priority;
	task->ta_func = func;
	task->ta_context = context;
	mtx_unlock(&task->ta_mutex);
}

/*
 * Tear down a task initialised with task_init().  The caller is
 * responsible for ensuring the task is no longer on any queue.
 */
void
task_destroy(struct task *task)
{

	mtx_destroy(&task->ta_mutex);
}

/*
 * Allocate and initialise a new task queue and link it onto the global
 * list.  'enqueue' (with argument 'context') is invoked, with the queue
 * mutex held, each time a task is added, so the owner can schedule a
 * drain.  Returns 0 (NULL) if allocation fails, which is possible when
 * mflags does not include M_WAITOK.
 */
struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (!queue)
		return 0;

	STAILQ_INIT(&queue->tq_queue);
	queue->tq_name = name;
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_draining = 0;
	mtx_init(&queue->tq_mutex, "taskqueue", MTX_DEF);

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_INSERT_TAIL(&taskqueue_queues, queue, tq_link);
	mtx_unlock(&taskqueue_queues_mutex);

	return queue;
}

/*
 * Free a task queue: mark it draining (so taskqueue_enqueue() starts
 * returning EPIPE), run any still-pending tasks to completion, unlink it
 * from the global list and release it.
 */
void
taskqueue_free(struct taskqueue *queue)
{

	mtx_lock(&queue->tq_mutex);
	queue->tq_draining = 1;
	mtx_unlock(&queue->tq_mutex);

	/* Flush tasks queued before tq_draining was set. */
	taskqueue_run(queue);

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_REMOVE(&taskqueue_queues, queue, taskqueue, tq_link);
	mtx_unlock(&taskqueue_queues_mutex);

	mtx_destroy(&queue->tq_mutex);
	free(queue, M_TASKQUEUE);
}

/*
 * Look a queue up by name on the global list.  Returns with the found
 * queue's tq_mutex held (the caller must unlock it), or 0 (NULL) if no
 * queue of that name exists.
 */
struct taskqueue *
taskqueue_find(const char *name)
{
	struct taskqueue *queue;

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_FOREACH(queue, &taskqueue_queues, tq_link) {
		mtx_lock(&queue->tq_mutex);
		if (!strcmp(queue->tq_name, name)) {
			/* Deliberately keep queue->tq_mutex held on return. */
			mtx_unlock(&taskqueue_queues_mutex);
			return queue;
		}
		mtx_unlock(&queue->tq_mutex);
	}
	mtx_unlock(&taskqueue_queues_mutex);
	return 0;
}

/*
 * Queue a task for execution.  If the task is already pending, only its
 * pending count is bumped; otherwise it is inserted in priority order
 * (larger ta_priority closer to the head, FIFO among equal priorities)
 * and the queue's tq_enqueue hook, if any, is called to kick the
 * consumer.  Returns EPIPE if the queue is being freed, else 0.
 *
 * Lock order here is tq_mutex then ta_mutex, matching taskqueue_run().
 */
int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	/*
	 * Don't allow new tasks on a queue which is being freed.
	 */
	mtx_lock(&queue->tq_mutex);
	if (queue->tq_draining) {
		mtx_unlock(&queue->tq_mutex);
		return EPIPE;
	}

	/*
	 * Count multiple enqueues.
	 */
	mtx_lock(&task->ta_mutex);
	if (task->ta_pending) {
		task->ta_pending++;
		mtx_unlock(&task->ta_mutex);
		mtx_unlock(&queue->tq_mutex);
		return 0;
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		/* Walk forward to the first entry of lower priority and
		 * insert just before it. */
		prev = 0;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	mtx_unlock(&task->ta_mutex);

	/* NOTE(review): the kick hook runs with tq_mutex still held, so
	 * tq_enqueue implementations must not try to take tq_mutex or
	 * sleep — confirm against each consumer. */
	if (queue->tq_enqueue)
		queue->tq_enqueue(queue->tq_context);
	mtx_unlock(&queue->tq_mutex);
	return 0;
}

/*
 * Drain a queue: repeatedly remove the head task and invoke its callback
 * with the accumulated pending count.  The queue mutex is dropped while
 * the callback runs, so callbacks may re-enqueue tasks (including their
 * own) and those will be picked up by this same loop.
 */
void
taskqueue_run(struct taskqueue *queue)
{
	struct task *task;
	task_fn_t *saved_func;
	void *arg;
	int pending;

	mtx_lock(&queue->tq_mutex);
	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		mtx_lock(&task->ta_mutex);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		mtx_unlock(&queue->tq_mutex);
		/* Snapshot everything needed under ta_mutex, then release
		 * it before calling out so the callback can touch the task. */
		pending = task->ta_pending;
		task->ta_pending = 0;
		saved_func = task->ta_func;
		arg = task->ta_context;
		mtx_unlock(&task->ta_mutex);

		saved_func(arg, pending);

		mtx_lock(&queue->tq_mutex);
	}
	mtx_unlock(&queue->tq_mutex);
}

/*
 * Kick hook for the predefined "swi" queue: schedule the software
 * interrupt that drains it.
 */
static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, SWI_NOSWITCH);
}

/* Software-interrupt handler that drains the "swi" queue. */
static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

/*
 * Define the system-wide "swi" task queue, drained from a software
 * interrupt registered via swi_add().
 */
TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, 0,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ, 0,
			 &taskqueue_ih));