subr_taskqueue.c revision 116182
/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_taskqueue.c 116182 2003-06-11 00:56:59Z obrien $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");

/*
 * Global list of all taskqueues in the system, protected by
 * taskqueue_queues_mutex.
 */
static STAILQ_HEAD(taskqueue_list, taskqueue) taskqueue_queues;

/*
 * Software-interrupt handles for the two system queues created by the
 * TASKQUEUE_DEFINE()s at the bottom of this file.
 */
static void	*taskqueue_ih;
static void	*taskqueue_giant_ih;
static struct mtx taskqueue_queues_mutex;

struct taskqueue {
	STAILQ_ENTRY(taskqueue)	tq_link;	/* entry on taskqueue_queues */
	STAILQ_HEAD(, task)	tq_queue;	/* pending tasks */
	const char		*tq_name;	/* stored by reference, not copied */
	taskqueue_enqueue_fn	tq_enqueue;	/* called whenever a task is queued */
	void			*tq_context;	/* opaque argument for tq_enqueue */
	int			tq_draining;	/* set once taskqueue_free() starts */
	struct mtx		tq_mutex;	/* protects tq_queue and tq_draining */
};

static void	init_taskqueue_list(void *data);

/*
 * Boot-time initialization of the global queue list and its mutex,
 * scheduled by the SYSINIT() below.
 */
static void
init_taskqueue_list(void *data __unused)
{

	mtx_init(&taskqueue_queues_mutex, "taskqueue list", NULL, MTX_DEF);
	STAILQ_INIT(&taskqueue_queues);
}
SYSINIT(taskqueue_list, SI_SUB_INTRINSIC, SI_ORDER_ANY, init_taskqueue_list,
    NULL);

/*
 * Create a new taskqueue and link it onto the global list.
 *
 * 'name' is kept by reference, so it must remain valid for the lifetime
 * of the queue.  'enqueue' is invoked (with 'context' as its argument)
 * every time a task is added, and is expected to kick whatever
 * mechanism eventually calls taskqueue_run() -- see
 * taskqueue_swi_enqueue() below for an example.  Returns NULL (0) if
 * the allocation fails.
 */
struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (!queue)
		return 0;

	STAILQ_INIT(&queue->tq_queue);
	queue->tq_name = name;
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_draining = 0;
	mtx_init(&queue->tq_mutex, "taskqueue", NULL, MTX_DEF);

	/* Publish the queue so taskqueue_find() can see it. */
	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_INSERT_TAIL(&taskqueue_queues, queue, tq_link);
	mtx_unlock(&taskqueue_queues_mutex);

	return queue;
}

/*
 * Drain and destroy a taskqueue.
 *
 * Marks the queue as draining (taskqueue_enqueue() refuses new tasks
 * with EPIPE once this is set), runs whatever is still pending, unlinks
 * the queue from the global list, then destroys its mutex and frees it.
 * Calling this twice on the same queue trips the KASSERT.
 */
void
taskqueue_free(struct taskqueue *queue)
{

	mtx_lock(&queue->tq_mutex);
	KASSERT(queue->tq_draining == 0, ("free'ing a draining taskqueue"));
	queue->tq_draining = 1;
	mtx_unlock(&queue->tq_mutex);

	/* Flush remaining tasks before tearing anything down. */
	taskqueue_run(queue);

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_REMOVE(&taskqueue_queues, queue, taskqueue, tq_link);
	mtx_unlock(&taskqueue_queues_mutex);

	mtx_destroy(&queue->tq_mutex);
	free(queue, M_TASKQUEUE);
}

/*
 * Look up a taskqueue by name.  On success the queue is returned with
 * its tq_mutex held -- the caller is responsible for unlocking it.
 * Returns with the taskqueue locked.
 */
struct taskqueue *
taskqueue_find(const char *name)
{
	struct taskqueue *queue;

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_FOREACH(queue, &taskqueue_queues, tq_link) {
		/*
		 * Take the queue's own lock before comparing names so a
		 * match can be handed back still locked.
		 */
		mtx_lock(&queue->tq_mutex);
		if (!strcmp(queue->tq_name, name)) {
			mtx_unlock(&taskqueue_queues_mutex);
			return queue;	/* tq_mutex intentionally still held */
		}
		mtx_unlock(&queue->tq_mutex);
	}
	mtx_unlock(&taskqueue_queues_mutex);
	return 0;	/* no queue with that name */
}

/*
 * Queue 'task' on 'queue' and fire the queue's enqueue hook.
 *
 * A task that is already pending only has its ta_pending count bumped;
 * it is not linked a second time.  New tasks are inserted so the list
 * stays sorted by descending ta_priority.  Returns 0 on success, or
 * EPIPE if the queue is being freed.
 */
int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	mtx_lock(&queue->tq_mutex);

	/*
	 * Don't allow new tasks on a queue which is being freed.
	 */
	if (queue->tq_draining) {
		mtx_unlock(&queue->tq_mutex);
		return EPIPE;
	}

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		task->ta_pending++;
		mtx_unlock(&queue->tq_mutex);
		return 0;
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		/*
		 * Otherwise walk the list for the first entry with lower
		 * priority and insert in front of it.
		 */
		prev = 0;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if (queue->tq_enqueue)
		queue->tq_enqueue(queue->tq_context);

	mtx_unlock(&queue->tq_mutex);

	return 0;
}

/*
 * Execute every task pending on 'queue'.
 *
 * tq_mutex is dropped while each handler runs, so handlers are free to
 * block or enqueue more work; ta_pending is latched into 'pending' and
 * zeroed before the callback, so an enqueue that races with the handler
 * is seen as new work on the next loop iteration.
 */
void
taskqueue_run(struct taskqueue *queue)
{
	struct task *task;
	int pending;

	mtx_lock(&queue->tq_mutex);
	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		mtx_unlock(&queue->tq_mutex);

		task->ta_func(task->ta_context, pending);

		mtx_lock(&queue->tq_mutex);
	}
	mtx_unlock(&queue->tq_mutex);
}

/* Enqueue hook for the MPSAFE system queue: schedule its swi handler. */
static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

/* swi handler body for taskqueue_swi. */
static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

/* Enqueue hook for the Giant-locked system queue. */
static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

/* swi handler body for taskqueue_swi_giant. */
static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}

/*
 * The two standard system queues.  taskqueue_swi's handler is
 * registered INTR_MPSAFE; taskqueue_swi_giant's is registered without
 * it, i.e. its handler runs under Giant.
 */
TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, 0,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
			 INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, 0,
		 swi_add(NULL, "Giant task queue", taskqueue_swi_giant_run,
			 NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));