/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
--- 11 unchanged lines hidden ---
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_taskqueue.c 180588 2008-07-18 07:10:33Z kmacy $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;
static STAILQ_HEAD(taskqueue_list, taskqueue) taskqueue_queues;
static struct mtx taskqueue_queues_mutex;

struct taskqueue {
	STAILQ_ENTRY(taskqueue)	tq_link;
	STAILQ_HEAD(, task)	tq_queue;
	const char		*tq_name;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	struct task		*tq_running;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_PENDING	(1 << 2)
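
/*
 * Summary of the tq_flags bits as used by the enqueue path below:
 * TQ_FLAGS_ACTIVE marks a queue that has been created and not yet torn
 * down; TQ_FLAGS_BLOCKED suppresses the tq_enqueue callback so dispatch of
 * newly queued tasks is deferred; TQ_FLAGS_PENDING records that an enqueue
 * happened while the queue was blocked, so dispatch must be retried when
 * the queue is unblocked.
 */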
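/*
 * A taskqueue can be backed either by a spin mutex (fast queues created
 * with MTX_SPIN) or by an ordinary sleep mutex, so locking, unlocking and
 * sleeping on the queue go through these thin wrappers that test tq_spin.
 */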
static __inline void
TQ_LOCK(struct taskqueue *tq)
{
	if (tq->tq_spin)
		mtx_lock_spin(&tq->tq_mutex);
	else
		mtx_lock(&tq->tq_mutex);
}

static __inline void
TQ_UNLOCK(struct taskqueue *tq)
{
	if (tq->tq_spin)
		mtx_unlock_spin(&tq->tq_mutex);
	else
		mtx_unlock(&tq->tq_mutex);
}

static void	init_taskqueue_list(void *data);

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}

static void
init_taskqueue_list(void *data __unused)
{

--- 4 unchanged lines hidden ---
    NULL);

static struct taskqueue *
_taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 int mtxflags, const char *mtxname)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (!queue)
		return 0;

	STAILQ_INIT(&queue->tq_queue);
	queue->tq_name = name;
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_INSERT_TAIL(&taskqueue_queues, queue, tq_link);
	mtx_unlock(&taskqueue_queues_mutex);

	return queue;
}
--- 62 unchanged lines hidden ---
	struct task *ins;
	struct task *prev;

	TQ_LOCK(queue);

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		task->ta_pending++;
		TQ_UNLOCK(queue);
		return 0;
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
--- 8 unchanged lines hidden ---

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
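	/*
	 * Kick the handler via tq_enqueue unless the queue is currently
	 * blocked, in which case just note that work is pending.
	 */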
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	else
		queue->tq_flags |= TQ_FLAGS_PENDING;

	TQ_UNLOCK(queue);

	return 0;
}

void
--- 52 unchanged lines hidden ---
 */
	if (!owned)
		TQ_UNLOCK(queue);
}

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{
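	/*
	 * Sleep until the task is neither queued (ta_pending != 0) nor the
	 * one currently being run by the queue.
	 */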
	if (queue->tq_spin) {		/* XXX */
		mtx_lock_spin(&queue->tq_mutex);
		while (task->ta_pending != 0 || task == queue->tq_running)
			msleep_spin(task, &queue->tq_mutex, "-", 0);
		mtx_unlock_spin(&queue->tq_mutex);
	} else {
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

		mtx_lock(&queue->tq_mutex);
--- 151 unchanged lines hidden ---
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, 0,
	swi_add(NULL, "Fast task queue", taskqueue_fast_run, NULL,
	SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));
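
/*
 * Illustrative sketch only (not part of the original file): how a typical
 * consumer might create a private thread-backed queue, enqueue a task and
 * drain it.  The names my_tq, my_task, my_handler and sc are hypothetical;
 * the taskqueue(9) calls are the public API of this file and of
 * sys/taskqueue.h, and error handling is omitted for brevity.
 *
 *	static struct taskqueue *my_tq;
 *	static struct task my_task;
 *
 *	static void
 *	my_handler(void *arg, int pending)
 *	{
 *		... do the deferred work for "arg" ...
 *	}
 *
 *	TASK_INIT(&my_task, 0, my_handler, sc);
 *	my_tq = taskqueue_create("my_tq", M_WAITOK,
 *	    taskqueue_thread_enqueue, &my_tq);
 *	taskqueue_start_threads(&my_tq, 1, PWAIT, "my taskq");
 *
 *	taskqueue_enqueue(my_tq, &my_task);
 *	taskqueue_drain(my_tq, &my_task);
 *	taskqueue_free(my_tq);
 */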