/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>

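/*
 * Simple userland implementation of the kernel taskq interface, built
 * against the libzpool zfs_context.h emulation layer.  Typical usage
 * (a minimal sketch; my_func and my_arg are illustrative names, not
 * part of this file):
 *
 *	taskq_t *tq = taskq_create("my_taskq", 4, minclsyspri,
 *	    4, 64, TASKQ_PREPOPULATE);
 *	(void) taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *	taskq_wait(tq);
 *	taskq_destroy(tq);
 */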
int taskq_now;			/* nonzero: run dispatched tasks inline */
taskq_t *system_taskq;

/*
 * A queued task, doubly linked into its taskq's circular pending list.
 */
typedef struct task {
	struct task	*task_next;
	struct task	*task_prev;
	task_func_t	*task_func;
	void		*task_arg;
} task_t;

#define	TASKQ_ACTIVE	0x00010000	/* queue accepts new tasks */

struct taskq {
	kmutex_t	tq_lock;	/* protects the fields below */
	krwlock_t	tq_threadlock;	/* read-held while a task runs */
	kcondvar_t	tq_dispatch_cv;	/* signaled when a task is queued */
	kcondvar_t	tq_wait_cv;	/* signaled when the queue drains */
	thread_t	*tq_threadlist;	/* worker thread IDs */
	int		tq_flags;	/* TASKQ_ACTIVE and creation flags */
	int		tq_active;	/* threads running or ready to run */
	int		tq_nthreads;	/* worker threads still alive */
	int		tq_nalloc;	/* task structures allocated */
	int		tq_minalloc;	/* cache at least this many */
	int		tq_maxalloc;	/* soft cap on tq_nalloc */
	kcondvar_t	tq_maxalloc_cv;	/* signaled by task_free() */
	int		tq_maxalloc_wait; /* allocators waiting at the cap */
	task_t		*tq_freelist;	/* cached free task structures */
	task_t		tq_task;	/* head of the circular pending list */
};

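/*
 * Allocate a task structure.  The free list is used once at least
 * tq_minalloc structures exist; otherwise a new one comes from
 * kmem_alloc(), with tq_lock dropped across the allocation.  A KM_SLEEP
 * caller that has reached tq_maxalloc is throttled (see below) rather
 * than refused; a non-sleeping caller gets NULL.  Called and returns
 * with tq_lock held.
 */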
static task_t *
task_alloc(taskq_t *tq, int tqflags)
{
	task_t *t;
	int rv;

again:	if ((t = tq->tq_freelist) != NULL && tq->tq_nalloc >= tq->tq_minalloc) {
		tq->tq_freelist = t->task_next;
	} else {
		if (tq->tq_nalloc >= tq->tq_maxalloc) {
			if (!(tqflags & KM_SLEEP))
				return (NULL);

			/*
			 * We don't want to exceed tq_maxalloc, but we can't
			 * wait for other tasks to complete (and thus free up
			 * task structures) without risking deadlock with
			 * the caller.  So we just delay for one second
			 * to throttle the allocation rate.  If tasks complete
			 * before the one-second timeout expires, task_free()
			 * will signal us and we will immediately retry the
			 * allocation.
			 */
			tq->tq_maxalloc_wait++;
			rv = cv_timedwait(&tq->tq_maxalloc_cv,
			    &tq->tq_lock, ddi_get_lbolt() + hz);
			tq->tq_maxalloc_wait--;
			if (rv > 0)
				goto again;		/* signaled */
		}
		mutex_exit(&tq->tq_lock);

		t = kmem_alloc(sizeof (task_t), tqflags & KM_SLEEP);

		mutex_enter(&tq->tq_lock);
		if (t != NULL)
			tq->tq_nalloc++;
	}
	return (t);
}

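/*
 * Return a task structure to the free list, or kmem_free() it (dropping
 * tq_lock around the free) once more than tq_minalloc are allocated.
 * Wakes any allocator throttled in task_alloc().  Called with tq_lock
 * held.
 */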
static void
task_free(taskq_t *tq, task_t *t)
{
	if (tq->tq_nalloc <= tq->tq_minalloc) {
		t->task_next = tq->tq_freelist;
		tq->tq_freelist = t;
	} else {
		tq->tq_nalloc--;
		mutex_exit(&tq->tq_lock);
		kmem_free(t, sizeof (task_t));
		mutex_enter(&tq->tq_lock);
	}

	if (tq->tq_maxalloc_wait)
		cv_signal(&tq->tq_maxalloc_cv);
}

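/*
 * Queue a task for asynchronous execution.  TQ_FRONT inserts at the
 * head of the pending list instead of the tail.  Returns a nonzero
 * taskqid on success, or 0 if no task structure could be allocated
 * (possible only for non-sleeping dispatches).  If taskq_now is set,
 * the function is simply run in the caller's context.
 */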
taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t tqflags)
{
	task_t *t;

	if (taskq_now) {
		func(arg);
		return (1);
	}

	mutex_enter(&tq->tq_lock);
	ASSERT(tq->tq_flags & TASKQ_ACTIVE);
	if ((t = task_alloc(tq, tqflags)) == NULL) {
		mutex_exit(&tq->tq_lock);
		return (0);
	}
	if (tqflags & TQ_FRONT) {
		t->task_next = tq->tq_task.task_next;
		t->task_prev = &tq->tq_task;
	} else {
		t->task_next = &tq->tq_task;
		t->task_prev = tq->tq_task.task_prev;
	}
	t->task_next->task_prev = t;
	t->task_prev->task_next = t;
	t->task_func = func;
	t->task_arg = arg;
	cv_signal(&tq->tq_dispatch_cv);
	mutex_exit(&tq->tq_lock);
	return (1);
}

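/*
 * Wait until the pending list is empty and no worker is running a task.
 */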
void
taskq_wait(taskq_t *tq)
{
	mutex_enter(&tq->tq_lock);
	while (tq->tq_task.task_next != &tq->tq_task || tq->tq_active != 0)
		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
	mutex_exit(&tq->tq_lock);
}

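/*
 * Worker thread loop: pull tasks off the pending list and run them
 * until taskq_destroy() clears TASKQ_ACTIVE.  tq_active counts workers
 * not blocked waiting for work; when it reaches zero with an empty
 * list, taskq_wait() callers are woken.  Task functions run with
 * tq_threadlock held as reader and tq_lock dropped.
 */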
static void *
taskq_thread(void *arg)
{
	taskq_t *tq = arg;
	task_t *t;

	mutex_enter(&tq->tq_lock);
	while (tq->tq_flags & TASKQ_ACTIVE) {
		if ((t = tq->tq_task.task_next) == &tq->tq_task) {
			if (--tq->tq_active == 0)
				cv_broadcast(&tq->tq_wait_cv);
			cv_wait(&tq->tq_dispatch_cv, &tq->tq_lock);
			tq->tq_active++;
			continue;
		}
		t->task_prev->task_next = t->task_next;
		t->task_next->task_prev = t->task_prev;
		mutex_exit(&tq->tq_lock);

		rw_enter(&tq->tq_threadlock, RW_READER);
		t->task_func(t->task_arg);
		rw_exit(&tq->tq_threadlock);

		mutex_enter(&tq->tq_lock);
		task_free(tq, t);
	}
	tq->tq_nthreads--;
	cv_broadcast(&tq->tq_wait_cv);
	mutex_exit(&tq->tq_lock);
	return (NULL);
}

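/*
 * Create a taskq with nthreads worker threads, or, with
 * TASKQ_THREADS_CPU_PCT, with nthreads interpreted as a percentage of
 * the online CPUs (at least one thread).  TASKQ_PREPOPULATE stocks the
 * free list with minalloc task structures up front.  The name and pri
 * arguments exist for kernel-interface compatibility and are unused
 * here.
 */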
/*ARGSUSED*/
taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri,
	int minalloc, int maxalloc, uint_t flags)
{
	taskq_t *tq = kmem_zalloc(sizeof (taskq_t), KM_SLEEP);
	int t;

	if (flags & TASKQ_THREADS_CPU_PCT) {
		int pct;
		ASSERT3S(nthreads, >=, 0);
		ASSERT3S(nthreads, <=, 100);
		pct = MIN(nthreads, 100);
		pct = MAX(pct, 0);

		nthreads = (sysconf(_SC_NPROCESSORS_ONLN) * pct) / 100;
		nthreads = MAX(nthreads, 1);	/* need at least 1 thread */
	} else {
		ASSERT3S(nthreads, >=, 1);
	}

	rw_init(&tq->tq_threadlock, NULL, RW_DEFAULT, NULL);
	mutex_init(&tq->tq_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&tq->tq_dispatch_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tq->tq_wait_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tq->tq_maxalloc_cv, NULL, CV_DEFAULT, NULL);
	tq->tq_flags = flags | TASKQ_ACTIVE;
	tq->tq_active = nthreads;
	tq->tq_nthreads = nthreads;
	tq->tq_minalloc = minalloc;
	tq->tq_maxalloc = maxalloc;
	tq->tq_task.task_next = &tq->tq_task;
	tq->tq_task.task_prev = &tq->tq_task;
	tq->tq_threadlist = kmem_alloc(nthreads * sizeof (thread_t), KM_SLEEP);

	if (flags & TASKQ_PREPOPULATE) {
		mutex_enter(&tq->tq_lock);
		while (minalloc-- > 0)
			task_free(tq, task_alloc(tq, KM_SLEEP));
		mutex_exit(&tq->tq_lock);
	}

	for (t = 0; t < nthreads; t++)
		(void) thr_create(0, 0, taskq_thread,
		    tq, THR_BOUND, &tq->tq_threadlist[t]);

	return (tq);
}

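/*
 * Drain the queue, deactivate it, reap the worker threads, release all
 * cached task structures, and free the taskq itself.
 */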
void
taskq_destroy(taskq_t *tq)
{
	int t;
	int nthreads = tq->tq_nthreads;

	taskq_wait(tq);

	mutex_enter(&tq->tq_lock);

	tq->tq_flags &= ~TASKQ_ACTIVE;
	cv_broadcast(&tq->tq_dispatch_cv);

	while (tq->tq_nthreads != 0)
		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);

	tq->tq_minalloc = 0;
	while (tq->tq_nalloc != 0) {
		ASSERT(tq->tq_freelist != NULL);
		task_free(tq, task_alloc(tq, KM_SLEEP));
	}

	mutex_exit(&tq->tq_lock);

	for (t = 0; t < nthreads; t++)
		(void) thr_join(tq->tq_threadlist[t], NULL, NULL);

	kmem_free(tq->tq_threadlist, nthreads * sizeof (thread_t));

	rw_destroy(&tq->tq_threadlock);
	mutex_destroy(&tq->tq_lock);
	cv_destroy(&tq->tq_dispatch_cv);
	cv_destroy(&tq->tq_wait_cv);
	cv_destroy(&tq->tq_maxalloc_cv);

	kmem_free(tq, sizeof (taskq_t));
}

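/*
 * Return nonzero if 't' names one of this taskq's worker threads; 't'
 * is a thread identity cast to void *.
 */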
int
taskq_member(taskq_t *tq, void *t)
{
	int i;

	if (taskq_now)
		return (1);

	for (i = 0; i < tq->tq_nthreads; i++)
		if (tq->tq_threadlist[i] == (thread_t)(uintptr_t)t)
			return (1);

	return (0);
}

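/*
 * Create the global system taskq used by libzpool consumers.
 */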
void
system_taskq_init(void)
{
	system_taskq = taskq_create("system_taskq", 64, minclsyspri, 4, 512,
	    TASKQ_DYNAMIC | TASKQ_PREPOPULATE);
}

void
system_taskq_fini(void)
{
	taskq_destroy(system_taskq);
	system_taskq = NULL; /* defensive */
}