kern_alq.c revision 154903
/*-
 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_alq.c 154903 2006-01-27 11:25:06Z pjd $");

#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/alq.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/fcntl.h>
#include <sys/eventhandler.h>

/* Async. Logging Queue */
struct alq {
	int	aq_entmax;		/* Max entries */
	int	aq_entlen;		/* Entry length */
	char	*aq_entbuf;		/* Buffer for stored entries */
	int	aq_flags;		/* Queue flags */
	struct mtx	aq_mtx;		/* Queue lock */
	struct vnode	*aq_vp;		/* Open vnode handle */
	struct ucred	*aq_cred;	/* Credentials of the opening thread */
	struct ale	*aq_first;	/* First ent */
	struct ale	*aq_entfree;	/* First free ent */
	struct ale	*aq_entvalid;	/* First ent valid for writing */
	LIST_ENTRY(alq)	aq_act;		/* List of active queues */
	LIST_ENTRY(alq)	aq_link;	/* List of all queues */
};
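
/*
 * Note: alq_open() carves aq_entbuf into aq_entmax fixed-size entries
 * and links their struct ale headers into a circular singly-linked list,
 * so aq_entfree (next slot to hand out) and aq_entvalid (oldest unwritten
 * slot) simply chase one another around the ring.
 */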

#define	AQ_WANTED	0x0001		/* Wake up sleeper when I/O is done */
#define	AQ_ACTIVE	0x0002		/* On the active list */
#define	AQ_FLUSHING	0x0004		/* Doing I/O */
#define	AQ_SHUTDOWN	0x0008		/* Queue no longer valid */

#define	ALQ_LOCK(alq)	mtx_lock_spin(&(alq)->aq_mtx)
#define	ALQ_UNLOCK(alq)	mtx_unlock_spin(&(alq)->aq_mtx)

static MALLOC_DEFINE(M_ALD, "ALD", "ALD");

/*
 * The ald_mtx protects the ald_queues list and the ald_active list.
 */
static struct mtx ald_mtx;
static LIST_HEAD(, alq) ald_queues;
static LIST_HEAD(, alq) ald_active;
static int ald_shutingdown = 0;
struct thread *ald_thread;
static struct proc *ald_proc;

#define	ALD_LOCK()	mtx_lock(&ald_mtx)
#define	ALD_UNLOCK()	mtx_unlock(&ald_mtx)

/* Daemon functions */
static int ald_add(struct alq *);
static int ald_rem(struct alq *);
static void ald_startup(void *);
static void ald_daemon(void);
static void ald_shutdown(void *, int);
static void ald_activate(struct alq *);
static void ald_deactivate(struct alq *);

/* Internal queue functions */
static void alq_shutdown(struct alq *);
static int alq_doio(struct alq *);

/*
 * Add a new queue to the global list.  Fail if we're shutting down.
 */
static int
ald_add(struct alq *alq)
{
	int error;

	error = 0;

	ALD_LOCK();
	if (ald_shutingdown) {
		error = EBUSY;
		goto done;
	}
	LIST_INSERT_HEAD(&ald_queues, alq, aq_link);
done:
	ALD_UNLOCK();
	return (error);
}

/*
 * Remove a queue from the global list unless we're shutting down.  If so,
 * the ald will take care of cleaning up its resources.
 */
static int
ald_rem(struct alq *alq)
{
	int error;

	error = 0;

	ALD_LOCK();
	if (ald_shutingdown) {
		error = EBUSY;
		goto done;
	}
	LIST_REMOVE(alq, aq_link);
done:
	ALD_UNLOCK();
	return (error);
}

/*
 * Put a queue on the active list.  This will schedule it for writing.
 */
static void
ald_activate(struct alq *alq)
{
	LIST_INSERT_HEAD(&ald_active, alq, aq_act);
	wakeup(&ald_active);
}

static void
ald_deactivate(struct alq *alq)
{
	LIST_REMOVE(alq, aq_act);
	alq->aq_flags &= ~AQ_ACTIVE;
}

static void
ald_startup(void *unused)
{
	mtx_init(&ald_mtx, "ALDmtx", NULL, MTX_DEF|MTX_QUIET);
	LIST_INIT(&ald_queues);
	LIST_INIT(&ald_active);
}

static void
ald_daemon(void)
{
	int needwakeup;
	struct alq *alq;

	mtx_lock(&Giant);

	ald_thread = FIRST_THREAD_IN_PROC(ald_proc);

	EVENTHANDLER_REGISTER(shutdown_pre_sync, ald_shutdown, NULL,
	    SHUTDOWN_PRI_FIRST);

	ALD_LOCK();

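	/*
	 * Service loop: sleep until a queue is put on the active list,
	 * then take it off and flush it to its vnode.
	 */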
	for (;;) {
		while ((alq = LIST_FIRST(&ald_active)) == NULL)
			msleep(&ald_active, &ald_mtx, PWAIT, "aldslp", 0);

		ALQ_LOCK(alq);
		ald_deactivate(alq);
		ALD_UNLOCK();
		needwakeup = alq_doio(alq);
		ALQ_UNLOCK(alq);
		if (needwakeup)
			wakeup(alq);
		ALD_LOCK();
	}
}

static void
ald_shutdown(void *arg, int howto)
{
	struct alq *alq;

	ALD_LOCK();
	ald_shutingdown = 1;

	while ((alq = LIST_FIRST(&ald_queues)) != NULL) {
		LIST_REMOVE(alq, aq_link);
		ALD_UNLOCK();
		alq_shutdown(alq);
		ALD_LOCK();
	}
	ALD_UNLOCK();
}

static void
alq_shutdown(struct alq *alq)
{
	ALQ_LOCK(alq);

	/* Stop any new writers. */
	alq->aq_flags |= AQ_SHUTDOWN;

	/* Drain I/O. */
	while (alq->aq_flags & (AQ_FLUSHING|AQ_ACTIVE)) {
		alq->aq_flags |= AQ_WANTED;
		ALQ_UNLOCK(alq);
		tsleep(alq, PWAIT, "aldclose", 0);
		ALQ_LOCK(alq);
	}
	ALQ_UNLOCK(alq);

	vn_close(alq->aq_vp, FWRITE, alq->aq_cred, curthread);
	crfree(alq->aq_cred);
}

/*
 * Flush all pending data to disk.  This operation will block.
 */
static int
alq_doio(struct alq *alq)
{
	struct thread *td;
	struct mount *mp;
	struct vnode *vp;
	struct uio auio;
	struct iovec aiov[2];
	struct ale *ale;
	struct ale *alstart;
	int totlen;
	int iov;

	vp = alq->aq_vp;
	td = curthread;
	totlen = 0;
	iov = 0;

	alstart = ale = alq->aq_entvalid;
	alq->aq_entvalid = NULL;

	bzero(&aiov, sizeof(aiov));
	bzero(&auio, sizeof(auio));

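	/*
	 * Gather the valid entries into at most two iovecs: runs of
	 * entries that are contiguous in aq_entbuf share one segment,
	 * and a second segment begins where the ring wraps back to the
	 * start of the buffer.
	 */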
	do {
		if (aiov[iov].iov_base == NULL)
			aiov[iov].iov_base = ale->ae_data;
		aiov[iov].iov_len += alq->aq_entlen;
		totlen += alq->aq_entlen;
		/* Check to see if we're wrapping the buffer */
		if (ale->ae_data + alq->aq_entlen != ale->ae_next->ae_data)
			iov++;
		ale->ae_flags &= ~AE_VALID;
		ale = ale->ae_next;
	} while (ale->ae_flags & AE_VALID);

	alq->aq_flags |= AQ_FLUSHING;
	ALQ_UNLOCK(alq);

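	/*
	 * If the loop above incremented iov past the last segment it
	 * actually filled (off the end of the array, or into a slot that
	 * was never started), back up so iov indexes the last valid
	 * iovec.
	 */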
	if (iov == 2 || aiov[iov].iov_base == NULL)
		iov--;

	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_iovcnt = iov + 1;
	auio.uio_resid = totlen;
	auio.uio_td = td;

	/*
	 * Do all of the bookkeeping required to write now.
	 */
	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	VOP_LEASE(vp, td, alq->aq_cred, LEASE_WRITE);
	/*
	 * XXX: VOP_WRITE error checks are ignored.
	 */
#ifdef MAC
	if (mac_check_vnode_write(alq->aq_cred, NOCRED, vp) == 0)
#endif
		VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, alq->aq_cred);
	VOP_UNLOCK(vp, 0, td);
	vn_finished_write(mp);

	ALQ_LOCK(alq);
	alq->aq_flags &= ~AQ_FLUSHING;

	if (alq->aq_entfree == NULL)
		alq->aq_entfree = alstart;

	if (alq->aq_flags & AQ_WANTED) {
		alq->aq_flags &= ~AQ_WANTED;
		return (1);
	}

	return (0);
}

static struct kproc_desc ald_kp = {
        "ALQ Daemon",
        ald_daemon,
        &ald_proc
};

SYSINIT(aldthread, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, kproc_start, &ald_kp)
SYSINIT(ald, SI_SUB_LOCK, SI_ORDER_ANY, ald_startup, NULL)


/* User visible queue functions */

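/*
 * Example usage (a sketch, not part of this module; the path, record
 * layout, queue depth and error handling below are all hypothetical).
 * The queue is sized as 128 fixed-length entries; ALQ_NOWAIT makes
 * alq_write() return EWOULDBLOCK rather than sleep when no free entry
 * is available:
 *
 *	static struct alq *myalq;
 *	struct myrec { u_int seq; char msg[60]; } rec;
 *	int error;
 *
 *	error = alq_open(&myalq, "/var/log/myrec.alq",
 *	    curthread->td_ucred, 0644, sizeof(struct myrec), 128);
 *	if (error == 0)
 *		error = alq_write(myalq, &rec, ALQ_NOWAIT);
 *	...
 *	alq_flush(myalq);
 *	alq_close(myalq);
 */
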
/*
 * Create the queue data structure, allocate the buffer, and open the file.
 */
int
alq_open(struct alq **alqp, const char *file, struct ucred *cred, int cmode,
    int size, int count)
{
	struct thread *td;
	struct nameidata nd;
	struct ale *ale;
	struct ale *alp;
	struct alq *alq;
	char *bufp;
	int flags;
	int error;
	int i;

	*alqp = NULL;
	td = curthread;

	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, file, td);
	flags = FWRITE | O_NOFOLLOW | O_CREAT;

	error = vn_open_cred(&nd, &flags, cmode, cred, -1);
	if (error)
		return (error);

	NDFREE(&nd, NDF_ONLY_PNBUF);
	/* Just unlock; we keep our reference so the vnode stays open. */
	VOP_UNLOCK(nd.ni_vp, 0, td);

	alq = malloc(sizeof(*alq), M_ALD, M_WAITOK|M_ZERO);
	alq->aq_entbuf = malloc(count * size, M_ALD, M_WAITOK|M_ZERO);
	alq->aq_first = malloc(sizeof(*ale) * count, M_ALD, M_WAITOK|M_ZERO);
	alq->aq_vp = nd.ni_vp;
	alq->aq_cred = crhold(cred);
	alq->aq_entmax = count;
	alq->aq_entlen = size;
	alq->aq_entfree = alq->aq_first;

	mtx_init(&alq->aq_mtx, "ALD Queue", NULL, MTX_SPIN|MTX_QUIET);

	bufp = alq->aq_entbuf;
	ale = alq->aq_first;
	alp = NULL;

	/* Match up entries with buffers */
	for (i = 0; i < count; i++) {
		if (alp)
			alp->ae_next = ale;
		ale->ae_data = bufp;
		alp = ale;
		ale++;
		bufp += size;
	}

	alp->ae_next = alq->aq_first;

	if ((error = ald_add(alq)) != 0)
		return (error);
	*alqp = alq;

	return (0);
}

/*
 * Copy a new entry into the queue.  If the operation would block, either
 * wait or return an error, depending on the value of waitok.
 */
int
alq_write(struct alq *alq, void *data, int waitok)
{
	struct ale *ale;

	if ((ale = alq_get(alq, waitok)) == NULL)
		return (EWOULDBLOCK);

	bcopy(data, ale->ae_data, alq->aq_entlen);
	alq_post(alq, ale);

	return (0);
}

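/*
 * Reserve the next free entry, sleeping for one if ALQ_WAITOK is set in
 * waitok.  Returns NULL if the queue is shutting down, or if it is full
 * and ALQ_NOWAIT was given.  On success the entry is returned with the
 * queue lock still held; the caller fills in ae_data and then hands the
 * entry to alq_post(), which drops the lock.
 */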
struct ale *
alq_get(struct alq *alq, int waitok)
{
	struct ale *ale;
	struct ale *aln;

	ale = NULL;

	ALQ_LOCK(alq);

	/* Loop until we get an entry or we're shutting down */
	while ((alq->aq_flags & AQ_SHUTDOWN) == 0 &&
	    (ale = alq->aq_entfree) == NULL &&
	    (waitok & ALQ_WAITOK)) {
		alq->aq_flags |= AQ_WANTED;
		ALQ_UNLOCK(alq);
		tsleep(alq, PWAIT, "alqget", 0);
		ALQ_LOCK(alq);
	}

	if (ale != NULL) {
		aln = ale->ae_next;
		if ((aln->ae_flags & AE_VALID) == 0)
			alq->aq_entfree = aln;
		else
			alq->aq_entfree = NULL;
	} else
		ALQ_UNLOCK(alq);

	return (ale);
}

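/*
 * Mark an entry obtained from alq_get() as valid, release the queue
 * lock taken by alq_get(), and schedule the queue for writing if it is
 * not already on the active list.
 */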
void
alq_post(struct alq *alq, struct ale *ale)
{
	int activate;

	ale->ae_flags |= AE_VALID;

	if (alq->aq_entvalid == NULL)
		alq->aq_entvalid = ale;

	if ((alq->aq_flags & AQ_ACTIVE) == 0) {
		alq->aq_flags |= AQ_ACTIVE;
		activate = 1;
	} else
		activate = 0;

	ALQ_UNLOCK(alq);
	if (activate) {
		ALD_LOCK();
		ald_activate(alq);
		ALD_UNLOCK();
	}
}

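/*
 * Synchronously flush pending entries: if the queue is on the active
 * list, take it over from the ald and do the I/O directly.
 */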
void
alq_flush(struct alq *alq)
{
	int needwakeup = 0;

	ALD_LOCK();
	ALQ_LOCK(alq);
	if (alq->aq_flags & AQ_ACTIVE) {
		ald_deactivate(alq);
		ALD_UNLOCK();
		needwakeup = alq_doio(alq);
	} else
		ALD_UNLOCK();
	ALQ_UNLOCK(alq);

	if (needwakeup)
		wakeup(alq);
}

/*
 * Flush remaining data, close the file and free all resources.
 */
void
alq_close(struct alq *alq)
{
	/*
	 * If we're already shutting down, someone else will flush and
	 * close the vnode.
	 */
	if (ald_rem(alq) != 0)
		return;

	/*
	 * Drain all pending I/O.
	 */
	alq_shutdown(alq);

	mtx_destroy(&alq->aq_mtx);
	free(alq->aq_first, M_ALD);
	free(alq->aq_entbuf, M_ALD);
	free(alq, M_ALD);
}