geom_io.c revision 131160
/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/geom/geom_io.c 131160 2004-06-26 23:27:42Z rwatson $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>

#include <sys/errno.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <sys/devicestat.h>

#include <vm/uma.h>

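/*
 * All requests flow through three FIFO queues: g_bio_run_down carries
 * requests from consumers towards providers, g_bio_run_up carries
 * completed requests back up, and g_bio_run_task holds deferred work
 * executed from the up path.  "pace" throttles the down path after an
 * ENOMEM, and "biozone" is the UMA zone all struct bio allocations come
 * from.
 */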
static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;
static struct g_bioq g_bio_run_task;

static u_int pace;
static uma_zone_t	biozone;

#include <machine/atomic.h>

static void
g_bioq_lock(struct g_bioq *bq)
{

	mtx_lock(&bq->bio_queue_lock);
}

static void
g_bioq_unlock(struct g_bioq *bq)
{

	mtx_unlock(&bq->bio_queue_lock);
}

#if 0
static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}
#endif

static void
g_bioq_init(struct g_bioq *bq)
{

	TAILQ_INIT(&bq->bio_queue);
	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}

static struct bio *
g_bioq_first(struct g_bioq *bq)
{
	struct bio *bp;

	bp = TAILQ_FIRST(&bq->bio_queue);
	if (bp != NULL) {
		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
		bq->bio_queue_length--;
	}
	return (bp);
}

static void
g_bioq_enqueue_tail(struct bio *bp, struct g_bioq *rq)
{

	g_bioq_lock(rq);
	TAILQ_INSERT_TAIL(&rq->bio_queue, bp, bio_queue);
	rq->bio_queue_length++;
	g_bioq_unlock(rq);
}

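/*
 * Bios are allocated from the UMA zone with M_NOWAIT, so g_new_bio() can
 * return NULL under memory pressure and callers must be prepared for it.
 */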
struct bio *
g_new_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
	return (bp);
}

void
g_destroy_bio(struct bio *bp)
{

	uma_zfree(biozone, bp);
}

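/*
 * g_clone_bio() creates a child request for handing down to the next
 * layer: it copies the command, offset, length, data pointer and
 * attribute, points bio_parent at the original and bumps bio_children.
 * A minimal, illustrative pattern from a geom's start method might look
 * like this (the "sc" softc and its offset field are hypothetical):
 *
 *	bp2 = g_clone_bio(bp);
 *	if (bp2 == NULL) {
 *		g_io_deliver(bp, ENOMEM);
 *		return;
 *	}
 *	bp2->bio_offset += sc->sc_offset;
 *	bp2->bio_done = g_std_done;
 *	g_io_request(bp2, LIST_FIRST(&bp->bio_to->geom->consumer));
 */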
struct bio *
g_clone_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
	if (bp2 != NULL) {
		bp2->bio_parent = bp;
		bp2->bio_cmd = bp->bio_cmd;
		bp2->bio_length = bp->bio_length;
		bp2->bio_offset = bp->bio_offset;
		bp2->bio_data = bp->bio_data;
		bp2->bio_attribute = bp->bio_attribute;
		bp->bio_children++;
	}
	return (bp2);
}

void
g_io_init(void)
{

	g_bioq_init(&g_bio_run_down);
	g_bioq_init(&g_bio_run_up);
	g_bioq_init(&g_bio_run_task);
	biozone = uma_zcreate("g_bio", sizeof (struct bio),
	    NULL, NULL,
	    NULL, NULL,
	    0, 0);
}

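/*
 * Synchronous attribute query: issue a BIO_GETATTR request and sleep in
 * biowait() until it completes.  On return *len holds the number of bytes
 * actually delivered.  Note that the bio comes from g_new_bio() and the
 * M_NOWAIT allocation is assumed to succeed here.
 */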
int
g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
	bp = g_new_bio();
	bp->bio_cmd = BIO_GETATTR;
	bp->bio_done = NULL;
	bp->bio_attribute = attr;
	bp->bio_length = *len;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "ggetattr");
	*len = bp->bio_completed;
	g_destroy_bio(bp);
	return (error);
}

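/*
 * Sanity-check a request before it is passed to the provider: the
 * consumer must hold the appropriate access count, the provider must not
 * be marked in error, and read/write/delete requests must be
 * sector-aligned and start within the media.
 */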
static int
g_io_check(struct bio *bp)
{
	struct g_consumer *cp;
	struct g_provider *pp;

	cp = bp->bio_from;
	pp = bp->bio_to;

	/* Fail if access counters don't allow the operation. */
	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_GETATTR:
		if (cp->acr == 0)
			return (EPERM);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		if (cp->acw == 0)
			return (EPERM);
		break;
	default:
		return (EPERM);
	}
	/* If the provider is marked in error, don't disturb it. */
	if (pp->error)
		return (pp->error);

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		/* A zero sectorsize probably means a lack of media. */
		if (pp->sectorsize == 0)
			return (ENXIO);
		/* Reject I/O not on a sector boundary. */
		if (bp->bio_offset % pp->sectorsize)
			return (EINVAL);
		/* Reject I/O that is not a whole number of sectors. */
		if (bp->bio_length % pp->sectorsize)
			return (EINVAL);
		/* Reject requests before or past the end of media. */
		if (bp->bio_offset < 0)
			return (EIO);
		if (bp->bio_offset > pp->mediasize)
			return (EIO);
		break;
	default:
		break;
	}
	return (0);
}

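/*
 * g_io_request() is how a consumer hands a filled-in bio to its provider.
 * The bio is stamped with its origin and destination, statistics are
 * started, and the request is queued on the down queue to be picked up by
 * g_io_schedule_down().  Completion is reported asynchronously through
 * g_io_deliver().
 */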
void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
	struct g_provider *pp;

	KASSERT(cp != NULL, ("NULL cp in g_io_request"));
	KASSERT(bp != NULL, ("NULL bp in g_io_request"));
	KASSERT(bp->bio_data != NULL, ("NULL bp->data in g_io_request"));
	pp = cp->provider;
	KASSERT(pp != NULL, ("consumer not attached in g_io_request"));

	bp->bio_from = cp;
	bp->bio_to = pp;
	bp->bio_error = 0;
	bp->bio_completed = 0;

	if (g_collectstats & 1)
		devstat_start_transaction_bio(pp->stat, bp);
	pp->nstart++;
	if (g_collectstats & 2)
		devstat_start_transaction_bio(cp->stat, bp);
	cp->nstart++;

	/* Pass it on down. */
	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);
	g_bioq_enqueue_tail(bp, &g_bio_run_down);
	wakeup(&g_wait_down);
}

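/*
 * g_io_deliver() is called on the provider side to report completion.
 * ENOMEM is never delivered to the caller; the request is resubmitted and
 * the down path is paced for a while instead.  Everything else is queued
 * on the up queue, where g_io_schedule_up() will finish it with biodone().
 */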
void
g_io_deliver(struct bio *bp, int error)
{
	struct g_consumer *cp;
	struct g_provider *pp;

	KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
	pp = bp->bio_to;
	KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
	cp = bp->bio_from;
	if (cp == NULL) {
		bp->bio_error = error;
		bp->bio_done(bp);
		return;
	}
	KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver"));
	KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));
	KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0"));
	KASSERT(bp->bio_completed <= bp->bio_length,
	    ("bio_completed can't be greater than bio_length"));

	g_trace(G_T_BIO,
"g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);

	bp->bio_bcount = bp->bio_length;
	bp->bio_resid = bp->bio_bcount - bp->bio_completed;
	if (g_collectstats & 1)
		devstat_end_transaction_bio(pp->stat, bp);
	if (g_collectstats & 2)
		devstat_end_transaction_bio(cp->stat, bp);
	cp->nend++;
	pp->nend++;

	if (error == ENOMEM) {
		if (bootverbose)
			printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
		g_io_request(bp, cp);
		pace++;
		return;
	}
	bp->bio_error = error;
	g_bioq_enqueue_tail(bp, &g_bio_run_up);
	wakeup(&g_wait_up);
}

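/*
 * Main loop of the down path, run from the "g_down" kernel thread.
 * Requests are dequeued, validated with g_io_check(), truncated at the end
 * of the media when necessary, and then handed to the destination geom's
 * start method.  Under WITNESS a private mutex is held across the start
 * call so that a start method which tries to sleep gets flagged.  The
 * msleep() on the local "error" variable is only a timed sleep used for
 * pacing after ENOMEM; nothing ever wakes that channel.
 */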
void
g_io_schedule_down(struct thread *tp __unused)
{
	struct bio *bp;
	off_t excess;
	int error;
#ifdef WITNESS
	struct mtx mymutex;

	bzero(&mymutex, sizeof mymutex);
	mtx_init(&mymutex, "g_xdown", NULL, MTX_DEF);
#endif

	for (;;) {
		g_bioq_lock(&g_bio_run_down);
		bp = g_bioq_first(&g_bio_run_down);
		if (bp == NULL) {
			msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
			    PRIBIO | PDROP, "-", hz/10);
			continue;
		}
		g_bioq_unlock(&g_bio_run_down);
		if (pace > 0) {
			msleep(&error, NULL, PRIBIO, "g_down", hz/10);
			pace--;
		}
		error = g_io_check(bp);
		if (error) {
			g_io_deliver(bp, error);
			continue;
		}
		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_WRITE:
		case BIO_DELETE:
			/* Truncate requests to the end of the media. */
			excess = bp->bio_offset + bp->bio_length;
			if (excess > bp->bio_to->mediasize) {
				excess -= bp->bio_to->mediasize;
				bp->bio_length -= excess;
			}
			/* Deliver zero-length transfers right here. */
			if (bp->bio_length == 0) {
				g_io_deliver(bp, 0);
				continue;
			}
			break;
		default:
			break;
		}
#ifdef WITNESS
		mtx_lock(&mymutex);
#endif
		bp->bio_to->geom->start(bp);
#ifdef WITNESS
		mtx_unlock(&mymutex);
#endif
	}
}

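/*
 * Arrange for func(arg) to be run from the up path's context rather than
 * the caller's.  The task queue is just a second queue behind the up
 * queue's lock and wakeup channel, so no extra synchronization is needed.
 */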
void
bio_taskqueue(struct bio *bp, bio_task_t *func, void *arg)
{
	bp->bio_task = func;
	bp->bio_task_arg = arg;
	/*
	 * The taskqueue is actually just a second queue off the "up"
	 * queue, so we use the same lock.
	 */
	g_bioq_lock(&g_bio_run_up);
	TAILQ_INSERT_TAIL(&g_bio_run_task.bio_queue, bp, bio_queue);
	g_bio_run_task.bio_queue_length++;
	wakeup(&g_wait_up);
	g_bioq_unlock(&g_bio_run_up);
}

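/*
 * Main loop of the up path, run from the "g_up" kernel thread.  Deferred
 * tasks queued by bio_taskqueue() are served first; completed bios are
 * then finished off with biodone().  As in the down path, a WITNESS-only
 * mutex is held across the callbacks to catch methods that sleep.
 */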
void
g_io_schedule_up(struct thread *tp __unused)
{
	struct bio *bp;
#ifdef WITNESS
	struct mtx mymutex;

	bzero(&mymutex, sizeof mymutex);
	mtx_init(&mymutex, "g_xup", NULL, MTX_DEF);
#endif
	for (;;) {
		g_bioq_lock(&g_bio_run_up);
		bp = g_bioq_first(&g_bio_run_task);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
#ifdef WITNESS
			mtx_lock(&mymutex);
#endif
			bp->bio_task(bp->bio_task_arg);
#ifdef WITNESS
			mtx_unlock(&mymutex);
#endif
			continue;
		}
		bp = g_bioq_first(&g_bio_run_up);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
#ifdef WITNESS
			mtx_lock(&mymutex);
#endif
			biodone(bp);
#ifdef WITNESS
			mtx_unlock(&mymutex);
#endif
			continue;
		}
		msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
		    PRIBIO | PDROP, "-", hz/10);
	}
}

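/*
 * Synchronous read helper: allocate a buffer with g_malloc(), issue a
 * BIO_READ and wait for it.  Returns the buffer on success, or NULL with
 * *error set (when the pointer is non-NULL) on failure.  The length must
 * be between one 512 byte sector and DFLTPHYS.  An illustrative use, as
 * a taste method might read a metadata sector from the end of a provider:
 *
 *	buf = g_read_data(cp, pp->mediasize - pp->sectorsize,
 *	    pp->sectorsize, &error);
 */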
void *
g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
{
	struct bio *bp;
	void *ptr;
	int errorc;

	KASSERT(length >= 512 && length <= DFLTPHYS,
		("g_read_data(): invalid length %jd", (intmax_t)length));

	bp = g_new_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	ptr = g_malloc(length, M_WAITOK);
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	errorc = biowait(bp, "gread");
	if (error != NULL)
		*error = errorc;
	g_destroy_bio(bp);
	if (errorc) {
		g_free(ptr);
		ptr = NULL;
	}
	return (ptr);
}

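/*
 * Synchronous write counterpart of g_read_data(): issue a BIO_WRITE for
 * the caller-supplied buffer and wait for the result.  The same length
 * restrictions apply.
 */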
int
g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length >= 512 && length <= DFLTPHYS,
		("g_write_data(): invalid length %jd", (intmax_t)length));

	bp = g_new_bio();
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "gwrite");
	g_destroy_bio(bp);
	return (error);
}

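/*
 * Print a one-line description of a bio, for debugging output.
 */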
void
g_print_bio(struct bio *bp)
{
	const char *pname, *cmd = NULL;

	if (bp->bio_to != NULL)
		pname = bp->bio_to->name;
	else
		pname = "[unknown]";

	switch (bp->bio_cmd) {
	case BIO_GETATTR:
		cmd = "GETATTR";
		printf("%s[%s(attr=%s)]", pname, cmd, bp->bio_attribute);
		return;
	case BIO_READ:
		cmd = "READ";
		/* FALLTHROUGH */
	case BIO_WRITE:
		if (cmd == NULL)
			cmd = "WRITE";
		/* FALLTHROUGH */
	case BIO_DELETE:
		if (cmd == NULL)
			cmd = "DELETE";
		printf("%s[%s(offset=%jd, length=%jd)]", pname, cmd,
		    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
		return;
	default:
		cmd = "UNKNOWN";
		printf("%s[%s()]", pname, cmd);
		return;
	}
	/* NOTREACHED */
}