geom_io.c revision 150177
/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/geom/geom_io.c 150177 2005-09-15 19:05:37Z jhb $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/stack.h>

#include <sys/errno.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <sys/devicestat.h>

#include <vm/uma.h>

#include <machine/atomic.h>

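/*
 * Bios travel through two central queues: g_bio_run_down carries
 * requests from consumers down towards the providers and is serviced
 * by the "g_down" thread; g_bio_run_up carries completed requests
 * back up and is serviced by the "g_up" thread.  g_bio_run_task is
 * an auxiliary queue, also serviced by "g_up", for callbacks queued
 * with bio_taskqueue().  The "pace" counter makes "g_down" back off
 * briefly after a request has failed with ENOMEM.
 */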
static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;
static struct g_bioq g_bio_run_task;

static u_int pace;
static uma_zone_t	biozone;

static void
g_bioq_lock(struct g_bioq *bq)
{

	mtx_lock(&bq->bio_queue_lock);
}

static void
g_bioq_unlock(struct g_bioq *bq)
{

	mtx_unlock(&bq->bio_queue_lock);
}

#if 0
static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}
#endif

static void
g_bioq_init(struct g_bioq *bq)
{

	TAILQ_INIT(&bq->bio_queue);
	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}

static struct bio *
g_bioq_first(struct g_bioq *bq)
{
	struct bio *bp;

	bp = TAILQ_FIRST(&bq->bio_queue);
	if (bp != NULL) {
		KASSERT((bp->bio_flags & BIO_ONQUEUE),
		    ("Bio not on queue bp=%p target %p", bp, bq));
		bp->bio_flags &= ~BIO_ONQUEUE;
		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
		bq->bio_queue_length--;
	}
	return (bp);
}

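/*
 * Allocate a bio without sleeping.  This can return NULL if the bio
 * zone is exhausted, so the caller must be able to cope with that;
 * g_alloc_bio() below is the sleeping variant which cannot fail.
 */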
struct bio *
g_new_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
#ifdef KTR
	if (KTR_COMPILE & KTR_GEOM) {
		struct stack st;

		CTR1(KTR_GEOM, "g_new_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp);
}

struct bio *
g_alloc_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_WAITOK | M_ZERO);
#ifdef KTR
	if (KTR_COMPILE & KTR_GEOM) {
		struct stack st;

		CTR1(KTR_GEOM, "g_alloc_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp);
}

void
g_destroy_bio(struct bio *bp)
{
#ifdef KTR
	if (KTR_COMPILE & KTR_GEOM) {
		struct stack st;

		CTR1(KTR_GEOM, "g_destroy_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	uma_zfree(biozone, bp);
}

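/*
 * Clone a bio so the request can be passed one level further down the
 * stack.  The clone shares the parent's data buffer and I/O parameters
 * and records the parent in bio_parent.  Allocation is M_NOWAIT, so
 * this too can return NULL.
 */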
struct bio *
g_clone_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
	if (bp2 != NULL) {
		bp2->bio_parent = bp;
		bp2->bio_cmd = bp->bio_cmd;
		bp2->bio_length = bp->bio_length;
		bp2->bio_offset = bp->bio_offset;
		bp2->bio_data = bp->bio_data;
		bp2->bio_attribute = bp->bio_attribute;
		bp->bio_children++;
	}
#ifdef KTR
	if (KTR_COMPILE & KTR_GEOM) {
		struct stack st;

		CTR2(KTR_GEOM, "g_clone_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp2);
}

void
g_io_init(void)
{

	g_bioq_init(&g_bio_run_down);
	g_bioq_init(&g_bio_run_up);
	g_bioq_init(&g_bio_run_task);
	biozone = uma_zcreate("g_bio", sizeof (struct bio),
	    NULL, NULL, NULL, NULL, 0, 0);
}

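/*
 * Perform a synchronous BIO_GETATTR request.  On entry *len holds the
 * size of the buffer at "ptr"; on return it holds the number of bytes
 * actually delivered.  A typical call looks like this (sketch; the
 * attribute must of course be one the provider implements):
 *
 *	int fwsectors;
 *	int len = sizeof(fwsectors);
 *
 *	error = g_io_getattr("GEOM::fwsectors", cp, &len, &fwsectors);
 */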
int
g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_GETATTR;
	bp->bio_done = NULL;
	bp->bio_attribute = attr;
	bp->bio_length = *len;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "ggetattr");
	*len = bp->bio_completed;
	g_destroy_bio(bp);
	return (error);
}

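/*
 * Check a request before it is passed down to the provider: the
 * consumer's access counts must permit the operation, the provider
 * must not be marked for error, and the request must be properly
 * aligned and within the bounds of the media.
 */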
static int
g_io_check(struct bio *bp)
{
	struct g_consumer *cp;
	struct g_provider *pp;

	cp = bp->bio_from;
	pp = bp->bio_to;

	/* Fail if the access counters don't allow the operation. */
	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_GETATTR:
		if (cp->acr == 0)
			return (EPERM);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		if (cp->acw == 0)
			return (EPERM);
		break;
	default:
		return (EPERM);
	}
	/* If the provider is marked for error, don't disturb it. */
	if (pp->error)
		return (pp->error);

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		/* A zero sectorsize most likely means no media. */
		if (pp->sectorsize == 0)
			return (ENXIO);
		/* Reject I/O which does not start on a sector boundary. */
		if (bp->bio_offset % pp->sectorsize)
			return (EINVAL);
		/* Reject I/O which is not an integral number of sectors. */
		if (bp->bio_length % pp->sectorsize)
			return (EINVAL);
		/* Reject requests before or past the end of media. */
		if (bp->bio_offset < 0)
			return (EIO);
		if (bp->bio_offset > pp->mediasize)
			return (EIO);
		break;
	default:
		break;
	}
	return (0);
}

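/*
 * Hand a request from a consumer to its attached provider.  The bio is
 * stamped with its origin and destination, statistics collection is
 * started and the request is put on the down queue, where the "g_down"
 * thread will pick it up and hand it to the provider's start method.
 */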
void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
	struct g_provider *pp;

	KASSERT(cp != NULL, ("NULL cp in g_io_request"));
	KASSERT(bp != NULL, ("NULL bp in g_io_request"));
	KASSERT(bp->bio_data != NULL, ("NULL bp->data in g_io_request"));
	pp = cp->provider;
	KASSERT(pp != NULL, ("consumer not attached in g_io_request"));

	if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_DELETE)) {
		KASSERT(bp->bio_offset % cp->provider->sectorsize == 0,
		    ("wrong offset %jd for sectorsize %u",
		    (intmax_t)bp->bio_offset, cp->provider->sectorsize));
		KASSERT(bp->bio_length % cp->provider->sectorsize == 0,
		    ("wrong length %jd for sectorsize %u",
		    (intmax_t)bp->bio_length, cp->provider->sectorsize));
	}

	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);

	bp->bio_from = cp;
	bp->bio_to = pp;
	bp->bio_error = 0;
	bp->bio_completed = 0;

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));
	bp->bio_flags |= BIO_ONQUEUE;

	binuptime(&bp->bio_t0);

	/*
	 * The statistics collection itself is lockless, but a given
	 * instance of the statistics must not be updated from more
	 * than one thread at a time, so grab the lock first.
	 */
	g_bioq_lock(&g_bio_run_down);
	if (g_collectstats & 1)
		devstat_start_transaction(pp->stat, &bp->bio_t0);
	if (g_collectstats & 2)
		devstat_start_transaction(cp->stat, &bp->bio_t0);

	pp->nstart++;
	cp->nstart++;
	TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue);
	g_bio_run_down.bio_queue_length++;
	g_bioq_unlock(&g_bio_run_down);

	/* Pass it on down. */
	wakeup(&g_wait_down);
}

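/*
 * Report completion of a request back towards the consumer.  Normally
 * the bio is put on the up queue for the "g_up" thread to finish off
 * with biodone().  A request which failed with ENOMEM is instead
 * reissued with g_io_request() in the hope that memory has become
 * available in the meantime, and "pace" is bumped to slow down the
 * "g_down" thread.
 */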
void
g_io_deliver(struct bio *bp, int error)
{
	struct g_consumer *cp;
	struct g_provider *pp;

	KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
	pp = bp->bio_to;
	KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
	cp = bp->bio_from;
	if (cp == NULL) {
		bp->bio_error = error;
		bp->bio_done(bp);
		return;
	}
	KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver"));
	KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));
	KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0"));
	KASSERT(bp->bio_completed <= bp->bio_length,
	    ("bio_completed can't be greater than bio_length"));

	g_trace(G_T_BIO,
"g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));

	/*
	 * XXX: The next two assignments don't belong here.
	 */
	bp->bio_bcount = bp->bio_length;
	bp->bio_resid = bp->bio_bcount - bp->bio_completed;

	/*
	 * The statistics collection itself is lockless, but a given
	 * instance of the statistics must not be updated from more
	 * than one thread at a time, so grab the lock first.
	 */
	g_bioq_lock(&g_bio_run_up);
	if (g_collectstats & 1)
		devstat_end_transaction_bio(pp->stat, bp);
	if (g_collectstats & 2)
		devstat_end_transaction_bio(cp->stat, bp);

	cp->nend++;
	pp->nend++;
	if (error != ENOMEM) {
		bp->bio_error = error;
		TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp, bio_queue);
		bp->bio_flags |= BIO_ONQUEUE;
		g_bio_run_up.bio_queue_length++;
		g_bioq_unlock(&g_bio_run_up);
		wakeup(&g_wait_up);
		return;
	}
	g_bioq_unlock(&g_bio_run_up);

	if (bootverbose)
		printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
	bp->bio_children = 0;
	bp->bio_inbed = 0;
	g_io_request(bp, cp);
	pace++;
	return;
}

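/*
 * Main loop of the "g_down" thread: pull requests off the down queue,
 * validate them with g_io_check(), clip them to the size of the media
 * and hand them to the start method of the destination geom.  The
 * start method is called with sleeping disallowed.
 */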
void
g_io_schedule_down(struct thread *tp __unused)
{
	struct bio *bp;
	off_t excess;
	int error;

	for (;;) {
		g_bioq_lock(&g_bio_run_down);
		bp = g_bioq_first(&g_bio_run_down);
		if (bp == NULL) {
			CTR0(KTR_GEOM, "g_down going to sleep");
			msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
			    PRIBIO | PDROP, "-", hz/10);
			continue;
		}
		CTR0(KTR_GEOM, "g_down has work to do");
		g_bioq_unlock(&g_bio_run_down);
		if (pace > 0) {
			CTR1(KTR_GEOM, "g_down pacing self (pace %d)", pace);
			/* Nothing wakes this channel; only the timeout matters. */
			msleep(&error, NULL, PRIBIO, "g_down", hz/10);
			pace--;
		}
		error = g_io_check(bp);
		if (error) {
			CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider "
			    "%s returned %d", bp, bp->bio_to->name, error);
			g_io_deliver(bp, error);
			continue;
		}
		CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp,
		    bp->bio_to->name);
		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_WRITE:
		case BIO_DELETE:
			/* Truncate requests to the end of the provider's media. */
			/*
			 * XXX: What if we truncate because of offset being
			 * bad, not length?
			 */
			excess = bp->bio_offset + bp->bio_length;
			if (excess > bp->bio_to->mediasize) {
				excess -= bp->bio_to->mediasize;
				bp->bio_length -= excess;
				if (excess > 0)
					CTR3(KTR_GEOM, "g_down truncated bio "
					    "%p provider %s by %d", bp,
					    bp->bio_to->name, excess);
			}
			/* Deliver zero length transfers right here. */
			if (bp->bio_length == 0) {
				g_io_deliver(bp, 0);
				CTR2(KTR_GEOM, "g_down terminated 0-length "
				    "bp %p provider %s", bp, bp->bio_to->name);
				continue;
			}
			break;
		default:
			break;
		}
		THREAD_NO_SLEEPING();
		CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld "
		    "len %ld", bp, bp->bio_to->name, bp->bio_offset,
		    bp->bio_length);
		bp->bio_to->geom->start(bp);
		THREAD_SLEEPING_OK();
	}
}

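/*
 * Arrange for "func" to be called with "arg" from the "g_up" thread,
 * using the bio as the carrier.  The task queue is serviced ahead of
 * the regular up queue.
 */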
void
bio_taskqueue(struct bio *bp, bio_task_t *func, void *arg)
{

	bp->bio_task = func;
	bp->bio_task_arg = arg;
	/*
	 * The taskqueue is actually just a second queue off the "up"
	 * queue, so we use the same lock.
	 */
	g_bioq_lock(&g_bio_run_up);
	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p target taskq", bp));
	bp->bio_flags |= BIO_ONQUEUE;
	TAILQ_INSERT_TAIL(&g_bio_run_task.bio_queue, bp, bio_queue);
	g_bio_run_task.bio_queue_length++;
	wakeup(&g_wait_up);
	g_bioq_unlock(&g_bio_run_up);
}

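/*
 * Main loop of the "g_up" thread: run queued tasks first, then finish
 * off completed bios from the up queue with biodone(), in both cases
 * with sleeping disallowed.
 */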
void
g_io_schedule_up(struct thread *tp __unused)
{
	struct bio *bp;

	for (;;) {
		g_bioq_lock(&g_bio_run_up);
		bp = g_bioq_first(&g_bio_run_task);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
			THREAD_NO_SLEEPING();
			CTR1(KTR_GEOM, "g_up processing task bp %p", bp);
			bp->bio_task(bp->bio_task_arg);
			THREAD_SLEEPING_OK();
			continue;
		}
		bp = g_bioq_first(&g_bio_run_up);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
			THREAD_NO_SLEEPING();
			CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off "
			    "%ld len %ld", bp, bp->bio_to->name,
			    bp->bio_offset, bp->bio_length);
			biodone(bp);
			THREAD_SLEEPING_OK();
			continue;
		}
		CTR0(KTR_GEOM, "g_up going to sleep");
		msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
		    PRIBIO | PDROP, "-", hz/10);
	}
}

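/*
 * Read "length" bytes at "offset" through the consumer into a freshly
 * allocated buffer.  The request is issued synchronously; on success
 * the caller owns the buffer and must g_free() it.  On failure NULL is
 * returned and the error number is stored through "error" if that is
 * non-NULL.  For example (sketch, assuming an open consumer "cp"):
 *
 *	int error;
 *	void *buf;
 *
 *	buf = g_read_data(cp, 0, cp->provider->sectorsize, &error);
 *	if (buf == NULL)
 *		return (error);
 *	...
 *	g_free(buf);
 */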
void *
g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
{
	struct bio *bp;
	void *ptr;
	int errorc;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_read_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	ptr = g_malloc(length, M_WAITOK);
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	errorc = biowait(bp, "gread");
	if (error != NULL)
		*error = errorc;
	g_destroy_bio(bp);
	if (errorc) {
		g_free(ptr);
		ptr = NULL;
	}
	return (ptr);
}

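/*
 * Write "length" bytes from "ptr" at "offset" through the consumer and
 * wait for the request to complete.  The same length and alignment
 * rules as for g_read_data() apply; the caller keeps ownership of the
 * buffer.
 */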
int
g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_write_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "gwrite");
	g_destroy_bio(bp);
	return (error);
}

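/*
 * Print a one-line description of a bio, for debugging purposes.
 */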
void
g_print_bio(struct bio *bp)
{
	const char *pname, *cmd = NULL;

	if (bp->bio_to != NULL)
		pname = bp->bio_to->name;
	else
		pname = "[unknown]";

	switch (bp->bio_cmd) {
	case BIO_GETATTR:
		cmd = "GETATTR";
		printf("%s[%s(attr=%s)]", pname, cmd, bp->bio_attribute);
		return;
	case BIO_READ:
		cmd = "READ";
		/* FALLTHROUGH */
	case BIO_WRITE:
		if (cmd == NULL)
			cmd = "WRITE";
		/* FALLTHROUGH */
	case BIO_DELETE:
		if (cmd == NULL)
			cmd = "DELETE";
		printf("%s[%s(offset=%jd, length=%jd)]", pname, cmd,
		    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
		return;
	default:
		cmd = "UNKNOWN";
		printf("%s[%s()]", pname, cmd);
		return;
	}
	/* NOTREACHED */
}