/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/geom/geom_io.c 287405 2015-09-02 17:29:30Z imp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>

#include <sys/errno.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <sys/devicestat.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

static int	g_io_transient_map_bio(struct bio *bp);

static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;
static struct g_bioq g_bio_run_task;

/*
 * Pace is a hint that we've recently had trouble allocating
 * bios, so we should back off trying to send I/O down the stack
 * a bit to let the problem resolve. When pacing, we also turn
 * off direct dispatch to further reduce memory pressure from I/Os
 * there, at the expense of some added latency while the memory
 * pressure exists. See g_io_schedule_down() for more details
 * and limitations.
 */
static volatile u_int pace;

static uma_zone_t	biozone;

/*
 * The head of the list of classifiers used in g_io_request.
 * Use g_register_classifier() and g_unregister_classifier()
 * to add entries to and remove entries from the list.
 * Classifiers are invoked in registration order.
 */
static TAILQ_HEAD(g_classifier_tailq, g_classifier_hook)
    g_classifier_tailq = TAILQ_HEAD_INITIALIZER(g_classifier_tailq);

#include <machine/atomic.h>

static void
g_bioq_lock(struct g_bioq *bq)
{

	mtx_lock(&bq->bio_queue_lock);
}

static void
g_bioq_unlock(struct g_bioq *bq)
{

	mtx_unlock(&bq->bio_queue_lock);
}

#if 0
static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}
#endif

static void
g_bioq_init(struct g_bioq *bq)
{

	TAILQ_INIT(&bq->bio_queue);
	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}

static struct bio *
g_bioq_first(struct g_bioq *bq)
{
	struct bio *bp;

	bp = TAILQ_FIRST(&bq->bio_queue);
	if (bp != NULL) {
		KASSERT((bp->bio_flags & BIO_ONQUEUE),
		    ("Bio not on queue bp=%p target %p", bp, bq));
		bp->bio_flags &= ~BIO_ONQUEUE;
		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
		bq->bio_queue_length--;
	}
	return (bp);
}

struct bio *
g_new_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_new_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp);
}

struct bio *
g_alloc_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_WAITOK | M_ZERO);
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_alloc_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp);
}

void
g_destroy_bio(struct bio *bp)
{
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_destroy_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	uma_zfree(biozone, bp);
}

struct bio *
g_clone_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
	if (bp2 != NULL) {
		bp2->bio_parent = bp;
		bp2->bio_cmd = bp->bio_cmd;
		/*
		 *  BIO_ORDERED flag may be used by disk drivers to enforce
		 *  ordering restrictions, so this flag needs to be cloned.
		 *  BIO_UNMAPPED should be inherited, to properly indicate
		 *  which way the buffer is passed.
		 *  Other bio flags are not suitable for cloning.
		 */
		bp2->bio_flags = bp->bio_flags & (BIO_ORDERED | BIO_UNMAPPED);
		bp2->bio_length = bp->bio_length;
		bp2->bio_offset = bp->bio_offset;
		bp2->bio_data = bp->bio_data;
		bp2->bio_ma = bp->bio_ma;
		bp2->bio_ma_n = bp->bio_ma_n;
		bp2->bio_ma_offset = bp->bio_ma_offset;
		bp2->bio_attribute = bp->bio_attribute;
		/* Inherit classification info from the parent */
		bp2->bio_classifier1 = bp->bio_classifier1;
		bp2->bio_classifier2 = bp->bio_classifier2;
		bp->bio_children++;
	}
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR2(KTR_GEOM, "g_clone_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp2);
}
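
/*
 * Example (illustrative sketch, not part of this file): the
 * clone-and-forward pattern used by transforming GEOM classes.  In its
 * start method a class clones the incoming bio, points the clone at its
 * own consumer, and lets g_std_done() finish the parent when the clone
 * completes.  "g_sketch_start", "g_sketch_softc" and "sc_consumer" are
 * hypothetical names; g_clone_bio(), g_io_request(), g_io_deliver() and
 * g_std_done() are real GEOM interfaces.
 */
#if 0
static void
g_sketch_start(struct bio *bp)
{
	struct g_sketch_softc *sc = bp->bio_to->geom->softc;
	struct bio *cbp;

	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		/* Clone failed; complete the parent with an error. */
		g_io_deliver(bp, ENOMEM);
		return;
	}
	cbp->bio_done = g_std_done;	/* standard parent-completion hook */
	/* A real class would adjust cbp->bio_offset and friends here. */
	g_io_request(cbp, sc->sc_consumer);
}
#endif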

struct bio *
g_duplicate_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_WAITOK | M_ZERO);
	bp2->bio_flags = bp->bio_flags & BIO_UNMAPPED;
	bp2->bio_parent = bp;
	bp2->bio_cmd = bp->bio_cmd;
	bp2->bio_length = bp->bio_length;
	bp2->bio_offset = bp->bio_offset;
	bp2->bio_data = bp->bio_data;
	bp2->bio_ma = bp->bio_ma;
	bp2->bio_ma_n = bp->bio_ma_n;
	bp2->bio_ma_offset = bp->bio_ma_offset;
	bp2->bio_attribute = bp->bio_attribute;
	bp->bio_children++;
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR2(KTR_GEOM, "g_duplicate_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp2);
}

void
g_io_init()
{

	g_bioq_init(&g_bio_run_down);
	g_bioq_init(&g_bio_run_up);
	g_bioq_init(&g_bio_run_task);
	biozone = uma_zcreate("g_bio", sizeof (struct bio),
	    NULL, NULL,
	    NULL, NULL,
	    0, 0);
}

int
g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_GETATTR;
	bp->bio_done = NULL;
	bp->bio_attribute = attr;
	bp->bio_length = *len;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "ggetattr");
	*len = bp->bio_completed;
	g_destroy_bio(bp);
	return (error);
}
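
/*
 * Example (illustrative sketch, not part of this file): querying a
 * provider attribute through an open consumer.  "GEOM::candelete" is a
 * real attribute name; the local variables are hypothetical.
 */
#if 0
	int candelete, error, len;

	len = sizeof(candelete);
	error = g_io_getattr("GEOM::candelete", cp, &len, &candelete);
	if (error == 0 && candelete != 0) {
		/* The provider supports BIO_DELETE. */
	}
#endif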

int
g_io_flush(struct g_consumer *cp)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_flush(%s)", cp->provider->name);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_FLUSH;
	bp->bio_flags |= BIO_ORDERED;
	bp->bio_done = NULL;
	bp->bio_attribute = NULL;
	bp->bio_offset = cp->provider->mediasize;
	bp->bio_length = 0;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gflush");
	g_destroy_bio(bp);
	return (error);
}

static int
g_io_check(struct bio *bp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	off_t excess;
	int error;

	cp = bp->bio_from;
	pp = bp->bio_to;

	/* Fail if access counters don't allow the operation. */
	switch(bp->bio_cmd) {
	case BIO_READ:
	case BIO_GETATTR:
		if (cp->acr == 0)
			return (EPERM);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		if (cp->acw == 0)
			return (EPERM);
		break;
	default:
		return (EPERM);
	}
	/* If the provider is marked for error, don't disturb it. */
	if (pp->error)
		return (pp->error);
	if (cp->flags & G_CF_ORPHAN)
		return (ENXIO);

	switch(bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		/* Zero sectorsize or mediasize is probably a lack of media. */
		if (pp->sectorsize == 0 || pp->mediasize == 0)
			return (ENXIO);
		/* Reject I/O not on sector boundary */
		if (bp->bio_offset % pp->sectorsize)
			return (EINVAL);
		/* Reject I/O not integral sector long */
		if (bp->bio_length % pp->sectorsize)
			return (EINVAL);
		/* Reject requests before or past the end of media. */
		if (bp->bio_offset < 0)
			return (EIO);
		if (bp->bio_offset > pp->mediasize)
			return (EIO);

		/* Truncate requests to the end of the provider's media. */
		excess = bp->bio_offset + bp->bio_length;
		if (excess > bp->bio_to->mediasize) {
			KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 ||
			    round_page(bp->bio_ma_offset +
			    bp->bio_length) / PAGE_SIZE == bp->bio_ma_n,
			    ("excess bio %p too short", bp));
			excess -= bp->bio_to->mediasize;
			bp->bio_length -= excess;
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				bp->bio_ma_n = round_page(bp->bio_ma_offset +
				    bp->bio_length) / PAGE_SIZE;
			}
			if (excess > 0)
				CTR3(KTR_GEOM, "g_down truncated bio "
				    "%p provider %s by %d", bp,
				    bp->bio_to->name, excess);
		}

		/* Deliver zero length transfers right here. */
		if (bp->bio_length == 0) {
			CTR2(KTR_GEOM, "g_down terminated 0-length "
			    "bp %p provider %s", bp, bp->bio_to->name);
			return (0);
		}

		if ((bp->bio_flags & BIO_UNMAPPED) != 0 &&
		    (bp->bio_to->flags & G_PF_ACCEPT_UNMAPPED) == 0 &&
		    (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE)) {
			if ((error = g_io_transient_map_bio(bp)) >= 0)
				return (error);
		}
		break;
	default:
		break;
	}
	return (EJUSTRETURN);
}

/*
 * bio classification support.
 *
 * g_register_classifier() and g_unregister_classifier()
 * are used to add/remove a classifier from the list.
 * The list is protected using the g_bio_run_down lock,
 * because the classifiers are called in this path.
 *
 * g_io_request() passes bios that are not already classified
 * (i.e. those with bio_classifier1 == NULL) to g_run_classifiers().
 * Classifiers can store their result in the two fields
 * bio_classifier1 and bio_classifier2.
 * A classifier that updates one of the fields should
 * return a non-zero value.
 * If no classifier updates the field, g_run_classifiers() sets
 * bio_classifier1 = BIO_NOTCLASSIFIED to avoid further calls.
 */

int
g_register_classifier(struct g_classifier_hook *hook)
{

	g_bioq_lock(&g_bio_run_down);
	TAILQ_INSERT_TAIL(&g_classifier_tailq, hook, link);
	g_bioq_unlock(&g_bio_run_down);

	return (0);
}

void
g_unregister_classifier(struct g_classifier_hook *hook)
{
	struct g_classifier_hook *entry;

	g_bioq_lock(&g_bio_run_down);
	TAILQ_FOREACH(entry, &g_classifier_tailq, link) {
		if (entry == hook) {
			TAILQ_REMOVE(&g_classifier_tailq, hook, link);
			break;
		}
	}
	g_bioq_unlock(&g_bio_run_down);
}

static void
g_run_classifiers(struct bio *bp)
{
	struct g_classifier_hook *hook;
	int classified = 0;

	TAILQ_FOREACH(hook, &g_classifier_tailq, link)
		classified |= hook->func(hook->arg, bp);

	if (!classified)
		bp->bio_classifier1 = BIO_NOTCLASSIFIED;
}
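
/*
 * Example (illustrative sketch, not part of this file): a minimal
 * classifier that tags read requests.  The g_classifier_hook layout and
 * the registration calls come from geom.h and this file; the
 * "g_sketch_*" names are hypothetical.
 */
#if 0
static int
g_sketch_classify(void *arg, struct bio *bp)
{

	if (bp->bio_cmd != BIO_READ)
		return (0);		/* not ours; leave it unclassified */
	bp->bio_classifier1 = arg;	/* store a non-NULL verdict */
	return (1);			/* report that a field was updated */
}

static struct g_classifier_hook g_sketch_hook = {
	.func = g_sketch_classify,
	.arg = &g_sketch_hook,		/* any non-NULL cookie */
};

/* Typically called from a class's init and fini routines: */
g_register_classifier(&g_sketch_hook);
g_unregister_classifier(&g_sketch_hook);
#endif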

void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
	struct g_provider *pp;
	struct mtx *mtxp;
	int direct, error, first;

	KASSERT(cp != NULL, ("NULL cp in g_io_request"));
	KASSERT(bp != NULL, ("NULL bp in g_io_request"));
	pp = cp->provider;
	KASSERT(pp != NULL, ("consumer not attached in g_io_request"));
#ifdef DIAGNOSTIC
	KASSERT(bp->bio_driver1 == NULL,
	    ("bio_driver1 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_driver2 == NULL,
	    ("bio_driver2 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_pflags == 0,
	    ("bio_pflags used by the consumer (geom %s)", cp->geom->name));
	/*
	 * Remember consumer's private fields, so we can detect if they were
	 * modified by the provider.
	 */
	bp->_bio_caller1 = bp->bio_caller1;
	bp->_bio_caller2 = bp->bio_caller2;
	bp->_bio_cflags = bp->bio_cflags;
#endif

	if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_GETATTR)) {
		KASSERT(bp->bio_data != NULL,
		    ("NULL bp->data in g_io_request(cmd=%hhu)", bp->bio_cmd));
	}
	if (bp->bio_cmd & (BIO_DELETE|BIO_FLUSH)) {
		KASSERT(bp->bio_data == NULL,
		    ("non-NULL bp->data in g_io_request(cmd=%hhu)",
		    bp->bio_cmd));
	}
	if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_DELETE)) {
		KASSERT(bp->bio_offset % cp->provider->sectorsize == 0,
		    ("wrong offset %jd for sectorsize %u",
		    bp->bio_offset, cp->provider->sectorsize));
		KASSERT(bp->bio_length % cp->provider->sectorsize == 0,
		    ("wrong length %jd for sectorsize %u",
		    bp->bio_length, cp->provider->sectorsize));
	}

	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);

	bp->bio_from = cp;
	bp->bio_to = pp;
	bp->bio_error = 0;
	bp->bio_completed = 0;

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));
	if ((g_collectstats & G_STATS_CONSUMERS) != 0 ||
	    ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL))
		binuptime(&bp->bio_t0);
	else
		getbinuptime(&bp->bio_t0);

#ifdef GET_STACK_USAGE
	direct = (cp->flags & G_CF_DIRECT_SEND) != 0 &&
	    (pp->flags & G_PF_DIRECT_RECEIVE) != 0 &&
	    !g_is_geom_thread(curthread) &&
	    ((pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 ||
	    (bp->bio_flags & BIO_UNMAPPED) == 0 || THREAD_CAN_SLEEP()) &&
	    pace == 0;
	if (direct) {
		/* Block direct execution if less than half of the stack is left. */
		size_t	st, su;
		GET_STACK_USAGE(st, su);
		if (su * 2 > st)
			direct = 0;
	}
#else
	direct = 0;
#endif

	if (!TAILQ_EMPTY(&g_classifier_tailq) && !bp->bio_classifier1) {
		g_bioq_lock(&g_bio_run_down);
		g_run_classifiers(bp);
		g_bioq_unlock(&g_bio_run_down);
	}

	/*
	 * The statistics collection is lockless, as such, but we
	 * cannot update one instance of the statistics from more
	 * than one thread at a time, so grab the lock first.
	 */
	mtxp = mtx_pool_find(mtxpool_sleep, pp);
	mtx_lock(mtxp);
	if (g_collectstats & G_STATS_PROVIDERS)
		devstat_start_transaction(pp->stat, &bp->bio_t0);
	if (g_collectstats & G_STATS_CONSUMERS)
		devstat_start_transaction(cp->stat, &bp->bio_t0);
	pp->nstart++;
	cp->nstart++;
	mtx_unlock(mtxp);

	if (direct) {
		error = g_io_check(bp);
		if (error >= 0) {
			CTR3(KTR_GEOM, "g_io_request g_io_check on bp %p "
			    "provider %s returned %d", bp, bp->bio_to->name,
			    error);
			g_io_deliver(bp, error);
			return;
		}
		bp->bio_to->geom->start(bp);
	} else {
		g_bioq_lock(&g_bio_run_down);
		first = TAILQ_EMPTY(&g_bio_run_down.bio_queue);
		TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue);
		bp->bio_flags |= BIO_ONQUEUE;
		g_bio_run_down.bio_queue_length++;
		g_bioq_unlock(&g_bio_run_down);
		/* Pass it on down. */
		if (first)
			wakeup(&g_wait_down);
	}
}

void
g_io_deliver(struct bio *bp, int error)
{
	struct bintime now;
	struct g_consumer *cp;
	struct g_provider *pp;
	struct mtx *mtxp;
	int direct, first;

	KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
	pp = bp->bio_to;
	KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
	cp = bp->bio_from;
	if (cp == NULL) {
		bp->bio_error = error;
		bp->bio_done(bp);
		return;
	}
	KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver"));
	KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));
#ifdef DIAGNOSTIC
	/*
	 * Some classes - GJournal in particular - can modify a bio's
	 * private fields while the bio is in transit; the G_GEOM_VOLATILE_BIO
	 * flag marks this as expected behaviour for that particular geom.
	 */
	if ((cp->geom->flags & G_GEOM_VOLATILE_BIO) == 0) {
		KASSERT(bp->bio_caller1 == bp->_bio_caller1,
		    ("bio_caller1 used by the provider %s", pp->name));
		KASSERT(bp->bio_caller2 == bp->_bio_caller2,
		    ("bio_caller2 used by the provider %s", pp->name));
		KASSERT(bp->bio_cflags == bp->_bio_cflags,
		    ("bio_cflags used by the provider %s", pp->name));
	}
#endif
	KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0"));
	KASSERT(bp->bio_completed <= bp->bio_length,
	    ("bio_completed can't be greater than bio_length"));

	g_trace(G_T_BIO,
"g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));

	/*
	 * XXX: the next two assignments don't belong here.
	 */
	bp->bio_bcount = bp->bio_length;
	bp->bio_resid = bp->bio_bcount - bp->bio_completed;

#ifdef GET_STACK_USAGE
	direct = (pp->flags & G_PF_DIRECT_SEND) &&
		 (cp->flags & G_CF_DIRECT_RECEIVE) &&
		 !g_is_geom_thread(curthread);
	if (direct) {
		/* Block direct execution if less than half of the stack is left. */
		size_t	st, su;
		GET_STACK_USAGE(st, su);
		if (su * 2 > st)
			direct = 0;
	}
#else
	direct = 0;
#endif

	/*
	 * The statistics collection is lockless, as such, but we
	 * cannot update one instance of the statistics from more
	 * than one thread at a time, so grab the lock first.
	 */
	if ((g_collectstats & G_STATS_CONSUMERS) != 0 ||
	    ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL))
		binuptime(&now);
	mtxp = mtx_pool_find(mtxpool_sleep, cp);
	mtx_lock(mtxp);
	if (g_collectstats & G_STATS_PROVIDERS)
		devstat_end_transaction_bio_bt(pp->stat, bp, &now);
	if (g_collectstats & G_STATS_CONSUMERS)
		devstat_end_transaction_bio_bt(cp->stat, bp, &now);
	cp->nend++;
	pp->nend++;
	mtx_unlock(mtxp);

	if (error != ENOMEM) {
		bp->bio_error = error;
		if (direct) {
			biodone(bp);
		} else {
			g_bioq_lock(&g_bio_run_up);
			first = TAILQ_EMPTY(&g_bio_run_up.bio_queue);
			TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp, bio_queue);
			bp->bio_flags |= BIO_ONQUEUE;
			g_bio_run_up.bio_queue_length++;
			g_bioq_unlock(&g_bio_run_up);
			if (first)
				wakeup(&g_wait_up);
		}
		return;
	}

	if (bootverbose)
		printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
	bp->bio_children = 0;
	bp->bio_inbed = 0;
	bp->bio_driver1 = NULL;
	bp->bio_driver2 = NULL;
	bp->bio_pflags = 0;
	g_io_request(bp, cp);
	pace = 1;
	return;
}

SYSCTL_DECL(_kern_geom);

static long transient_maps;
SYSCTL_LONG(_kern_geom, OID_AUTO, transient_maps, CTLFLAG_RD,
    &transient_maps, 0,
    "Total count of the transient mapping requests");
u_int transient_map_retries = 10;
SYSCTL_UINT(_kern_geom, OID_AUTO, transient_map_retries, CTLFLAG_RW,
    &transient_map_retries, 0,
    "Max count of retries used before giving up on creating transient map");
int transient_map_hard_failures;
SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_hard_failures, CTLFLAG_RD,
    &transient_map_hard_failures, 0,
    "Failures to establish the transient mapping due to retry attempts "
    "exhausted");
int transient_map_soft_failures;
SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_soft_failures, CTLFLAG_RD,
    &transient_map_soft_failures, 0,
    "Count of retried failures to establish the transient mapping");
int inflight_transient_maps;
SYSCTL_INT(_kern_geom, OID_AUTO, inflight_transient_maps, CTLFLAG_RD,
    &inflight_transient_maps, 0,
    "Current count of the active transient maps");

static int
g_io_transient_map_bio(struct bio *bp)
{
	vm_offset_t addr;
	long size;
	u_int retried;

	KASSERT(unmapped_buf_allowed, ("unmapped disabled"));

	size = round_page(bp->bio_ma_offset + bp->bio_length);
	KASSERT(size / PAGE_SIZE == bp->bio_ma_n, ("Bio too short %p", bp));
	addr = 0;
	retried = 0;
	atomic_add_long(&transient_maps, 1);
retry:
	if (vmem_alloc(transient_arena, size, M_BESTFIT | M_NOWAIT, &addr)) {
		if (transient_map_retries != 0 &&
		    retried >= transient_map_retries) {
			CTR2(KTR_GEOM, "g_down cannot map bp %p provider %s",
			    bp, bp->bio_to->name);
			atomic_add_int(&transient_map_hard_failures, 1);
			return (EDEADLK/* XXXKIB */);
		} else {
			/*
			 * Naive attempt to quiesce the I/O to get more
			 * in-flight requests completed and defragment
			 * the transient_arena.
			 */
			CTR3(KTR_GEOM, "g_down retrymap bp %p provider %s r %d",
			    bp, bp->bio_to->name, retried);
			pause("g_d_tra", hz / 10);
			retried++;
			atomic_add_int(&transient_map_soft_failures, 1);
			goto retry;
		}
	}
	atomic_add_int(&inflight_transient_maps, 1);
	pmap_qenter((vm_offset_t)addr, bp->bio_ma, OFF_TO_IDX(size));
	bp->bio_data = (caddr_t)addr + bp->bio_ma_offset;
	bp->bio_flags |= BIO_TRANSIENT_MAPPING;
	bp->bio_flags &= ~BIO_UNMAPPED;
	return (EJUSTRETURN);
}

void
g_io_schedule_down(struct thread *tp __unused)
{
	struct bio *bp;
	int error;

	for(;;) {
		g_bioq_lock(&g_bio_run_down);
		bp = g_bioq_first(&g_bio_run_down);
		if (bp == NULL) {
			CTR0(KTR_GEOM, "g_down going to sleep");
			msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
			    PRIBIO | PDROP, "-", 0);
			continue;
		}
		CTR0(KTR_GEOM, "g_down has work to do");
		g_bioq_unlock(&g_bio_run_down);
		if (pace != 0) {
			/*
			 * There has been at least one memory allocation
			 * failure since the last I/O completed. Pause 1ms to
			 * give the system a chance to free up memory. We only
			 * do this once because a large number of allocations
			 * can fail in the direct dispatch case and there's no
			 * relationship between the number of these failures and
			 * the length of the outage. If there's still an outage,
			 * we'll pause again and again until it's
			 * resolved. Older versions paused longer and once per
			 * allocation failure. This was OK for a single-threaded
			 * g_down, but with direct dispatch would lead to a max
			 * of 10 IOPs for minutes at a time when transient memory
			 * issues prevented allocation for a batch of requests
			 * from the upper layers.
			 *
			 * XXX This pacing is really lame. It needs to be solved
			 * by other methods. This is OK only because the worst
			 * case scenario is so rare. In the worst case scenario
			 * all memory is tied up waiting for I/O to complete
			 * which can never happen since we can't allocate bios
			 * for that I/O.
			 */
			CTR0(KTR_GEOM, "g_down pacing self");
			pause("g_down", min(hz/1000, 1));
			pace = 0;
		}
		CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp,
		    bp->bio_to->name);
		error = g_io_check(bp);
		if (error >= 0) {
			CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider "
			    "%s returned %d", bp, bp->bio_to->name, error);
			g_io_deliver(bp, error);
			continue;
		}
		THREAD_NO_SLEEPING();
		CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld "
		    "len %ld", bp, bp->bio_to->name, bp->bio_offset,
		    bp->bio_length);
		bp->bio_to->geom->start(bp);
		THREAD_SLEEPING_OK();
	}
}

void
bio_taskqueue(struct bio *bp, bio_task_t *func, void *arg)
{
	bp->bio_task = func;
	bp->bio_task_arg = arg;
	/*
	 * The taskqueue is actually just a second queue off the "up"
	 * queue, so we use the same lock.
	 */
	g_bioq_lock(&g_bio_run_up);
	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p target taskq", bp));
	bp->bio_flags |= BIO_ONQUEUE;
	TAILQ_INSERT_TAIL(&g_bio_run_task.bio_queue, bp, bio_queue);
	g_bio_run_task.bio_queue_length++;
	wakeup(&g_wait_up);
	g_bioq_unlock(&g_bio_run_up);
}
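
/*
 * Example (illustrative sketch, not part of this file): deferring
 * completion work to the g_up thread.  "g_sketch_done_task" is a
 * hypothetical bio_task_t; note that the task runs under
 * THREAD_NO_SLEEPING() (see g_io_schedule_up() below), so it must
 * not block.
 */
#if 0
static void
g_sketch_done_task(void *arg)
{
	struct bio *bp = arg;

	/* Non-sleeping completion work goes here. */
	g_std_done(bp);
}

/* From a context that must not process the bio directly: */
bio_taskqueue(bp, g_sketch_done_task, bp);
#endif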

void
g_io_schedule_up(struct thread *tp __unused)
{
	struct bio *bp;

	for(;;) {
		g_bioq_lock(&g_bio_run_up);
		bp = g_bioq_first(&g_bio_run_task);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
			THREAD_NO_SLEEPING();
			CTR1(KTR_GEOM, "g_up processing task bp %p", bp);
			bp->bio_task(bp->bio_task_arg);
			THREAD_SLEEPING_OK();
			continue;
		}
		bp = g_bioq_first(&g_bio_run_up);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
			THREAD_NO_SLEEPING();
			CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off "
			    "%jd len %ld", bp, bp->bio_to->name,
			    bp->bio_offset, bp->bio_length);
			biodone(bp);
			THREAD_SLEEPING_OK();
			continue;
		}
		CTR0(KTR_GEOM, "g_up going to sleep");
		msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
		    PRIBIO | PDROP, "-", 0);
	}
}

void *
g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
{
	struct bio *bp;
	void *ptr;
	int errorc;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_read_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	ptr = g_malloc(length, M_WAITOK);
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	errorc = biowait(bp, "gread");
	if (error != NULL)
		*error = errorc;
	g_destroy_bio(bp);
	if (errorc) {
		g_free(ptr);
		ptr = NULL;
	}
	return (ptr);
}
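
/*
 * Example (illustrative sketch, not part of this file): the classic
 * last-sector metadata read performed by many classes while tasting a
 * provider.  "pp" is the provider attached to consumer "cp"; the
 * variable names are hypothetical.
 */
#if 0
	u_char *buf;
	int error;

	buf = g_read_data(cp, pp->mediasize - pp->sectorsize,
	    pp->sectorsize, &error);
	if (buf == NULL)
		return (error);		/* the read failed; nothing to free */
	/* ... examine the metadata ... */
	g_free(buf);
#endif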

int
g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_write_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "gwrite");
	g_destroy_bio(bp);
	return (error);
}
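
/*
 * Example (illustrative sketch, not part of this file): making a
 * metadata update durable by pairing g_write_data() with g_io_flush(),
 * since a successful write may still sit in a volatile cache.
 */
#if 0
	error = g_write_data(cp, offset, buf, pp->sectorsize);
	if (error == 0)
		error = g_io_flush(cp);
#endif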

int
g_delete_data(struct g_consumer *cp, off_t offset, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize,
	    ("g_delete_data(): invalid length %jd", (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_DELETE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gdelete");
	g_destroy_bio(bp);
	return (error);
}

void
g_print_bio(struct bio *bp)
{
	const char *pname, *cmd = NULL;

	if (bp->bio_to != NULL)
		pname = bp->bio_to->name;
	else
		pname = "[unknown]";

	switch (bp->bio_cmd) {
	case BIO_GETATTR:
		cmd = "GETATTR";
		printf("%s[%s(attr=%s)]", pname, cmd, bp->bio_attribute);
		return;
	case BIO_FLUSH:
		cmd = "FLUSH";
		printf("%s[%s]", pname, cmd);
		return;
	case BIO_READ:
		cmd = "READ";
		break;
	case BIO_WRITE:
		cmd = "WRITE";
		break;
	case BIO_DELETE:
		cmd = "DELETE";
		break;
	default:
		cmd = "UNKNOWN";
		printf("%s[%s()]", pname, cmd);
		return;
	}
	printf("%s[%s(offset=%jd, length=%jd)]", pname, cmd,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
}