/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>

#include <sys/errno.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <sys/devicestat.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>

static int	g_io_transient_map_bio(struct bio *bp);

static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;

/*
 * Pace is a hint that we've had some trouble recently allocating
 * bios, so we should back off trying to send I/O down the stack
 * a bit to let the problem resolve. When pacing, we also turn
 * off direct dispatch to reduce memory pressure from I/Os
 * there, at the expense of some added latency while the memory
 * pressures exist. See g_io_schedule_down() for more details
 * and limitations.
 */
static volatile u_int __read_mostly pace;

static uma_zone_t __read_mostly biozone;

/*
 * The head of the list of classifiers used in g_io_request.
 * Use g_register_classifier() and g_unregister_classifier()
 * to add and remove entries from the list.
 * Classifiers are invoked in registration order.
 */
static TAILQ_HEAD(, g_classifier_hook) g_classifier_tailq __read_mostly =
    TAILQ_HEAD_INITIALIZER(g_classifier_tailq);

static void
g_bioq_lock(struct g_bioq *bq)
{

	mtx_lock(&bq->bio_queue_lock);
}

static void
g_bioq_unlock(struct g_bioq *bq)
{

	mtx_unlock(&bq->bio_queue_lock);
}

#if 0
static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}
#endif

static void
g_bioq_init(struct g_bioq *bq)
{

	TAILQ_INIT(&bq->bio_queue);
	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}

static struct bio *
g_bioq_first(struct g_bioq *bq)
{
	struct bio *bp;

	bp = TAILQ_FIRST(&bq->bio_queue);
	if (bp != NULL) {
		KASSERT((bp->bio_flags & BIO_ONQUEUE),
		    ("Bio not on queue bp=%p target %p", bp, bq));
		bp->bio_flags &= ~BIO_ONQUEUE;
		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
		bq->bio_queue_length--;
	}
	return (bp);
}

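/*
 * Allocate a fresh, zeroed bio from the bio zone.  g_new_bio() uses
 * M_NOWAIT and can therefore return NULL under memory pressure, while
 * g_alloc_bio() below sleeps until the allocation succeeds and never
 * returns NULL.
 */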
struct bio *
g_new_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_new_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp);
}

struct bio *
g_alloc_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_WAITOK | M_ZERO);
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_alloc_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp);
}

void
g_destroy_bio(struct bio *bp)
{
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_destroy_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	uma_zfree(biozone, bp);
}

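/*
 * Clone a bio for passing down to a lower layer.  The clone shares the
 * parent's data buffer and inherits only the flags that are meaningful
 * across layers; the parent's bio_children count is bumped so completion
 * of the clone can be accounted for in the parent.  Uses M_NOWAIT, so
 * this can return NULL when the bio zone is exhausted.
 */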
struct bio *
g_clone_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
	if (bp2 != NULL) {
		bp2->bio_parent = bp;
		bp2->bio_cmd = bp->bio_cmd;
		/*
		 *  BIO_ORDERED flag may be used by disk drivers to enforce
		 *  ordering restrictions, so this flag needs to be cloned.
		 *  BIO_UNMAPPED and BIO_VLIST should be inherited, to properly
		 *  indicate which way the buffer is passed.
		 *  Other bio flags are not suitable for cloning.
		 */
		bp2->bio_flags = bp->bio_flags &
		    (BIO_ORDERED | BIO_UNMAPPED | BIO_VLIST);
		bp2->bio_length = bp->bio_length;
		bp2->bio_offset = bp->bio_offset;
		bp2->bio_data = bp->bio_data;
		bp2->bio_ma = bp->bio_ma;
		bp2->bio_ma_n = bp->bio_ma_n;
		bp2->bio_ma_offset = bp->bio_ma_offset;
		bp2->bio_attribute = bp->bio_attribute;
		if (bp->bio_cmd == BIO_ZONE)
			bcopy(&bp->bio_zone, &bp2->bio_zone,
			    sizeof(bp->bio_zone));
		/* Inherit classification info from the parent */
		bp2->bio_classifier1 = bp->bio_classifier1;
		bp2->bio_classifier2 = bp->bio_classifier2;
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
		bp2->bio_track_bp = bp->bio_track_bp;
#endif
		bp->bio_children++;
	}
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR2(KTR_GEOM, "g_clone_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp2);
}

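/*
 * Like g_clone_bio(), but allocates with M_WAITOK so it cannot fail and
 * is only usable from contexts that are allowed to sleep.  Note that
 * only BIO_UNMAPPED and BIO_VLIST are inherited here, not BIO_ORDERED.
 */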
struct bio *
g_duplicate_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_WAITOK | M_ZERO);
	bp2->bio_flags = bp->bio_flags & (BIO_UNMAPPED | BIO_VLIST);
	bp2->bio_parent = bp;
	bp2->bio_cmd = bp->bio_cmd;
	bp2->bio_length = bp->bio_length;
	bp2->bio_offset = bp->bio_offset;
	bp2->bio_data = bp->bio_data;
	bp2->bio_ma = bp->bio_ma;
	bp2->bio_ma_n = bp->bio_ma_n;
	bp2->bio_ma_offset = bp->bio_ma_offset;
	bp2->bio_attribute = bp->bio_attribute;
	bp->bio_children++;
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR2(KTR_GEOM, "g_duplicate_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp2);
}

void
g_reset_bio(struct bio *bp)
{

	bzero(bp, sizeof(*bp));
}

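/*
 * Initialize the GEOM I/O machinery: the down and up queues and the
 * UMA zone that backs all bio allocations.
 */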
void
g_io_init(void)
{

	g_bioq_init(&g_bio_run_down);
	g_bioq_init(&g_bio_run_up);
	biozone = uma_zcreate("g_bio", sizeof (struct bio),
	    NULL, NULL, NULL, NULL, 0, 0);
}

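/*
 * Synchronously issue a BIO_GETATTR request for the named attribute
 * and wait for its completion.  On return, *len holds the number of
 * bytes actually delivered into the buffer at ptr.
 */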
int
g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_GETATTR;
	bp->bio_done = NULL;
	bp->bio_attribute = attr;
	bp->bio_length = *len;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "ggetattr");
	*len = bp->bio_completed;
	g_destroy_bio(bp);
	return (error);
}

int
g_io_zonecmd(struct disk_zone_args *zone_args, struct g_consumer *cp)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_zone(%d)", zone_args->zone_cmd);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_ZONE;
	bp->bio_done = NULL;
	/*
	 * XXX KDM need to handle report zone data.
	 */
	bcopy(zone_args, &bp->bio_zone, sizeof(*zone_args));
	if (zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES)
		bp->bio_length =
		    zone_args->zone_params.report.entries_allocated *
		    sizeof(struct disk_zone_rep_entry);
	else
		bp->bio_length = 0;

	g_io_request(bp, cp);
	error = biowait(bp, "gzone");
	bcopy(&bp->bio_zone, zone_args, sizeof(*zone_args));
	g_destroy_bio(bp);
	return (error);
}

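/*
 * Synchronously flush the write cache of the provider attached to the
 * given consumer.  The BIO_ORDERED flag asks the lower layers not to
 * reorder the flush around other in-flight I/O.
 */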
int
g_io_flush(struct g_consumer *cp)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_flush(%s)", cp->provider->name);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_FLUSH;
	bp->bio_flags |= BIO_ORDERED;
	bp->bio_done = NULL;
	bp->bio_attribute = NULL;
	bp->bio_offset = cp->provider->mediasize;
	bp->bio_length = 0;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gflush");
	g_destroy_bio(bp);
	return (error);
}

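/*
 * Validate a bio before it is passed to the provider's start routine.
 * Returns EJUSTRETURN if the bio should proceed down the stack, and
 * 0 or an errno value if it should instead be terminated immediately
 * via g_io_deliver() (e.g. on an access violation, a request beyond
 * the end of media, or a zero-length transfer).  As a side effect,
 * requests extending past the end of media are truncated, and unmapped
 * bios may be given a transient kernel mapping here.
 */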
static int
g_io_check(struct bio *bp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	off_t excess;
	int error;

	biotrack(bp, __func__);

	cp = bp->bio_from;
	pp = bp->bio_to;

	/* Fail if access counters don't allow the operation */
	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_GETATTR:
		if (cp->acr == 0)
			return (EPERM);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		if (cp->acw == 0)
			return (EPERM);
		break;
	case BIO_ZONE:
		if ((bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES) ||
		    (bp->bio_zone.zone_cmd == DISK_ZONE_GET_PARAMS)) {
			if (cp->acr == 0)
				return (EPERM);
		} else if (cp->acw == 0)
			return (EPERM);
		break;
	default:
		return (EPERM);
	}
	/* If the provider is marked for error, don't disturb it. */
	if (pp->error)
		return (pp->error);
	if (cp->flags & G_CF_ORPHAN)
		return (ENXIO);

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		/* Zero sectorsize or mediasize is probably a lack of media. */
		if (pp->sectorsize == 0 || pp->mediasize == 0)
			return (ENXIO);
		/* Reject I/O not on a sector boundary */
		if (bp->bio_offset % pp->sectorsize)
			return (EINVAL);
		/* Reject I/O not an integral number of sectors long */
		if (bp->bio_length % pp->sectorsize)
			return (EINVAL);
		/* Reject requests before or past the end of media. */
		if (bp->bio_offset < 0)
			return (EIO);
		if (bp->bio_offset > pp->mediasize)
			return (EIO);

		/* Truncate requests to the end of the provider's media. */
		excess = bp->bio_offset + bp->bio_length;
		if (excess > bp->bio_to->mediasize) {
			KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 ||
			    round_page(bp->bio_ma_offset +
			    bp->bio_length) / PAGE_SIZE == bp->bio_ma_n,
			    ("excess bio %p too short", bp));
			excess -= bp->bio_to->mediasize;
			bp->bio_length -= excess;
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				bp->bio_ma_n = round_page(bp->bio_ma_offset +
				    bp->bio_length) / PAGE_SIZE;
			}
			if (excess > 0)
				CTR3(KTR_GEOM, "g_down truncated bio "
				    "%p provider %s by %d", bp,
				    bp->bio_to->name, excess);
		}

		/* Deliver zero length transfers right here. */
		if (bp->bio_length == 0) {
			CTR2(KTR_GEOM, "g_down terminated 0-length "
			    "bp %p provider %s", bp, bp->bio_to->name);
			return (0);
		}

		if ((bp->bio_flags & BIO_UNMAPPED) != 0 &&
		    (bp->bio_to->flags & G_PF_ACCEPT_UNMAPPED) == 0 &&
		    (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE)) {
			if ((error = g_io_transient_map_bio(bp)) >= 0)
				return (error);
		}
		break;
	default:
		break;
	}
	return (EJUSTRETURN);
}

/*
 * bio classification support.
 *
 * g_register_classifier() and g_unregister_classifier()
 * are used to add and remove a classifier from the list.
 * The list is protected using the g_bio_run_down lock,
 * because the classifiers are called in that path.
 *
 * g_io_request() passes bios that are not already classified
 * (i.e. those with bio_classifier1 == NULL) to g_run_classifiers().
 * Classifiers can store their result in the two fields
 * bio_classifier1 and bio_classifier2.
 * A classifier that updates one of the fields should
 * return a non-zero value.
 * If no classifier updates either field, g_run_classifiers() sets
 * bio_classifier1 = BIO_NOTCLASSIFIED to avoid further calls.
 */

int
g_register_classifier(struct g_classifier_hook *hook)
{

	g_bioq_lock(&g_bio_run_down);
	TAILQ_INSERT_TAIL(&g_classifier_tailq, hook, link);
	g_bioq_unlock(&g_bio_run_down);

	return (0);
}

void
g_unregister_classifier(struct g_classifier_hook *hook)
{
	struct g_classifier_hook *entry;

	g_bioq_lock(&g_bio_run_down);
	TAILQ_FOREACH(entry, &g_classifier_tailq, link) {
		if (entry == hook) {
			TAILQ_REMOVE(&g_classifier_tailq, hook, link);
			break;
		}
	}
	g_bioq_unlock(&g_bio_run_down);
}

static void
g_run_classifiers(struct bio *bp)
{
	struct g_classifier_hook *hook;
	int classified = 0;

	biotrack(bp, __func__);

	TAILQ_FOREACH(hook, &g_classifier_tailq, link)
		classified |= hook->func(hook->arg, bp);

	if (!classified)
		bp->bio_classifier1 = BIO_NOTCLASSIFIED;
}

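/*
 * Entry point for all I/O requests going down the stack.  After sanity
 * checks and statistics collection, the bio is either handed directly
 * to the provider's start routine (direct dispatch, when both ends
 * allow it and enough kernel stack remains) or queued on g_bio_run_down
 * for the g_down thread to process asynchronously.
 */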
void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
	struct g_provider *pp;
	struct mtx *mtxp;
	int direct, error, first;
	uint8_t cmd;

	biotrack(bp, __func__);

	KASSERT(cp != NULL, ("NULL cp in g_io_request"));
	KASSERT(bp != NULL, ("NULL bp in g_io_request"));
	pp = cp->provider;
	KASSERT(pp != NULL, ("consumer not attached in g_io_request"));
#ifdef DIAGNOSTIC
	KASSERT(bp->bio_driver1 == NULL,
	    ("bio_driver1 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_driver2 == NULL,
	    ("bio_driver2 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_pflags == 0,
	    ("bio_pflags used by the consumer (geom %s)", cp->geom->name));
	/*
	 * Remember consumer's private fields, so we can detect if they were
	 * modified by the provider.
	 */
	bp->_bio_caller1 = bp->bio_caller1;
	bp->_bio_caller2 = bp->bio_caller2;
	bp->_bio_cflags = bp->bio_cflags;
#endif

	cmd = bp->bio_cmd;
	if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_GETATTR) {
		KASSERT(bp->bio_data != NULL,
		    ("NULL bp->data in g_io_request(cmd=%hu)", bp->bio_cmd));
	}
	if (cmd == BIO_DELETE || cmd == BIO_FLUSH) {
		KASSERT(bp->bio_data == NULL,
		    ("non-NULL bp->data in g_io_request(cmd=%hu)",
		    bp->bio_cmd));
	}
	if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_DELETE) {
		KASSERT(bp->bio_offset % cp->provider->sectorsize == 0,
		    ("wrong offset %jd for sectorsize %u",
		    bp->bio_offset, cp->provider->sectorsize));
		KASSERT(bp->bio_length % cp->provider->sectorsize == 0,
		    ("wrong length %jd for sectorsize %u",
		    bp->bio_length, cp->provider->sectorsize));
	}

	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);

	bp->bio_from = cp;
	bp->bio_to = pp;
	bp->bio_error = 0;
	bp->bio_completed = 0;

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));
	if ((g_collectstats & G_STATS_CONSUMERS) != 0 ||
	    ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL))
		binuptime(&bp->bio_t0);
	else
		getbinuptime(&bp->bio_t0);

#ifdef GET_STACK_USAGE
	direct = (cp->flags & G_CF_DIRECT_SEND) != 0 &&
	    (pp->flags & G_PF_DIRECT_RECEIVE) != 0 &&
	    !g_is_geom_thread(curthread) &&
	    ((pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 ||
	    (bp->bio_flags & BIO_UNMAPPED) == 0 || THREAD_CAN_SLEEP()) &&
	    pace == 0;
	if (direct) {
		/* Block direct execution if less than half of stack left. */
		size_t	st, su;
		GET_STACK_USAGE(st, su);
		if (su * 2 > st)
			direct = 0;
	}
#else
	direct = 0;
#endif

	if (!TAILQ_EMPTY(&g_classifier_tailq) && !bp->bio_classifier1) {
		g_bioq_lock(&g_bio_run_down);
		g_run_classifiers(bp);
		g_bioq_unlock(&g_bio_run_down);
	}

	/*
	 * The statistics collection is lockless, as such, but we
	 * cannot update one instance of the statistics from more
	 * than one thread at a time, so grab the lock first.
	 */
	mtxp = mtx_pool_find(mtxpool_sleep, pp);
	mtx_lock(mtxp);
	if (g_collectstats & G_STATS_PROVIDERS)
		devstat_start_transaction_bio_t0(pp->stat, bp);
	if (g_collectstats & G_STATS_CONSUMERS)
		devstat_start_transaction_bio_t0(cp->stat, bp);
	pp->nstart++;
	cp->nstart++;
	mtx_unlock(mtxp);

	if (direct) {
		error = g_io_check(bp);
		if (error >= 0) {
			CTR3(KTR_GEOM, "g_io_request g_io_check on bp %p "
			    "provider %s returned %d", bp, bp->bio_to->name,
			    error);
			g_io_deliver(bp, error);
			return;
		}
		bp->bio_to->geom->start(bp);
	} else {
		g_bioq_lock(&g_bio_run_down);
		first = TAILQ_EMPTY(&g_bio_run_down.bio_queue);
		TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue);
		bp->bio_flags |= BIO_ONQUEUE;
		g_bio_run_down.bio_queue_length++;
		g_bioq_unlock(&g_bio_run_down);
		/* Pass it on down. */
		if (first)
			wakeup(&g_wait_down);
	}
}

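/*
 * Report completion of a bio back to its consumer.  The mirror image
 * of g_io_request(): statistics are finalized and the bio is either
 * completed directly via biodone() or queued on g_bio_run_up for the
 * g_up thread.  An ENOMEM error is not delivered; instead the request
 * is reissued and pacing is enabled to throttle the down path.
 */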
void
g_io_deliver(struct bio *bp, int error)
{
	struct bintime now;
	struct g_consumer *cp;
	struct g_provider *pp;
	struct mtx *mtxp;
	int direct, first;

	biotrack(bp, __func__);

	KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
	pp = bp->bio_to;
	KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
	cp = bp->bio_from;
	if (cp == NULL) {
		bp->bio_error = error;
		bp->bio_done(bp);
		return;
	}
	KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver"));
	KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));
#ifdef DIAGNOSTIC
	/*
	 * Some classes - GJournal in particular - can modify bio's
	 * private fields while the bio is in transit; G_GEOM_VOLATILE_BIO
	 * flag means it's an expected behaviour for that particular geom.
	 */
	if ((cp->geom->flags & G_GEOM_VOLATILE_BIO) == 0) {
		KASSERT(bp->bio_caller1 == bp->_bio_caller1,
		    ("bio_caller1 used by the provider %s", pp->name));
		KASSERT(bp->bio_caller2 == bp->_bio_caller2,
		    ("bio_caller2 used by the provider %s", pp->name));
		KASSERT(bp->bio_cflags == bp->_bio_cflags,
		    ("bio_cflags used by the provider %s", pp->name));
	}
#endif
	KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0"));
	KASSERT(bp->bio_completed <= bp->bio_length,
	    ("bio_completed can't be greater than bio_length"));

	g_trace(G_T_BIO,
"g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));

	/*
	 * XXX: the next two assignments don't belong here.
	 */
	bp->bio_bcount = bp->bio_length;
	bp->bio_resid = bp->bio_bcount - bp->bio_completed;

#ifdef GET_STACK_USAGE
	direct = (pp->flags & G_PF_DIRECT_SEND) &&
		 (cp->flags & G_CF_DIRECT_RECEIVE) &&
		 !g_is_geom_thread(curthread);
	if (direct) {
		/* Block direct execution if less than half of stack left. */
		size_t	st, su;
		GET_STACK_USAGE(st, su);
		if (su * 2 > st)
			direct = 0;
	}
#else
	direct = 0;
#endif

	/*
	 * The statistics collection is lockless, as such, but we
	 * cannot update one instance of the statistics from more
	 * than one thread at a time, so grab the lock first.
	 */
	if ((g_collectstats & G_STATS_CONSUMERS) != 0 ||
	    ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL))
		binuptime(&now);
	mtxp = mtx_pool_find(mtxpool_sleep, cp);
	mtx_lock(mtxp);
	if (g_collectstats & G_STATS_PROVIDERS)
		devstat_end_transaction_bio_bt(pp->stat, bp, &now);
	if (g_collectstats & G_STATS_CONSUMERS)
		devstat_end_transaction_bio_bt(cp->stat, bp, &now);
	cp->nend++;
	pp->nend++;
	mtx_unlock(mtxp);

	if (error != ENOMEM) {
		bp->bio_error = error;
		if (direct) {
			biodone(bp);
		} else {
			g_bioq_lock(&g_bio_run_up);
			first = TAILQ_EMPTY(&g_bio_run_up.bio_queue);
			TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp, bio_queue);
			bp->bio_flags |= BIO_ONQUEUE;
			g_bio_run_up.bio_queue_length++;
			g_bioq_unlock(&g_bio_run_up);
			if (first)
				wakeup(&g_wait_up);
		}
		return;
	}

	if (bootverbose)
		printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
	bp->bio_children = 0;
	bp->bio_inbed = 0;
	bp->bio_driver1 = NULL;
	bp->bio_driver2 = NULL;
	bp->bio_pflags = 0;
	g_io_request(bp, cp);
	pace = 1;
	return;
}

SYSCTL_DECL(_kern_geom);

static long transient_maps;
SYSCTL_LONG(_kern_geom, OID_AUTO, transient_maps, CTLFLAG_RD,
    &transient_maps, 0,
    "Total count of the transient mapping requests");
u_int transient_map_retries = 10;
SYSCTL_UINT(_kern_geom, OID_AUTO, transient_map_retries, CTLFLAG_RW,
    &transient_map_retries, 0,
    "Max count of retries used before giving up on creating transient map");
int transient_map_hard_failures;
SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_hard_failures, CTLFLAG_RD,
    &transient_map_hard_failures, 0,
    "Failures to establish the transient mapping due to retry attempts "
    "exhausted");
int transient_map_soft_failures;
SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_soft_failures, CTLFLAG_RD,
    &transient_map_soft_failures, 0,
    "Count of retried failures to establish the transient mapping");
int inflight_transient_maps;
SYSCTL_INT(_kern_geom, OID_AUTO, inflight_transient_maps, CTLFLAG_RD,
    &inflight_transient_maps, 0,
    "Current count of the active transient maps");

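/*
 * Give an unmapped bio a temporary kernel virtual mapping so that it
 * can be passed to a provider that does not accept unmapped I/O.  The
 * KVA is carved out of transient_arena and released when the bio
 * completes.  Returns EJUSTRETURN on success so the caller keeps
 * sending the bio down, or an error when the arena stays exhausted
 * past the retry limit.
 */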
static int
g_io_transient_map_bio(struct bio *bp)
{
	vm_offset_t addr;
	long size;
	u_int retried;

	KASSERT(unmapped_buf_allowed, ("unmapped disabled"));

	size = round_page(bp->bio_ma_offset + bp->bio_length);
	KASSERT(size / PAGE_SIZE == bp->bio_ma_n, ("Bio too short %p", bp));
	addr = 0;
	retried = 0;
	atomic_add_long(&transient_maps, 1);
retry:
	if (vmem_alloc(transient_arena, size, M_BESTFIT | M_NOWAIT, &addr)) {
		if (transient_map_retries != 0 &&
		    retried >= transient_map_retries) {
			CTR2(KTR_GEOM, "g_down cannot map bp %p provider %s",
			    bp, bp->bio_to->name);
			atomic_add_int(&transient_map_hard_failures, 1);
			return (EDEADLK/* XXXKIB */);
		} else {
			/*
			 * Naive attempt to quiesce the I/O to get more
			 * in-flight requests completed and defragment
			 * the transient_arena.
			 */
			CTR3(KTR_GEOM, "g_down retrymap bp %p provider %s r %d",
			    bp, bp->bio_to->name, retried);
			pause("g_d_tra", hz / 10);
			retried++;
			atomic_add_int(&transient_map_soft_failures, 1);
			goto retry;
		}
	}
	atomic_add_int(&inflight_transient_maps, 1);
	pmap_qenter((vm_offset_t)addr, bp->bio_ma, OFF_TO_IDX(size));
	bp->bio_data = (caddr_t)addr + bp->bio_ma_offset;
	bp->bio_flags |= BIO_TRANSIENT_MAPPING;
	bp->bio_flags &= ~BIO_UNMAPPED;
	return (EJUSTRETURN);
}

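/*
 * Main loop of the g_down kernel thread: pull bios off g_bio_run_down,
 * validate them with g_io_check() and hand them to the provider's
 * start routine.  Sleeping is forbidden around the start call, and the
 * loop paces itself briefly after bio allocation failures.
 */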
void
g_io_schedule_down(struct thread *tp __unused)
{
	struct bio *bp;
	int error;

	for (;;) {
		g_bioq_lock(&g_bio_run_down);
		bp = g_bioq_first(&g_bio_run_down);
		if (bp == NULL) {
			CTR0(KTR_GEOM, "g_down going to sleep");
			msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
			    PRIBIO | PDROP, "-", 0);
			continue;
		}
		CTR0(KTR_GEOM, "g_down has work to do");
		g_bioq_unlock(&g_bio_run_down);
		biotrack(bp, __func__);
		if (pace != 0) {
			/*
			 * There has been at least one memory allocation
			 * failure since the last I/O completed. Pause 1ms to
			 * give the system a chance to free up memory. We only
			 * do this once because a large number of allocations
			 * can fail in the direct dispatch case and there's no
			 * relationship between the number of these failures and
			 * the length of the outage. If there's still an outage,
			 * we'll pause again and again until it's
			 * resolved. Older versions paused longer and once per
			 * allocation failure. This was OK for a single-threaded
			 * g_down, but with direct dispatch would lead to a max
			 * of 10 IOPs for minutes at a time when transient memory
			 * issues prevented allocation for a batch of requests
			 * from the upper layers.
			 *
			 * XXX This pacing is really lame. It needs to be solved
			 * by other methods. This is OK only because the worst
			 * case scenario is so rare. In the worst case scenario
			 * all memory is tied up waiting for I/O to complete
			 * which can never happen since we can't allocate bios
			 * for that I/O.
			 */
			CTR0(KTR_GEOM, "g_down pacing self");
			pause("g_down", min(hz/1000, 1));
			pace = 0;
		}
		CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp,
		    bp->bio_to->name);
		error = g_io_check(bp);
		if (error >= 0) {
			CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider "
			    "%s returned %d", bp, bp->bio_to->name, error);
			g_io_deliver(bp, error);
			continue;
		}
		THREAD_NO_SLEEPING();
		CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld "
		    "len %ld", bp, bp->bio_to->name, bp->bio_offset,
		    bp->bio_length);
		bp->bio_to->geom->start(bp);
		THREAD_SLEEPING_OK();
	}
}

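/*
 * Main loop of the g_up kernel thread: pull completed bios off
 * g_bio_run_up and finish them with biodone().
 */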
void
g_io_schedule_up(struct thread *tp __unused)
{
	struct bio *bp;

	for (;;) {
		g_bioq_lock(&g_bio_run_up);
		bp = g_bioq_first(&g_bio_run_up);
		if (bp == NULL) {
			CTR0(KTR_GEOM, "g_up going to sleep");
			msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
			    PRIBIO | PDROP, "-", 0);
			continue;
		}
		g_bioq_unlock(&g_bio_run_up);
		THREAD_NO_SLEEPING();
		CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off "
		    "%jd len %ld", bp, bp->bio_to->name,
		    bp->bio_offset, bp->bio_length);
		biodone(bp);
		THREAD_SLEEPING_OK();
	}
}

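/*
 * Synchronously read "length" bytes at "offset" into a freshly
 * allocated buffer, which the caller must g_free() when done.  Returns
 * NULL on error, with the errno value stored through "error" if it is
 * non-NULL.  A typical caller (names here are illustrative) might look
 * like:
 *
 *	buf = g_read_data(cp, 0, pp->sectorsize, &error);
 *	if (buf == NULL)
 *		return (error);
 *	...examine buf...
 *	g_free(buf);
 */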
void *
g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
{
	struct bio *bp;
	void *ptr;
	int errorc;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_read_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	ptr = g_malloc(length, M_WAITOK);
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	errorc = biowait(bp, "gread");
	if (error != NULL)
		*error = errorc;
	g_destroy_bio(bp);
	if (errorc) {
		g_free(ptr);
		ptr = NULL;
	}
	return (ptr);
}

/*
 * A read function for use by ffs_sbget when used by GEOM-layer routines.
 */
int
g_use_g_read_data(void *devfd, off_t loc, void **bufp, int size)
{
	struct g_consumer *cp;

	KASSERT(*bufp == NULL,
	    ("g_use_g_read_data: non-NULL *bufp %p\n", *bufp));

	cp = (struct g_consumer *)devfd;
	/*
	 * Take care not to issue an invalid I/O request. The offset of
	 * the superblock candidate must be a multiple of the provider's
	 * sector size, otherwise an FFS can't exist on the provider
	 * anyway.
	 */
	if (loc % cp->provider->sectorsize != 0)
		return (ENOENT);
	*bufp = g_read_data(cp, loc, size, NULL);
	if (*bufp == NULL)
		return (ENOENT);
	return (0);
}

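/*
 * Synchronously write "length" bytes from "ptr" at "offset" on the
 * provider attached to the given consumer, waiting for completion.
 */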
int
g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_write_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "gwrite");
	g_destroy_bio(bp);
	return (error);
}

/*
 * A write function for use by ffs_sbput when used by GEOM-layer routines.
 */
int
g_use_g_write_data(void *devfd, off_t loc, void *buf, int size)
{

	return (g_write_data((struct g_consumer *)devfd, loc, buf, size));
}

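/*
 * Synchronously issue a BIO_DELETE for the given range, telling the
 * provider that its contents are no longer needed (e.g. TRIM/UNMAP).
 */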
int
g_delete_data(struct g_consumer *cp, off_t offset, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize,
	    ("g_delete_data(): invalid length %jd", (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_DELETE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gdelete");
	g_destroy_bio(bp);
	return (error);
}

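/*
 * Print a one-line, human-readable description of a bio, primarily
 * for debugging output.
 */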
void
g_print_bio(struct bio *bp)
{
	const char *pname, *cmd = NULL;

	if (bp->bio_to != NULL)
		pname = bp->bio_to->name;
	else
		pname = "[unknown]";

	switch (bp->bio_cmd) {
	case BIO_GETATTR:
		cmd = "GETATTR";
		printf("%s[%s(attr=%s)]", pname, cmd, bp->bio_attribute);
		return;
	case BIO_FLUSH:
		cmd = "FLUSH";
		printf("%s[%s]", pname, cmd);
		return;
	case BIO_ZONE: {
		char *subcmd = NULL;
		cmd = "ZONE";
		switch (bp->bio_zone.zone_cmd) {
		case DISK_ZONE_OPEN:
			subcmd = "OPEN";
			break;
		case DISK_ZONE_CLOSE:
			subcmd = "CLOSE";
			break;
		case DISK_ZONE_FINISH:
			subcmd = "FINISH";
			break;
		case DISK_ZONE_RWP:
			subcmd = "RWP";
			break;
		case DISK_ZONE_REPORT_ZONES:
			subcmd = "REPORT ZONES";
			break;
		case DISK_ZONE_GET_PARAMS:
			subcmd = "GET PARAMS";
			break;
		default:
			subcmd = "UNKNOWN";
			break;
		}
		printf("%s[%s,%s]", pname, cmd, subcmd);
		return;
	}
	case BIO_READ:
		cmd = "READ";
		break;
	case BIO_WRITE:
		cmd = "WRITE";
		break;
	case BIO_DELETE:
		cmd = "DELETE";
		break;
	default:
		cmd = "UNKNOWN";
		printf("%s[%s()]", pname, cmd);
		return;
	}
	printf("%s[%s(offset=%jd, length=%jd)]", pname, cmd,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
}