/*	$NetBSD: vfs_bio.c,v 1.295 2020/04/27 07:51:02 jdolecek Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2009, 2019, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, and by Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * The buffer cache subsystem.
 *
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison-Wesley, 1989)
 *
 * Locking
 *
 * There are three locks:
 * - bufcache_lock: protects global buffer cache state.
 * - BC_BUSY: a long term per-buffer lock.
 * - buf_t::b_objlock: lock on completion (biowait vs biodone).
 *
 * For buffers associated with vnodes (the most common case) b_objlock points
 * to the vnode_t::v_interlock.  Otherwise, it points to the generic
 * buffer_lock.
 *
 * Lock order:
 *	bufcache_lock ->
 *		buf_t::b_objlock
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.295 2020/04/27 07:51:02 jdolecek Exp $");

#ifdef _KERNEL_OPT
#include "opt_bufcache.h"
#include "opt_dtrace.h"
#include "opt_biohist.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/fstrans.h>
#include <sys/intr.h>
#include <sys/cpu.h>
#include <sys/wapbl.h>
#include <sys/bitops.h>
#include <sys/cprng.h>
#include <sys/sdt.h>

#include <uvm/uvm.h>	/* extern struct uvm uvm */

#include <miscfs/specfs/specdev.h>

SDT_PROVIDER_DEFINE(io);

SDT_PROBE_DEFINE4(io, kernel, , bbusy__start,
    "struct buf *"/*bp*/,
    "bool"/*intr*/, "int"/*timo*/, "kmutex_t *"/*interlock*/);
SDT_PROBE_DEFINE5(io, kernel, , bbusy__done,
    "struct buf *"/*bp*/,
    "bool"/*intr*/,
    "int"/*timo*/,
    "kmutex_t *"/*interlock*/,
    "int"/*error*/);
SDT_PROBE_DEFINE0(io, kernel, , getnewbuf__start);
SDT_PROBE_DEFINE1(io, kernel, , getnewbuf__done,  "struct buf *"/*bp*/);
SDT_PROBE_DEFINE3(io, kernel, , getblk__start,
    "struct vnode *"/*vp*/, "daddr_t"/*blkno*/, "int"/*size*/);
SDT_PROBE_DEFINE4(io, kernel, , getblk__done,
    "struct vnode *"/*vp*/, "daddr_t"/*blkno*/, "int"/*size*/,
    "struct buf *"/*bp*/);
SDT_PROBE_DEFINE2(io, kernel, , brelse, "struct buf *"/*bp*/, "int"/*set*/);
SDT_PROBE_DEFINE1(io, kernel, , wait__start, "struct buf *"/*bp*/);
SDT_PROBE_DEFINE1(io, kernel, , wait__done, "struct buf *"/*bp*/);

#ifndef	BUFPAGES
# define BUFPAGES 0
#endif

#ifdef BUFCACHE
# if (BUFCACHE < 5) || (BUFCACHE > 95)
#  error BUFCACHE is not between 5 and 95
# endif
#else
# define BUFCACHE 15
#endif

u_int	nbuf;			/* desired number of buffer headers */
u_int	bufpages = BUFPAGES;	/* optional hardwired count */
u_int	bufcache = BUFCACHE;	/* max % of RAM to use for buffer cache */

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		3		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */

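/*
 * A free queue: bq_queue holds the buffers, bq_bytes counts the total
 * buffer memory currently on the queue, and bq_marker lets
 * sysctl_dobuf() detect whether the queue changed while bufcache_lock
 * was dropped (bremfree() clears it when the marked buffer leaves).
 */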
struct bqueue {
	TAILQ_HEAD(, buf) bq_queue;
	uint64_t bq_bytes;
	buf_t *bq_marker;
};
static struct bqueue bufqueues[BQUEUES] __cacheline_aligned;

/* Function prototypes */
static void buf_setwm(void);
static int buf_trim(void);
static void *bufpool_page_alloc(struct pool *, int);
static void bufpool_page_free(struct pool *, void *);
static buf_t *bio_doread(struct vnode *, daddr_t, int, int);
static buf_t *getnewbuf(int, int, int);
static int buf_lotsfree(void);
static int buf_canrelease(void);
static u_long buf_mempoolidx(u_long);
static u_long buf_roundsize(u_long);
static void *buf_alloc(size_t);
static void buf_mrelease(void *, size_t);
static void binsheadfree(buf_t *, struct bqueue *);
static void binstailfree(buf_t *, struct bqueue *);
#ifdef DEBUG
static int checkfreelist(buf_t *, struct bqueue *, int);
#endif
static void biointr(void *);
static void biodone2(buf_t *);
static void sysctl_kern_buf_setup(void);
static void sysctl_vm_buf_setup(void);

/* Initialization for biohist */

#include <sys/biohist.h>

BIOHIST_DEFINE(biohist);

void
biohist_init(void)
{

	BIOHIST_INIT(biohist, BIOHIST_SIZE);
}

/*
 * Definitions for the buffer hash lists.
 */
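/*
 * BUFHASH() maps a (vnode, logical block number) pair to a hash chain
 * head: the vnode address is shifted right to discard its low-order
 * bits, the block number is added in, and the sum is masked with the
 * table-size-minus-one mask that hashinit() stored in bufhash.
 */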
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;

static kcondvar_t needbuffer_cv;

/*
 * Buffer queue lock.
 */
kmutex_t bufcache_lock __cacheline_aligned;
kmutex_t buffer_lock __cacheline_aligned;

/* Software ISR for completed transfers. */
static void *biodone_sih;

/* Buffer pool for I/O buffers. */
static pool_cache_t buf_cache;
static pool_cache_t bufio_cache;

#define MEMPOOL_INDEX_OFFSET (ilog2(DEV_BSIZE))	/* smallest pool is 512 bytes */
#define NMEMPOOLS (ilog2(MAXBSIZE) - MEMPOOL_INDEX_OFFSET + 1)
__CTASSERT((1 << (NMEMPOOLS + MEMPOOL_INDEX_OFFSET - 1)) == MAXBSIZE);
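/*
 * Example: with a DEV_BSIZE of 512 (ilog2 == 9) and a MAXBSIZE of
 * 64 kB (ilog2 == 16), NMEMPOOLS is 8, giving power-of-two pools of
 * 512 B, 1 kB, 2 kB, ... 64 kB.  The __CTASSERT above checks that the
 * largest pool is exactly MAXBSIZE.
 */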

/* Buffer memory pools */
static struct pool bmempools[NMEMPOOLS];

static struct vm_map *buf_map;

/*
 * Buffer memory pool allocator.
 */
static void *
bufpool_page_alloc(struct pool *pp, int flags)
{

	return (void *)uvm_km_alloc(buf_map,
	    MAXBSIZE, MAXBSIZE,
	    ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT|UVM_KMF_TRYLOCK)
	    | UVM_KMF_WIRED);
}

static void
bufpool_page_free(struct pool *pp, void *v)
{

	uvm_km_free(buf_map, (vaddr_t)v, MAXBSIZE, UVM_KMF_WIRED);
}

static struct pool_allocator bufmempool_allocator = {
	.pa_alloc = bufpool_page_alloc,
	.pa_free = bufpool_page_free,
	.pa_pagesz = MAXBSIZE,
};

/* Buffer memory management variables */
u_long bufmem_valimit;
u_long bufmem_hiwater;
u_long bufmem_lowater;
u_long bufmem;

/*
 * MD code can call this to set a hard limit on the amount
 * of virtual memory used by the buffer cache.
 */
int
buf_setvalimit(vsize_t sz)
{

	/* We need to accommodate at least NMEMPOOLS of MAXBSIZE each */
	if (sz < NMEMPOOLS * MAXBSIZE)
		return EINVAL;

	bufmem_valimit = sz;
	return 0;
}

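/*
 * buf_setwm: derive the cache water marks.  The high water mark comes
 * from buf_memcalc(); the low water mark is 1/8 of it (the shift by
 * BUFMEM_WMSHIFT), so the default bufcache of 15% yields a low water
 * mark of roughly 2% of memory, as the comment below notes.
 */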
static void
buf_setwm(void)
{

	bufmem_hiwater = buf_memcalc();
	/* lowater is approx. 2% of memory (with bufcache = 15) */
#define	BUFMEM_WMSHIFT	3
#define	BUFMEM_HIWMMIN	(64 * 1024 << BUFMEM_WMSHIFT)
	if (bufmem_hiwater < BUFMEM_HIWMMIN)
		/* Ensure a reasonable minimum value */
		bufmem_hiwater = BUFMEM_HIWMMIN;
	bufmem_lowater = bufmem_hiwater >> BUFMEM_WMSHIFT;
}

#ifdef DEBUG
int debug_verify_freelist = 0;
static int
checkfreelist(buf_t *bp, struct bqueue *dp, int ison)
{
	buf_t *b;

	if (!debug_verify_freelist)
		return 1;

	TAILQ_FOREACH(b, &dp->bq_queue, b_freelist) {
		if (b == bp)
			return ison ? 1 : 0;
	}

	return ison ? 0 : 1;
}
#endif

/*
 * Insq/Remq for the buffer free lists.
 * Call with buffer queue locked.
 */
static void
binsheadfree(buf_t *bp, struct bqueue *dp)
{

	KASSERT(mutex_owned(&bufcache_lock));
	KASSERT(bp->b_freelistindex == -1);
	TAILQ_INSERT_HEAD(&dp->bq_queue, bp, b_freelist);
	dp->bq_bytes += bp->b_bufsize;
	bp->b_freelistindex = dp - bufqueues;
}

static void
binstailfree(buf_t *bp, struct bqueue *dp)
{

	KASSERT(mutex_owned(&bufcache_lock));
	KASSERTMSG(bp->b_freelistindex == -1, "double free of buffer? "
	    "bp=%p, b_freelistindex=%d\n", bp, bp->b_freelistindex);
	TAILQ_INSERT_TAIL(&dp->bq_queue, bp, b_freelist);
	dp->bq_bytes += bp->b_bufsize;
	bp->b_freelistindex = dp - bufqueues;
}

void
bremfree(buf_t *bp)
{
	struct bqueue *dp;
	int bqidx = bp->b_freelistindex;

	KASSERT(mutex_owned(&bufcache_lock));

	KASSERT(bqidx != -1);
	dp = &bufqueues[bqidx];
	KDASSERT(checkfreelist(bp, dp, 1));
	KASSERT(dp->bq_bytes >= bp->b_bufsize);
	TAILQ_REMOVE(&dp->bq_queue, bp, b_freelist);
	dp->bq_bytes -= bp->b_bufsize;

	/* For the sysctl helper. */
	if (bp == dp->bq_marker)
		dp->bq_marker = NULL;

#if defined(DIAGNOSTIC)
	bp->b_freelistindex = -1;
#endif /* defined(DIAGNOSTIC) */
}

/*
 * note that for some ports this is used by pmap bootstrap code to
 * determine kva size.
 */
u_long
buf_memcalc(void)
{
	u_long n;
	vsize_t mapsz = 0;

	/*
	 * Determine the upper bound of memory to use for buffers.
	 *
	 *	- If bufpages is specified, use that as the number
	 *	  of pages.
	 *
	 *	- Otherwise, use bufcache as the percentage of
	 *	  physical memory.
	 */
	if (bufpages != 0) {
		n = bufpages;
	} else {
		if (bufcache < 5) {
			printf("forcing bufcache %d -> 5", bufcache);
			bufcache = 5;
		}
		if (bufcache > 95) {
			printf("forcing bufcache %d -> 95", bufcache);
			bufcache = 95;
		}
		if (buf_map != NULL)
			mapsz = vm_map_max(buf_map) - vm_map_min(buf_map);
		n = calc_cache_size(mapsz, bufcache,
		    (buf_map != kernel_map) ? 100 : BUFCACHE_VA_MAXPCT)
		    / PAGE_SIZE;
	}

	n <<= PAGE_SHIFT;
	if (bufmem_valimit != 0 && n > bufmem_valimit)
		n = bufmem_valimit;

	return (n);
}

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit(void)
{
	struct bqueue *dp;
	int use_std;
	u_int i;

	biodone_vfs = biodone;

	mutex_init(&bufcache_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&buffer_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&needbuffer_cv, "needbuf");

	if (bufmem_valimit != 0) {
		vaddr_t minaddr = 0, maxaddr;
		buf_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
					  bufmem_valimit, 0, false, 0);
		if (buf_map == NULL)
			panic("bufinit: cannot allocate submap");
	} else
		buf_map = kernel_map;

	/*
	 * Initialize buffer cache memory parameters.
	 */
	bufmem = 0;
	buf_setwm();

	/* On "small" machines use small pool page sizes where possible */
	use_std = (physmem < atop(16*1024*1024));

	/*
	 * Also use them on systems that can map the pool pages using
	 * a direct-mapped segment.
	 */
#ifdef PMAP_MAP_POOLPAGE
	use_std = 1;
#endif

	buf_cache = pool_cache_init(sizeof(buf_t), 0, 0, 0,
	    "bufpl", NULL, IPL_SOFTBIO, NULL, NULL, NULL);
	bufio_cache = pool_cache_init(sizeof(buf_t), 0, 0, 0,
	    "biopl", NULL, IPL_BIO, NULL, NULL, NULL);

	for (i = 0; i < NMEMPOOLS; i++) {
		struct pool_allocator *pa;
		struct pool *pp = &bmempools[i];
		u_int size = 1 << (i + MEMPOOL_INDEX_OFFSET);
		char *name = kmem_alloc(8, KM_SLEEP); /* XXX: never freed */
		if (__predict_false(size >= 1048576))
			(void)snprintf(name, 8, "buf%um", size / 1048576);
		else if (__predict_true(size >= 1024))
			(void)snprintf(name, 8, "buf%uk", size / 1024);
		else
			(void)snprintf(name, 8, "buf%ub", size);
		pa = (size <= PAGE_SIZE && use_std)
			? &pool_allocator_nointr
			: &bufmempool_allocator;
		pool_init(pp, size, DEV_BSIZE, 0, 0, name, pa, IPL_NONE);
		pool_setlowat(pp, 1);
		pool_sethiwat(pp, 1);
	}

	/* Initialize the buffer queues */
	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) {
		TAILQ_INIT(&dp->bq_queue);
		dp->bq_bytes = 0;
	}

	/*
	 * Estimate hash table size based on the amount of memory we
	 * intend to use for the buffer cache. The average buffer
	 * size is dependent on our clients (i.e. filesystems).
	 *
	 * For now, use an empirical 3K per buffer.
	 */
	nbuf = (bufmem_hiwater / 1024) / 3;
	bufhashtbl = hashinit(nbuf, HASH_LIST, true, &bufhash);

	sysctl_kern_buf_setup();
	sysctl_vm_buf_setup();
}

void
bufinit2(void)
{

	biodone_sih = softint_establish(SOFTINT_BIO | SOFTINT_MPSAFE, biointr,
	    NULL);
	if (biodone_sih == NULL)
		panic("bufinit2: can't establish soft interrupt");
}

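/*
 * buf_lotsfree: decide whether a fresh buffer may be allocated.
 * Between the water marks the decision is randomized so that the
 * chance of success falls roughly linearly from 1 at the low water
 * mark to 0 at the high water mark; e.g. with the cache three
 * quarters of the way between the marks, about one request in four
 * succeeds.
 */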
static int
buf_lotsfree(void)
{
	u_long guess;

	/* Always allocate if less than the low water mark. */
	if (bufmem < bufmem_lowater)
		return 1;

	/* Never allocate if greater than the high water mark. */
	if (bufmem > bufmem_hiwater)
		return 0;

	/* If there's anything on the AGE list, it should be eaten. */
	if (TAILQ_FIRST(&bufqueues[BQ_AGE].bq_queue) != NULL)
		return 0;

	/*
	 * The probability of getting a new allocation is inversely
	 * proportional to the current size of the cache above
	 * the low water mark.  Divide the total first to avoid overflows
	 * in the product.
	 */
	guess = cprng_fast32() % 16;

	if ((bufmem_hiwater - bufmem_lowater) / 16 * guess >=
	    (bufmem - bufmem_lowater))
		return 1;

	/* Otherwise don't allocate. */
	return 0;
}

/*
 * Return estimate of bytes we think need to be
 * released to help resolve low memory conditions.
 *
 * => called with bufcache_lock held.
 */
static int
buf_canrelease(void)
{
	int pagedemand, ninvalid = 0;

	KASSERT(mutex_owned(&bufcache_lock));

	if (bufmem < bufmem_lowater)
		return 0;

	if (bufmem > bufmem_hiwater)
		return bufmem - bufmem_hiwater;

	ninvalid += bufqueues[BQ_AGE].bq_bytes;

	pagedemand = uvmexp.freetarg - uvm_availmem();
	if (pagedemand < 0)
		return ninvalid;
	return MAX(ninvalid, MIN(2 * MAXBSIZE,
	    MIN((bufmem - bufmem_lowater) / 16, pagedemand * PAGE_SIZE)));
}

/*
 * Buffer memory allocation helper functions
 */
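/*
 * buf_mempoolidx maps an allocation size to the index of the smallest
 * pool that fits it.  For example, with a DEV_BSIZE of 512, a request
 * for 4096 bytes computes (4096 - 1) >> 9 == 7, which takes three
 * shifts to reach zero, selecting pool index 3 -- the
 * 1 << (3 + 9) == 4096 byte pool.
 */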
static u_long
buf_mempoolidx(u_long size)
{
	u_int n = 0;

	size -= 1;
	size >>= MEMPOOL_INDEX_OFFSET;
	while (size) {
		size >>= 1;
		n += 1;
	}
	if (n >= NMEMPOOLS)
		panic("buf mem pool index %d", n);
	return n;
}

static u_long
buf_roundsize(u_long size)
{
	/* Round up to nearest power of 2 */
	return (1 << (buf_mempoolidx(size) + MEMPOOL_INDEX_OFFSET));
}

static void *
buf_alloc(size_t size)
{
	u_int n = buf_mempoolidx(size);
	void *addr;

	while (1) {
		addr = pool_get(&bmempools[n], PR_NOWAIT);
		if (addr != NULL)
			break;

		/* No memory, see if we can free some. If so, try again */
		mutex_enter(&bufcache_lock);
		if (buf_drain(1) > 0) {
			mutex_exit(&bufcache_lock);
			continue;
		}

		if (curlwp == uvm.pagedaemon_lwp) {
			mutex_exit(&bufcache_lock);
			return NULL;
		}

		/* Wait for buffers to arrive on the LRU queue */
		cv_timedwait(&needbuffer_cv, &bufcache_lock, hz / 4);
		mutex_exit(&bufcache_lock);
	}

	return addr;
}

static void
buf_mrelease(void *addr, size_t size)
{

	pool_put(&bmempools[buf_mempoolidx(size)], addr);
}

/*
 * bread()/breadn() helper.
 */
static buf_t *
bio_doread(struct vnode *vp, daddr_t blkno, int size, int async)
{
	buf_t *bp;
	struct mount *mp;

	bp = getblk(vp, blkno, size, 0, 0);

	/*
	 * getblk() may return NULL if we are the pagedaemon.
	 */
	if (bp == NULL) {
		KASSERT(curlwp == uvm.pagedaemon_lwp);
		return NULL;
	}

	/*
	 * If buffer does not have data valid, start a read.
	 * Note that if buffer is BC_INVAL, getblk() won't return it.
	 * Therefore, it's valid if its I/O has completed or been delayed.
	 */
	if (!ISSET(bp->b_oflags, (BO_DONE | BO_DELWRI))) {
		/* Start I/O for the buffer. */
		SET(bp->b_flags, B_READ | async);
		if (async)
			BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
		else
			BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
		VOP_STRATEGY(vp, bp);

		/* Pay for the read. */
		curlwp->l_ru.ru_inblock++;
	} else if (async)
		brelse(bp, 0);

	if (vp->v_type == VBLK)
		mp = spec_node_getmountedfs(vp);
	else
		mp = vp->v_mount;

	/*
	 * Collect statistics on synchronous and asynchronous reads.
	 * Reads from block devices are charged to their associated
	 * filesystem (if any).
	 */
	if (mp != NULL) {
		if (async == 0)
			mp->mnt_stat.f_syncreads++;
		else
			mp->mnt_stat.f_asyncreads++;
	}

	return (bp);
}

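/*
 * Illustrative usage sketch (lbn, size and bp are caller-supplied):
 *
 *	error = bread(vp, lbn, size, 0, &bp);
 *	if (error)
 *		return error;
 *	...inspect bp->b_data...
 *	brelse(bp, 0);
 *
 * On failure bread() has already released the buffer and cleared *bpp;
 * on success the caller holds the buffer busy until brelse().
 */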
/*
 * Read a disk block.
 * This algorithm is described in Bach (p. 54).
 */
int
bread(struct vnode *vp, daddr_t blkno, int size, int flags, buf_t **bpp)
{
	buf_t *bp;
	int error;

	BIOHIST_FUNC(__func__); BIOHIST_CALLED(biohist);

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, 0);
	if (bp == NULL)
		return ENOMEM;

	/* Wait for the read to complete, and return result. */
	error = biowait(bp);
	if (error == 0 && (flags & B_MODIFY) != 0)
		error = fscow_run(bp, true);
	if (error) {
		brelse(bp, 0);
		*bpp = NULL;
	}

	return error;
}

/*
 * Read-ahead multiple disk blocks. The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablks,
    int *rasizes, int nrablks, int flags, buf_t **bpp)
{
	buf_t *bp;
	int error, i;

	BIOHIST_FUNC(__func__); BIOHIST_CALLED(biohist);

	bp = *bpp = bio_doread(vp, blkno, size, 0);
	if (bp == NULL)
		return ENOMEM;

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	mutex_enter(&bufcache_lock);
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block */
		mutex_exit(&bufcache_lock);
		(void) bio_doread(vp, rablks[i], rasizes[i], B_ASYNC);
		mutex_enter(&bufcache_lock);
	}
	mutex_exit(&bufcache_lock);

	/* Otherwise, we had to start a read for it; wait until it's valid. */
	error = biowait(bp);
	if (error == 0 && (flags & B_MODIFY) != 0)
		error = fscow_run(bp, true);
	if (error) {
		brelse(bp, 0);
		*bpp = NULL;
	}

	return error;
}

/*
 * Block write.  Described in Bach (p.56)
 */
int
bwrite(buf_t *bp)
{
	int rv, sync, wasdelayed;
	struct vnode *vp;
	struct mount *mp;

	BIOHIST_FUNC(__func__); BIOHIST_CALLARGS(biohist, "bp=%#jx",
	    (uintptr_t)bp, 0, 0, 0);

	KASSERT(ISSET(bp->b_cflags, BC_BUSY));
	KASSERT(!cv_has_waiters(&bp->b_done));

	vp = bp->b_vp;

	/*
	 * dholland 20160728 AFAICT vp==NULL must be impossible as it
	 * will crash upon reaching VOP_STRATEGY below... see further
	 * analysis on tech-kern.
	 */
	KASSERTMSG(vp != NULL, "bwrite given buffer with null vnode");

	if (vp != NULL) {
		KASSERT(bp->b_objlock == vp->v_interlock);
		if (vp->v_type == VBLK)
			mp = spec_node_getmountedfs(vp);
		else
			mp = vp->v_mount;
	} else {
		mp = NULL;
	}

	if (mp && mp->mnt_wapbl) {
		if (bp->b_iodone != mp->mnt_wapbl_op->wo_wapbl_biodone) {
			bdwrite(bp);
			return 0;
		}
	}

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	sync = !ISSET(bp->b_flags, B_ASYNC);
	if (sync && mp != NULL && ISSET(mp->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if (mp != NULL) {
		if (sync)
			mp->mnt_stat.f_syncwrites++;
		else
			mp->mnt_stat.f_asyncwrites++;
	}

	/*
	 * Pay for the I/O operation and make sure the buf is on the correct
	 * vnode queue.
	 */
	bp->b_error = 0;
	wasdelayed = ISSET(bp->b_oflags, BO_DELWRI);
	CLR(bp->b_flags, B_READ);
	if (wasdelayed) {
		mutex_enter(&bufcache_lock);
		mutex_enter(bp->b_objlock);
		CLR(bp->b_oflags, BO_DONE | BO_DELWRI);
		reassignbuf(bp, bp->b_vp);
		/* Wake anyone trying to busy the buffer via vnode's lists. */
		cv_broadcast(&bp->b_busy);
		mutex_exit(&bufcache_lock);
	} else {
		curlwp->l_ru.ru_oublock++;
		mutex_enter(bp->b_objlock);
		CLR(bp->b_oflags, BO_DONE | BO_DELWRI);
	}
	if (vp != NULL)
		vp->v_numoutput++;
	mutex_exit(bp->b_objlock);

	/* Initiate disk write. */
	if (sync)
		BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
	else
		BIO_SETPRIO(bp, BPRIO_TIMELIMITED);

	VOP_STRATEGY(vp, bp);

	if (sync) {
		/* If I/O was synchronous, wait for it to complete. */
		rv = biowait(bp);

		/* Release the buffer. */
		brelse(bp, 0);

		return (rv);
	} else {
		return (0);
	}
}

int
vn_bwrite(void *v)
{
	struct vop_bwrite_args *ap = v;

	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(buf_t *bp)
{

	BIOHIST_FUNC(__func__); BIOHIST_CALLARGS(biohist, "bp=%#jx",
	    (uintptr_t)bp, 0, 0, 0);

	KASSERT(bp->b_vp == NULL || bp->b_vp->v_tag != VT_UFS ||
	    bp->b_vp->v_type == VBLK || ISSET(bp->b_flags, B_COWDONE));
	KASSERT(ISSET(bp->b_cflags, BC_BUSY));
	KASSERT(!cv_has_waiters(&bp->b_done));

	/* If this is a tape block, write the block now. */
	if (bdev_type(bp->b_dev) == D_TAPE) {
		bawrite(bp);
		return;
	}

	if (wapbl_vphaswapbl(bp->b_vp)) {
		struct mount *mp = wapbl_vptomp(bp->b_vp);

		if (bp->b_iodone != mp->mnt_wapbl_op->wo_wapbl_biodone) {
			WAPBL_ADD_BUF(mp, bp);
		}
	}

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list.
	 */
	KASSERT(bp->b_vp == NULL || bp->b_objlock == bp->b_vp->v_interlock);

	if (!ISSET(bp->b_oflags, BO_DELWRI)) {
		mutex_enter(&bufcache_lock);
		mutex_enter(bp->b_objlock);
		SET(bp->b_oflags, BO_DELWRI);
		curlwp->l_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
		/* Wake anyone trying to busy the buffer via vnode's lists. */
		cv_broadcast(&bp->b_busy);
		mutex_exit(&bufcache_lock);
	} else {
		mutex_enter(bp->b_objlock);
	}
	/* Otherwise, the "write" is done, so mark and release the buffer. */
	CLR(bp->b_oflags, BO_DONE);
	mutex_exit(bp->b_objlock);

	brelse(bp, 0);
}

/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(buf_t *bp)
{

	KASSERT(ISSET(bp->b_cflags, BC_BUSY));
	KASSERT(bp->b_vp != NULL);

	SET(bp->b_flags, B_ASYNC);
	VOP_BWRITE(bp->b_vp, bp);
}

/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelsel(buf_t *bp, int set)
{
	struct bqueue *bufq;
	struct vnode *vp;

	SDT_PROBE2(io, kernel, , brelse,  bp, set);

	KASSERT(bp != NULL);
	KASSERT(mutex_owned(&bufcache_lock));
	KASSERT(!cv_has_waiters(&bp->b_done));

	SET(bp->b_cflags, set);

	KASSERT(ISSET(bp->b_cflags, BC_BUSY));
	KASSERT(bp->b_iodone == NULL);

	/* Wake up any processes waiting for any buffer to become free. */
	cv_signal(&needbuffer_cv);

	/* Wake up any processes waiting for _this_ buffer to become free */
	if (ISSET(bp->b_cflags, BC_WANTED))
		CLR(bp->b_cflags, BC_WANTED|BC_AGE);

	/* If it's clean, clear the copy-on-write flag. */
	if (ISSET(bp->b_flags, B_COWDONE)) {
		mutex_enter(bp->b_objlock);
		if (!ISSET(bp->b_oflags, BO_DELWRI))
			CLR(bp->b_flags, B_COWDONE);
		mutex_exit(bp->b_objlock);
	}

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, B_LOCKED))
		bp->b_error = 0;

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_cflags, BC_NOCACHE) || bp->b_error != 0)
		SET(bp->b_cflags, BC_INVAL);

	if (ISSET(bp->b_cflags, BC_VFLUSH)) {
		/*
		 * This is a delayed write buffer that was just flushed to
		 * disk.  It is still on the LRU queue.  If it's become
		 * invalid, then we need to move it to a different queue;
		 * otherwise leave it in its current position.
		 */
		CLR(bp->b_cflags, BC_VFLUSH);
		if (!ISSET(bp->b_cflags, BC_INVAL|BC_AGE) &&
		    !ISSET(bp->b_flags, B_LOCKED) && bp->b_error == 0) {
			KDASSERT(checkfreelist(bp, &bufqueues[BQ_LRU], 1));
			goto already_queued;
		} else {
			bremfree(bp);
		}
	}

	KDASSERT(checkfreelist(bp, &bufqueues[BQ_AGE], 0));
	KDASSERT(checkfreelist(bp, &bufqueues[BQ_LRU], 0));
	KDASSERT(checkfreelist(bp, &bufqueues[BQ_LOCKED], 0));

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_cflags, BC_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (ISSET(bp->b_flags, B_LOCKED)) {
			if (wapbl_vphaswapbl(vp = bp->b_vp)) {
				struct mount *mp = wapbl_vptomp(vp);

				KASSERT(bp->b_iodone
				    != mp->mnt_wapbl_op->wo_wapbl_biodone);
				WAPBL_REMOVE_BUF(mp, bp);
			}
		}

		mutex_enter(bp->b_objlock);
		CLR(bp->b_oflags, BO_DONE|BO_DELWRI);
		if ((vp = bp->b_vp) != NULL) {
			KASSERT(bp->b_objlock == vp->v_interlock);
			reassignbuf(bp, bp->b_vp);
			brelvp(bp);
			mutex_exit(vp->v_interlock);
		} else {
			KASSERT(bp->b_objlock == &buffer_lock);
			mutex_exit(bp->b_objlock);
		}
		/* We want to dispose of the buffer, so wake everybody. */
		cv_broadcast(&bp->b_busy);
		if (bp->b_bufsize <= 0)
			/* no data */
			goto already_queued;
		else
			/* invalid data */
			bufq = &bufqueues[BQ_AGE];
		binsheadfree(bp, bufq);
	} else  {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 * If the buffer is AGE but has dependencies, it must go on
		 * the last queue to be scanned, i.e. LRU.  This protects
		 * against a livelock where BQ_AGE holds only buffers with
		 * dependencies and we thus never reach them in BQ_LRU.
		 */
		if (ISSET(bp->b_flags, B_LOCKED)) {
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		} else if (!ISSET(bp->b_cflags, BC_AGE)) {
			/* valid data */
			bufq = &bufqueues[BQ_LRU];
		} else {
			/* stale but valid data */
			bufq = &bufqueues[BQ_AGE];
		}
		binstailfree(bp, bufq);
	}
already_queued:
	/* Unlock the buffer. */
	CLR(bp->b_cflags, BC_AGE|BC_BUSY|BC_NOCACHE);
	CLR(bp->b_flags, B_ASYNC);

	/*
	 * Wake only the highest priority waiter on the lock, in order to
	 * prevent a thundering herd: many LWPs simultaneously awakening and
	 * competing for the buffer's lock.  Testing in 2019 revealed this
	 * to reduce contention on bufcache_lock tenfold during a kernel
	 * compile.  Here and elsewhere, when the buffer is changing
	 * identity, being disposed of, or moving from one list to another,
	 * we wake all lock requestors.
	 */
	if (bp->b_bufsize <= 0) {
		cv_broadcast(&bp->b_busy);
		buf_destroy(bp);
#ifdef DEBUG
		memset((char *)bp, 0, sizeof(*bp));
#endif
		pool_cache_put(buf_cache, bp);
	} else
		cv_signal(&bp->b_busy);
}

void
brelse(buf_t *bp, int set)
{

	mutex_enter(&bufcache_lock);
	brelsel(bp, set);
	mutex_exit(&bufcache_lock);
}

/*
 * Determine if a block is in the cache.
 * Just look on what would be its hash chain.  If it's there, return
 * a pointer to it, unless it's marked invalid.  If it's marked invalid,
 * we normally don't return the buffer, unless the caller explicitly
 * wants us to.
 */
buf_t *
incore(struct vnode *vp, daddr_t blkno)
{
	buf_t *bp;

	KASSERT(mutex_owned(&bufcache_lock));

	/* Search hash chain */
	LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_cflags, BC_INVAL)) {
		    	KASSERT(bp->b_objlock == vp->v_interlock);
		    	return (bp);
		}
	}

	return (NULL);
}

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset. If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it. Otherwise, return an empty block of the
 * correct size. It is up to the caller to ensure that
 * cached blocks are of the correct size.
 */
buf_t *
getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	int err, preserve;
	buf_t *bp;

	mutex_enter(&bufcache_lock);
	SDT_PROBE3(io, kernel, , getblk__start,  vp, blkno, size);
 loop:
	bp = incore(vp, blkno);
	if (bp != NULL) {
		err = bbusy(bp, ((slpflag & PCATCH) != 0), slptimeo, NULL);
		if (err != 0) {
			if (err == EPASSTHROUGH)
				goto loop;
			mutex_exit(&bufcache_lock);
			SDT_PROBE4(io, kernel, , getblk__done,
			    vp, blkno, size, NULL);
			return (NULL);
		}
		KASSERT(!cv_has_waiters(&bp->b_done));
#ifdef DIAGNOSTIC
		if (ISSET(bp->b_oflags, BO_DONE|BO_DELWRI) &&
		    bp->b_bcount < size && vp->v_type != VBLK)
			panic("getblk: block size invariant failed");
#endif
		bremfree(bp);
		preserve = 1;
	} else {
		if ((bp = getnewbuf(slpflag, slptimeo, 0)) == NULL)
			goto loop;

		if (incore(vp, blkno) != NULL) {
			/* The block has come into memory in the meantime. */
			brelsel(bp, 0);
			goto loop;
		}

		LIST_INSERT_HEAD(BUFHASH(vp, blkno), bp, b_hash);
		bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
		mutex_enter(vp->v_interlock);
		bgetvp(vp, bp);
		mutex_exit(vp->v_interlock);
		preserve = 0;
	}
	mutex_exit(&bufcache_lock);

	/*
	 * LFS can't track total size of B_LOCKED buffer (locked_queue_bytes)
	 * if we re-size buffers here.
	 */
	if (ISSET(bp->b_flags, B_LOCKED)) {
		KASSERT(bp->b_bufsize >= size);
	} else {
		if (allocbuf(bp, size, preserve)) {
			mutex_enter(&bufcache_lock);
			LIST_REMOVE(bp, b_hash);
			brelsel(bp, BC_INVAL);
			mutex_exit(&bufcache_lock);
			SDT_PROBE4(io, kernel, , getblk__done,
			    vp, blkno, size, NULL);
			return NULL;
		}
	}
	BIO_SETPRIO(bp, BPRIO_DEFAULT);
	SDT_PROBE4(io, kernel, , getblk__done,  vp, blkno, size, bp);
	return (bp);
}

/*
 * Get an empty, disassociated buffer of given size.
 */
buf_t *
geteblk(int size)
{
	buf_t *bp;
	int error __diagused;

	mutex_enter(&bufcache_lock);
	while ((bp = getnewbuf(0, 0, 0)) == NULL)
		;

	SET(bp->b_cflags, BC_INVAL);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	mutex_exit(&bufcache_lock);
	BIO_SETPRIO(bp, BPRIO_DEFAULT);
	error = allocbuf(bp, size, 0);
	KASSERT(error == 0);
	return (bp);
}

/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it's the caller's
 * responsibility to fill out the buffer's additional contents.
 */
int
allocbuf(buf_t *bp, int size, int preserve)
{
	void *addr;
	vsize_t oldsize, desired_size;
	int oldcount;
	int delta;

	desired_size = buf_roundsize(size);
	if (desired_size > MAXBSIZE)
		printf("allocbuf: buffer larger than MAXBSIZE requested");

	oldcount = bp->b_bcount;

	bp->b_bcount = size;

	oldsize = bp->b_bufsize;
	if (oldsize == desired_size) {
		/*
		 * Do not short cut the WAPBL resize, as the buffer length
		 * could still have changed and this would corrupt the
		 * tracking of the transaction length.
		 */
		goto out;
	}

	/*
	 * If we want a buffer of a different size, re-allocate the
	 * buffer's memory; copy old content only if needed.
	 */
	addr = buf_alloc(desired_size);
	if (addr == NULL)
		return ENOMEM;
	if (preserve)
		memcpy(addr, bp->b_data, MIN(oldsize,desired_size));
	if (bp->b_data != NULL)
		buf_mrelease(bp->b_data, oldsize);
	bp->b_data = addr;
	bp->b_bufsize = desired_size;

	/*
	 * Update overall buffer memory counter (protected by bufcache_lock)
	 */
	delta = (long)desired_size - (long)oldsize;

	mutex_enter(&bufcache_lock);
	if ((bufmem += delta) > bufmem_hiwater) {
		/*
		 * Need to trim overall memory usage.
		 */
		while (buf_canrelease()) {
			if (preempt_needed()) {
				mutex_exit(&bufcache_lock);
				preempt();
				mutex_enter(&bufcache_lock);
			}
			if (buf_trim() == 0)
				break;
		}
	}
	mutex_exit(&bufcache_lock);

 out:
	if (wapbl_vphaswapbl(bp->b_vp))
		WAPBL_RESIZE_BUF(wapbl_vptomp(bp->b_vp), bp, oldsize, oldcount);

	return 0;
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 *
 * Called with the buffer queues locked.
 * Return buffer locked.
 */
static buf_t *
getnewbuf(int slpflag, int slptimeo, int from_bufq)
{
	buf_t *bp;
	struct vnode *vp;
	struct mount *transmp = NULL;

	SDT_PROBE0(io, kernel, , getnewbuf__start);

 start:
	KASSERT(mutex_owned(&bufcache_lock));

	/*
	 * Get a new buffer from the pool.
	 */
	if (!from_bufq && buf_lotsfree()) {
		mutex_exit(&bufcache_lock);
		bp = pool_cache_get(buf_cache, PR_NOWAIT);
		if (bp != NULL) {
			memset((char *)bp, 0, sizeof(*bp));
			buf_init(bp);
			SET(bp->b_cflags, BC_BUSY);	/* mark buffer busy */
			mutex_enter(&bufcache_lock);
#if defined(DIAGNOSTIC)
			bp->b_freelistindex = -1;
#endif /* defined(DIAGNOSTIC) */
			SDT_PROBE1(io, kernel, , getnewbuf__done,  bp);
			return (bp);
		}
		mutex_enter(&bufcache_lock);
	}

	KASSERT(mutex_owned(&bufcache_lock));
	if ((bp = TAILQ_FIRST(&bufqueues[BQ_AGE].bq_queue)) != NULL) {
		KASSERT(!ISSET(bp->b_oflags, BO_DELWRI));
	} else {
		TAILQ_FOREACH(bp, &bufqueues[BQ_LRU].bq_queue, b_freelist) {
			if (ISSET(bp->b_cflags, BC_VFLUSH) ||
			    !ISSET(bp->b_oflags, BO_DELWRI))
				break;
			if (fstrans_start_nowait(bp->b_vp->v_mount) == 0) {
				KASSERT(transmp == NULL);
				transmp = bp->b_vp->v_mount;
				break;
			}
		}
	}
	if (bp != NULL) {
	    	KASSERT(!ISSET(bp->b_cflags, BC_BUSY) || ISSET(bp->b_cflags, BC_VFLUSH));
		bremfree(bp);

		/* Buffer is no longer on free lists. */
		SET(bp->b_cflags, BC_BUSY);

		/* Wake anyone trying to lock the old identity. */
		cv_broadcast(&bp->b_busy);
	} else {
		/*
		 * XXX: !from_bufq should be removed.
		 */
		if (!from_bufq || curlwp != uvm.pagedaemon_lwp) {
			/* wait for a free buffer of any kind */
			if ((slpflag & PCATCH) != 0)
				(void)cv_timedwait_sig(&needbuffer_cv,
				    &bufcache_lock, slptimeo);
			else
				(void)cv_timedwait(&needbuffer_cv,
				    &bufcache_lock, slptimeo);
		}
		SDT_PROBE1(io, kernel, , getnewbuf__done,  NULL);
		return (NULL);
	}

#ifdef DIAGNOSTIC
	if (bp->b_bufsize <= 0)
		panic("buffer %p: on queue but empty", bp);
#endif

	if (ISSET(bp->b_cflags, BC_VFLUSH)) {
		/*
		 * This is a delayed write buffer being flushed to disk.  Make
		 * sure it gets aged out of the queue when it's finished, and
		 * leave it off the LRU queue.
		 */
		CLR(bp->b_cflags, BC_VFLUSH);
		SET(bp->b_cflags, BC_AGE);
		goto start;
	}

	KASSERT(ISSET(bp->b_cflags, BC_BUSY));
    	KASSERT(!cv_has_waiters(&bp->b_done));

	/*
	 * If buffer was a delayed write, start it and return NULL
	 * (since we might sleep while starting the write).
	 */
	if (ISSET(bp->b_oflags, BO_DELWRI)) {
		/*
		 * This buffer has gone through the LRU, so make sure it gets
		 * reused ASAP.
		 */
		SET(bp->b_cflags, BC_AGE);
		mutex_exit(&bufcache_lock);
		bawrite(bp);
		KASSERT(transmp != NULL);
		fstrans_done(transmp);
		mutex_enter(&bufcache_lock);
		SDT_PROBE1(io, kernel, , getnewbuf__done,  NULL);
		return (NULL);
	}

	KASSERT(transmp == NULL);

	vp = bp->b_vp;

	/* clear out various other fields */
	bp->b_cflags = BC_BUSY;
	bp->b_oflags = 0;
	bp->b_flags = 0;
	bp->b_dev = NODEV;
	bp->b_blkno = 0;
	bp->b_lblkno = 0;
	bp->b_rawblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;

	LIST_REMOVE(bp, b_hash);

	/* Disassociate us from our vnode, if we had one... */
	if (vp != NULL) {
		mutex_enter(vp->v_interlock);
		brelvp(bp);
		mutex_exit(vp->v_interlock);
	}

	SDT_PROBE1(io, kernel, , getnewbuf__done,  bp);
	return (bp);
}

/*
 * Attempt to free an aged buffer off the queues.
 * Called with queue lock held.
 * Returns the amount of buffer memory freed.
 */
static int
buf_trim(void)
{
	buf_t *bp;
	long size;

	KASSERT(mutex_owned(&bufcache_lock));

	/* Instruct getnewbuf() to get buffers off the queues */
	if ((bp = getnewbuf(PCATCH, 1, 1)) == NULL)
		return 0;

	KASSERT((bp->b_cflags & BC_WANTED) == 0);
	size = bp->b_bufsize;
	bufmem -= size;
	if (size > 0) {
		buf_mrelease(bp->b_data, size);
		bp->b_bcount = bp->b_bufsize = 0;
	}
	/* brelse() will return the buffer to the global buffer pool */
	brelsel(bp, 0);
	return size;
}

int
buf_drain(int n)
{
	int size = 0, sz;

	KASSERT(mutex_owned(&bufcache_lock));

	while (size < n && bufmem > bufmem_lowater) {
		sz = buf_trim();
		if (sz <= 0)
			break;
		size += sz;
	}

	return size;
}

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(buf_t *bp)
{

	BIOHIST_FUNC(__func__);

	KASSERT(ISSET(bp->b_cflags, BC_BUSY));

	SDT_PROBE1(io, kernel, , wait__start, bp);

	mutex_enter(bp->b_objlock);

	BIOHIST_CALLARGS(biohist, "bp=%#jx, oflags=0x%jx, ret_addr=%#jx",
	    (uintptr_t)bp, bp->b_oflags,
	    (uintptr_t)__builtin_return_address(0), 0);

	while (!ISSET(bp->b_oflags, BO_DONE | BO_DELWRI)) {
		BIOHIST_LOG(biohist, "waiting bp=%#jx", (uintptr_t)bp, 0, 0, 0);
		cv_wait(&bp->b_done, bp->b_objlock);
	}
	mutex_exit(bp->b_objlock);

	SDT_PROBE1(io, kernel, , wait__done, bp);

	BIOHIST_LOG(biohist, "return %jd", bp->b_error, 0, 0, 0);

	return bp->b_error;
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so. Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., says on p.247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts allocated buffers on the free lists!)
 */
void
biodone(buf_t *bp)
{
	int s;

	BIOHIST_FUNC(__func__);

	KASSERT(!ISSET(bp->b_oflags, BO_DONE));

	if (cpu_intr_p()) {
		/* From interrupt mode: defer to a soft interrupt. */
		s = splvm();
		TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_biodone, bp, b_actq);

		BIOHIST_CALLARGS(biohist, "bp=%#jx, softint scheduled",
		    (uintptr_t)bp, 0, 0, 0);
		softint_schedule(biodone_sih);
		splx(s);
	} else {
		/* Process now - the buffer may be freed soon. */
		biodone2(bp);
	}
}

SDT_PROBE_DEFINE1(io, kernel, , done, "struct buf *"/*bp*/);

static void
biodone2(buf_t *bp)
{
	void (*callout)(buf_t *);

	SDT_PROBE1(io, kernel, , done, bp);

	BIOHIST_FUNC(__func__);
	BIOHIST_CALLARGS(biohist, "bp=%#jx", (uintptr_t)bp, 0, 0, 0);

	mutex_enter(bp->b_objlock);
	/* Note that the transfer is done. */
	if (ISSET(bp->b_oflags, BO_DONE))
		panic("biodone2 already");
	CLR(bp->b_flags, B_COWDONE);
	SET(bp->b_oflags, BO_DONE);
	BIO_SETPRIO(bp, BPRIO_DEFAULT);

	/* Wake up waiting writers. */
	if (!ISSET(bp->b_flags, B_READ))
		vwakeup(bp);

	if ((callout = bp->b_iodone) != NULL) {
		BIOHIST_LOG(biohist, "callout %#jx", (uintptr_t)callout,
		    0, 0, 0);

		/* Note callout done, then call out. */
		KASSERT(!cv_has_waiters(&bp->b_done));
		bp->b_iodone = NULL;
		mutex_exit(bp->b_objlock);
		(*callout)(bp);
	} else if (ISSET(bp->b_flags, B_ASYNC)) {
		/* If async, release. */
		BIOHIST_LOG(biohist, "async", 0, 0, 0, 0);
		KASSERT(!cv_has_waiters(&bp->b_done));
		mutex_exit(bp->b_objlock);
		brelse(bp, 0);
	} else {
		/* Otherwise just wake up waiters in biowait(). */
		BIOHIST_LOG(biohist, "wake-up", 0, 0, 0, 0);
		cv_broadcast(&bp->b_done);
		mutex_exit(bp->b_objlock);
	}
}

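/*
 * biointr: software interrupt handler that drains this CPU's list of
 * buffers completed from hard interrupt context (see biodone()) and
 * finishes each with biodone2(); splvm() guards only the list
 * manipulation, not the biodone2() calls themselves.
 */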
static void
biointr(void *cookie)
{
	struct cpu_info *ci;
	buf_t *bp;
	int s;

	BIOHIST_FUNC(__func__); BIOHIST_CALLED(biohist);

	ci = curcpu();

	s = splvm();
	while (!TAILQ_EMPTY(&ci->ci_data.cpu_biodone)) {
		KASSERT(curcpu() == ci);

		bp = TAILQ_FIRST(&ci->ci_data.cpu_biodone);
		TAILQ_REMOVE(&ci->ci_data.cpu_biodone, bp, b_actq);
		splx(s);

		BIOHIST_LOG(biohist, "bp=%#jx", (uintptr_t)bp, 0, 0, 0);
		biodone2(bp);

		s = splvm();
	}
	splx(s);
}

static void
sysctl_fillbuf(const buf_t *i, struct buf_sysctl *o)
{
	const bool allowaddr = get_expose_address(curproc);

	memset(o, 0, sizeof(*o));

	o->b_flags = i->b_flags | i->b_cflags | i->b_oflags;
	o->b_error = i->b_error;
	o->b_prio = i->b_prio;
	o->b_dev = i->b_dev;
	o->b_bufsize = i->b_bufsize;
	o->b_bcount = i->b_bcount;
	o->b_resid = i->b_resid;
	COND_SET_VALUE(o->b_addr, PTRTOUINT64(i->b_data), allowaddr);
	o->b_blkno = i->b_blkno;
	o->b_rawblkno = i->b_rawblkno;
	COND_SET_VALUE(o->b_iodone, PTRTOUINT64(i->b_iodone), allowaddr);
	COND_SET_VALUE(o->b_proc, PTRTOUINT64(i->b_proc), allowaddr);
	COND_SET_VALUE(o->b_vp, PTRTOUINT64(i->b_vp), allowaddr);
	COND_SET_VALUE(o->b_saveaddr, PTRTOUINT64(i->b_saveaddr), allowaddr);
	o->b_lblkno = i->b_lblkno;
}

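/*
 * KERN_BUFSLOP: when the caller only asks for the required length
 * (oldp == NULL), pad the answer by this many buffers' worth so that
 * the buffer the caller then allocates still suffices if the cache
 * grows a little before the actual fetch.
 */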
#define KERN_BUFSLOP 20
static int
sysctl_dobuf(SYSCTLFN_ARGS)
{
	buf_t *bp;
	struct buf_sysctl bs;
	struct bqueue *bq;
	char *dp;
	u_int i, op, arg;
	size_t len, needed, elem_size, out_size;
	int error, elem_count, retries;

	if (namelen == 1 && name[0] == CTL_QUERY)
		return (sysctl_query(SYSCTLFN_CALL(rnode)));

	if (namelen != 4)
		return (EINVAL);

	retries = 100;
 retry:
	dp = oldp;
	len = (oldp != NULL) ? *oldlenp : 0;
	op = name[0];
	arg = name[1];
	elem_size = name[2];
	elem_count = name[3];
	out_size = MIN(sizeof(bs), elem_size);

	/*
	 * at the moment, these are just "placeholders" to make the
	 * API for retrieving kern.buf data more extensible in the
	 * future.
	 *
	 * XXX kern.buf currently has "netbsd32" issues.  hopefully
	 * these will be resolved at a later point.
	 */
	if (op != KERN_BUF_ALL || arg != KERN_BUF_ALL ||
	    elem_size < 1 || elem_count < 0)
		return (EINVAL);

	error = 0;
	needed = 0;
	sysctl_unlock();
	mutex_enter(&bufcache_lock);
	for (i = 0; i < BQUEUES; i++) {
		bq = &bufqueues[i];
		TAILQ_FOREACH(bp, &bq->bq_queue, b_freelist) {
			bq->bq_marker = bp;
			if (len >= elem_size && elem_count > 0) {
				sysctl_fillbuf(bp, &bs);
				mutex_exit(&bufcache_lock);
				error = copyout(&bs, dp, out_size);
				mutex_enter(&bufcache_lock);
				if (error)
					break;
				if (bq->bq_marker != bp) {
					/*
					 * This sysctl node is only for
					 * statistics.  Retry; if the
					 * queue keeps changing, then
					 * bail out.
					 */
					if (retries-- == 0) {
						error = EAGAIN;
						break;
					}
					mutex_exit(&bufcache_lock);
					sysctl_relock();
					goto retry;
				}
				dp += elem_size;
				len -= elem_size;
			}
			needed += elem_size;
			if (elem_count > 0 && elem_count != INT_MAX)
				elem_count--;
		}
		if (error != 0)
			break;
	}
	mutex_exit(&bufcache_lock);
	sysctl_relock();

	*oldlenp = needed;
	if (oldp == NULL)
		*oldlenp += KERN_BUFSLOP * sizeof(buf_t);

	return (error);
}

static int
sysctl_bufvm_update(SYSCTLFN_ARGS)
{
	int error, rv;
	struct sysctlnode node;
	unsigned int temp_bufcache;
	unsigned long temp_water;

	/* Take a copy of the supplied node and its data */
	node = *rnode;
	if (node.sysctl_data == &bufcache) {
	    node.sysctl_data = &temp_bufcache;
	    temp_bufcache = *(unsigned int *)rnode->sysctl_data;
	} else {
	    node.sysctl_data = &temp_water;
	    temp_water = *(unsigned long *)rnode->sysctl_data;
	}

	/* Update the copy */
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	if (rnode->sysctl_data == &bufcache) {
		if (temp_bufcache > 100)
			return (EINVAL);
		bufcache = temp_bufcache;
		buf_setwm();
	} else if (rnode->sysctl_data == &bufmem_lowater) {
		if (bufmem_hiwater - temp_water < 16)
			return (EINVAL);
		bufmem_lowater = temp_water;
	} else if (rnode->sysctl_data == &bufmem_hiwater) {
		if (temp_water - bufmem_lowater < 16)
			return (EINVAL);
		bufmem_hiwater = temp_water;
	} else
		return (EINVAL);

	/* Drain until below new high water mark */
	sysctl_unlock();
	mutex_enter(&bufcache_lock);
	while (bufmem > bufmem_hiwater) {
		rv = buf_drain((bufmem - bufmem_hiwater) / (2 * 1024));
		if (rv <= 0)
			break;
	}
	mutex_exit(&bufcache_lock);
	sysctl_relock();

	return 0;
}

static struct sysctllog *vfsbio_sysctllog;

static void
sysctl_kern_buf_setup(void)
{

	sysctl_createv(&vfsbio_sysctllog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "buf",
		       SYSCTL_DESCR("Kernel buffer cache information"),
		       sysctl_dobuf, 0, NULL, 0,
		       CTL_KERN, KERN_BUF, CTL_EOL);
}

static void
sysctl_vm_buf_setup(void)
{

	sysctl_createv(&vfsbio_sysctllog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "bufcache",
		       SYSCTL_DESCR("Percentage of physical memory to use for "
				    "buffer cache"),
		       sysctl_bufvm_update, 0, &bufcache, 0,
		       CTL_VM, CTL_CREATE, CTL_EOL);
	sysctl_createv(&vfsbio_sysctllog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_LONG, "bufmem",
		       SYSCTL_DESCR("Amount of kernel memory used by buffer "
				    "cache"),
		       NULL, 0, &bufmem, 0,
		       CTL_VM, CTL_CREATE, CTL_EOL);
	sysctl_createv(&vfsbio_sysctllog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_LONG, "bufmem_lowater",
		       SYSCTL_DESCR("Minimum amount of kernel memory to "
				    "reserve for buffer cache"),
		       sysctl_bufvm_update, 0, &bufmem_lowater, 0,
		       CTL_VM, CTL_CREATE, CTL_EOL);
	sysctl_createv(&vfsbio_sysctllog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_LONG, "bufmem_hiwater",
		       SYSCTL_DESCR("Maximum amount of kernel memory to use "
				    "for buffer cache"),
		       sysctl_bufvm_update, 0, &bufmem_hiwater, 0,
		       CTL_VM, CTL_CREATE, CTL_EOL);
}

#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats(void)
{
	int i, j, count;
	buf_t *bp;
	struct bqueue *dp;
	int counts[MAXBSIZE / MIN_PAGE_SIZE + 1];
	static const char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		memset(counts, 0, sizeof(counts));
		TAILQ_FOREACH(bp, &dp->bq_queue, b_freelist) {
			counts[bp->b_bufsize / PAGE_SIZE]++;
			count++;
		}
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE / PAGE_SIZE; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * PAGE_SIZE, counts[j]);
		printf("\n");
	}
}
#endif /* DEBUG */

/* ------------------------------ */

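/*
 * getiobuf/putiobuf: allocate and free buffers for raw I/O.  These
 * buffers never enter the buffer cache proper; they come from a
 * separate pool (bufio_cache, at IPL_BIO) and are released with
 * putiobuf() rather than brelse().
 */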
buf_t *
getiobuf(struct vnode *vp, bool waitok)
{
	buf_t *bp;

	bp = pool_cache_get(bufio_cache, (waitok ? PR_WAITOK : PR_NOWAIT));
	if (bp == NULL)
		return bp;

	buf_init(bp);

	if ((bp->b_vp = vp) != NULL) {
		bp->b_objlock = vp->v_interlock;
	} else {
		KASSERT(bp->b_objlock == &buffer_lock);
	}

	return bp;
}

void
putiobuf(buf_t *bp)
{

	buf_destroy(bp);
	pool_cache_put(bufio_cache, bp);
}

/*
 * nestiobuf_iodone: b_iodone callback for nested buffers.
 */

void
nestiobuf_iodone(buf_t *bp)
{
	buf_t *mbp = bp->b_private;
	int error;
	int donebytes;

	KASSERT(bp->b_bcount <= bp->b_bufsize);
	KASSERT(mbp != bp);

	error = bp->b_error;
	if (bp->b_error == 0 &&
	    (bp->b_bcount < bp->b_bufsize || bp->b_resid > 0)) {
		/*
		 * Not all of the data was transferred; raise an error.
		 * We have no way to propagate these conditions to mbp.
		 */
		error = EIO;
	}

	donebytes = bp->b_bufsize;

	putiobuf(bp);
	nestiobuf_done(mbp, donebytes, error);
}

/*
 * nestiobuf_setup: setup a "nested" buffer.
 *
 * => 'mbp' is a "master" buffer which is being divided into sub pieces.
 * => 'bp' should be a buffer allocated by getiobuf.
 * => 'offset' is a byte offset in the master buffer.
 * => 'size' is a size in bytes of this nested buffer.
 */

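/*
 * Illustrative usage sketch only, assuming the master's b_resid has
 * been initialized to its full byte count before splitting:
 *
 *	bp1 = getiobuf(vp, true);
 *	nestiobuf_setup(mbp, bp1, 0, size / 2);
 *	bp2 = getiobuf(vp, true);
 *	nestiobuf_setup(mbp, bp2, size / 2, size - size / 2);
 *	...submit bp1 and bp2 (e.g. via VOP_STRATEGY); as each piece
 *	finishes, nestiobuf_iodone() completes mbp via nestiobuf_done()...
 */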
void
nestiobuf_setup(buf_t *mbp, buf_t *bp, int offset, size_t size)
{
	const int b_pass = mbp->b_flags & (B_READ|B_PHYS|B_RAW|B_MEDIA_FLAGS);
	struct vnode *vp = mbp->b_vp;

	KASSERT(mbp->b_bcount >= offset + size);
	bp->b_vp = vp;
	bp->b_dev = mbp->b_dev;
	bp->b_objlock = mbp->b_objlock;
	bp->b_cflags = BC_BUSY;
	bp->b_flags = B_ASYNC | b_pass;
	bp->b_iodone = nestiobuf_iodone;
	bp->b_data = (char *)mbp->b_data + offset;
	bp->b_resid = bp->b_bcount = size;
	bp->b_bufsize = bp->b_bcount;
	bp->b_private = mbp;
	BIO_COPYPRIO(bp, mbp);
	if (BUF_ISWRITE(bp) && vp != NULL) {
		mutex_enter(vp->v_interlock);
		vp->v_numoutput++;
		mutex_exit(vp->v_interlock);
	}
}

/*
 * nestiobuf_done: propagate completion to the master buffer.
 *
 * => 'donebytes' specifies how many bytes of the 'mbp' are completed.
 * => 'error' is an errno(2) value that 'donebytes' has been completed with.
 */

void
nestiobuf_done(buf_t *mbp, int donebytes, int error)
{

	if (donebytes == 0) {
		return;
	}
	mutex_enter(mbp->b_objlock);
	KASSERT(mbp->b_resid >= donebytes);
	mbp->b_resid -= donebytes;
	if (error)
		mbp->b_error = error;
	if (mbp->b_resid == 0) {
		if (mbp->b_error)
			mbp->b_resid = mbp->b_bcount;
		mutex_exit(mbp->b_objlock);
		biodone(mbp);
	} else
		mutex_exit(mbp->b_objlock);
}

void
buf_init(buf_t *bp)
{

	cv_init(&bp->b_busy, "biolock");
	cv_init(&bp->b_done, "biowait");
	bp->b_dev = NODEV;
	bp->b_error = 0;
	bp->b_flags = 0;
	bp->b_cflags = 0;
	bp->b_oflags = 0;
	bp->b_objlock = &buffer_lock;
	bp->b_iodone = NULL;
	bp->b_vnbufs.le_next = NOLIST;
	BIO_SETPRIO(bp, BPRIO_DEFAULT);
}

void
buf_destroy(buf_t *bp)
{

	cv_destroy(&bp->b_done);
	cv_destroy(&bp->b_busy);
}

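/*
 * bbusy: acquire the long-term (BC_BUSY) lock on a buffer.  Returns 0
 * on success, EDEADLK if called by the pagedaemon on a busy buffer,
 * and EPASSTHROUGH if we had to sleep: in that case the buffer may
 * have changed identity or been freed, so the caller must look it up
 * again and retry (see the loop in getblk()).  Any error from
 * cv_timedwait(_sig)(), such as a timeout or caught signal, is passed
 * back unchanged.
 */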
int
bbusy(buf_t *bp, bool intr, int timo, kmutex_t *interlock)
{
	int error;

	KASSERT(mutex_owned(&bufcache_lock));

	SDT_PROBE4(io, kernel, , bbusy__start,  bp, intr, timo, interlock);

	if ((bp->b_cflags & BC_BUSY) != 0) {
		if (curlwp == uvm.pagedaemon_lwp) {
			error = EDEADLK;
			goto out;
		}
		bp->b_cflags |= BC_WANTED;
		if (interlock != NULL)
			mutex_exit(interlock);
		if (intr) {
			error = cv_timedwait_sig(&bp->b_busy, &bufcache_lock,
			    timo);
		} else {
			error = cv_timedwait(&bp->b_busy, &bufcache_lock,
			    timo);
		}
		/*
		 * At this point the buffer may be gone: don't touch it
		 * again.  The caller needs to find it again and retry.
		 */
		if (interlock != NULL)
			mutex_enter(interlock);
		if (error == 0)
			error = EPASSTHROUGH;
	} else {
		bp->b_cflags |= BC_BUSY;
		error = 0;
	}

out:	SDT_PROBE5(io, kernel, , bbusy__done,
	    bp, intr, timo, interlock, error);
	return error;
}

/*
 * Nothing outside this file should really need to know about nbuf,
 * but a few things still want to read it, so give them a way to do that.
 */
u_int
buf_nbuf(void)
{

	return nbuf;
}
