/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_BUF_H_
#define	_SYS_BUF_H_

#include <sys/bufobj.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <vm/uma.h>

struct bio;
struct buf;
struct bufobj;
struct mount;
struct vnode;
struct uio;

/*
 * To avoid including <ufs/ffs/softdep.h>
 */
LIST_HEAD(workhead, worklist);
/*
 * These are currently used only by the soft dependency code, hence
 * are stored once in a global variable. If other subsystems wanted
 * to use these hooks, a pointer to a set of bio_ops could be added
 * to each buffer.
 */
extern struct bio_ops {
	void	(*io_start)(struct buf *);
	void	(*io_complete)(struct buf *);
	void	(*io_deallocate)(struct buf *);
	int	(*io_countdeps)(struct buf *, int);
} bioops;
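
/*
 * Illustrative sketch (not part of this interface; the handler names
 * are hypothetical): a subsystem such as the soft dependency code
 * installs its hooks by filling in the global bioops at initialization
 * time:
 *
 *	static void my_io_start(struct buf *bp);
 *	static void my_io_complete(struct buf *bp);
 *
 *	void
 *	my_subsystem_init(void)
 *	{
 *		bioops.io_start = my_io_start;
 *		bioops.io_complete = my_io_complete;
 *	}
 */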

struct vm_object;
struct vm_page;

typedef uint32_t b_xflags_t;

/*
 * The buffer header describes an I/O operation in the kernel.
 *
 * NOTES:
 *	b_bufsize, b_bcount.  b_bufsize is the allocation size of the
 *	buffer, either DEV_BSIZE or PAGE_SIZE aligned.  b_bcount is the
 *	originally requested buffer size and can serve as a bounds check
 *	against EOF.  For most, but not all uses, b_bcount == b_bufsize.
 *
 *	b_dirtyoff, b_dirtyend.  Buffers support piecemeal, unaligned
 *	ranges of dirty data that need to be written to backing store.
 *	The range is typically clipped at b_bcount (not b_bufsize).
 *
 *	b_resid.  Number of bytes remaining in I/O.  After an I/O operation
 *	completes, b_resid is usually 0 indicating 100% success.
 *
 *	All fields are protected by the buffer lock except those marked:
 *		V - Protected by owning bufobj lock
 *		Q - Protected by the buf queue lock
 *		D - Protected by a dependency-implementation-specific lock
 */
struct buf {
	struct bufobj	*b_bufobj;
	long		b_bcount;
	void		*b_caller1;
	caddr_t		b_data;
	int		b_error;
	uint16_t	b_iocmd;	/* BIO_* bio_cmd from bio.h */
	uint16_t	b_ioflags;	/* BIO_* bio_flags from bio.h */
	off_t		b_iooffset;
	long		b_resid;
	void	(*b_iodone)(struct buf *);
	void	(*b_ckhashcalc)(struct buf *);
	uint64_t	b_ckhash;	/* B_CKHASH requested check-hash */
	daddr_t b_blkno;		/* Underlying physical block number. */
	off_t	b_offset;		/* Offset into file. */
	TAILQ_ENTRY(buf) b_bobufs;	/* (V) Buffer's associated vnode. */
	uint32_t	b_vflags;	/* (V) BV_* flags */
	uint8_t		b_qindex;	/* (Q) buffer queue index */
	uint8_t		b_domain;	/* (Q) buf domain this resides in */
	uint16_t	b_subqueue;	/* (Q) per-cpu q if any */
	uint32_t	b_flags;	/* B_* flags. */
	b_xflags_t b_xflags;		/* extra flags */
	struct lock b_lock;		/* Buffer lock */
	long	b_bufsize;		/* Allocated buffer size. */
	int	b_runningbufspace;	/* when I/O is running, pipelining */
	int	b_kvasize;		/* size of kva for buffer */
	int	b_dirtyoff;		/* Offset in buffer of dirty region. */
	int	b_dirtyend;		/* Offset of end of dirty region. */
	caddr_t	b_kvabase;		/* base kva for buffer */
	daddr_t b_lblkno;		/* Logical block number. */
	struct	vnode *b_vp;		/* Device vnode. */
	struct	ucred *b_rcred;		/* Read credentials reference. */
	struct	ucred *b_wcred;		/* Write credentials reference. */
	union {
		TAILQ_ENTRY(buf) b_freelist; /* (Q) */
		struct {
			void	(*b_pgiodone)(void *, struct vm_page **,
				    int, int);
			int	b_pgbefore;
			int	b_pgafter;
		};
	};
	union	cluster_info {
		TAILQ_HEAD(cluster_list_head, buf) cluster_head;
		TAILQ_ENTRY(buf) cluster_entry;
	} b_cluster;
	int		b_npages;
	struct	workhead b_dep;		/* (D) List of filesystem dependencies. */
	void	*b_fsprivate1;
	void	*b_fsprivate2;
	void	*b_fsprivate3;

#if defined(FULL_BUF_TRACKING)
#define BUF_TRACKING_SIZE	32
#define BUF_TRACKING_ENTRY(x)	((x) & (BUF_TRACKING_SIZE - 1))
	const char	*b_io_tracking[BUF_TRACKING_SIZE];
	uint32_t	b_io_tcnt;
#elif defined(BUF_TRACKING)
	const char	*b_io_tracking;
#endif
	struct	vm_page *b_pages[];
};
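
/*
 * Illustrative sketch (hypothetical helper, not part of this interface):
 * a filesystem recording a partial write grows the buffer's dirty region
 * so that only the touched byte range must reach backing store:
 *
 *	static void
 *	record_dirty_range(struct buf *bp, int off, int len)
 *	{
 *		if (bp->b_dirtyend == 0) {
 *			bp->b_dirtyoff = off;
 *			bp->b_dirtyend = off + len;
 *		} else {
 *			bp->b_dirtyoff = imin(bp->b_dirtyoff, off);
 *			bp->b_dirtyend = imax(bp->b_dirtyend, off + len);
 *		}
 *		bdirty(bp);
 *	}
 */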

#define b_object	b_bufobj->bo_object

/*
 * These flags are kept in b_flags.
 *
 * Notes:
 *
 *	B_ASYNC		VOP calls on bp's are usually async whether or not
 *			B_ASYNC is set, but some subsystems, such as NFS, like
 *			to know what is best for the caller so they can
 *			optimize the I/O.
 *
 *	B_PAGING	Indicates that bp is being used by the paging system
 *			and that the bp is not linked into the b_vp's
 *			clean/dirty linked lists or ref counts.  Buffer vp
 *			reassignments are illegal in this case.
 *
 *	B_CACHE		This may only be set if the buffer is entirely valid.
 *			The situation where B_DELWRI is set and B_CACHE is
 *			clear MUST be committed to disk by getblk() so
 *			B_DELWRI can also be cleared.  See the comments for
 *			getblk() in kern/vfs_bio.c.  If B_CACHE is clear,
 *			the caller is expected to clear BIO_ERROR and B_INVAL,
 *			set BIO_READ, and initiate an I/O.
 *
 *			The 'entire buffer' is defined to be the range from
 *			0 through b_bcount.
 *
 *	B_MALLOC	Request that the buffer be allocated from the malloc
 *			pool, DEV_BSIZE aligned instead of PAGE_SIZE aligned.
 *
 *	B_CLUSTEROK	This flag is typically set for B_DELWRI buffers
 *			by filesystems that allow clustering when the buffer
 *			is fully dirty and indicates that it may be clustered
 *			with other adjacent dirty buffers.  Note that clustering
 *			may not be used with the stage 1 data write under NFS
 *			but may be used for the commit RPC portion.
 *
 *	B_INVALONERR	This flag is set on dirty buffers.  It specifies that a
 *			write error should forcibly invalidate the buffer
 *			contents.  This flag should be used with caution, as it
 *			discards data.  It is incompatible with B_ASYNC.
 *
 *	B_VMIO		Indicates that the buffer is tied into a VM object.
 *			The buffer's data is always PAGE_SIZE aligned even
 *			if b_bufsize and b_bcount are not (b_bufsize is
 *			always at least DEV_BSIZE aligned, though).
 *
 *	B_DIRECT	Hint that we should attempt to completely free
 *			the pages underlying the buffer.  B_DIRECT is
 *			sticky until the buffer is released and typically
 *			only has an effect when B_RELBUF is also set.
 *
 */

#define	B_AGE		0x00000001	/* Move to age queue when I/O done. */
#define	B_NEEDCOMMIT	0x00000002	/* Append-write in progress. */
#define	B_ASYNC		0x00000004	/* Start I/O, do not wait. */
#define	B_DIRECT	0x00000008	/* direct I/O flag (pls free vmio) */
#define	B_DEFERRED	0x00000010	/* Skipped over for cleaning */
#define	B_CACHE		0x00000020	/* Bread found us in the cache. */
#define	B_VALIDSUSPWRT	0x00000040	/* Valid write during suspension. */
#define	B_DELWRI	0x00000080	/* Delay I/O until buffer reused. */
#define	B_CKHASH	0x00000100	/* checksum hash calculated on read */
#define	B_DONE		0x00000200	/* I/O completed. */
#define	B_EINTR		0x00000400	/* I/O was interrupted */
#define	B_NOREUSE	0x00000800	/* Contents not reused once released. */
#define	B_REUSE		0x00001000	/* Contents reused, second chance. */
#define	B_INVAL		0x00002000	/* Does not contain valid info. */
#define	B_BARRIER	0x00004000	/* Write this and all preceding first. */
#define	B_NOCACHE	0x00008000	/* Do not cache block after use. */
#define	B_MALLOC	0x00010000	/* malloced b_data */
#define	B_CLUSTEROK	0x00020000	/* Consider clustering. */
#define	B_INVALONERR	0x00040000	/* Invalidate on write error. */
#define	B_IOSTARTED	0x00080000	/* buf_start() called */
#define	B_00100000	0x00100000	/* Available flag. */
#define	B_MAXPHYS	0x00200000	/* nitems(b_pages[]) = atop(MAXPHYS). */
#define	B_RELBUF	0x00400000	/* Release VMIO buffer. */
#define	B_FS_FLAG1	0x00800000	/* Available flag for FS use. */
#define	B_NOCOPY	0x01000000	/* Don't copy-on-write this buf. */
#define	B_INFREECNT	0x02000000	/* buf is counted in numfreebufs */
#define	B_PAGING	0x04000000	/* volatile paging I/O -- bypass VMIO */
#define B_MANAGED	0x08000000	/* Managed by FS. */
#define B_RAM		0x10000000	/* Read ahead mark (flag) */
#define B_VMIO		0x20000000	/* VMIO flag */
#define B_CLUSTER	0x40000000	/* pagein op, so swap() can count it */
#define B_REMFREE	0x80000000	/* Delayed bremfree */

#define PRINT_BUF_FLAGS "\20\40remfree\37cluster\36vmio\35ram\34managed" \
	"\33paging\32infreecnt\31nocopy\30b23\27relbuf\26maxphys\25b20" \
	"\24iostarted\23invalonerr\22clusterok\21malloc\20nocache\17barrier" \
	"\16inval\15reuse\14noreuse\13eintr\12done\11ckhash\10delwri" \
	"\7validsuspwrt\6cache\5deferred\4direct\3async\2needcommit\1age"

/*
 * These flags are kept in b_xflags.
 *
 * BX_FSPRIV reserves a set of eight flags that may be used by individual
 * filesystems for their own purpose. Their specific definitions are
 * found in the header files for each filesystem that uses them.
 */
#define	BX_VNDIRTY	0x00000001	/* On vnode dirty list */
#define	BX_VNCLEAN	0x00000002	/* On vnode clean list */
#define	BX_CVTENXIO	0x00000004	/* Convert errors to ENXIO */
#define	BX_BKGRDWRITE	0x00000010	/* Do writes in background */
#define	BX_BKGRDMARKER	0x00000020	/* Mark buffer for splay tree */
#define	BX_ALTDATA	0x00000040	/* Holds extended data */
#define	BX_FSPRIV	0x00FF0000	/* Filesystem-specific flags mask */

#define	PRINT_BUF_XFLAGS "\20\7altdata\6bkgrdmarker\5bkgrdwrite\3cvtenxio" \
	"\2clean\1dirty"

#define	NOOFFSET	(-1LL)		/* No buffer offset calculated yet */

/*
 * These flags are kept in b_vflags.
 */
#define	BV_SCANNED	0x00000001	/* VOP_FSYNC funcs mark written bufs */
#define	BV_BKGRDINPROG	0x00000002	/* Background write in progress */
#define	BV_BKGRDWAIT	0x00000004	/* Background write waiting */
#define	BV_BKGRDERR	0x00000008	/* Error from background write */

#define	PRINT_BUF_VFLAGS "\20\4bkgrderr\3bkgrdwait\2bkgrdinprog\1scanned"

#ifdef _KERNEL

#ifndef NSWBUF_MIN
#define	NSWBUF_MIN	16
#endif

/*
 * Buffer locking
 */
#include <sys/proc.h>			/* XXX for curthread */
#include <sys/mutex.h>

/*
 * Initialize a lock.
 */
#define BUF_LOCKINIT(bp, wmesg)						\
	lockinit(&(bp)->b_lock, PRIBIO + 4, wmesg, 0, LK_NEW)
/*
 * Get a lock, sleeping non-interruptibly until it becomes available.
 */
#define	BUF_LOCK(bp, locktype, interlock)				\
	_lockmgr_args_rw(&(bp)->b_lock, (locktype), (interlock),	\
	    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,		\
	    LOCK_FILE, LOCK_LINE)

/*
 * Get a lock, sleeping with the specified interruptibility and timeout.
 */
#define	BUF_TIMELOCK(bp, locktype, interlock, wmesg, catch, timo)	\
	_lockmgr_args_rw(&(bp)->b_lock, (locktype) | LK_TIMELOCK,	\
	    (interlock), (wmesg), (PRIBIO + 4) | (catch), (timo),	\
	    LOCK_FILE, LOCK_LINE)

/*
 * Release a lock. Only the acquiring process may free the lock unless
 * it has been handed off to biodone.
 */
#define	BUF_UNLOCK(bp) do {						\
	KASSERT(((bp)->b_flags & B_REMFREE) == 0,			\
	    ("BUF_UNLOCK %p while B_REMFREE is still set.", (bp)));	\
									\
	BUF_UNLOCK_RAW((bp));						\
} while (0)
#define	BUF_UNLOCK_RAW(bp) do {						\
	(void)_lockmgr_args(&(bp)->b_lock, LK_RELEASE, NULL,		\
	    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,		\
	    LOCK_FILE, LOCK_LINE);					\
} while (0)
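
/*
 * Typical usage (sketch): acquire the buffer lock exclusively before
 * inspecting or modifying buffer state, then drop it.  BUF_LOCK()
 * returns 0 on success.
 *
 *	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0) {
 *		...
 *		BUF_UNLOCK(bp);
 *	}
 */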

/*
 * Check if a buffer lock is recursed.
 */
#define	BUF_LOCKRECURSED(bp)						\
	lockmgr_recursed(&(bp)->b_lock)

/*
 * Check if a buffer lock is currently held.
 */
#define	BUF_ISLOCKED(bp)						\
	lockstatus(&(bp)->b_lock)

/*
 * Check if a buffer lock is currently held by LK_KERNPROC.
 */
#define	BUF_DISOWNED(bp)						\
	lockmgr_disowned(&(bp)->b_lock)

/*
 * Free a buffer lock.
 */
#define BUF_LOCKFREE(bp) 						\
	lockdestroy(&(bp)->b_lock)

/*
 * Print information about a buffer lock.
 */
#define BUF_LOCKPRINTINFO(bp) 						\
	lockmgr_printinfo(&(bp)->b_lock)

/*
 * Buffer lock assertions.
 */
#if defined(INVARIANTS) && defined(INVARIANT_SUPPORT)
#define	BUF_ASSERT_LOCKED(bp)						\
	_lockmgr_assert(&(bp)->b_lock, KA_LOCKED, LOCK_FILE, LOCK_LINE)
#define	BUF_ASSERT_SLOCKED(bp)						\
	_lockmgr_assert(&(bp)->b_lock, KA_SLOCKED, LOCK_FILE, LOCK_LINE)
#define	BUF_ASSERT_XLOCKED(bp)						\
	_lockmgr_assert(&(bp)->b_lock, KA_XLOCKED, LOCK_FILE, LOCK_LINE)
#define	BUF_ASSERT_UNLOCKED(bp)						\
	_lockmgr_assert(&(bp)->b_lock, KA_UNLOCKED, LOCK_FILE, LOCK_LINE)
#else
#define	BUF_ASSERT_LOCKED(bp)
#define	BUF_ASSERT_SLOCKED(bp)
#define	BUF_ASSERT_XLOCKED(bp)
#define	BUF_ASSERT_UNLOCKED(bp)
#endif

#ifdef _SYS_PROC_H_	/* Avoid #include <sys/proc.h> pollution */
/*
 * When initiating asynchronous I/O, change ownership of the lock to the
 * kernel. Once done, the lock may legally be released by biodone(). The
 * original owning process can no longer acquire it recursively, but must
 * wait until the I/O is completed and the lock has been freed by biodone().
 */
#define	BUF_KERNPROC(bp)						\
	_lockmgr_disown(&(bp)->b_lock, LOCK_FILE, LOCK_LINE)
#endif
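
/*
 * Sketch of the asynchronous hand-off: the thread that owns the buffer
 * lock disowns it to the kernel before starting the I/O, and biodone()
 * releases it on completion.
 *
 *	bp->b_flags |= B_ASYNC;
 *	BUF_KERNPROC(bp);
 *	bstrategy(bp);
 */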

#endif /* _KERNEL */

struct buf_queue_head {
	TAILQ_HEAD(buf_queue, buf) queue;
	daddr_t last_pblkno;
	struct	buf *insert_point;
	struct	buf *switch_point;
};

/*
 * This structure describes a clustered I/O.
 */
struct cluster_save {
	long	bs_bcount;		/* Saved b_bcount. */
	long	bs_bufsize;		/* Saved b_bufsize. */
	int	bs_nchildren;		/* Number of associated buffers. */
	struct buf **bs_children;	/* List of associated buffers. */
};

/*
 * Vnode clustering tracker
 */
struct vn_clusterw {
	daddr_t	v_cstart;			/* v start block of cluster */
	daddr_t	v_lasta;			/* v last allocation  */
	daddr_t	v_lastw;			/* v last write  */
	int	v_clen;				/* v length of cur. cluster */
};

#ifdef _KERNEL

static __inline int
bwrite(struct buf *bp)
{

	KASSERT(bp->b_bufobj != NULL, ("bwrite: no bufobj bp=%p", bp));
	KASSERT(bp->b_bufobj->bo_ops != NULL, ("bwrite: no bo_ops bp=%p", bp));
	KASSERT(bp->b_bufobj->bo_ops->bop_write != NULL,
	    ("bwrite: no bop_write bp=%p", bp));
	return (BO_WRITE(bp->b_bufobj, bp));
}

static __inline void
bstrategy(struct buf *bp)
{

	KASSERT(bp->b_bufobj != NULL, ("bstrategy: no bufobj bp=%p", bp));
	KASSERT(bp->b_bufobj->bo_ops != NULL,
	    ("bstrategy: no bo_ops bp=%p", bp));
	KASSERT(bp->b_bufobj->bo_ops->bop_strategy != NULL,
	    ("bstrategy: no bop_strategy bp=%p", bp));
	BO_STRATEGY(bp->b_bufobj, bp);
}

static __inline void
buf_start(struct buf *bp)
{
	KASSERT((bp->b_flags & B_IOSTARTED) == 0,
	    ("recursed buf_start %p", bp));
	bp->b_flags |= B_IOSTARTED;
	if (bioops.io_start)
		(*bioops.io_start)(bp);
}

static __inline void
buf_complete(struct buf *bp)
{
	if ((bp->b_flags & B_IOSTARTED) != 0) {
		bp->b_flags &= ~B_IOSTARTED;
		if (bioops.io_complete)
			(*bioops.io_complete)(bp);
	}
}

static __inline void
buf_deallocate(struct buf *bp)
{
	if (bioops.io_deallocate)
		(*bioops.io_deallocate)(bp);
}

static __inline int
buf_countdeps(struct buf *bp, int i)
{
	if (bioops.io_countdeps)
		return ((*bioops.io_countdeps)(bp, i));
	else
		return (0);
}

static __inline void
buf_track(struct buf *bp __unused, const char *location __unused)
{

#if defined(FULL_BUF_TRACKING)
	bp->b_io_tracking[BUF_TRACKING_ENTRY(bp->b_io_tcnt++)] = location;
#elif defined(BUF_TRACKING)
	bp->b_io_tracking = location;
#endif
}

#endif /* _KERNEL */

/*
 * Zero out the buffer's data area.
 */
#define	clrbuf(bp) {							\
	bzero((bp)->b_data, (u_int)(bp)->b_bcount);			\
	(bp)->b_resid = 0;						\
}

/*
 * Flags for getblk's last parameter.
 */
#define	GB_LOCK_NOWAIT	0x0001		/* Fail if we block on a buf lock. */
#define	GB_NOCREAT	0x0002		/* Don't create a buf if not found. */
#define	GB_NOWAIT_BD	0x0004		/* Do not wait for bufdaemon. */
#define	GB_UNMAPPED	0x0008		/* Do not mmap buffer pages. */
#define	GB_KVAALLOC	0x0010		/* But allocate KVA. */
#define	GB_CKHASH	0x0020		/* If reading, calc checksum hash */
#define	GB_NOSPARSE	0x0040		/* Do not instantiate holes */
#define	GB_CVTENXIO	0x0080		/* Convert errors to ENXIO */
#define	GB_NOWITNESS	0x0100		/* Do not record for WITNESS */

#ifdef _KERNEL
extern int	nbuf;			/* The number of buffer headers */
extern u_long	maxswzone;		/* Max KVA for swap structures */
extern u_long	maxbcache;		/* Max KVA for buffer cache */
extern int	maxbcachebuf;		/* Max buffer cache block size */
extern long	runningbufspace;
extern long	hibufspace;
extern int	dirtybufthresh;
extern int	bdwriteskip;
extern int	dirtybufferflushes;
extern int	altbufferflushes;
extern int	nswbuf;			/* Number of swap I/O buffer headers. */
extern caddr_t __read_mostly unmapped_buf; /* Data address for unmapped
					      buffers. */

static inline int
buf_mapped(struct buf *bp)
{

	return (bp->b_data != unmapped_buf);
}

void	runningbufwakeup(struct buf *);
void	waitrunningbufspace(void);
caddr_t	kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est);
void	bufinit(void);
void	bufshutdown(int);
void	bdata2bio(struct buf *bp, struct bio *bip);
void	bwillwrite(void);
int	buf_dirty_count_severe(void);
void	bremfree(struct buf *);
void	bremfreef(struct buf *);	/* XXX Force bremfree, only for nfs. */
#define bread(vp, blkno, size, cred, bpp) \
	    breadn_flags(vp, blkno, blkno, size, NULL, NULL, 0, cred, 0, \
		NULL, bpp)
#define bread_gb(vp, blkno, size, cred, gbflags, bpp) \
	    breadn_flags(vp, blkno, blkno, size, NULL, NULL, 0, cred, \
		gbflags, NULL, bpp)
#define breadn(vp, blkno, size, rablkno, rabsize, cnt, cred, bpp) \
	    breadn_flags(vp, blkno, blkno, size, rablkno, rabsize, cnt, cred, \
		0, NULL, bpp)
int	breadn_flags(struct vnode *, daddr_t, daddr_t, int, daddr_t *, int *,
	    int, struct ucred *, int, void (*)(struct buf *), struct buf **);
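
/*
 * Typical read path (sketch; "vp" and "blkno" are placeholders): bread()
 * hands back a locked buffer with valid contents, which the caller
 * releases with brelse() or bqrelse():
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, blkno, DEV_BSIZE, NOCRED, &bp);
 *	if (error != 0)
 *		return (error);
 *	...
 *	brelse(bp);
 */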
void	bdwrite(struct buf *);
void	bawrite(struct buf *);
void	babarrierwrite(struct buf *);
int	bbarrierwrite(struct buf *);
void	bdirty(struct buf *);
void	bundirty(struct buf *);
void	bufstrategy(struct bufobj *, struct buf *);
void	brelse(struct buf *);
void	bqrelse(struct buf *);
int	vfs_bio_awrite(struct buf *);
void	vfs_busy_pages_acquire(struct buf *bp);
void	vfs_busy_pages_release(struct buf *bp);
struct buf *incore(struct bufobj *, daddr_t);
bool	inmem(struct vnode *, daddr_t);
struct buf *gbincore(struct bufobj *, daddr_t);
struct buf *gbincore_unlocked(struct bufobj *, daddr_t);
struct buf *getblk(struct vnode *, daddr_t, int, int, int, int);
int	getblkx(struct vnode *vp, daddr_t blkno, daddr_t dblkno, int size,
	    int slpflag, int slptimeo, int flags, struct buf **bpp);
struct buf *geteblk(int, int);
int	bufwait(struct buf *);
int	bufwrite(struct buf *);
void	bufdone(struct buf *);
void	bd_speedup(void);

extern uma_zone_t pbuf_zone;
uma_zone_t pbuf_zsecond_create(const char *name, int max);

struct vn_clusterw;

void	cluster_init_vn(struct vn_clusterw *vnc);
int	cluster_read(struct vnode *, u_quad_t, daddr_t, long,
	    struct ucred *, long, int, int, struct buf **);
int	cluster_wbuild(struct vnode *, long, daddr_t, int, int);
void	cluster_write(struct vnode *, struct vn_clusterw *, struct buf *,
	    u_quad_t, int, int);
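
/*
 * Sketch ("vnc", "filesize", "seqcount" and "gbflags" are placeholders):
 * a filesystem writing sequentially keeps one struct vn_clusterw per
 * vnode and feeds each dirty buffer to cluster_write() so adjacent
 * blocks can be coalesced into larger I/Os:
 *
 *	struct vn_clusterw vnc;
 *
 *	cluster_init_vn(&vnc);
 *	...
 *	cluster_write(vp, &vnc, bp, filesize, seqcount, gbflags);
 */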
void	vfs_bio_brelse(struct buf *bp, int ioflags);
void	vfs_bio_bzero_buf(struct buf *bp, int base, int size);
void	vfs_bio_clrbuf(struct buf *);
void	vfs_bio_set_flags(struct buf *bp, int ioflags);
void	vfs_bio_set_valid(struct buf *, int base, int size);
void	vfs_busy_pages(struct buf *, int clear_modify);
void	vfs_unbusy_pages(struct buf *);
int	vmapbuf(struct buf *, void *, size_t, int);
void	vunmapbuf(struct buf *);
void	brelvp(struct buf *);
int	bgetvp(struct vnode *, struct buf *) __result_use_check;
void	pbgetbo(struct bufobj *bo, struct buf *bp);
void	pbgetvp(struct vnode *, struct buf *);
void	pbrelbo(struct buf *);
void	pbrelvp(struct buf *);
int	allocbuf(struct buf *bp, int size);
void	reassignbuf(struct buf *);
void	bwait(struct buf *, u_char, const char *);
void	bdone(struct buf *);

typedef daddr_t (vbg_get_lblkno_t)(struct vnode *, vm_ooffset_t);
typedef int (vbg_get_blksize_t)(struct vnode *, daddr_t, long *);
int	vfs_bio_getpages(struct vnode *vp, struct vm_page **ma, int count,
	    int *rbehind, int *rahead, vbg_get_lblkno_t get_lblkno,
	    vbg_get_blksize_t get_blksize);
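
/*
 * Sketch (hypothetical "myfs" callbacks and MYFS_BSIZE constant): a
 * filesystem can build its VOP_GETPAGES on vfs_bio_getpages() by
 * supplying the two translation callbacks:
 *
 *	static daddr_t
 *	myfs_gbp_getblkno(struct vnode *vp, vm_ooffset_t off)
 *	{
 *		return (off / MYFS_BSIZE);
 *	}
 *
 *	static int
 *	myfs_gbp_getblksz(struct vnode *vp, daddr_t lbn, long *sz)
 *	{
 *		*sz = MYFS_BSIZE;
 *		return (0);
 *	}
 *
 *	error = vfs_bio_getpages(vp, ma, count, &rbehind, &rahead,
 *	    myfs_gbp_getblkno, myfs_gbp_getblksz);
 */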

#endif /* _KERNEL */

#endif /* !_SYS_BUF_H_ */