vfs_bio.c revision 285872
1/*-
2 * Copyright (c) 2004 Poul-Henning Kamp
3 * Copyright (c) 1994,1997 John S. Dyson
4 * Copyright (c) 2013 The FreeBSD Foundation
5 * All rights reserved.
6 *
7 * Portions of this software were developed by Konstantin Belousov
8 * under sponsorship from the FreeBSD Foundation.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32/*
33 * This file contains a new buffer I/O scheme implementing a coherent
34 * VM object and buffer cache scheme.  Pains have been taken to make
35 * sure that the performance degradation associated with schemes such
36 * as this is not realized.
37 *
38 * Author:  John S. Dyson
39 * Significant help during the development and debugging phases
40 * was provided by David Greenman, also of the FreeBSD core team.
41 *
42 * See buf(9) for more information.
43 */
44
45#include <sys/cdefs.h>
46__FBSDID("$FreeBSD: head/sys/kern/vfs_bio.c 285872 2015-07-25 15:00:14Z kib $");
47
48#include <sys/param.h>
49#include <sys/systm.h>
50#include <sys/bio.h>
51#include <sys/conf.h>
52#include <sys/buf.h>
53#include <sys/devicestat.h>
54#include <sys/eventhandler.h>
55#include <sys/fail.h>
56#include <sys/limits.h>
57#include <sys/lock.h>
58#include <sys/malloc.h>
59#include <sys/mount.h>
60#include <sys/mutex.h>
61#include <sys/kernel.h>
62#include <sys/kthread.h>
63#include <sys/proc.h>
64#include <sys/resourcevar.h>
65#include <sys/rwlock.h>
66#include <sys/sysctl.h>
67#include <sys/vmem.h>
68#include <sys/vmmeter.h>
69#include <sys/vnode.h>
70#include <geom/geom.h>
71#include <vm/vm.h>
72#include <vm/vm_param.h>
73#include <vm/vm_kern.h>
74#include <vm/vm_pageout.h>
75#include <vm/vm_page.h>
76#include <vm/vm_object.h>
77#include <vm/vm_extern.h>
78#include <vm/vm_map.h>
79#include "opt_compat.h"
80#include "opt_swap.h"
81
82static MALLOC_DEFINE(M_BIOBUF, "biobuf", "BIO buffer");
83
84struct	bio_ops bioops;		/* I/O operation notification */
85
86struct	buf_ops buf_ops_bio = {
87	.bop_name	=	"buf_ops_bio",
88	.bop_write	=	bufwrite,
89	.bop_strategy	=	bufstrategy,
90	.bop_sync	=	bufsync,
91	.bop_bdflush	=	bufbdflush,
92};
93
94/*
95 * XXX buf is global because kern_shutdown.c and ffs_checkoverlap have
96 * carnal knowledge of buffers.  This knowledge should be moved to vfs_bio.c.
97 */
98struct buf *buf;		/* buffer header pool */
99caddr_t unmapped_buf;
100
101/* Used below and for softdep flushing threads in ufs/ffs/ffs_softdep.c */
102struct proc *bufdaemonproc;
103
104static int inmem(struct vnode *vp, daddr_t blkno);
105static void vm_hold_free_pages(struct buf *bp, int newbsize);
106static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
107		vm_offset_t to);
108static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m);
109static void vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off,
110		vm_page_t m);
111static void vfs_clean_pages_dirty_buf(struct buf *bp);
112static void vfs_setdirty_locked_object(struct buf *bp);
113static void vfs_vmio_release(struct buf *bp);
114static int vfs_bio_clcheck(struct vnode *vp, int size,
115		daddr_t lblkno, daddr_t blkno);
116static int buf_flush(struct vnode *vp, int);
117static int flushbufqueues(struct vnode *, int, int);
118static void buf_daemon(void);
119static void bremfreel(struct buf *bp);
120static __inline void bd_wakeup(void);
121static int sysctl_runningspace(SYSCTL_HANDLER_ARGS);
122#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
123    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
124static int sysctl_bufspace(SYSCTL_HANDLER_ARGS);
125#endif
126
127int vmiodirenable = TRUE;
128SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
129    "Use the VM system for directory writes");
130long runningbufspace;
131SYSCTL_LONG(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
132    "Amount of presently outstanding async buffer io");
133static long bufspace;
134#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
135    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
136SYSCTL_PROC(_vfs, OID_AUTO, bufspace, CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RD,
137    &bufspace, 0, sysctl_bufspace, "L", "Virtual memory used for buffers");
138#else
139SYSCTL_LONG(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0,
140    "Physical memory used for buffers");
141#endif
142static long bufkvaspace;
143SYSCTL_LONG(_vfs, OID_AUTO, bufkvaspace, CTLFLAG_RD, &bufkvaspace, 0,
144    "Kernel virtual memory used for buffers");
145static long maxbufspace;
146SYSCTL_LONG(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RD, &maxbufspace, 0,
147    "Maximum allowed value of bufspace (including buf_daemon)");
148static long bufmallocspace;
149SYSCTL_LONG(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
150    "Amount of malloced memory for buffers");
151static long maxbufmallocspace;
152SYSCTL_LONG(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace, 0,
153    "Maximum amount of malloced memory for buffers");
154static long lobufspace;
155SYSCTL_LONG(_vfs, OID_AUTO, lobufspace, CTLFLAG_RD, &lobufspace, 0,
156    "Minimum amount of buffers we want to have");
157long hibufspace;
158SYSCTL_LONG(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD, &hibufspace, 0,
159    "Maximum allowed value of bufspace (excluding buf_daemon)");
160static int bufreusecnt;
161SYSCTL_INT(_vfs, OID_AUTO, bufreusecnt, CTLFLAG_RW, &bufreusecnt, 0,
162    "Number of times we have reused a buffer");
163static int buffreekvacnt;
164SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt, 0,
165    "Number of times we have freed the KVA space from some buffer");
166static int bufdefragcnt;
167SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt, 0,
168    "Number of times we have had to repeat buffer allocation to defragment");
169static long lorunningspace;
170SYSCTL_PROC(_vfs, OID_AUTO, lorunningspace, CTLTYPE_LONG | CTLFLAG_MPSAFE |
171    CTLFLAG_RW, &lorunningspace, 0, sysctl_runningspace, "L",
172    "Minimum preferred space used for in-progress I/O");
173static long hirunningspace;
174SYSCTL_PROC(_vfs, OID_AUTO, hirunningspace, CTLTYPE_LONG | CTLFLAG_MPSAFE |
175    CTLFLAG_RW, &hirunningspace, 0, sysctl_runningspace, "L",
176    "Maximum amount of space to use for in-progress I/O");
177int dirtybufferflushes;
178SYSCTL_INT(_vfs, OID_AUTO, dirtybufferflushes, CTLFLAG_RW, &dirtybufferflushes,
179    0, "Number of bdwrite to bawrite conversions to limit dirty buffers");
180int bdwriteskip;
181SYSCTL_INT(_vfs, OID_AUTO, bdwriteskip, CTLFLAG_RW, &bdwriteskip,
182    0, "Number of buffers supplied to bdwrite with snapshot deadlock risk");
183int altbufferflushes;
184SYSCTL_INT(_vfs, OID_AUTO, altbufferflushes, CTLFLAG_RW, &altbufferflushes,
185    0, "Number of fsync flushes to limit dirty buffers");
186static int recursiveflushes;
187SYSCTL_INT(_vfs, OID_AUTO, recursiveflushes, CTLFLAG_RW, &recursiveflushes,
188    0, "Number of flushes skipped due to being recursive");
189static int numdirtybuffers;
190SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD, &numdirtybuffers, 0,
191    "Number of buffers that are dirty (has unwritten changes) at the moment");
192static int lodirtybuffers;
193SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW, &lodirtybuffers, 0,
194    "How many buffers we want to have free before bufdaemon can sleep");
195static int hidirtybuffers;
196SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW, &hidirtybuffers, 0,
197    "When the number of dirty buffers is considered severe");
198int dirtybufthresh;
199SYSCTL_INT(_vfs, OID_AUTO, dirtybufthresh, CTLFLAG_RW, &dirtybufthresh,
200    0, "Number of bdwrite to bawrite conversions to clear dirty buffers");
201static int numfreebuffers;
202SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0,
203    "Number of free buffers");
204static int lofreebuffers;
205SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW, &lofreebuffers, 0,
206   "XXX Unused");
207static int hifreebuffers;
208SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW, &hifreebuffers, 0,
209   "XXX Complicatedly unused");
210static int getnewbufcalls;
211SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RW, &getnewbufcalls, 0,
212   "Number of calls to getnewbuf");
213static int getnewbufrestarts;
214SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RW, &getnewbufrestarts, 0,
215    "Number of times getnewbuf has had to restart a buffer aquisition");
216static int mappingrestarts;
217SYSCTL_INT(_vfs, OID_AUTO, mappingrestarts, CTLFLAG_RW, &mappingrestarts, 0,
218    "Number of times getblk has had to restart a buffer mapping for "
219    "unmapped buffer");
220static int flushbufqtarget = 100;
221SYSCTL_INT(_vfs, OID_AUTO, flushbufqtarget, CTLFLAG_RW, &flushbufqtarget, 0,
222    "Amount of work to do in flushbufqueues when helping bufdaemon");
223static long notbufdflushes;
224SYSCTL_LONG(_vfs, OID_AUTO, notbufdflushes, CTLFLAG_RD, &notbufdflushes, 0,
225    "Number of dirty buffer flushes done by the bufdaemon helpers");
226static long barrierwrites;
227SYSCTL_LONG(_vfs, OID_AUTO, barrierwrites, CTLFLAG_RW, &barrierwrites, 0,
228    "Number of barrier writes");
229SYSCTL_INT(_vfs, OID_AUTO, unmapped_buf_allowed, CTLFLAG_RD,
230    &unmapped_buf_allowed, 0,
231    "Permit the use of the unmapped i/o");
232
233/*
234 * Lock for the non-dirty bufqueues
235 */
236static struct mtx_padalign bqclean;
237
238/*
239 * Lock for the dirty queue.
240 */
241static struct mtx_padalign bqdirty;
242
243/*
244 * This lock synchronizes access to bd_request.
245 */
246static struct mtx_padalign bdlock;
247
248/*
249 * This lock protects the runningbufreq and synchronizes runningbufwakeup and
250 * waitrunningbufspace().
251 */
252static struct mtx_padalign rbreqlock;
253
254/*
255 * Lock that protects needsbuffer and the sleeps/wakeups surrounding it.
256 */
257static struct rwlock_padalign nblock;
258
259/*
260 * Lock that protects bdirtywait.
261 */
262static struct mtx_padalign bdirtylock;
263
264/*
265 * Wakeup point for bufdaemon, as well as indicator of whether it is already
266 * active.  Set to 1 when the bufdaemon is already "on" the queue, 0 when it
267 * is idling.
268 */
269static int bd_request;
270
271/*
272 * Request for the buf daemon to write more buffers than indicated by
273 * lodirtybuffers.  This may be necessary to push out excess dependencies or
274 * defragment the address space where a simple count of the number of dirty
275 * buffers is insufficient to characterize the demand for flushing them.
276 */
277static int bd_speedupreq;
278
279/*
280 * bogus page -- for I/O to/from partially complete buffers
281 * This is a temporary solution to the problem, but it is not
282 * really that bad.  It would be better to split the buffer
283 * for input in the case of buffers partially already in memory,
284 * but the code is intricate enough already.
285 */
286vm_page_t bogus_page;
287
288/*
289 * Synchronization (sleep/wakeup) variable for active buffer space requests.
290 * Set when wait starts, cleared prior to wakeup().
291 * Used in runningbufwakeup() and waitrunningbufspace().
292 */
293static int runningbufreq;
294
295/*
296 * Synchronization (sleep/wakeup) variable for buffer requests.
297 * Can contain the VFS_BIO_NEED flags defined below; setting/clearing is done
298 * with and/or operations.
299 * Used in numdirtywakeup(), bufspacewakeup(), bufcountadd(), bwillwrite(),
300 * getnewbuf(), and getblk().
301 */
302static volatile int needsbuffer;
303
304/*
305 * Synchronization for bwillwrite() waiters.
306 */
307static int bdirtywait;
308
309/*
310 * Definitions for the buffer free lists.
311 */
312#define BUFFER_QUEUES	5	/* number of free buffer queues */
313
314#define QUEUE_NONE	0	/* on no queue */
315#define QUEUE_CLEAN	1	/* non-B_DELWRI buffers */
316#define QUEUE_DIRTY	2	/* B_DELWRI buffers */
317#define QUEUE_EMPTYKVA	3	/* empty buffer headers w/KVA assignment */
318#define QUEUE_EMPTY	4	/* empty buffer headers */
319#define QUEUE_SENTINEL	1024	/* not a queue index, but a sentinel marker */
320
321/* Queues for free buffers with various properties */
322static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES] = { { 0 } };
323#ifdef INVARIANTS
324static int bq_len[BUFFER_QUEUES];
325#endif
326
327/*
328 * Single global constant for BUF_WMESG, to avoid getting multiple references.
329 * buf_wmesg is referenced from macros.
330 */
331const char *buf_wmesg = BUF_WMESG;
332
333#define VFS_BIO_NEED_ANY	0x01	/* any freeable buffer */
334#define VFS_BIO_NEED_FREE	0x04	/* wait for free bufs, hi hysteresis */
335#define VFS_BIO_NEED_BUFSPACE	0x08	/* wait for buf space, lo hysteresis */
336
337static int
338sysctl_runningspace(SYSCTL_HANDLER_ARGS)
339{
340	long value;
341	int error;
342
343	value = *(long *)arg1;
344	error = sysctl_handle_long(oidp, &value, 0, req);
345	if (error != 0 || req->newptr == NULL)
346		return (error);
347	mtx_lock(&rbreqlock);
348	if (arg1 == &hirunningspace) {
349		if (value < lorunningspace)
350			error = EINVAL;
351		else
352			hirunningspace = value;
353	} else {
354		KASSERT(arg1 == &lorunningspace,
355		    ("%s: unknown arg1", __func__));
356		if (value > hirunningspace)
357			error = EINVAL;
358		else
359			lorunningspace = value;
360	}
361	mtx_unlock(&rbreqlock);
362	return (error);
363}
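
/*
 * For example (an illustrative sketch of the resulting sysctl behaviour,
 * not output from a real system): with vfs.lorunningspace at 1MB, setting
 * vfs.hirunningspace to any value of at least 1MB is accepted, while a
 * request that would drop it below vfs.lorunningspace -- or raise
 * vfs.lorunningspace above vfs.hirunningspace -- fails with EINVAL, so
 * the two watermarks can never invert.
 */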
364
365#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
366    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
367static int
368sysctl_bufspace(SYSCTL_HANDLER_ARGS)
369{
370	long lvalue;
371	int ivalue;
372
373	if (sizeof(int) == sizeof(long) || req->oldlen >= sizeof(long))
374		return (sysctl_handle_long(oidp, arg1, arg2, req));
375	lvalue = *(long *)arg1;
376	if (lvalue > INT_MAX)
377		/* On overflow, still write out a long to trigger ENOMEM. */
378		return (sysctl_handle_long(oidp, &lvalue, 0, req));
379	ivalue = lvalue;
380	return (sysctl_handle_int(oidp, &ivalue, 0, req));
381}
382#endif
383
384/*
385 *	bqlock:
386 *
387 *	Return the appropriate queue lock based on the index.
388 */
389static inline struct mtx *
390bqlock(int qindex)
391{
392
393	if (qindex == QUEUE_DIRTY)
394		return (struct mtx *)(&bqdirty);
395	return (struct mtx *)(&bqclean);
396}
397
398/*
399 *	bdirtywakeup:
400 *
401 *	Wakeup any bwillwrite() waiters.
402 */
403static void
404bdirtywakeup(void)
405{
406	mtx_lock(&bdirtylock);
407	if (bdirtywait) {
408		bdirtywait = 0;
409		wakeup(&bdirtywait);
410	}
411	mtx_unlock(&bdirtylock);
412}
413
414/*
415 *	bdirtysub:
416 *
417 *	Decrement the numdirtybuffers count by one and wakeup any
418 *	threads blocked in bwillwrite().
419 */
420static void
421bdirtysub(void)
422{
423
424	if (atomic_fetchadd_int(&numdirtybuffers, -1) ==
425	    (lodirtybuffers + hidirtybuffers) / 2)
426		bdirtywakeup();
427}
428
429/*
430 *	bdirtyadd:
431 *
432 *	Increment the numdirtybuffers count by one and wakeup the buf
433 *	daemon if needed.
434 */
435static void
436bdirtyadd(void)
437{
438
439	/*
440	 * Only do the wakeup once as we cross the boundary.  The
441	 * buf daemon will keep running until the condition clears.
442	 */
443	if (atomic_fetchadd_int(&numdirtybuffers, 1) ==
444	    (lodirtybuffers + hidirtybuffers) / 2)
445		bd_wakeup();
446}
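
/*
 * Example of the hysteresis (illustrative numbers only): with the
 * defaults computed in bufinit() below -- hidirtybuffers = nbuf / 4 + 20
 * and lodirtybuffers = hidirtybuffers / 2 -- an nbuf of 1000 gives
 * hidirtybuffers = 270 and lodirtybuffers = 135, so bdirtyadd() wakes
 * the buf daemon exactly when numdirtybuffers crosses (135 + 270) / 2 =
 * 202 on the way up, and bdirtysub() wakes bwillwrite() waiters when
 * the count falls back through that same midpoint.
 */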
447
448/*
449 *	bufspacewakeup:
450 *
451 *	Called when buffer space is potentially available for recovery.
452 *	getnewbuf() will block on this flag when it is unable to free
453 *	sufficient buffer space.  Buffer space becomes recoverable when
454 *	bp's get placed back in the queues.
455 */
456static __inline void
457bufspacewakeup(void)
458{
459	int need_wakeup, on;
460
461	/*
462	 * If someone is waiting for bufspace, wake them up.  Even
463	 * though we may not have freed the kva space yet, the waiting
464	 * process will be able to now.
465	 */
466	rw_rlock(&nblock);
467	for (;;) {
468		need_wakeup = 0;
469		on = needsbuffer;
470		if ((on & VFS_BIO_NEED_BUFSPACE) == 0)
471			break;
472		need_wakeup = 1;
473		if (atomic_cmpset_rel_int(&needsbuffer, on,
474		    on & ~VFS_BIO_NEED_BUFSPACE))
475			break;
476	}
477	if (need_wakeup)
478		wakeup(__DEVOLATILE(void *, &needsbuffer));
479	rw_runlock(&nblock);
480}
481
482/*
483 *	bufspaceadjust:
484 *
485 *	Adjust the reported bufspace for a KVA managed buffer, possibly
486 * 	waking any waiters.
487 */
488static void
489bufspaceadjust(struct buf *bp, int bufsize)
490{
491	int diff;
492
493	KASSERT((bp->b_flags & B_MALLOC) == 0,
494	    ("bufspaceadjust: malloc buf %p", bp));
495	diff = bufsize - bp->b_bufsize;
496	if (diff < 0) {
497		atomic_subtract_long(&bufspace, -diff);
498		bufspacewakeup();
499	} else
500		atomic_add_long(&bufspace, diff);
501	bp->b_bufsize = bufsize;
502}
503
504/*
505 *	bufmallocadjust:
506 *
507 *	Adjust the reported bufspace for a malloc managed buffer, possibly
508 *	waking any waiters.
509 */
510static void
511bufmallocadjust(struct buf *bp, int bufsize)
512{
513	int diff;
514
515	KASSERT((bp->b_flags & B_MALLOC) != 0,
516	    ("bufmallocadjust: non-malloc buf %p", bp));
517	diff = bufsize - bp->b_bufsize;
518	if (diff < 0) {
519		atomic_subtract_long(&bufmallocspace, -diff);
520		bufspacewakeup();
521	} else
522		atomic_add_long(&bufmallocspace, diff);
523	bp->b_bufsize = bufsize;
524}
525
526/*
527 *	runningwakeup:
528 *
529 *	Wake up processes that are waiting on asynchronous writes to fall
530 *	below lorunningspace.
531 */
532static void
533runningwakeup(void)
534{
535
536	mtx_lock(&rbreqlock);
537	if (runningbufreq) {
538		runningbufreq = 0;
539		wakeup(&runningbufreq);
540	}
541	mtx_unlock(&rbreqlock);
542}
543
544/*
545 *	runningbufwakeup:
546 *
547 *	Decrement the outstanding write count accordingly.
548 */
549void
550runningbufwakeup(struct buf *bp)
551{
552	long space, bspace;
553
554	bspace = bp->b_runningbufspace;
555	if (bspace == 0)
556		return;
557	space = atomic_fetchadd_long(&runningbufspace, -bspace);
558	KASSERT(space >= bspace, ("runningbufspace underflow %ld %ld",
559	    space, bspace));
560	bp->b_runningbufspace = 0;
561	/*
562	 * Only acquire the lock and wakeup on the transition from exceeding
563	 * the threshold to falling below it.
564	 */
565	if (space < lorunningspace)
566		return;
567	if (space - bspace > lorunningspace)
568		return;
569	runningwakeup();
570}
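
/*
 * Example of the transition test above (illustrative numbers only): with
 * lorunningspace at 1MB, a completing write that takes runningbufspace
 * from 1.2MB down to 0.9MB triggers the wakeup, while one that takes it
 * from 0.9MB down to 0.5MB does not, because the threshold was already
 * crossed by an earlier completion.
 */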
571
572/*
573 *	bufcountadd:
574 *
575 *	Called when a buffer has been added to one of the free queues to
576 *	account for the buffer and to wakeup anyone waiting for free buffers.
577 *	This typically occurs when large amounts of metadata are being handled
578 *	by the buffer cache ( else buffer space runs out first, usually ).
579 */
580static __inline void
581bufcountadd(struct buf *bp)
582{
583	int mask, need_wakeup, old, on;
584
585	KASSERT((bp->b_flags & B_INFREECNT) == 0,
586	    ("buf %p already counted as free", bp));
587	bp->b_flags |= B_INFREECNT;
588	old = atomic_fetchadd_int(&numfreebuffers, 1);
589	KASSERT(old >= 0 && old < nbuf,
590	    ("numfreebuffers climbed to %d", old + 1));
591	mask = VFS_BIO_NEED_ANY;
592	if (numfreebuffers >= hifreebuffers)
593		mask |= VFS_BIO_NEED_FREE;
594	rw_rlock(&nblock);
595	for (;;) {
596		need_wakeup = 0;
597		on = needsbuffer;
598		if (on == 0)
599			break;
600		need_wakeup = 1;
601		if (atomic_cmpset_rel_int(&needsbuffer, on, on & ~mask))
602			break;
603	}
604	if (need_wakeup)
605		wakeup(__DEVOLATILE(void *, &needsbuffer));
606	rw_runlock(&nblock);
607}
608
609/*
610 *	bufcountsub:
611 *
612 *	Decrement the numfreebuffers count as needed.
613 */
614static void
615bufcountsub(struct buf *bp)
616{
617	int old;
618
619	/*
620	 * Fixup numfreebuffers count.  If the buffer is invalid or not
621	 * delayed-write, the buffer was free and we must decrement
622	 * numfreebuffers.
623	 */
624	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
625		KASSERT((bp->b_flags & B_INFREECNT) != 0,
626		    ("buf %p not counted in numfreebuffers", bp));
627		bp->b_flags &= ~B_INFREECNT;
628		old = atomic_fetchadd_int(&numfreebuffers, -1);
629		KASSERT(old > 0, ("numfreebuffers dropped to %d", old - 1));
630	}
631}
632
633/*
634 *	waitrunningbufspace()
635 *
636 *	runningbufspace is a measure of the amount of I/O currently
637 *	running.  This routine is used in async-write situations to
638 *	prevent creating huge backups of pending writes to a device.
639 *	Only asynchronous writes are governed by this function.
640 *
641 *	This does NOT turn an async write into a sync write.  It waits
642 *	for earlier writes to complete and generally returns before the
643 *	caller's write has reached the device.
644 */
645void
646waitrunningbufspace(void)
647{
648
649	mtx_lock(&rbreqlock);
650	while (runningbufspace > hirunningspace) {
651		runningbufreq = 1;
652		msleep(&runningbufreq, &rbreqlock, PVM, "wdrain", 0);
653	}
654	mtx_unlock(&rbreqlock);
655}
656
657
658/*
659 *	vfs_buf_test_cache:
660 *
661 *	Called when a buffer is extended.  This function clears the B_CACHE
662 *	bit if the newly extended portion of the buffer does not contain
663 *	valid data.
664 */
665static __inline
666void
667vfs_buf_test_cache(struct buf *bp,
668		  vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
669		  vm_page_t m)
670{
671
672	VM_OBJECT_ASSERT_LOCKED(m->object);
673	if (bp->b_flags & B_CACHE) {
674		int base = (foff + off) & PAGE_MASK;
675		if (vm_page_is_valid(m, base, size) == 0)
676			bp->b_flags &= ~B_CACHE;
677	}
678}
679
680/* Wake up the buffer daemon if necessary */
681static __inline void
682bd_wakeup(void)
683{
684
685	mtx_lock(&bdlock);
686	if (bd_request == 0) {
687		bd_request = 1;
688		wakeup(&bd_request);
689	}
690	mtx_unlock(&bdlock);
691}
692
693/*
694 * bd_speedup - speedup the buffer cache flushing code
695 */
696void
697bd_speedup(void)
698{
699	int needwake;
700
701	mtx_lock(&bdlock);
702	needwake = 0;
703	if (bd_speedupreq == 0 || bd_request == 0)
704		needwake = 1;
705	bd_speedupreq = 1;
706	bd_request = 1;
707	if (needwake)
708		wakeup(&bd_request);
709	mtx_unlock(&bdlock);
710}
711
712#ifndef NSWBUF_MIN
713#define	NSWBUF_MIN	16
714#endif
715
716#ifdef __i386__
717#define	TRANSIENT_DENOM	5
718#else
719#define	TRANSIENT_DENOM 10
720#endif
721
722/*
723 * Calculate buffer cache scaling values and reserve space for buffer
724 * headers.  This is called during low level kernel initialization and
725 * may be called more than once.  We CANNOT write to the memory area
726 * being reserved at this time.
727 */
728caddr_t
729kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
730{
731	int tuned_nbuf;
732	long maxbuf, maxbuf_sz, buf_sz,	biotmap_sz;
733
734	/*
735	 * physmem_est is in pages.  Convert it to kilobytes (assumes
736	 * PAGE_SIZE is >= 1K)
737	 */
738	physmem_est = physmem_est * (PAGE_SIZE / 1024);
739
740	/*
741	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
742	 * For the first 64MB of ram nominally allocate sufficient buffers to
743	 * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
744	 * buffers to cover 1/10 of our ram over 64MB.  When auto-sizing
745	 * the buffer cache we limit the eventual kva reservation to
746	 * maxbcache bytes.
747	 *
748	 * factor represents the 1/4 x ram conversion.
749	 */
750	if (nbuf == 0) {
751		int factor = 4 * BKVASIZE / 1024;
752
753		nbuf = 50;
754		if (physmem_est > 4096)
755			nbuf += min((physmem_est - 4096) / factor,
756			    65536 / factor);
757		if (physmem_est > 65536)
758			nbuf += min((physmem_est - 65536) * 2 / (factor * 5),
759			    32 * 1024 * 1024 / (factor * 5));
760
761		if (maxbcache && nbuf > maxbcache / BKVASIZE)
762			nbuf = maxbcache / BKVASIZE;
763		tuned_nbuf = 1;
764	} else
765		tuned_nbuf = 0;
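
	/*
	 * Worked example (assuming the common 16kB BKVASIZE, so factor is
	 * 64): with about 1GB of usable memory, physmem_est is roughly
	 * 1048576kB, giving nbuf = 50 + 1024 + 6144, i.e. a bit over 7200
	 * buffers, which the maxbcache check above and the maxbuf check
	 * below may then cap further.
	 */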
766
767	/* XXX Avoid unsigned long overflows later on with maxbufspace. */
768	maxbuf = (LONG_MAX / 3) / BKVASIZE;
769	if (nbuf > maxbuf) {
770		if (!tuned_nbuf)
771			printf("Warning: nbufs lowered from %d to %ld\n", nbuf,
772			    maxbuf);
773		nbuf = maxbuf;
774	}
775
776	/*
777	 * Ideal allocation size for the transient bio submap is 10%
778	 * of the maximal space buffer map.  This roughly corresponds
779	 * to the amount of the buffer mapped for typical UFS load.
780	 *
781	 * Clip the buffer map to reserve space for the transient
782	 * BIOs, if its extent is bigger than 90% (80% on i386) of the
783	 * maximum buffer map extent on the platform.
784	 *
785	 * Falling back to maxbuf when maxbcache is unset avoids
786	 * trimming the buffer KVA on the architectures with ample
787	 * KVA space.
788	 */
789	if (bio_transient_maxcnt == 0 && unmapped_buf_allowed) {
790		maxbuf_sz = maxbcache != 0 ? maxbcache : maxbuf * BKVASIZE;
791		buf_sz = (long)nbuf * BKVASIZE;
792		if (buf_sz < maxbuf_sz / TRANSIENT_DENOM *
793		    (TRANSIENT_DENOM - 1)) {
794			/*
795			 * There is more KVA than memory.  Do not
796			 * adjust buffer map size, and assign the rest
797			 * of maxbuf to transient map.
798			 */
799			biotmap_sz = maxbuf_sz - buf_sz;
800		} else {
801			/*
802			 * Buffer map spans all KVA we could afford on
803			 * this platform.  Give 10% (20% on i386) of
804			 * the buffer map to the transient bio map.
805			 */
806			biotmap_sz = buf_sz / TRANSIENT_DENOM;
807			buf_sz -= biotmap_sz;
808		}
809		if (biotmap_sz / INT_MAX > MAXPHYS)
810			bio_transient_maxcnt = INT_MAX;
811		else
812			bio_transient_maxcnt = biotmap_sz / MAXPHYS;
813		/*
814		 * Artificially limit to 1024 simultaneous in-flight I/Os
815		 * using the transient mapping.
816		 */
817		if (bio_transient_maxcnt > 1024)
818			bio_transient_maxcnt = 1024;
819		if (tuned_nbuf)
820			nbuf = buf_sz / BKVASIZE;
821	}
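
	/*
	 * Continuing the example (illustrative numbers only): roughly 7200
	 * BKVASIZE buffers is about 118MB of buffer map.  When that is
	 * below 9/10 (4/5 on i386) of the maximal map size the entire
	 * remainder becomes the transient map; otherwise 1/10 (1/5 on
	 * i386) of the buffer map is carved out for it.  With the usual
	 * 128kB MAXPHYS, a 12MB transient map then admits about 96
	 * in-flight unmapped I/Os, well under the 1024 cap.
	 */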
822
823	/*
824	 * swbufs are used as temporary holders for I/O, such as paging I/O.
825	 * We have no fewer than 16 and no more than 256.
826	 */
827	nswbuf = min(nbuf / 4, 256);
828	TUNABLE_INT_FETCH("kern.nswbuf", &nswbuf);
829	if (nswbuf < NSWBUF_MIN)
830		nswbuf = NSWBUF_MIN;
831
832	/*
833	 * Reserve space for the buffer cache buffers
834	 */
835	swbuf = (void *)v;
836	v = (caddr_t)(swbuf + nswbuf);
837	buf = (void *)v;
838	v = (caddr_t)(buf + nbuf);
839
840	return(v);
841}
842
843/* Initialize the buffer subsystem.  Called before use of any buffers. */
844void
845bufinit(void)
846{
847	struct buf *bp;
848	int i;
849
850	CTASSERT(MAXBCACHEBUF >= MAXBSIZE);
851	mtx_init(&bqclean, "bufq clean lock", NULL, MTX_DEF);
852	mtx_init(&bqdirty, "bufq dirty lock", NULL, MTX_DEF);
853	mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
854	rw_init(&nblock, "needsbuffer lock");
855	mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);
856	mtx_init(&bdirtylock, "dirty buf lock", NULL, MTX_DEF);
857
858	/* next, make a null set of free lists */
859	for (i = 0; i < BUFFER_QUEUES; i++)
860		TAILQ_INIT(&bufqueues[i]);
861
862	unmapped_buf = (caddr_t)kva_alloc(MAXPHYS);
863
864	/* finally, initialize each buffer header and stick on empty q */
865	for (i = 0; i < nbuf; i++) {
866		bp = &buf[i];
867		bzero(bp, sizeof *bp);
868		bp->b_flags = B_INVAL | B_INFREECNT;
869		bp->b_rcred = NOCRED;
870		bp->b_wcred = NOCRED;
871		bp->b_qindex = QUEUE_EMPTY;
872		bp->b_xflags = 0;
873		bp->b_data = bp->b_kvabase = unmapped_buf;
874		LIST_INIT(&bp->b_dep);
875		BUF_LOCKINIT(bp);
876		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
877#ifdef INVARIANTS
878		bq_len[QUEUE_EMPTY]++;
879#endif
880	}
881
882	/*
883	 * maxbufspace is the absolute maximum amount of buffer space we are
884	 * allowed to reserve in KVM and in real terms.  The absolute maximum
885	 * is nominally used by buf_daemon.  hibufspace is the nominal maximum
886	 * used by most other processes.  The differential is required to
887	 * ensure that buf_daemon is able to run when other processes might
888	 * be blocked waiting for buffer space.
889	 *
890	 * maxbufspace is based on BKVASIZE.  Allocating buffers larger than
891	 * this may result in KVM fragmentation which is not handled optimally
892	 * by the system.
893	 */
894	maxbufspace = (long)nbuf * BKVASIZE;
895	hibufspace = lmax(3 * maxbufspace / 4, maxbufspace - MAXBCACHEBUF * 10);
896	lobufspace = hibufspace - MAXBCACHEBUF;
897
898	/*
899	 * Note: The 16 MiB upper limit for hirunningspace was chosen
900	 * arbitrarily and may need further tuning. It corresponds to
901	 * 128 outstanding write IO requests (if IO size is 128 KiB),
902	 * which fits with many RAID controllers' tagged queuing limits.
903	 * The lower 1 MiB limit is the historical upper limit for
904	 * hirunningspace.
905	 */
906	hirunningspace = lmax(lmin(roundup(hibufspace / 64, MAXBCACHEBUF),
907	    16 * 1024 * 1024), 1024 * 1024);
908	lorunningspace = roundup((hirunningspace * 2) / 3, MAXBCACHEBUF);
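
	/*
	 * Example (illustrative numbers, assuming a 16kB BKVASIZE and the
	 * 64kB MAXBCACHEBUF default): with nbuf around 7200, maxbufspace
	 * is about 118MB and hibufspace about 117MB (the larger of 3/4 of
	 * maxbufspace and maxbufspace minus 640kB), with lobufspace 64kB
	 * below that.  hibufspace / 64 is then roughly 1.8MB, so
	 * hirunningspace ends up near 1.9MB after rounding up to a
	 * MAXBCACHEBUF multiple, and lorunningspace at about two thirds
	 * of that.
	 */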
909
910/*
911 * Limit the amount of malloc memory since it is wired permanently into
912 * the kernel space.  Even though this is accounted for in the buffer
913 * allocation, we don't want the malloced region to grow uncontrolled.
914 * The malloc scheme improves memory utilization significantly on average
915 * (small) directories.
916 */
917	maxbufmallocspace = hibufspace / 20;
918
919/*
920 * Reduce the chance of a deadlock occurring by limiting the number
921 * of delayed-write dirty buffers we allow to stack up.
922 */
923	hidirtybuffers = nbuf / 4 + 20;
924	dirtybufthresh = hidirtybuffers * 9 / 10;
925	numdirtybuffers = 0;
926/*
927 * To support extreme low-memory systems, make sure hidirtybuffers cannot
928 * eat up all available buffer space.  This occurs when our minimum cannot
929 * be met.  We try to size hidirtybuffers to 3/4 our buffer space assuming
930 * BKVASIZE'd buffers.
931 */
932	while ((long)hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
933		hidirtybuffers >>= 1;
934	}
935	lodirtybuffers = hidirtybuffers / 2;
936
937/*
938 * Try to keep the number of free buffers in the specified range,
939 * and give special processes (e.g. buf_daemon) access to an
940 * emergency reserve.
941 */
942	lofreebuffers = nbuf / 18 + 5;
943	hifreebuffers = 2 * lofreebuffers;
944	numfreebuffers = nbuf;
945
946	bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
947	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
948}
949
950#ifdef INVARIANTS
951static inline void
952vfs_buf_check_mapped(struct buf *bp)
953{
954
955	KASSERT(bp->b_kvabase != unmapped_buf,
956	    ("mapped buf: b_kvabase was not updated %p", bp));
957	KASSERT(bp->b_data != unmapped_buf,
958	    ("mapped buf: b_data was not updated %p", bp));
959	KASSERT(bp->b_data < unmapped_buf || bp->b_data > unmapped_buf +
960	    MAXPHYS, ("b_data + b_offset unmapped %p", bp));
961}
962
963static inline void
964vfs_buf_check_unmapped(struct buf *bp)
965{
966
967	KASSERT(bp->b_data == unmapped_buf,
968	    ("unmapped buf: corrupted b_data %p", bp));
969}
970
971#define	BUF_CHECK_MAPPED(bp) vfs_buf_check_mapped(bp)
972#define	BUF_CHECK_UNMAPPED(bp) vfs_buf_check_unmapped(bp)
973#else
974#define	BUF_CHECK_MAPPED(bp) do {} while (0)
975#define	BUF_CHECK_UNMAPPED(bp) do {} while (0)
976#endif
977
978static void
979bpmap_qenter(struct buf *bp)
980{
981
982	BUF_CHECK_MAPPED(bp);
983
984	/*
985	 * bp->b_data is relative to bp->b_offset, but
986	 * bp->b_offset may be offset into the first page.
987	 */
988	bp->b_data = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
989	pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
990	bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
991	    (vm_offset_t)(bp->b_offset & PAGE_MASK));
992}
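
/*
 * For instance (illustrative values): a buffer with b_offset 0x3200 whose
 * pages get mapped at a page-aligned address such as 0xc1000000 ends up
 * with b_data pointing at 0xc1000200 -- the mapping base plus the
 * sub-page part of b_offset.
 */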
993
994/*
995 *	binsfree:
996 *
997 *	Insert the buffer into the appropriate free list.
998 */
999static void
1000binsfree(struct buf *bp, int qindex)
1001{
1002	struct mtx *olock, *nlock;
1003
1004	BUF_ASSERT_XLOCKED(bp);
1005
1006	nlock = bqlock(qindex);
1007	/* Handle delayed bremfree() processing. */
1008	if (bp->b_flags & B_REMFREE) {
1009		olock = bqlock(bp->b_qindex);
1010		mtx_lock(olock);
1011		bremfreel(bp);
1012		if (olock != nlock) {
1013			mtx_unlock(olock);
1014			mtx_lock(nlock);
1015		}
1016	} else
1017		mtx_lock(nlock);
1018
1019	if (bp->b_qindex != QUEUE_NONE)
1020		panic("binsfree: free buffer onto another queue???");
1021
1022	bp->b_qindex = qindex;
1023	if (bp->b_flags & B_AGE)
1024		TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
1025	else
1026		TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
1027#ifdef INVARIANTS
1028	bq_len[bp->b_qindex]++;
1029#endif
1030	mtx_unlock(nlock);
1031
1032	/*
1033	 * Something we can maybe free or reuse.
1034	 */
1035	if (bp->b_bufsize && !(bp->b_flags & B_DELWRI))
1036		bufspacewakeup();
1037
1038	if ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI))
1039		bufcountadd(bp);
1040}
1041
1042/*
1043 *	bremfree:
1044 *
1045 *	Mark the buffer for removal from the appropriate free list.
1046 *
1047 */
1048void
1049bremfree(struct buf *bp)
1050{
1051
1052	CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1053	KASSERT((bp->b_flags & B_REMFREE) == 0,
1054	    ("bremfree: buffer %p already marked for delayed removal.", bp));
1055	KASSERT(bp->b_qindex != QUEUE_NONE,
1056	    ("bremfree: buffer %p not on a queue.", bp));
1057	BUF_ASSERT_XLOCKED(bp);
1058
1059	bp->b_flags |= B_REMFREE;
1060	bufcountsub(bp);
1061}
1062
1063/*
1064 *	bremfreef:
1065 *
1066 *	Force an immediate removal from a free list.  Used only in nfs when
1067 *	it abuses the b_freelist pointer.
1068 */
1069void
1070bremfreef(struct buf *bp)
1071{
1072	struct mtx *qlock;
1073
1074	qlock = bqlock(bp->b_qindex);
1075	mtx_lock(qlock);
1076	bremfreel(bp);
1077	mtx_unlock(qlock);
1078}
1079
1080/*
1081 *	bremfreel:
1082 *
1083 *	Removes a buffer from the free list, must be called with the
1084 *	correct qlock held.
1085 */
1086static void
1087bremfreel(struct buf *bp)
1088{
1089
1090	CTR3(KTR_BUF, "bremfreel(%p) vp %p flags %X",
1091	    bp, bp->b_vp, bp->b_flags);
1092	KASSERT(bp->b_qindex != QUEUE_NONE,
1093	    ("bremfreel: buffer %p not on a queue.", bp));
1094	BUF_ASSERT_XLOCKED(bp);
1095	mtx_assert(bqlock(bp->b_qindex), MA_OWNED);
1096
1097	TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
1098#ifdef INVARIANTS
1099	KASSERT(bq_len[bp->b_qindex] >= 1, ("queue %d underflow",
1100	    bp->b_qindex));
1101	bq_len[bp->b_qindex]--;
1102#endif
1103	bp->b_qindex = QUEUE_NONE;
1104	/*
1105	 * If this was a delayed bremfree() we only need to remove the buffer
1106	 * from the queue and return; the stats were already updated.
1107	 */
1108	if (bp->b_flags & B_REMFREE) {
1109		bp->b_flags &= ~B_REMFREE;
1110		return;
1111	}
1112	bufcountsub(bp);
1113}
1114
1115/*
1116 *	bufkvafree:
1117 *
1118 *	Free the kva allocation for a buffer.
1119 *
1120 */
1121static void
1122bufkvafree(struct buf *bp)
1123{
1124
1125#ifdef INVARIANTS
1126	if (bp->b_kvasize == 0) {
1127		KASSERT(bp->b_kvabase == unmapped_buf &&
1128		    bp->b_data == unmapped_buf,
1129		    ("Leaked KVA space on %p", bp));
1130	} else if (buf_mapped(bp))
1131		BUF_CHECK_MAPPED(bp);
1132	else
1133		BUF_CHECK_UNMAPPED(bp);
1134#endif
1135	if (bp->b_kvasize == 0)
1136		return;
1137
1138	vmem_free(buffer_arena, (vm_offset_t)bp->b_kvabase, bp->b_kvasize);
1139	atomic_subtract_long(&bufkvaspace, bp->b_kvasize);
1140	atomic_add_int(&buffreekvacnt, 1);
1141	bp->b_data = bp->b_kvabase = unmapped_buf;
1142	bp->b_kvasize = 0;
1143}
1144
1145/*
1146 *	bufkvaalloc:
1147 *
1148 *	Allocate the buffer KVA and set b_kvasize and b_kvabase.
1149 */
1150static int
1151bufkvaalloc(struct buf *bp, int maxsize, int gbflags)
1152{
1153	vm_offset_t addr;
1154	int error;
1155
1156	KASSERT((gbflags & GB_UNMAPPED) == 0 || (gbflags & GB_KVAALLOC) != 0,
1157	    ("Invalid gbflags 0x%x in %s", gbflags, __func__));
1158
1159	bufkvafree(bp);
1160
1161	addr = 0;
1162	error = vmem_alloc(buffer_arena, maxsize, M_BESTFIT | M_NOWAIT, &addr);
1163	if (error != 0) {
1164		/*
1165		 * Buffer map is too fragmented.  Request the caller
1166		 * to defragment the map.
1167		 */
1168		atomic_add_int(&bufdefragcnt, 1);
1169		return (error);
1170	}
1171	bp->b_kvabase = (caddr_t)addr;
1172	bp->b_kvasize = maxsize;
1173	atomic_add_long(&bufkvaspace, bp->b_kvasize);
1174	if ((gbflags & GB_UNMAPPED) != 0) {
1175		bp->b_data = unmapped_buf;
1176		BUF_CHECK_UNMAPPED(bp);
1177	} else {
1178		bp->b_data = bp->b_kvabase;
1179		BUF_CHECK_MAPPED(bp);
1180	}
1181	return (0);
1182}
1183
1184/*
1185 * Attempt to initiate asynchronous I/O on read-ahead blocks.  We must
1186 * clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE is set,
1187 * the buffer is valid and we do not have to do anything.
1188 */
1189void
1190breada(struct vnode * vp, daddr_t * rablkno, int * rabsize,
1191    int cnt, struct ucred * cred)
1192{
1193	struct buf *rabp;
1194	int i;
1195
1196	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
1197		if (inmem(vp, *rablkno))
1198			continue;
1199		rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0);
1200
1201		if ((rabp->b_flags & B_CACHE) == 0) {
1202			if (!TD_IS_IDLETHREAD(curthread))
1203				curthread->td_ru.ru_inblock++;
1204			rabp->b_flags |= B_ASYNC;
1205			rabp->b_flags &= ~B_INVAL;
1206			rabp->b_ioflags &= ~BIO_ERROR;
1207			rabp->b_iocmd = BIO_READ;
1208			if (rabp->b_rcred == NOCRED && cred != NOCRED)
1209				rabp->b_rcred = crhold(cred);
1210			vfs_busy_pages(rabp, 0);
1211			BUF_KERNPROC(rabp);
1212			rabp->b_iooffset = dbtob(rabp->b_blkno);
1213			bstrategy(rabp);
1214		} else {
1215			brelse(rabp);
1216		}
1217	}
1218}
1219
1220/*
1221 * Entry point for bread() and breadn() via #defines in sys/buf.h.
1222 *
1223 * Get a buffer with the specified data.  Look in the cache first.  We
1224 * must clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
1225 * is set, the buffer is valid and we do not have to do anything, see
1226 * getblk(). Also starts asynchronous I/O on read-ahead blocks.
1227 */
1228int
1229breadn_flags(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablkno,
1230    int *rabsize, int cnt, struct ucred *cred, int flags, struct buf **bpp)
1231{
1232	struct buf *bp;
1233	int rv = 0, readwait = 0;
1234
1235	CTR3(KTR_BUF, "breadn(%p, %jd, %d)", vp, blkno, size);
1236	/*
1237	 * Can only return NULL if GB_LOCK_NOWAIT flag is specified.
1238	 */
1239	*bpp = bp = getblk(vp, blkno, size, 0, 0, flags);
1240	if (bp == NULL)
1241		return (EBUSY);
1242
1243	/* if not found in cache, do some I/O */
1244	if ((bp->b_flags & B_CACHE) == 0) {
1245		if (!TD_IS_IDLETHREAD(curthread))
1246			curthread->td_ru.ru_inblock++;
1247		bp->b_iocmd = BIO_READ;
1248		bp->b_flags &= ~B_INVAL;
1249		bp->b_ioflags &= ~BIO_ERROR;
1250		if (bp->b_rcred == NOCRED && cred != NOCRED)
1251			bp->b_rcred = crhold(cred);
1252		vfs_busy_pages(bp, 0);
1253		bp->b_iooffset = dbtob(bp->b_blkno);
1254		bstrategy(bp);
1255		++readwait;
1256	}
1257
1258	breada(vp, rablkno, rabsize, cnt, cred);
1259
1260	if (readwait) {
1261		rv = bufwait(bp);
1262	}
1263	return (rv);
1264}
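
/*
 * A sketch of the usual calling pattern, through the bread() wrapper from
 * sys/buf.h (lbn and size stand for whatever block the caller wants; the
 * error handling mirrors common filesystem usage):
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, size, NOCRED, &bp);
 *	if (error != 0) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	...examine or copy bp->b_data...
 *	bqrelse(bp);
 */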
1265
1266/*
1267 * Write, release buffer on completion.  (Done by iodone
1268 * if async).  Do not bother writing anything if the buffer
1269 * is invalid.
1270 *
1271 * Note that we set B_CACHE here, indicating that buffer is
1272 * fully valid and thus cacheable.  This is true even of NFS
1273 * now so we set it generally.  This could be set either here
1274 * or in biodone() since the I/O is synchronous.  We put it
1275 * here.
1276 */
1277int
1278bufwrite(struct buf *bp)
1279{
1280	int oldflags;
1281	struct vnode *vp;
1282	long space;
1283	int vp_md;
1284
1285	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1286	if ((bp->b_bufobj->bo_flag & BO_DEAD) != 0) {
1287		bp->b_flags |= B_INVAL | B_RELBUF;
1288		bp->b_flags &= ~B_CACHE;
1289		brelse(bp);
1290		return (ENXIO);
1291	}
1292	if (bp->b_flags & B_INVAL) {
1293		brelse(bp);
1294		return (0);
1295	}
1296
1297	if (bp->b_flags & B_BARRIER)
1298		barrierwrites++;
1299
1300	oldflags = bp->b_flags;
1301
1302	BUF_ASSERT_HELD(bp);
1303
1304	if (bp->b_pin_count > 0)
1305		bunpin_wait(bp);
1306
1307	KASSERT(!(bp->b_vflags & BV_BKGRDINPROG),
1308	    ("FFS background buffer should not get here %p", bp));
1309
1310	vp = bp->b_vp;
1311	if (vp)
1312		vp_md = vp->v_vflag & VV_MD;
1313	else
1314		vp_md = 0;
1315
1316	/*
1317	 * Mark the buffer clean.  Increment the bufobj write count
1318	 * before the bundirty() call, to prevent other threads from seeing an
1319	 * empty dirty list and zero counter for writes in progress,
1320	 * falsely indicating that the bufobj is clean.
1321	 */
1322	bufobj_wref(bp->b_bufobj);
1323	bundirty(bp);
1324
1325	bp->b_flags &= ~B_DONE;
1326	bp->b_ioflags &= ~BIO_ERROR;
1327	bp->b_flags |= B_CACHE;
1328	bp->b_iocmd = BIO_WRITE;
1329
1330	vfs_busy_pages(bp, 1);
1331
1332	/*
1333	 * Normal bwrites pipeline writes
1334	 */
1335	bp->b_runningbufspace = bp->b_bufsize;
1336	space = atomic_fetchadd_long(&runningbufspace, bp->b_runningbufspace);
1337
1338	if (!TD_IS_IDLETHREAD(curthread))
1339		curthread->td_ru.ru_oublock++;
1340	if (oldflags & B_ASYNC)
1341		BUF_KERNPROC(bp);
1342	bp->b_iooffset = dbtob(bp->b_blkno);
1343	bstrategy(bp);
1344
1345	if ((oldflags & B_ASYNC) == 0) {
1346		int rtval = bufwait(bp);
1347		brelse(bp);
1348		return (rtval);
1349	} else if (space > hirunningspace) {
1350		/*
1351		 * don't allow the async write to saturate the I/O
1352		 * system.  We will not deadlock here because
1353		 * we are blocking waiting for I/O that is already in-progress
1354		 * to complete. We do not block here if it is the update
1355		 * or syncer daemon trying to clean up as that can lead
1356		 * to deadlock.
1357		 */
1358		if ((curthread->td_pflags & TDP_NORUNNINGBUF) == 0 && !vp_md)
1359			waitrunningbufspace();
1360	}
1361
1362	return (0);
1363}
1364
1365void
1366bufbdflush(struct bufobj *bo, struct buf *bp)
1367{
1368	struct buf *nbp;
1369
1370	if (bo->bo_dirty.bv_cnt > dirtybufthresh + 10) {
1371		(void) VOP_FSYNC(bp->b_vp, MNT_NOWAIT, curthread);
1372		altbufferflushes++;
1373	} else if (bo->bo_dirty.bv_cnt > dirtybufthresh) {
1374		BO_LOCK(bo);
1375		/*
1376		 * Try to find a buffer to flush.
1377		 */
1378		TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) {
1379			if ((nbp->b_vflags & BV_BKGRDINPROG) ||
1380			    BUF_LOCK(nbp,
1381				     LK_EXCLUSIVE | LK_NOWAIT, NULL))
1382				continue;
1383			if (bp == nbp)
1384				panic("bdwrite: found ourselves");
1385			BO_UNLOCK(bo);
1386			/* Don't countdeps with the bo lock held. */
1387			if (buf_countdeps(nbp, 0)) {
1388				BO_LOCK(bo);
1389				BUF_UNLOCK(nbp);
1390				continue;
1391			}
1392			if (nbp->b_flags & B_CLUSTEROK) {
1393				vfs_bio_awrite(nbp);
1394			} else {
1395				bremfree(nbp);
1396				bawrite(nbp);
1397			}
1398			dirtybufferflushes++;
1399			break;
1400		}
1401		if (nbp == NULL)
1402			BO_UNLOCK(bo);
1403	}
1404}
1405
1406/*
1407 * Delayed write. (Buffer is marked dirty).  Do not bother writing
1408 * anything if the buffer is marked invalid.
1409 *
1410 * Note that since the buffer must be completely valid, we can safely
1411 * set B_CACHE.  In fact, we have to set B_CACHE here rather than in
1412 * biodone() in order to prevent getblk from writing the buffer
1413 * out synchronously.
1414 */
1415void
1416bdwrite(struct buf *bp)
1417{
1418	struct thread *td = curthread;
1419	struct vnode *vp;
1420	struct bufobj *bo;
1421
1422	CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1423	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
1424	KASSERT((bp->b_flags & B_BARRIER) == 0,
1425	    ("Barrier request in delayed write %p", bp));
1426	BUF_ASSERT_HELD(bp);
1427
1428	if (bp->b_flags & B_INVAL) {
1429		brelse(bp);
1430		return;
1431	}
1432
1433	/*
1434	 * If we have too many dirty buffers, don't create any more.
1435	 * If we are wildly over our limit, then force a complete
1436	 * cleanup. Otherwise, just keep the situation from getting
1437	 * out of control. Note that we have to avoid a recursive
1438	 * disaster and not try to clean up after our own cleanup!
1439	 */
1440	vp = bp->b_vp;
1441	bo = bp->b_bufobj;
1442	if ((td->td_pflags & (TDP_COWINPROGRESS|TDP_INBDFLUSH)) == 0) {
1443		td->td_pflags |= TDP_INBDFLUSH;
1444		BO_BDFLUSH(bo, bp);
1445		td->td_pflags &= ~TDP_INBDFLUSH;
1446	} else
1447		recursiveflushes++;
1448
1449	bdirty(bp);
1450	/*
1451	 * Set B_CACHE, indicating that the buffer is fully valid.  This is
1452	 * true even of NFS now.
1453	 */
1454	bp->b_flags |= B_CACHE;
1455
1456	/*
1457	 * This bmap keeps the system from needing to do the bmap later,
1458	 * perhaps when the system is attempting to do a sync.  Since it
1459	 * is likely that the indirect block -- or whatever other datastructure
1460	 * is likely that the indirect block -- or whatever other data
1461	 * structure the filesystem needs -- is still in memory now, it is
1462	 * a good thing to do this.  Note also that if the pageout daemon is
1463	 * the bmap then...  So, this is important to do.
1464	 */
1465	if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) {
1466		VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
1467	}
1468
1469	/*
1470	 * Set the *dirty* buffer range based upon the VM system dirty
1471	 * pages.
1472	 *
1473	 * Mark the buffer pages as clean.  We need to do this here to
1474	 * satisfy the vnode_pager and the pageout daemon, so that it
1475	 * thinks that the pages have been "cleaned".  Note that since
1476	 * the pages are in a delayed write buffer -- the VFS layer
1477	 * "will" see that the pages get written out on the next sync,
1478	 * or perhaps the cluster will be completed.
1479	 */
1480	vfs_clean_pages_dirty_buf(bp);
1481	bqrelse(bp);
1482
1483	/*
1484	 * note: we cannot initiate I/O from a bdwrite even if we wanted to,
1485	 * due to the softdep code.
1486	 */
1487}
1488
1489/*
1490 *	bdirty:
1491 *
1492 *	Turn buffer into delayed write request.  We must clear BIO_READ and
1493 *	B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to
1494 *	itself to properly update it in the dirty/clean lists.  We mark it
1495 *	B_DONE to ensure that any asynchronization of the buffer properly
1496 *	clears B_DONE ( else a panic will occur later ).
1497 *
1498 *	bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
1499 *	might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
1500 *	should only be called if the buffer is known-good.
1501 *
1502 *	Since the buffer is not on a queue, we do not update the numfreebuffers
1503 *	count.
1504 *
1505 *	The buffer must be on QUEUE_NONE.
1506 */
1507void
1508bdirty(struct buf *bp)
1509{
1510
1511	CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X",
1512	    bp, bp->b_vp, bp->b_flags);
1513	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
1514	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
1515	    ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
1516	BUF_ASSERT_HELD(bp);
1517	bp->b_flags &= ~(B_RELBUF);
1518	bp->b_iocmd = BIO_WRITE;
1519
1520	if ((bp->b_flags & B_DELWRI) == 0) {
1521		bp->b_flags |= /* XXX B_DONE | */ B_DELWRI;
1522		reassignbuf(bp);
1523		bdirtyadd();
1524	}
1525}
1526
1527/*
1528 *	bundirty:
1529 *
1530 *	Clear B_DELWRI for buffer.
1531 *
1532 *	Since the buffer is not on a queue, we do not update the numfreebuffers
1533 *	count.
1534 *
1535 *	The buffer must be on QUEUE_NONE.
1536 */
1537
1538void
1539bundirty(struct buf *bp)
1540{
1541
1542	CTR3(KTR_BUF, "bundirty(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1543	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
1544	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
1545	    ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
1546	BUF_ASSERT_HELD(bp);
1547
1548	if (bp->b_flags & B_DELWRI) {
1549		bp->b_flags &= ~B_DELWRI;
1550		reassignbuf(bp);
1551		bdirtysub();
1552	}
1553	/*
1554	 * Since it is now being written, we can clear its deferred write flag.
1555	 */
1556	bp->b_flags &= ~B_DEFERRED;
1557}
1558
1559/*
1560 *	bawrite:
1561 *
1562 *	Asynchronous write.  Start output on a buffer, but do not wait for
1563 *	it to complete.  The buffer is released when the output completes.
1564 *
1565 *	bwrite() ( or the VOP routine anyway ) is responsible for handling
1566 *	B_INVAL buffers.  Not us.
1567 */
1568void
1569bawrite(struct buf *bp)
1570{
1571
1572	bp->b_flags |= B_ASYNC;
1573	(void) bwrite(bp);
1574}
1575
1576/*
1577 *	babarrierwrite:
1578 *
1579 *	Asynchronous barrier write.  Start output on a buffer, but do not
1580 *	wait for it to complete.  Place a write barrier after this write so
1581 *	that this buffer and all buffers written before it are committed to
1582 *	the disk before any buffers written after this write are committed
1583 *	to the disk.  The buffer is released when the output completes.
1584 */
1585void
1586babarrierwrite(struct buf *bp)
1587{
1588
1589	bp->b_flags |= B_ASYNC | B_BARRIER;
1590	(void) bwrite(bp);
1591}
1592
1593/*
1594 *	bbarrierwrite:
1595 *
1596 *	Synchronous barrier write.  Start output on a buffer and wait for
1597 *	it to complete.  Place a write barrier after this write so that
1598 *	this buffer and all buffers written before it are committed to
1599 *	the disk before any buffers written after this write are committed
1600 *	to the disk.  The buffer is released when the output completes.
1601 */
1602int
1603bbarrierwrite(struct buf *bp)
1604{
1605
1606	bp->b_flags |= B_BARRIER;
1607	return (bwrite(bp));
1608}
1609
1610/*
1611 *	bwillwrite:
1612 *
1613 *	Called prior to the locking of any vnodes when we are expecting to
1614 *	write.  We do not want to starve the buffer cache with too many
1615 *	dirty buffers so we block here.  By blocking prior to the locking
1616 *	of any vnodes we attempt to avoid the situation where a locked vnode
1617 *	prevents the various system daemons from flushing related buffers.
1618 */
1619void
1620bwillwrite(void)
1621{
1622
1623	if (numdirtybuffers >= hidirtybuffers) {
1624		mtx_lock(&bdirtylock);
1625		while (numdirtybuffers >= hidirtybuffers) {
1626			bdirtywait = 1;
1627			msleep(&bdirtywait, &bdirtylock, (PRIBIO + 4),
1628			    "flswai", 0);
1629		}
1630		mtx_unlock(&bdirtylock);
1631	}
1632}
1633
1634/*
1635 * Return true if we have too many dirty buffers.
1636 */
1637int
1638buf_dirty_count_severe(void)
1639{
1640
1641	return(numdirtybuffers >= hidirtybuffers);
1642}
1643
1644/*
1645 *	brelse:
1646 *
1647 *	Release a busy buffer and, if requested, free its resources.  The
1648 *	buffer will be stashed in the appropriate bufqueue[] allowing it
1649 *	to be accessed later as a cache entity or reused for other purposes.
1650 */
1651void
1652brelse(struct buf *bp)
1653{
1654	int qindex;
1655
1656	CTR3(KTR_BUF, "brelse(%p) vp %p flags %X",
1657	    bp, bp->b_vp, bp->b_flags);
1658	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
1659	    ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
1660
1661	if (BUF_LOCKRECURSED(bp)) {
1662		/*
1663		 * Do not process, in particular, do not handle the
1664		 * B_INVAL/B_RELBUF and do not release to free list.
1665		 */
1666		BUF_UNLOCK(bp);
1667		return;
1668	}
1669
1670	if (bp->b_flags & B_MANAGED) {
1671		bqrelse(bp);
1672		return;
1673	}
1674
1675	if ((bp->b_vflags & (BV_BKGRDINPROG | BV_BKGRDERR)) == BV_BKGRDERR) {
1676		BO_LOCK(bp->b_bufobj);
1677		bp->b_vflags &= ~BV_BKGRDERR;
1678		BO_UNLOCK(bp->b_bufobj);
1679		bdirty(bp);
1680	}
1681	if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) &&
1682	    bp->b_error == EIO && !(bp->b_flags & B_INVAL)) {
1683		/*
1684		 * Failed write, redirty.  Must clear BIO_ERROR to prevent
1685		 * pages from being scrapped.  If the error is anything
1686		 * other than an I/O error (EIO), assume that retrying
1687		 * is futile.
1688		 */
1689		bp->b_ioflags &= ~BIO_ERROR;
1690		bdirty(bp);
1691	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
1692	    (bp->b_ioflags & BIO_ERROR) || (bp->b_bufsize <= 0)) {
1693		/*
1694		 * Either a failed I/O or we were asked to free or not
1695		 * cache the buffer.
1696		 */
1697		bp->b_flags |= B_INVAL;
1698		if (!LIST_EMPTY(&bp->b_dep))
1699			buf_deallocate(bp);
1700		if (bp->b_flags & B_DELWRI)
1701			bdirtysub();
1702		bp->b_flags &= ~(B_DELWRI | B_CACHE);
1703		if ((bp->b_flags & B_VMIO) == 0) {
1704			if (bp->b_bufsize)
1705				allocbuf(bp, 0);
1706			if (bp->b_vp)
1707				brelvp(bp);
1708		}
1709	}
1710
1711	/*
1712	 * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_release()
1713	 * is called with B_DELWRI set, the underlying pages may wind up
1714	 * getting freed causing a previous write (bdwrite()) to get 'lost'
1715	 * because pages associated with a B_DELWRI bp are marked clean.
1716	 *
1717	 * We still allow the B_INVAL case to call vfs_vmio_release(), even
1718	 * if B_DELWRI is set.
1719	 */
1720	if (bp->b_flags & B_DELWRI)
1721		bp->b_flags &= ~B_RELBUF;
1722
1723	/*
1724	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
1725	 * constituted, not even NFS buffers now.  Two flags affect this.  If
1726	 * B_INVAL, the struct buf is invalidated but the VM object is kept
1727	 * around ( i.e. so it is trivial to reconstitute the buffer later ).
1728	 *
1729	 * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
1730	 * invalidated.  BIO_ERROR cannot be set for a failed write unless the
1731	 * buffer is also B_INVAL because it hits the re-dirtying code above.
1732	 *
1733	 * Normally we can do this whether a buffer is B_DELWRI or not.  If
1734	 * the buffer is an NFS buffer, it is tracking piecemeal writes or
1735	 * the commit state and we cannot afford to lose the buffer. If the
1736	 * buffer has a background write in progress, we need to keep it
1737	 * around to prevent it from being reconstituted and starting a second
1738	 * background write.
1739	 */
1740	if ((bp->b_flags & B_VMIO)
1741	    && !(bp->b_vp->v_mount != NULL &&
1742		 (bp->b_vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
1743		 !vn_isdisk(bp->b_vp, NULL) &&
1744		 (bp->b_flags & B_DELWRI))
1745	    ) {
1746
1747		int i, j, resid;
1748		vm_page_t m;
1749		off_t foff;
1750		vm_pindex_t poff;
1751		vm_object_t obj;
1752
1753		obj = bp->b_bufobj->bo_object;
1754
1755		/*
1756		 * Get the base offset and length of the buffer.  Note that
1757		 * in the VMIO case if the buffer block size is not
1758		 * page-aligned then b_data pointer may not be page-aligned.
1759		 * But our b_pages[] array *IS* page aligned.
1760		 *
1761		 * Block sizes less than DEV_BSIZE (usually 512) are not
1762		 * supported due to the page granularity bits (m->valid,
1763		 * m->dirty, etc...).
1764		 *
1765		 * See buf(9) for more information.
1766		 */
1767		resid = bp->b_bufsize;
1768		foff = bp->b_offset;
1769		for (i = 0; i < bp->b_npages; i++) {
1770			int had_bogus = 0;
1771
1772			m = bp->b_pages[i];
1773
1774			/*
1775			 * If we hit a bogus page, fixup *all* the bogus pages
1776			 * now.
1777			 */
1778			if (m == bogus_page) {
1779				poff = OFF_TO_IDX(bp->b_offset);
1780				had_bogus = 1;
1781
1782				VM_OBJECT_RLOCK(obj);
1783				for (j = i; j < bp->b_npages; j++) {
1784					vm_page_t mtmp;
1785					mtmp = bp->b_pages[j];
1786					if (mtmp == bogus_page) {
1787						mtmp = vm_page_lookup(obj, poff + j);
1788						if (!mtmp) {
1789							panic("brelse: page missing\n");
1790						}
1791						bp->b_pages[j] = mtmp;
1792					}
1793				}
1794				VM_OBJECT_RUNLOCK(obj);
1795
1796				if ((bp->b_flags & B_INVAL) == 0 &&
1797				    buf_mapped(bp)) {
1798					BUF_CHECK_MAPPED(bp);
1799					pmap_qenter(
1800					    trunc_page((vm_offset_t)bp->b_data),
1801					    bp->b_pages, bp->b_npages);
1802				}
1803				m = bp->b_pages[i];
1804			}
1805			if ((bp->b_flags & B_NOCACHE) ||
1806			    (bp->b_ioflags & BIO_ERROR &&
1807			     bp->b_iocmd == BIO_READ)) {
1808				int poffset = foff & PAGE_MASK;
1809				int presid = resid > (PAGE_SIZE - poffset) ?
1810					(PAGE_SIZE - poffset) : resid;
1811
1812				KASSERT(presid >= 0, ("brelse: extra page"));
1813				VM_OBJECT_WLOCK(obj);
1814				while (vm_page_xbusied(m)) {
1815					vm_page_lock(m);
1816					VM_OBJECT_WUNLOCK(obj);
1817					vm_page_busy_sleep(m, "mbncsh");
1818					VM_OBJECT_WLOCK(obj);
1819				}
1820				if (pmap_page_wired_mappings(m) == 0)
1821					vm_page_set_invalid(m, poffset, presid);
1822				VM_OBJECT_WUNLOCK(obj);
1823				if (had_bogus)
1824					printf("avoided corruption bug in bogus_page/brelse code\n");
1825			}
1826			resid -= PAGE_SIZE - (foff & PAGE_MASK);
1827			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
1828		}
1829		if (bp->b_flags & (B_INVAL | B_RELBUF))
1830			vfs_vmio_release(bp);
1831
1832	} else if (bp->b_flags & B_VMIO) {
1833
1834		if (bp->b_flags & (B_INVAL | B_RELBUF)) {
1835			vfs_vmio_release(bp);
1836		}
1837
1838	} else if ((bp->b_flags & (B_INVAL | B_RELBUF)) != 0) {
1839		if (bp->b_bufsize != 0)
1840			allocbuf(bp, 0);
1841		if (bp->b_vp != NULL)
1842			brelvp(bp);
1843	}
1844
1845	/*
1846	 * If the buffer has junk contents, signal it and eventually
1847	 * clean up B_DELWRI and disassociate the vnode so that gbincore()
1848	 * doesn't find it.
1849	 */
1850	if (bp->b_bufsize == 0 || (bp->b_ioflags & BIO_ERROR) != 0 ||
1851	    (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) != 0)
1852		bp->b_flags |= B_INVAL;
1853	if (bp->b_flags & B_INVAL) {
1854		if (bp->b_flags & B_DELWRI)
1855			bundirty(bp);
1856		if (bp->b_vp)
1857			brelvp(bp);
1858	}
1859
1860	/* buffers with no memory */
1861	if (bp->b_bufsize == 0) {
1862		bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
1863		if (bp->b_vflags & BV_BKGRDINPROG)
1864			panic("losing buffer 1");
1865		if (bp->b_kvasize)
1866			qindex = QUEUE_EMPTYKVA;
1867		else
1868			qindex = QUEUE_EMPTY;
1869		bp->b_flags |= B_AGE;
1870	/* buffers with junk contents */
1871	} else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
1872	    (bp->b_ioflags & BIO_ERROR)) {
1873		bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
1874		if (bp->b_vflags & BV_BKGRDINPROG)
1875			panic("losing buffer 2");
1876		qindex = QUEUE_CLEAN;
1877		bp->b_flags |= B_AGE;
1878	/* remaining buffers */
1879	} else if (bp->b_flags & B_DELWRI)
1880		qindex = QUEUE_DIRTY;
1881	else
1882		qindex = QUEUE_CLEAN;
1883
1884	binsfree(bp, qindex);
1885
1886	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF | B_DIRECT);
1887	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
1888		panic("brelse: not dirty");
1889	/* unlock */
1890	BUF_UNLOCK(bp);
1891}
1892
1893/*
1894 * Release a buffer back to the appropriate queue but do not try to free
1895 * it.  The buffer is expected to be used again soon.
1896 *
1897 * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
1898 * biodone() to requeue an async I/O on completion.  It is also used when
1899 * known good buffers need to be requeued but we think we may need the data
1900 * again soon.
1901 *
1902 * XXX we should be able to leave the B_RELBUF hint set on completion.
1903 */
1904void
1905bqrelse(struct buf *bp)
1906{
1907	int qindex;
1908
1909	CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1910	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
1911	    ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
1912
1913	if (BUF_LOCKRECURSED(bp)) {
1914		/* do not release to free list */
1915		BUF_UNLOCK(bp);
1916		return;
1917	}
1918	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
1919
1920	if (bp->b_flags & B_MANAGED) {
1921		if (bp->b_flags & B_REMFREE)
1922			bremfreef(bp);
1923		goto out;
1924	}
1925
1926	/* buffers with stale but valid contents */
1927	if ((bp->b_flags & B_DELWRI) != 0 || (bp->b_vflags & (BV_BKGRDINPROG |
1928	    BV_BKGRDERR)) == BV_BKGRDERR) {
1929		BO_LOCK(bp->b_bufobj);
1930		bp->b_vflags &= ~BV_BKGRDERR;
1931		BO_UNLOCK(bp->b_bufobj);
1932		qindex = QUEUE_DIRTY;
1933	} else {
1934		if ((bp->b_flags & B_DELWRI) == 0 &&
1935		    (bp->b_xflags & BX_VNDIRTY))
1936			panic("bqrelse: not dirty");
1937		qindex = QUEUE_CLEAN;
1938	}
1939	binsfree(bp, qindex);
1940
1941out:
1942	/* unlock */
1943	BUF_UNLOCK(bp);
1944}
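/*
 * Illustrative sketch (hypothetical caller, not compiled): a release path
 * chooses between brelse() and bqrelse() based on whether the data is still
 * wanted.  The error test below is an assumption; the flag usage follows
 * the comments above.
 *
 *	if (error != 0 || (bp->b_flags & B_INVAL) != 0) {
 *		bp->b_flags |= B_RELBUF;	release the pages as well
 *		brelse(bp);
 *	} else {
 *		bqrelse(bp);			data likely needed again soon
 *	}
 */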
1945
1946/* Give pages used by the bp back to the VM system (where possible) */
1947static void
1948vfs_vmio_release(struct buf *bp)
1949{
1950	vm_object_t obj;
1951	vm_page_t m;
1952	int i;
1953
1954	if (buf_mapped(bp)) {
1955		BUF_CHECK_MAPPED(bp);
1956		pmap_qremove(trunc_page((vm_offset_t)bp->b_data), bp->b_npages);
1957	} else
1958		BUF_CHECK_UNMAPPED(bp);
1959	obj = bp->b_bufobj->bo_object;
1960	if (obj != NULL)
1961		VM_OBJECT_WLOCK(obj);
1962	for (i = 0; i < bp->b_npages; i++) {
1963		m = bp->b_pages[i];
1964		bp->b_pages[i] = NULL;
1965		/*
1966		 * In order to keep page LRU ordering consistent, put
1967		 * everything on the inactive queue.
1968		 */
1969		vm_page_lock(m);
1970		vm_page_unwire(m, PQ_INACTIVE);
1971
1972		/*
1973		 * Might as well free the page if we can and it has
1974		 * no valid data.  We also free the page if the
1975		 * buffer was used for direct I/O
1976		 */
1977		if ((bp->b_flags & B_ASYNC) == 0 && !m->valid) {
1978			if (m->wire_count == 0 && !vm_page_busied(m))
1979				vm_page_free(m);
1980		} else if (bp->b_flags & B_DIRECT)
1981			vm_page_try_to_free(m);
1982		vm_page_unlock(m);
1983	}
1984	if (obj != NULL)
1985		VM_OBJECT_WUNLOCK(obj);
1986
1987	if (bp->b_bufsize)
1988		bufspaceadjust(bp, 0);
1989	bp->b_npages = 0;
1990	bp->b_flags &= ~B_VMIO;
1991	if (bp->b_vp)
1992		brelvp(bp);
1993}
1994
1995/*
1996 * Check to see if a block at a particular lbn is available for a clustered
1997 * write.
1998 */
1999static int
2000vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno)
2001{
2002	struct buf *bpa;
2003	int match;
2004
2005	match = 0;
2006
2007	/* If the buf isn't in core skip it */
2008	if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL)
2009		return (0);
2010
2011	/* If the buf is busy we don't want to wait for it */
2012	if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
2013		return (0);
2014
2015	/* Only cluster with valid clusterable delayed write buffers */
2016	if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) !=
2017	    (B_DELWRI | B_CLUSTEROK))
2018		goto done;
2019
2020	if (bpa->b_bufsize != size)
2021		goto done;
2022
2023	/*
2024	 * Check to see if it is in the expected place on disk and that the
2025	 * block has been mapped.
2026	 */
2027	if ((bpa->b_blkno != bpa->b_lblkno) && (bpa->b_blkno == blkno))
2028		match = 1;
2029done:
2030	BUF_UNLOCK(bpa);
2031	return (match);
2032}
2033
2034/*
2035 *	vfs_bio_awrite:
2036 *
2037 *	Implement clustered async writes for clearing out B_DELWRI buffers.
2038 *	This is much better than the old way of writing only one buffer at
2039 *	a time.  Note that we may not be presented with the buffers in the
2040 *	correct order, so we search for the cluster in both directions.
2041 */
2042int
2043vfs_bio_awrite(struct buf *bp)
2044{
2045	struct bufobj *bo;
2046	int i;
2047	int j;
2048	daddr_t lblkno = bp->b_lblkno;
2049	struct vnode *vp = bp->b_vp;
2050	int ncl;
2051	int nwritten;
2052	int size;
2053	int maxcl;
2054	int gbflags;
2055
2056	bo = &vp->v_bufobj;
2057	gbflags = (bp->b_data == unmapped_buf) ? GB_UNMAPPED : 0;
2058	/*
2059	 * right now we support clustered writing only to regular files.  If
2060	 * we find a clusterable block we could be in the middle of a cluster
2061	 * rather than at the beginning.
2062	 */
2063	if ((vp->v_type == VREG) &&
2064	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
2065	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
2066
2067		size = vp->v_mount->mnt_stat.f_iosize;
2068		maxcl = MAXPHYS / size;
2069
2070		BO_RLOCK(bo);
2071		for (i = 1; i < maxcl; i++)
2072			if (vfs_bio_clcheck(vp, size, lblkno + i,
2073			    bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0)
2074				break;
2075
2076		for (j = 1; i + j <= maxcl && j <= lblkno; j++)
2077			if (vfs_bio_clcheck(vp, size, lblkno - j,
2078			    bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0)
2079				break;
2080		BO_RUNLOCK(bo);
2081		--j;
2082		ncl = i + j;
2083		/*
2084		 * this is a possible cluster write
2085		 */
2086		if (ncl != 1) {
2087			BUF_UNLOCK(bp);
2088			nwritten = cluster_wbuild(vp, size, lblkno - j, ncl,
2089			    gbflags);
2090			return (nwritten);
2091		}
2092	}
2093	bremfree(bp);
2094	bp->b_flags |= B_ASYNC;
2095	/*
2096	 * default (old) behavior, writing out only one block
2097	 *
2098	 * XXX returns b_bufsize instead of b_bcount for nwritten?
2099	 */
2100	nwritten = bp->b_bufsize;
2101	(void) bwrite(bp);
2102
2103	return (nwritten);
2104}
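/*
 * Illustrative sketch (hypothetical caller, not compiled): vfs_bio_awrite()
 * expects the buffer lock to be held and consumes it, as in the bufdaemon
 * path of flushbufqueues() below.  A minimal caller might look like:
 *
 *	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0) {
 *		if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI)
 *			(void)vfs_bio_awrite(bp);	writes bp and any cluster
 *		else
 *			BUF_UNLOCK(bp);
 *	}
 */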
2105
2106/*
2107 * Ask the bufdaemon for help, or act as bufdaemon itself, when a
2108 * locked vnode is supplied.
2109 */
2110static void
2111getnewbuf_bufd_help(struct vnode *vp, int gbflags, int slpflag, int slptimeo,
2112    int defrag)
2113{
2114	struct thread *td;
2115	char *waitmsg;
2116	int error, fl, flags, norunbuf;
2117
2118	mtx_assert(&bqclean, MA_OWNED);
2119
2120	if (defrag) {
2121		flags = VFS_BIO_NEED_BUFSPACE;
2122		waitmsg = "nbufkv";
2123	} else if (bufspace >= hibufspace) {
2124		waitmsg = "nbufbs";
2125		flags = VFS_BIO_NEED_BUFSPACE;
2126	} else {
2127		waitmsg = "newbuf";
2128		flags = VFS_BIO_NEED_ANY;
2129	}
2130	atomic_set_int(&needsbuffer, flags);
2131	mtx_unlock(&bqclean);
2132
2133	bd_speedup();	/* heeeelp */
2134	if ((gbflags & GB_NOWAIT_BD) != 0)
2135		return;
2136
2137	td = curthread;
2138	rw_wlock(&nblock);
2139	while ((needsbuffer & flags) != 0) {
2140		if (vp != NULL && vp->v_type != VCHR &&
2141		    (td->td_pflags & TDP_BUFNEED) == 0) {
2142			rw_wunlock(&nblock);
2143			/*
2144			 * getblk() is called with a vnode locked, and
2145			 * a majority of the dirty buffers may
2146			 * well belong to that vnode.  Flushing those
2147			 * buffers can make progress that the
2148			 * buf_daemon cannot achieve, because it
2149			 * cannot lock the vnode.
2150			 */
2151			norunbuf = ~(TDP_BUFNEED | TDP_NORUNNINGBUF) |
2152			    (td->td_pflags & TDP_NORUNNINGBUF);
2153
2154			/*
2155			 * Play bufdaemon.  The getnewbuf() function
2156			 * may be called while the thread owns lock
2157			 * for another dirty buffer for the same
2158			 * vnode, which makes it impossible to use
2159			 * VOP_FSYNC() there, due to the buffer lock
2160			 * recursion.
2161			 */
2162			td->td_pflags |= TDP_BUFNEED | TDP_NORUNNINGBUF;
2163			fl = buf_flush(vp, flushbufqtarget);
2164			td->td_pflags &= norunbuf;
2165			rw_wlock(&nblock);
2166			if (fl != 0)
2167				continue;
2168			if ((needsbuffer & flags) == 0)
2169				break;
2170		}
2171		error = rw_sleep(__DEVOLATILE(void *, &needsbuffer), &nblock,
2172		    (PRIBIO + 4) | slpflag, waitmsg, slptimeo);
2173		if (error != 0)
2174			break;
2175	}
2176	rw_wunlock(&nblock);
2177}
2178
2179static void
2180getnewbuf_reuse_bp(struct buf *bp, int qindex)
2181{
2182
2183	CTR6(KTR_BUF, "getnewbuf(%p) vp %p flags %X kvasize %d bufsize %d "
2184	    "queue %d (recycling)", bp, bp->b_vp, bp->b_flags,
2185	     bp->b_kvasize, bp->b_bufsize, qindex);
2186	mtx_assert(&bqclean, MA_NOTOWNED);
2187
2188	/*
2189	 * Note: we no longer distinguish between VMIO and non-VMIO
2190	 * buffers.
2191	 */
2192	KASSERT((bp->b_flags & B_DELWRI) == 0,
2193	    ("delwri buffer %p found in queue %d", bp, qindex));
2194
2195	if (qindex == QUEUE_CLEAN) {
2196		if (bp->b_flags & B_VMIO) {
2197			bp->b_flags &= ~B_ASYNC;
2198			vfs_vmio_release(bp);
2199		}
2200		if (bp->b_vp != NULL)
2201			brelvp(bp);
2202	}
2203
2204	/*
2205	 * Get the rest of the buffer freed up.  b_kva* is still valid
2206	 * after this operation.
2207	 */
2208
2209	if (bp->b_rcred != NOCRED) {
2210		crfree(bp->b_rcred);
2211		bp->b_rcred = NOCRED;
2212	}
2213	if (bp->b_wcred != NOCRED) {
2214		crfree(bp->b_wcred);
2215		bp->b_wcred = NOCRED;
2216	}
2217	if (!LIST_EMPTY(&bp->b_dep))
2218		buf_deallocate(bp);
2219	if (bp->b_vflags & BV_BKGRDINPROG)
2220		panic("losing buffer 3");
2221	KASSERT(bp->b_vp == NULL, ("bp: %p still has vnode %p.  qindex: %d",
2222	    bp, bp->b_vp, qindex));
2223	KASSERT((bp->b_xflags & (BX_VNCLEAN|BX_VNDIRTY)) == 0,
2224	    ("bp: %p still on a buffer list. xflags %X", bp, bp->b_xflags));
2225
2226	if (bp->b_bufsize)
2227		allocbuf(bp, 0);
2228
2229	bp->b_flags = 0;
2230	bp->b_ioflags = 0;
2231	bp->b_xflags = 0;
2232	KASSERT((bp->b_flags & B_INFREECNT) == 0,
2233	    ("buf %p still counted as free?", bp));
2234	bp->b_vflags = 0;
2235	bp->b_vp = NULL;
2236	bp->b_blkno = bp->b_lblkno = 0;
2237	bp->b_offset = NOOFFSET;
2238	bp->b_iodone = 0;
2239	bp->b_error = 0;
2240	bp->b_resid = 0;
2241	bp->b_bcount = 0;
2242	bp->b_npages = 0;
2243	bp->b_dirtyoff = bp->b_dirtyend = 0;
2244	bp->b_bufobj = NULL;
2245	bp->b_pin_count = 0;
2246	bp->b_data = bp->b_kvabase;
2247	bp->b_fsprivate1 = NULL;
2248	bp->b_fsprivate2 = NULL;
2249	bp->b_fsprivate3 = NULL;
2250
2251	LIST_INIT(&bp->b_dep);
2252}
2253
2254static int flushingbufs;
2255
2256static struct buf *
2257getnewbuf_scan(int maxsize, int defrag, int unmapped, int metadata)
2258{
2259	struct buf *bp, *nbp;
2260	int nqindex, qindex, pass;
2261
2262	KASSERT(!unmapped || !defrag, ("both unmapped and defrag"));
2263
2264	pass = 1;
2265restart:
2266	atomic_add_int(&getnewbufrestarts, 1);
2267
2268	/*
2269	 * Set up for the scan.  If we do not have enough free buffers,
2270	 * we set up a degenerate case that immediately fails.  Note
2271	 * that if we are a specially marked process, we are allowed to
2272	 * dip into our reserves.
2273	 *
2274	 * The scanning sequence is nominally: EMPTY->EMPTYKVA->CLEAN
2275	 * for the allocation of the mapped buffer.  For unmapped, the
2276	 * easiest is to start with EMPTY outright.
2277	 *
2278	 * We start with EMPTYKVA.  If the list is empty we back up to EMPTY.
2279	 * However, there are a number of cases (defragging, reusing, ...)
2280	 * where we cannot back up.
2281	 */
2282	nbp = NULL;
2283	mtx_lock(&bqclean);
2284	if (!defrag && unmapped) {
2285		nqindex = QUEUE_EMPTY;
2286		nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
2287	}
2288	if (nbp == NULL) {
2289		nqindex = QUEUE_EMPTYKVA;
2290		nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]);
2291	}
2292
2293	/*
2294	 * If no EMPTYKVA buffers and we are either defragging or
2295	 * reusing, locate a CLEAN buffer to free or reuse.  If
2296	 * bufspace usage is low, skip this step so we can allocate a
2297	 * new buffer.
2298	 */
2299	if (nbp == NULL && (defrag || bufspace >= lobufspace)) {
2300		nqindex = QUEUE_CLEAN;
2301		nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]);
2302	}
2303
2304	/*
2305	 * If we could not find or were not allowed to reuse a CLEAN
2306	 * buffer, check to see if it is ok to use an EMPTY buffer.
2307	 * We can only use an EMPTY buffer if allocating its KVA would
2308	 * not otherwise run us out of buffer space.  No KVA is needed
2309	 * for the unmapped allocation.
2310	 */
2311	if (nbp == NULL && defrag == 0 && (bufspace + maxsize < hibufspace ||
2312	    metadata)) {
2313		nqindex = QUEUE_EMPTY;
2314		nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
2315	}
2316
2317	/*
2318	 * All available buffers might be clean; retry, ignoring the
2319	 * lobufspace limit as a last resort.
2320	 */
2321	if (nbp == NULL && !TAILQ_EMPTY(&bufqueues[QUEUE_CLEAN])) {
2322		nqindex = QUEUE_CLEAN;
2323		nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]);
2324	}
2325
2326	/*
2327	 * Run the scan, possibly freeing data and/or kva mappings on the
2328	 * fly, depending on the queue being scanned.
2329	 */
2330	while ((bp = nbp) != NULL) {
2331		qindex = nqindex;
2332
2333		/*
2334		 * Calculate next bp (we can only use it if we do not
2335		 * block or do other fancy things).
2336		 */
2337		if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) {
2338			switch (qindex) {
2339			case QUEUE_EMPTY:
2340				nqindex = QUEUE_EMPTYKVA;
2341				nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]);
2342				if (nbp != NULL)
2343					break;
2344				/* FALLTHROUGH */
2345			case QUEUE_EMPTYKVA:
2346				nqindex = QUEUE_CLEAN;
2347				nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]);
2348				if (nbp != NULL)
2349					break;
2350				/* FALLTHROUGH */
2351			case QUEUE_CLEAN:
2352				if (metadata && pass == 1) {
2353					pass = 2;
2354					nqindex = QUEUE_EMPTY;
2355					nbp = TAILQ_FIRST(
2356					    &bufqueues[QUEUE_EMPTY]);
2357				}
2358				/*
2359				 * nbp is NULL.
2360				 */
2361				break;
2362			}
2363		}
2364		/*
2365		 * If we are defragging then we need a buffer with
2366		 * b_kvasize != 0.  This situation occurs when we
2367		 * have many unmapped bufs.
2368		 */
2369		if (defrag && bp->b_kvasize == 0)
2370			continue;
2371
2372		/*
2373		 * Start freeing the bp.  This is somewhat involved.  nbp
2374		 * remains valid only for QUEUE_EMPTY[KVA] bp's.
2375		 */
2376		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
2377			continue;
2378		/*
2379		 * BKGRDINPROG can only be set with the buf and bufobj
2380		 * locks both held.  We tolerate a race to clear it here.
2381		 */
2382		if (bp->b_vflags & BV_BKGRDINPROG) {
2383			BUF_UNLOCK(bp);
2384			continue;
2385		}
2386
2387		/*
2388		 * Requeue the background write buffer with error.
2389		 */
2390		if ((bp->b_vflags & BV_BKGRDERR) != 0) {
2391			bremfreel(bp);
2392			mtx_unlock(&bqclean);
2393			bqrelse(bp);
2394			continue;
2395		}
2396
2397		KASSERT(bp->b_qindex == qindex,
2398		    ("getnewbuf: inconsistent queue %d bp %p", qindex, bp));
2399
2400		bremfreel(bp);
2401		mtx_unlock(&bqclean);
2402		/*
2403		 * NOTE:  nbp is now entirely invalid.  We can only restart
2404		 * the scan from this point on.
2405		 */
2406
2407		getnewbuf_reuse_bp(bp, qindex);
2408		mtx_assert(&bqclean, MA_NOTOWNED);
2409
2410		/*
2411		 * If we are defragging then free the buffer.
2412		 */
2413		if (defrag) {
2414			bp->b_flags |= B_INVAL;
2415			bufkvafree(bp);
2416			brelse(bp);
2417			defrag = 0;
2418			goto restart;
2419		}
2420
2421		/*
2422		 * Notify any waiters for the buffer lock about
2423		 * identity change by freeing the buffer.
2424		 */
2425		if (qindex == QUEUE_CLEAN && BUF_LOCKWAITERS(bp)) {
2426			bp->b_flags |= B_INVAL;
2427			bufkvafree(bp);
2428			brelse(bp);
2429			goto restart;
2430		}
2431
2432		if (metadata)
2433			break;
2434
2435		/*
2436		 * If we are overcommitted then recover the buffer and its
2437		 * KVM space.  This occurs in rare situations when multiple
2438		 * processes are blocked in getnewbuf() or allocbuf().
2439		 */
2440		if (bufspace >= hibufspace)
2441			flushingbufs = 1;
2442		if (flushingbufs && bp->b_kvasize != 0) {
2443			bp->b_flags |= B_INVAL;
2444			bufkvafree(bp);
2445			brelse(bp);
2446			goto restart;
2447		}
2448		if (bufspace < lobufspace)
2449			flushingbufs = 0;
2450		break;
2451	}
2452	return (bp);
2453}
2454
2455/*
2456 *	getnewbuf:
2457 *
2458 *	Find and initialize a new buffer header, freeing up existing buffers
2459 *	in the bufqueues as necessary.  The new buffer is returned locked.
2460 *
2461 *	Important:  B_INVAL is not set.  If the caller wishes to throw the
2462 *	buffer away, the caller must set B_INVAL prior to calling brelse().
2463 *
2464 *	We block if:
2465 *		We have insufficient buffer headers
2466 *		We have insufficient buffer space
2467 *		buffer_arena is too fragmented ( space reservation fails )
2468 *		If we have to flush dirty buffers ( but we try to avoid this )
2469 */
2470static struct buf *
2471getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int size, int maxsize,
2472    int gbflags)
2473{
2474	struct buf *bp;
2475	int defrag, metadata;
2476
2477	KASSERT((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
2478	    ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
2479	if (!unmapped_buf_allowed)
2480		gbflags &= ~(GB_UNMAPPED | GB_KVAALLOC);
2481
2482	defrag = 0;
2483	if (vp == NULL || (vp->v_vflag & (VV_MD | VV_SYSTEM)) != 0 ||
2484	    vp->v_type == VCHR)
2485		metadata = 1;
2486	else
2487		metadata = 0;
2488	/*
2489	 * We can't afford to block since we might be holding a vnode lock,
2490	 * which may prevent system daemons from running.  We deal with
2491	 * low-memory situations by proactively returning memory and running
2492	 * async I/O rather than sync I/O.
2493	 */
2494	atomic_add_int(&getnewbufcalls, 1);
2495	atomic_subtract_int(&getnewbufrestarts, 1);
2496restart:
2497	bp = getnewbuf_scan(maxsize, defrag, (gbflags & (GB_UNMAPPED |
2498	    GB_KVAALLOC)) == GB_UNMAPPED, metadata);
2499	if (bp != NULL)
2500		defrag = 0;
2501
2502	/*
2503	 * If we exhausted our list, sleep as appropriate.  We may have to
2504	 * wakeup various daemons and write out some dirty buffers.
2505	 *
2506	 * Generally we are sleeping due to insufficient buffer space.
2507	 */
2508	if (bp == NULL) {
2509		mtx_assert(&bqclean, MA_OWNED);
2510		getnewbuf_bufd_help(vp, gbflags, slpflag, slptimeo, defrag);
2511		mtx_assert(&bqclean, MA_NOTOWNED);
2512	} else if ((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) == GB_UNMAPPED) {
2513		mtx_assert(&bqclean, MA_NOTOWNED);
2514
2515		bufkvafree(bp);
2516		atomic_add_int(&bufreusecnt, 1);
2517	} else {
2518		mtx_assert(&bqclean, MA_NOTOWNED);
2519
2520		/*
2521		 * We finally have a valid bp.  We aren't quite out of the
2522		 * woods, we still have to reserve kva space. In order to
2523		 * keep fragmentation sane we only allocate kva in BKVASIZE
2524		 * chunks.
2525		 */
2526		maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
2527
2528		if (maxsize != bp->b_kvasize &&
2529		    bufkvaalloc(bp, maxsize, gbflags)) {
2530			defrag = 1;
2531			bp->b_flags |= B_INVAL;
2532			brelse(bp);
2533			goto restart;
2534		} else if ((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) ==
2535		    (GB_UNMAPPED | GB_KVAALLOC)) {
2536			bp->b_data = unmapped_buf;
2537			BUF_CHECK_UNMAPPED(bp);
2538		}
2539		atomic_add_int(&bufreusecnt, 1);
2540	}
2541	return (bp);
2542}
2543
2544/*
2545 *	buf_daemon:
2546 *
2547 *	buffer flushing daemon.  Buffers are normally flushed by the
2548 *	update daemon but if it cannot keep up this process starts to
2549 *	take the load in an attempt to prevent getnewbuf() from blocking.
2550 */
2551
2552static struct kproc_desc buf_kp = {
2553	"bufdaemon",
2554	buf_daemon,
2555	&bufdaemonproc
2556};
2557SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp);
2558
2559static int
2560buf_flush(struct vnode *vp, int target)
2561{
2562	int flushed;
2563
2564	flushed = flushbufqueues(vp, target, 0);
2565	if (flushed == 0) {
2566		/*
2567		 * Could not find any buffers without rollback
2568		 * dependencies, so just write the first one
2569		 * in the hopes of eventually making progress.
2570		 */
2571		if (vp != NULL && target > 2)
2572			target /= 2;
2573		flushbufqueues(vp, target, 1);
2574	}
2575	return (flushed);
2576}
2577
2578static void
2579buf_daemon()
2580{
2581	int lodirty;
2582
2583	/*
2584	 * This process needs to be suspended prior to shutdown sync.
2585	 */
2586	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, bufdaemonproc,
2587	    SHUTDOWN_PRI_LAST);
2588
2589	/*
2590	 * This process is allowed to take the buffer cache to the limit
2591	 */
2592	curthread->td_pflags |= TDP_NORUNNINGBUF | TDP_BUFNEED;
2593	mtx_lock(&bdlock);
2594	for (;;) {
2595		bd_request = 0;
2596		mtx_unlock(&bdlock);
2597
2598		kproc_suspend_check(bufdaemonproc);
2599		lodirty = lodirtybuffers;
2600		if (bd_speedupreq) {
2601			lodirty = numdirtybuffers / 2;
2602			bd_speedupreq = 0;
2603		}
2604		/*
2605		 * Do the flush.  Limit the amount of in-transit I/O we
2606		 * allow to build up, otherwise we would completely saturate
2607		 * the I/O system.
2608		 */
2609		while (numdirtybuffers > lodirty) {
2610			if (buf_flush(NULL, numdirtybuffers - lodirty) == 0)
2611				break;
2612			kern_yield(PRI_USER);
2613		}
2614
2615		/*
2616		 * Only clear bd_request if we have reached our low water
2617		 * mark.  The buf_daemon normally waits 1 second and
2618		 * then incrementally flushes any dirty buffers that have
2619		 * built up, within reason.
2620		 *
2621		 * If we were unable to hit our low water mark and couldn't
2622		 * find any flushable buffers, we sleep for a short period
2623		 * to avoid endless loops on unlockable buffers.
2624		 */
2625		mtx_lock(&bdlock);
2626		if (numdirtybuffers <= lodirtybuffers) {
2627			/*
2628			 * We reached our low water mark, reset the
2629			 * request and sleep until we are needed again.
2630			 * The sleep is just so the suspend code works.
2631			 */
2632			bd_request = 0;
2633			/*
2634			 * Do an extra wakeup in case dirty threshold
2635			 * changed via sysctl and the explicit transition
2636			 * out of shortfall was missed.
2637			 */
2638			bdirtywakeup();
2639			if (runningbufspace <= lorunningspace)
2640				runningwakeup();
2641			msleep(&bd_request, &bdlock, PVM, "psleep", hz);
2642		} else {
2643			/*
2644			 * We couldn't find any flushable dirty buffers but
2645			 * still have too many dirty buffers, so we
2646			 * have to sleep and try again.  (rare)
2647			 */
2648			msleep(&bd_request, &bdlock, PVM, "qsleep", hz / 10);
2649		}
2650	}
2651}
2652
2653/*
2654 *	flushbufqueues:
2655 *
2656 *	Try to flush a buffer in the dirty queue.  We must be careful to
2657 *	free up B_INVAL buffers instead of writing them, which NFS is
2658 *	particularly sensitive to.
2659 */
2660static int flushwithdeps = 0;
2661SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW, &flushwithdeps,
2662    0, "Number of buffers flushed with dependencies that require rollbacks");
2663
2664static int
2665flushbufqueues(struct vnode *lvp, int target, int flushdeps)
2666{
2667	struct buf *sentinel;
2668	struct vnode *vp;
2669	struct mount *mp;
2670	struct buf *bp;
2671	int hasdeps;
2672	int flushed;
2673	int queue;
2674	int error;
2675	bool unlock;
2676
2677	flushed = 0;
2678	queue = QUEUE_DIRTY;
2679	bp = NULL;
2680	sentinel = malloc(sizeof(struct buf), M_TEMP, M_WAITOK | M_ZERO);
2681	sentinel->b_qindex = QUEUE_SENTINEL;
2682	mtx_lock(&bqdirty);
2683	TAILQ_INSERT_HEAD(&bufqueues[queue], sentinel, b_freelist);
2684	mtx_unlock(&bqdirty);
2685	while (flushed != target) {
2686		maybe_yield();
2687		mtx_lock(&bqdirty);
2688		bp = TAILQ_NEXT(sentinel, b_freelist);
2689		if (bp != NULL) {
2690			TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist);
2691			TAILQ_INSERT_AFTER(&bufqueues[queue], bp, sentinel,
2692			    b_freelist);
2693		} else {
2694			mtx_unlock(&bqdirty);
2695			break;
2696		}
2697		/*
2698		 * Skip sentinels inserted by other invocations of
2699		 * flushbufqueues(), taking care not to reorder them.
2700		 *
2701		 * Only flush the buffers that belong to the
2702		 * vnode locked by the curthread.
2703		 */
2704		if (bp->b_qindex == QUEUE_SENTINEL || (lvp != NULL &&
2705		    bp->b_vp != lvp)) {
2706			mtx_unlock(&bqdirty);
2707			continue;
2708		}
2709		error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL);
2710		mtx_unlock(&bqdirty);
2711		if (error != 0)
2712			continue;
2713		if (bp->b_pin_count > 0) {
2714			BUF_UNLOCK(bp);
2715			continue;
2716		}
2717		/*
2718		 * BKGRDINPROG can only be set with the buf and bufobj
2719		 * locks both held.  We tolerate a race to clear it here.
2720		 */
2721		if ((bp->b_vflags & BV_BKGRDINPROG) != 0 ||
2722		    (bp->b_flags & B_DELWRI) == 0) {
2723			BUF_UNLOCK(bp);
2724			continue;
2725		}
2726		if (bp->b_flags & B_INVAL) {
2727			bremfreef(bp);
2728			brelse(bp);
2729			flushed++;
2730			continue;
2731		}
2732
2733		if (!LIST_EMPTY(&bp->b_dep) && buf_countdeps(bp, 0)) {
2734			if (flushdeps == 0) {
2735				BUF_UNLOCK(bp);
2736				continue;
2737			}
2738			hasdeps = 1;
2739		} else
2740			hasdeps = 0;
2741		/*
2742		 * We must hold the lock on a vnode before writing
2743		 * one of its buffers. Otherwise we may confuse, or
2744		 * in the case of a snapshot vnode, deadlock the
2745		 * system.
2746		 *
2747		 * The lock order here is the reverse of the normal order
2748		 * of vnode lock followed by buf lock.  This is ok because
2749		 * the NOWAIT will prevent deadlock.
2750		 */
2751		vp = bp->b_vp;
2752		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
2753			BUF_UNLOCK(bp);
2754			continue;
2755		}
2756		if (lvp == NULL) {
2757			unlock = true;
2758			error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
2759		} else {
2760			ASSERT_VOP_LOCKED(vp, "getbuf");
2761			unlock = false;
2762			error = VOP_ISLOCKED(vp) == LK_EXCLUSIVE ? 0 :
2763			    vn_lock(vp, LK_TRYUPGRADE);
2764		}
2765		if (error == 0) {
2766			CTR3(KTR_BUF, "flushbufqueue(%p) vp %p flags %X",
2767			    bp, bp->b_vp, bp->b_flags);
2768			if (curproc == bufdaemonproc) {
2769				vfs_bio_awrite(bp);
2770			} else {
2771				bremfree(bp);
2772				bwrite(bp);
2773				notbufdflushes++;
2774			}
2775			vn_finished_write(mp);
2776			if (unlock)
2777				VOP_UNLOCK(vp, 0);
2778			flushwithdeps += hasdeps;
2779			flushed++;
2780
2781			/*
2782			 * Sleeping on runningbufspace while holding
2783			 * vnode lock leads to deadlock.
2784			 */
2785			if (curproc == bufdaemonproc &&
2786			    runningbufspace > hirunningspace)
2787				waitrunningbufspace();
2788			continue;
2789		}
2790		vn_finished_write(mp);
2791		BUF_UNLOCK(bp);
2792	}
2793	mtx_lock(&bqdirty);
2794	TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist);
2795	mtx_unlock(&bqdirty);
2796	free(sentinel, M_TEMP);
2797	return (flushed);
2798}
2799
2800/*
2801 * Check to see if a block is currently memory resident.
2802 */
2803struct buf *
2804incore(struct bufobj *bo, daddr_t blkno)
2805{
2806	struct buf *bp;
2807
2808	BO_RLOCK(bo);
2809	bp = gbincore(bo, blkno);
2810	BO_RUNLOCK(bo);
2811	return (bp);
2812}
2813
2814/*
2815 * Returns true if no I/O is needed to access the
2816 * associated VM object.  This is like incore except
2817 * it also hunts around in the VM system for the data.
2818 */
2819
2820static int
2821inmem(struct vnode * vp, daddr_t blkno)
2822{
2823	vm_object_t obj;
2824	vm_offset_t toff, tinc, size;
2825	vm_page_t m;
2826	vm_ooffset_t off;
2827
2828	ASSERT_VOP_LOCKED(vp, "inmem");
2829
2830	if (incore(&vp->v_bufobj, blkno))
2831		return 1;
2832	if (vp->v_mount == NULL)
2833		return 0;
2834	obj = vp->v_object;
2835	if (obj == NULL)
2836		return (0);
2837
2838	size = PAGE_SIZE;
2839	if (size > vp->v_mount->mnt_stat.f_iosize)
2840		size = vp->v_mount->mnt_stat.f_iosize;
2841	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
2842
2843	VM_OBJECT_RLOCK(obj);
2844	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
2845		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
2846		if (!m)
2847			goto notinmem;
2848		tinc = size;
2849		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
2850			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
2851		if (vm_page_is_valid(m,
2852		    (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
2853			goto notinmem;
2854	}
2855	VM_OBJECT_RUNLOCK(obj);
2856	return 1;
2857
2858notinmem:
2859	VM_OBJECT_RUNLOCK(obj);
2860	return (0);
2861}
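/*
 * Illustrative sketch (hypothetical caller, not compiled): read-ahead style
 * code can use these residency checks to avoid issuing I/O for data that is
 * already cached, roughly:
 *
 *	if (incore(&vp->v_bufobj, blkno) == NULL && !inmem(vp, blkno))
 *		... issue the read for blkno ...
 *
 * inmem() is static to this file; external consumers see only incore().
 */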
2862
2863/*
2864 * Set the dirty range for a buffer based on the status of the dirty
2865 * bits in the pages comprising the buffer.  The range is limited
2866 * to the size of the buffer.
2867 *
2868 * Tell the VM system that the pages associated with this buffer
2869 * are clean.  This is used for delayed writes where the data is
2870 * going to go to disk eventually without additional VM intervention.
2871 *
2872 * Note that while we only really need to clean through to b_bcount, we
2873 * just go ahead and clean through to b_bufsize.
2874 */
2875static void
2876vfs_clean_pages_dirty_buf(struct buf *bp)
2877{
2878	vm_ooffset_t foff, noff, eoff;
2879	vm_page_t m;
2880	int i;
2881
2882	if ((bp->b_flags & B_VMIO) == 0 || bp->b_bufsize == 0)
2883		return;
2884
2885	foff = bp->b_offset;
2886	KASSERT(bp->b_offset != NOOFFSET,
2887	    ("vfs_clean_pages_dirty_buf: no buffer offset"));
2888
2889	VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
2890	vfs_drain_busy_pages(bp);
2891	vfs_setdirty_locked_object(bp);
2892	for (i = 0; i < bp->b_npages; i++) {
2893		noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
2894		eoff = noff;
2895		if (eoff > bp->b_offset + bp->b_bufsize)
2896			eoff = bp->b_offset + bp->b_bufsize;
2897		m = bp->b_pages[i];
2898		vfs_page_set_validclean(bp, foff, m);
2899		/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
2900		foff = noff;
2901	}
2902	VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
2903}
2904
2905static void
2906vfs_setdirty_locked_object(struct buf *bp)
2907{
2908	vm_object_t object;
2909	int i;
2910
2911	object = bp->b_bufobj->bo_object;
2912	VM_OBJECT_ASSERT_WLOCKED(object);
2913
2914	/*
2915	 * We qualify the scan for modified pages on whether the
2916	 * object has been flushed yet.
2917	 */
2918	if ((object->flags & OBJ_MIGHTBEDIRTY) != 0) {
2919		vm_offset_t boffset;
2920		vm_offset_t eoffset;
2921
2922		/*
2923		 * test the pages to see if they have been modified directly
2924		 * by users through the VM system.
2925		 */
2926		for (i = 0; i < bp->b_npages; i++)
2927			vm_page_test_dirty(bp->b_pages[i]);
2928
2929		/*
2930		 * Calculate the encompassing dirty range, boffset and eoffset,
2931		 * (eoffset - boffset) bytes.
2932		 */
2933
2934		for (i = 0; i < bp->b_npages; i++) {
2935			if (bp->b_pages[i]->dirty)
2936				break;
2937		}
2938		boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
2939
2940		for (i = bp->b_npages - 1; i >= 0; --i) {
2941			if (bp->b_pages[i]->dirty) {
2942				break;
2943			}
2944		}
2945		eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
2946
2947		/*
2948		 * Fit it to the buffer.
2949		 */
2950
2951		if (eoffset > bp->b_bcount)
2952			eoffset = bp->b_bcount;
2953
2954		/*
2955		 * If we have a good dirty range, merge with the existing
2956		 * dirty range.
2957		 */
2958
2959		if (boffset < eoffset) {
2960			if (bp->b_dirtyoff > boffset)
2961				bp->b_dirtyoff = boffset;
2962			if (bp->b_dirtyend < eoffset)
2963				bp->b_dirtyend = eoffset;
2964		}
2965	}
2966}
2967
2968/*
2969 * Allocate the KVA mapping for an existing buffer.
2970 * If an unmapped buffer is provided but a mapped buffer is requested, also
2971 * take care to properly set up mappings between pages and KVA.
2972 */
2973static void
2974bp_unmapped_get_kva(struct buf *bp, daddr_t blkno, int size, int gbflags)
2975{
2976	struct buf *scratch_bp;
2977	int bsize, maxsize, need_mapping, need_kva;
2978	off_t offset;
2979
2980	need_mapping = bp->b_data == unmapped_buf &&
2981	    (gbflags & GB_UNMAPPED) == 0;
2982	need_kva = bp->b_kvabase == unmapped_buf &&
2983	    bp->b_data == unmapped_buf &&
2984	    (gbflags & GB_KVAALLOC) != 0;
2985	if (!need_mapping && !need_kva)
2986		return;
2987
2988	BUF_CHECK_UNMAPPED(bp);
2989
2990	if (need_mapping && bp->b_kvabase != unmapped_buf) {
2991		/*
2992		 * Buffer is not mapped, but the KVA was already
2993		 * reserved at the time of the instantiation.  Use the
2994		 * allocated space.
2995		 */
2996		goto has_addr;
2997	}
2998
2999	/*
3000	 * Calculate the amount of the address space we would reserve
3001	 * if the buffer was mapped.
3002	 */
3003	bsize = vn_isdisk(bp->b_vp, NULL) ? DEV_BSIZE : bp->b_bufobj->bo_bsize;
3004	KASSERT(bsize != 0, ("bsize == 0, check bo->bo_bsize"));
3005	offset = blkno * bsize;
3006	maxsize = size + (offset & PAGE_MASK);
3007	maxsize = imax(maxsize, bsize);
3008
3009mapping_loop:
3010	if (bufkvaalloc(bp, maxsize, gbflags)) {
3011		/*
3012		 * Request defragmentation.  getnewbuf() returns the
3013		 * allocated space to us via the scratch buffer KVA.
3014		 */
3015		scratch_bp = getnewbuf(bp->b_vp, 0, 0, size, maxsize, gbflags |
3016		    (GB_UNMAPPED | GB_KVAALLOC));
3017		if (scratch_bp == NULL) {
3018			if ((gbflags & GB_NOWAIT_BD) != 0) {
3019				/*
3020				 * XXXKIB: defragmentation cannot
3021				 * succeed, not sure what else to do.
3022				 */
3023				panic("GB_NOWAIT_BD and GB_UNMAPPED %p", bp);
3024			}
3025			atomic_add_int(&mappingrestarts, 1);
3026			goto mapping_loop;
3027		}
3028		KASSERT(scratch_bp->b_kvabase != unmapped_buf,
3029		    ("scratch bp has no KVA %p", scratch_bp));
3030		/* Grab pointers. */
3031		bp->b_kvabase = scratch_bp->b_kvabase;
3032		bp->b_kvasize = scratch_bp->b_kvasize;
3033		bp->b_data = scratch_bp->b_data;
3034
3035		/* Get rid of the scratch buffer. */
3036		scratch_bp->b_kvasize = 0;
3037		scratch_bp->b_flags |= B_INVAL;
3038		scratch_bp->b_data = scratch_bp->b_kvabase = unmapped_buf;
3039		brelse(scratch_bp);
3040	}
3041has_addr:
3042	if (need_mapping) {
3043		/* b_offset is handled by bpmap_qenter. */
3044		bp->b_data = bp->b_kvabase;
3045		BUF_CHECK_MAPPED(bp);
3046		bpmap_qenter(bp);
3047	}
3048}
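/*
 * Illustrative sketch (hypothetical caller, not compiled): a consumer that
 * works on pages directly can request an unmapped buffer and later obtain a
 * mapping simply by looking the block up again without GB_UNMAPPED, which
 * lands in bp_unmapped_get_kva() above:
 *
 *	bp = getblk(vp, blkno, size, 0, 0, GB_UNMAPPED);
 *	... operate on bp->b_pages[]; bp->b_data == unmapped_buf ...
 *	bqrelse(bp);
 *	bp = getblk(vp, blkno, size, 0, 0, 0);	now mapped into KVA
 */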
3049
3050/*
3051 *	getblk:
3052 *
3053 *	Get a block given a specified block and offset into a file/device.
3054 *	The buffer's B_DONE bit will be cleared on return, making it almost
3055 * 	ready for an I/O initiation.  B_INVAL may or may not be set on
3056 *	return.  The caller should clear B_INVAL prior to initiating a
3057 *	READ.
3058 *
3059 *	For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
3060 *	an existing buffer.
3061 *
3062 *	For a VMIO buffer, B_CACHE is modified according to the backing VM.
3063 *	If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
3064 *	and then cleared based on the backing VM.  If the previous buffer is
3065 *	non-0-sized but invalid, B_CACHE will be cleared.
3066 *
3067 *	If getblk() must create a new buffer, the new buffer is returned with
3068 *	both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
3069 *	case it is returned with B_INVAL clear and B_CACHE set based on the
3070 *	backing VM.
3071 *
3072 *	getblk() also forces a bwrite() for any B_DELWRI buffer whose
3073 *	B_CACHE bit is clear.
3074 *
3075 *	What this means, basically, is that the caller should use B_CACHE to
3076 *	determine whether the buffer is fully valid or not and should clear
3077 *	B_INVAL prior to issuing a read.  If the caller intends to validate
3078 *	the buffer by loading its data area with something, the caller needs
3079 *	to clear B_INVAL.  If the caller does this without issuing an I/O,
3080 *	the caller should set B_CACHE ( as an optimization ), else the caller
3081 *	should issue the I/O and biodone() will set B_CACHE if the I/O was
3082 *	a write attempt or if it was a successful read.  If the caller
3083 *	intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
3084 *	prior to issuing the READ.  biodone() will *not* clear B_INVAL.
3085 */
3086struct buf *
3087getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo,
3088    int flags)
3089{
3090	struct buf *bp;
3091	struct bufobj *bo;
3092	int bsize, error, maxsize, vmio;
3093	off_t offset;
3094
3095	CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size);
3096	KASSERT((flags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
3097	    ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
3098	ASSERT_VOP_LOCKED(vp, "getblk");
3099	if (size > MAXBCACHEBUF)
3100		panic("getblk: size(%d) > MAXBCACHEBUF(%d)\n", size,
3101		    MAXBCACHEBUF);
3102	if (!unmapped_buf_allowed)
3103		flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
3104
3105	bo = &vp->v_bufobj;
3106loop:
3107	BO_RLOCK(bo);
3108	bp = gbincore(bo, blkno);
3109	if (bp != NULL) {
3110		int lockflags;
3111		/*
3112		 * Buffer is in-core.  If the buffer is not busy nor managed,
3113		 * it must be on a queue.
3114		 */
3115		lockflags = LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK;
3116
3117		if (flags & GB_LOCK_NOWAIT)
3118			lockflags |= LK_NOWAIT;
3119
3120		error = BUF_TIMELOCK(bp, lockflags,
3121		    BO_LOCKPTR(bo), "getblk", slpflag, slptimeo);
3122
3123		/*
3124		 * If we slept and got the lock we have to restart in case
3125		 * the buffer changed identities.
3126		 */
3127		if (error == ENOLCK)
3128			goto loop;
3129		/* We timed out or were interrupted. */
3130		else if (error)
3131			return (NULL);
3132		/* If recursed, assume caller knows the rules. */
3133		else if (BUF_LOCKRECURSED(bp))
3134			goto end;
3135
3136		/*
3137		 * The buffer is locked.  B_CACHE is cleared if the buffer is
3138		 * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
3139		 * and for a VMIO buffer B_CACHE is adjusted according to the
3140		 * backing VM cache.
3141		 */
3142		if (bp->b_flags & B_INVAL)
3143			bp->b_flags &= ~B_CACHE;
3144		else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
3145			bp->b_flags |= B_CACHE;
3146		if (bp->b_flags & B_MANAGED)
3147			MPASS(bp->b_qindex == QUEUE_NONE);
3148		else
3149			bremfree(bp);
3150
3151		/*
3152		 * check for size inconsistencies for the non-VMIO case.
3153		 */
3154		if (bp->b_bcount != size) {
3155			if ((bp->b_flags & B_VMIO) == 0 ||
3156			    (size > bp->b_kvasize)) {
3157				if (bp->b_flags & B_DELWRI) {
3158					/*
3159					 * If the buffer is pinned and the
3160					 * caller does not want to sleep
3161					 * waiting for it to be unpinned,
3162					 * bail out.
3163					 */
3163					if (bp->b_pin_count > 0) {
3164						if (flags & GB_LOCK_NOWAIT) {
3165							bqrelse(bp);
3166							return (NULL);
3167						} else {
3168							bunpin_wait(bp);
3169						}
3170					}
3171					bp->b_flags |= B_NOCACHE;
3172					bwrite(bp);
3173				} else {
3174					if (LIST_EMPTY(&bp->b_dep)) {
3175						bp->b_flags |= B_RELBUF;
3176						brelse(bp);
3177					} else {
3178						bp->b_flags |= B_NOCACHE;
3179						bwrite(bp);
3180					}
3181				}
3182				goto loop;
3183			}
3184		}
3185
3186		/*
3187		 * Handle the case of an unmapped buffer which should
3188		 * become mapped, or of a buffer for which KVA
3189		 * reservation is requested.
3190		 */
3191		bp_unmapped_get_kva(bp, blkno, size, flags);
3192
3193		/*
3194		 * If the size is inconsistent in the VMIO case, we can resize
3195		 * the buffer.  This might lead to B_CACHE getting set or
3196		 * cleared.  If the size has not changed, B_CACHE remains
3197		 * unchanged from its previous state.
3198		 */
3199		if (bp->b_bcount != size)
3200			allocbuf(bp, size);
3201
3202		KASSERT(bp->b_offset != NOOFFSET,
3203		    ("getblk: no buffer offset"));
3204
3205		/*
3206		 * A buffer with B_DELWRI set and B_CACHE clear must
3207		 * be committed before we can return the buffer in
3208		 * order to prevent the caller from issuing a read
3209		 * ( due to B_CACHE not being set ) and overwriting
3210		 * it.
3211		 *
3212		 * Most callers, including NFS and FFS, need this to
3213		 * operate properly either because they assume they
3214		 * can issue a read if B_CACHE is not set, or because
3215		 * ( for example ) an uncached B_DELWRI might loop due
3216		 * to softupdates re-dirtying the buffer.  In the latter
3217		 * case, B_CACHE is set after the first write completes,
3218		 * preventing further loops.
3219		 * NOTE!  b*write() sets B_CACHE.  If we cleared B_CACHE
3220		 * above while extending the buffer, we cannot allow the
3221		 * buffer to remain with B_CACHE set after the write
3222		 * completes or it will represent a corrupt state.  To
3223		 * deal with this we set B_NOCACHE to scrap the buffer
3224		 * after the write.
3225		 *
3226		 * We might be able to do something fancy, like setting
3227		 * B_CACHE in bwrite() except if B_DELWRI is already set,
3228		 * so the below call doesn't set B_CACHE, but that gets real
3229		 * confusing.  This is much easier.
3230		 */
3231
3232		if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
3233			bp->b_flags |= B_NOCACHE;
3234			bwrite(bp);
3235			goto loop;
3236		}
3237		bp->b_flags &= ~B_DONE;
3238	} else {
3239		/*
3240		 * Buffer is not in-core, create new buffer.  The buffer
3241		 * returned by getnewbuf() is locked.  Note that the returned
3242		 * buffer is also considered valid (not marked B_INVAL).
3243		 */
3244		BO_RUNLOCK(bo);
3245		/*
3246		 * If the user does not want us to create the buffer, bail out
3247		 * here.
3248		 */
3249		if (flags & GB_NOCREAT)
3250			return NULL;
3251		if (numfreebuffers == 0 && TD_IS_IDLETHREAD(curthread))
3252			return NULL;
3253
3254		bsize = vn_isdisk(vp, NULL) ? DEV_BSIZE : bo->bo_bsize;
3255		KASSERT(bsize != 0, ("bsize == 0, check bo->bo_bsize"));
3256		offset = blkno * bsize;
3257		vmio = vp->v_object != NULL;
3258		if (vmio) {
3259			maxsize = size + (offset & PAGE_MASK);
3260		} else {
3261			maxsize = size;
3262			/* Do not allow non-VMIO unmapped buffers. */
3263			flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
3264		}
3265		maxsize = imax(maxsize, bsize);
3266
3267		bp = getnewbuf(vp, slpflag, slptimeo, size, maxsize, flags);
3268		if (bp == NULL) {
3269			if (slpflag || slptimeo)
3270				return NULL;
3271			goto loop;
3272		}
3273
3274		/*
3275		 * This code is used to make sure that a buffer is not
3276		 * created while the getnewbuf routine is blocked.
3277		 * This can be a problem whether the vnode is locked or not.
3278		 * If the buffer is created out from under us, we have to
3279		 * throw away the one we just created.
3280		 *
3281		 * Note: this must occur before we associate the buffer
3282		 * with the vp especially considering limitations in
3283		 * the splay tree implementation when dealing with duplicate
3284		 * lblkno's.
3285		 */
3286		BO_LOCK(bo);
3287		if (gbincore(bo, blkno)) {
3288			BO_UNLOCK(bo);
3289			bp->b_flags |= B_INVAL;
3290			brelse(bp);
3291			goto loop;
3292		}
3293
3294		/*
3295		 * Insert the buffer into the hash, so that it can
3296		 * be found by incore.
3297		 */
3298		bp->b_blkno = bp->b_lblkno = blkno;
3299		bp->b_offset = offset;
3300		bgetvp(vp, bp);
3301		BO_UNLOCK(bo);
3302
3303		/*
3304		 * Set the B_VMIO bit and allocbuf() the buffer bigger.  Since the
3305		 * buffer size starts out as 0, B_CACHE will be set by
3306		 * allocbuf() for the VMIO case prior to it testing the
3307		 * backing store for validity.
3308		 */
3309
3310		if (vmio) {
3311			bp->b_flags |= B_VMIO;
3312			KASSERT(vp->v_object == bp->b_bufobj->bo_object,
3313			    ("ARGH! different b_bufobj->bo_object %p %p %p\n",
3314			    bp, vp->v_object, bp->b_bufobj->bo_object));
3315		} else {
3316			bp->b_flags &= ~B_VMIO;
3317			KASSERT(bp->b_bufobj->bo_object == NULL,
3318			    ("ARGH! has b_bufobj->bo_object %p %p\n",
3319			    bp, bp->b_bufobj->bo_object));
3320			BUF_CHECK_MAPPED(bp);
3321		}
3322
3323		allocbuf(bp, size);
3324		bp->b_flags &= ~B_DONE;
3325	}
3326	CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
3327	BUF_ASSERT_HELD(bp);
3328end:
3329	KASSERT(bp->b_bufobj == bo,
3330	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
3331	return (bp);
3332}
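/*
 * Illustrative sketch (not compiled, simplified) of the B_CACHE protocol
 * described in the comment above getblk(); bread(9) implements this pattern
 * for real, including credential and error handling:
 *
 *	bp = getblk(vp, blkno, size, 0, 0, 0);
 *	if ((bp->b_flags & B_CACHE) == 0) {
 *		bp->b_flags &= ~B_INVAL;
 *		bp->b_ioflags &= ~BIO_ERROR;
 *		bp->b_iocmd = BIO_READ;
 *		vfs_busy_pages(bp, 0);
 *		bp->b_iooffset = dbtob(bp->b_blkno);
 *		bstrategy(bp);
 *		error = bufwait(bp);
 *	}
 */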
3333
3334/*
3335 * Get an empty, disassociated buffer of given size.  The buffer is initially
3336 * set to B_INVAL.
3337 */
3338struct buf *
3339geteblk(int size, int flags)
3340{
3341	struct buf *bp;
3342	int maxsize;
3343
3344	maxsize = (size + BKVAMASK) & ~BKVAMASK;
3345	while ((bp = getnewbuf(NULL, 0, 0, size, maxsize, flags)) == NULL) {
3346		if ((flags & GB_NOWAIT_BD) &&
3347		    (curthread->td_pflags & TDP_BUFNEED) != 0)
3348			return (NULL);
3349	}
3350	allocbuf(bp, size);
3351	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
3352	BUF_ASSERT_HELD(bp);
3353	return (bp);
3354}
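/*
 * Illustrative sketch (hypothetical caller, not compiled): geteblk() buffers
 * carry anonymous memory and serve as scratch space:
 *
 *	bp = geteblk(sz, 0);
 *	... fill and use bp->b_data for up to sz bytes ...
 *	brelse(bp);		B_INVAL is already set, so it is discarded
 */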
3355
3356/*
3357 * This code constitutes the buffer memory from either anonymous system
3358 * memory (in the case of non-VMIO operations) or from an associated
3359 * VM object (in the case of VMIO operations).  This code is able to
3360 * resize a buffer up or down.
3361 *
3362 * Note that this code is tricky, and has many complications to resolve
3363 * deadlock or inconsistent data situations.  Tread lightly!!!
3364 * There are B_CACHE and B_DELWRI interactions that must be dealt with by
3365 * the caller.  Calling this code willy nilly can result in the loss of data.
3366 *
3367 * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
3368 * B_CACHE for the non-VMIO case.
3369 */
3370
3371int
3372allocbuf(struct buf *bp, int size)
3373{
3374	int newbsize, mbsize;
3375	int i;
3376
3377	BUF_ASSERT_HELD(bp);
3378
3379	if (bp->b_kvasize != 0 && bp->b_kvasize < size)
3380		panic("allocbuf: buffer too small");
3381
3382	if ((bp->b_flags & B_VMIO) == 0) {
3383		caddr_t origbuf;
3384		int origbufsize;
3385		/*
3386		 * Just get anonymous memory from the kernel.  Don't
3387		 * mess with B_CACHE.
3388		 */
3389		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
3390		if (bp->b_flags & B_MALLOC)
3391			newbsize = mbsize;
3392		else
3393			newbsize = round_page(size);
3394
3395		if (newbsize < bp->b_bufsize) {
3396			/*
3397			 * malloced buffers are not shrunk
3398			 */
3399			if (bp->b_flags & B_MALLOC) {
3400				if (newbsize) {
3401					bp->b_bcount = size;
3402				} else {
3403					free(bp->b_data, M_BIOBUF);
3404					bufmallocadjust(bp, 0);
3405					bp->b_data = bp->b_kvabase;
3406					bp->b_bcount = 0;
3407					bp->b_flags &= ~B_MALLOC;
3408				}
3409				return 1;
3410			}
3411			vm_hold_free_pages(bp, newbsize);
3412		} else if (newbsize > bp->b_bufsize) {
3413			/*
3414			 * We only use malloced memory on the first allocation,
3415			 * and revert to page-allocated memory when the buffer
3416			 * grows.
3417			 */
3418			/*
3419			 * There is a potential smp race here that could lead
3420			 * to bufmallocspace slightly passing the max.  It
3421			 * is probably extremely rare and not worth worrying
3422			 * over.
3423			 */
3424			if ((bufmallocspace < maxbufmallocspace) &&
3425				(bp->b_bufsize == 0) &&
3426				(mbsize <= PAGE_SIZE/2)) {
3427
3428				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
3429				bp->b_bcount = size;
3430				bp->b_flags |= B_MALLOC;
3431				bufmallocadjust(bp, mbsize);
3432				return 1;
3433			}
3434			origbuf = NULL;
3435			origbufsize = 0;
3436			/*
3437			 * If the buffer is growing on its other-than-first
3438			 * allocation then we revert to the page-allocation
3439			 * scheme.
3440			 */
3441			if (bp->b_flags & B_MALLOC) {
3442				origbuf = bp->b_data;
3443				origbufsize = bp->b_bufsize;
3444				bp->b_data = bp->b_kvabase;
3445				bufmallocadjust(bp, 0);
3446				bp->b_flags &= ~B_MALLOC;
3447				newbsize = round_page(newbsize);
3448			}
3449			vm_hold_load_pages(
3450			    bp,
3451			    (vm_offset_t) bp->b_data + bp->b_bufsize,
3452			    (vm_offset_t) bp->b_data + newbsize);
3453			if (origbuf) {
3454				bcopy(origbuf, bp->b_data, origbufsize);
3455				free(origbuf, M_BIOBUF);
3456			}
3457		}
3458	} else {
3459		int desiredpages;
3460
3461		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
3462		desiredpages = (size == 0) ? 0 :
3463			num_pages((bp->b_offset & PAGE_MASK) + newbsize);
3464
3465		if (bp->b_flags & B_MALLOC)
3466			panic("allocbuf: VMIO buffer can't be malloced");
3467		/*
3468		 * Set B_CACHE initially if buffer is 0 length or will become
3469		 * 0-length.
3470		 */
3471		if (size == 0 || bp->b_bufsize == 0)
3472			bp->b_flags |= B_CACHE;
3473
3474		if (newbsize < bp->b_bufsize) {
3475			/*
3476			 * DEV_BSIZE aligned new buffer size is less than the
3477			 * DEV_BSIZE aligned existing buffer size.  Figure out
3478			 * if we have to remove any pages.
3479			 */
3480			if (desiredpages < bp->b_npages) {
3481				vm_page_t m;
3482
3483				if (buf_mapped(bp)) {
3484					BUF_CHECK_MAPPED(bp);
3485					pmap_qremove((vm_offset_t)trunc_page(
3486					    (vm_offset_t)bp->b_data) +
3487					    (desiredpages << PAGE_SHIFT),
3488					    (bp->b_npages - desiredpages));
3489				} else
3490					BUF_CHECK_UNMAPPED(bp);
3491				VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
3492				for (i = desiredpages; i < bp->b_npages; i++) {
3493					/*
3494					 * the page is not freed here -- it
3495					 * is the responsibility of
3496					 * vnode_pager_setsize
3497					 */
3498					m = bp->b_pages[i];
3499					KASSERT(m != bogus_page,
3500					    ("allocbuf: bogus page found"));
3501					while (vm_page_sleep_if_busy(m,
3502					    "biodep"))
3503						continue;
3504
3505					bp->b_pages[i] = NULL;
3506					vm_page_lock(m);
3507					vm_page_unwire(m, PQ_INACTIVE);
3508					vm_page_unlock(m);
3509				}
3510				VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
3511				bp->b_npages = desiredpages;
3512			}
3513		} else if (size > bp->b_bcount) {
3514			/*
3515			 * We are growing the buffer, possibly in a
3516			 * byte-granular fashion.
3517			 */
3518			vm_object_t obj;
3519			vm_offset_t toff;
3520			vm_offset_t tinc;
3521
3522			/*
3523			 * Step 1, bring in the VM pages from the object,
3524			 * allocating them if necessary.  We must clear
3525			 * B_CACHE if these pages are not valid for the
3526			 * range covered by the buffer.
3527			 */
3528
3529			obj = bp->b_bufobj->bo_object;
3530
3531			VM_OBJECT_WLOCK(obj);
3532			while (bp->b_npages < desiredpages) {
3533				vm_page_t m;
3534
3535				/*
3536				 * We must allocate system pages since blocking
3537				 * here could interfere with paging I/O, no
3538				 * matter which process we are.
3539				 *
3540				 * Only exclusive busy can be tested here.
3541				 * Blocking on shared busy might lead to
3542				 * deadlocks once allocbuf() is called after
3543				 * pages are vfs_busy_pages().
3544				 */
3545				m = vm_page_grab(obj, OFF_TO_IDX(bp->b_offset) +
3546				    bp->b_npages, VM_ALLOC_NOBUSY |
3547				    VM_ALLOC_SYSTEM | VM_ALLOC_WIRED |
3548				    VM_ALLOC_IGN_SBUSY |
3549				    VM_ALLOC_COUNT(desiredpages - bp->b_npages));
3550				if (m->valid == 0)
3551					bp->b_flags &= ~B_CACHE;
3552				bp->b_pages[bp->b_npages] = m;
3553				++bp->b_npages;
3554			}
3555
3556			/*
3557			 * Step 2.  We've loaded the pages into the buffer,
3558			 * we have to figure out if we can still have B_CACHE
3559			 * set.  Note that B_CACHE is set according to the
3560			 * byte-granular range ( bcount and size ), not the
3561			 * aligned range ( newbsize ).
3562			 *
3563			 * The VM test is against m->valid, which is DEV_BSIZE
3564			 * aligned.  Needless to say, the validity of the data
3565			 * needs to also be DEV_BSIZE aligned.  Note that this
3566			 * fails with NFS if the server or some other client
3567			 * extends the file's EOF.  If our buffer is resized,
3568			 * B_CACHE may remain set! XXX
3569			 */
3570
3571			toff = bp->b_bcount;
3572			tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
3573
3574			while ((bp->b_flags & B_CACHE) && toff < size) {
3575				vm_pindex_t pi;
3576
3577				if (tinc > (size - toff))
3578					tinc = size - toff;
3579
3580				pi = ((bp->b_offset & PAGE_MASK) + toff) >>
3581				    PAGE_SHIFT;
3582
3583				vfs_buf_test_cache(
3584				    bp,
3585				    bp->b_offset,
3586				    toff,
3587				    tinc,
3588				    bp->b_pages[pi]
3589				);
3590				toff += tinc;
3591				tinc = PAGE_SIZE;
3592			}
3593			VM_OBJECT_WUNLOCK(obj);
3594
3595			/*
3596			 * Step 3, fixup the KVA pmap.
3597			 */
3598			if (buf_mapped(bp))
3599				bpmap_qenter(bp);
3600			else
3601				BUF_CHECK_UNMAPPED(bp);
3602		}
3603	}
3604	/* Record changes in allocation size. */
3605	if (bp->b_bufsize != newbsize)
3606		bufspaceadjust(bp, newbsize);
3607	bp->b_bcount = size;		/* requested buffer size. */
3608	return 1;
3609}
3610
3611extern int inflight_transient_maps;
3612
3613void
3614biodone(struct bio *bp)
3615{
3616	struct mtx *mtxp;
3617	void (*done)(struct bio *);
3618	vm_offset_t start, end;
3619
3620	if ((bp->bio_flags & BIO_TRANSIENT_MAPPING) != 0) {
3621		bp->bio_flags &= ~BIO_TRANSIENT_MAPPING;
3622		bp->bio_flags |= BIO_UNMAPPED;
3623		start = trunc_page((vm_offset_t)bp->bio_data);
3624		end = round_page((vm_offset_t)bp->bio_data + bp->bio_length);
3625		bp->bio_data = unmapped_buf;
3626		pmap_qremove(start, OFF_TO_IDX(end - start));
3627		vmem_free(transient_arena, start, end - start);
3628		atomic_add_int(&inflight_transient_maps, -1);
3629	}
3630	done = bp->bio_done;
3631	if (done == NULL) {
3632		mtxp = mtx_pool_find(mtxpool_sleep, bp);
3633		mtx_lock(mtxp);
3634		bp->bio_flags |= BIO_DONE;
3635		wakeup(bp);
3636		mtx_unlock(mtxp);
3637	} else {
3638		bp->bio_flags |= BIO_DONE;
3639		done(bp);
3640	}
3641}
3642
3643/*
3644 * Wait for a BIO to finish.
3645 */
3646int
3647biowait(struct bio *bp, const char *wchan)
3648{
3649	struct mtx *mtxp;
3650
3651	mtxp = mtx_pool_find(mtxpool_sleep, bp);
3652	mtx_lock(mtxp);
3653	while ((bp->bio_flags & BIO_DONE) == 0)
3654		msleep(bp, mtxp, PRIBIO, wchan, 0);
3655	mtx_unlock(mtxp);
3656	if (bp->bio_error != 0)
3657		return (bp->bio_error);
3658	if (!(bp->bio_flags & BIO_ERROR))
3659		return (0);
3660	return (EIO);
3661}
3662
3663void
3664biofinish(struct bio *bp, struct devstat *stat, int error)
3665{
3666
3667	if (error) {
3668		bp->bio_error = error;
3669		bp->bio_flags |= BIO_ERROR;
3670	}
3671	if (stat != NULL)
3672		devstat_end_transaction_bio(stat, bp);
3673	biodone(bp);
3674}
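/*
 * Illustrative sketch (hypothetical caller, not compiled): the synchronous
 * struct bio pattern served by biodone() and biowait() above.  Leaving
 * bio_done NULL selects the wakeup path in biodone(); the provider calls
 * biodone(), directly or via biofinish(), on completion:
 *
 *	bip = g_new_bio();		retry on NULL in real code
 *	bip->bio_cmd = BIO_READ;
 *	bip->bio_offset = offset;
 *	bip->bio_data = data;
 *	bip->bio_length = length;
 *	bip->bio_done = NULL;
 *	(*csw->d_strategy)(bip);
 *	error = biowait(bip, "biowt");
 *	g_destroy_bio(bip);
 */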
3675
3676/*
3677 *	bufwait:
3678 *
3679 *	Wait for buffer I/O completion, returning error status.  The buffer
3680 *	is left locked and B_DONE on return.  B_EINTR is converted into an EINTR
3681 *	error and cleared.
3682 */
3683int
3684bufwait(struct buf *bp)
3685{
3686	if (bp->b_iocmd == BIO_READ)
3687		bwait(bp, PRIBIO, "biord");
3688	else
3689		bwait(bp, PRIBIO, "biowr");
3690	if (bp->b_flags & B_EINTR) {
3691		bp->b_flags &= ~B_EINTR;
3692		return (EINTR);
3693	}
3694	if (bp->b_ioflags & BIO_ERROR) {
3695		return (bp->b_error ? bp->b_error : EIO);
3696	} else {
3697		return (0);
3698	}
3699}
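
/*
 * Illustrative sketch (compiled out): the usual pairing of bstrategy()
 * with bufwait() for a synchronous read, roughly the core of what
 * bread() does once getblk() returns a buffer that is not B_CACHE.
 * Credential handling and read-ahead are omitted; the helper name is
 * hypothetical.
 */
#if 0
static int
example_read_block(struct vnode *vp, daddr_t blkno, int size,
    struct buf **bpp)
{
	struct buf *bp;
	int error;

	bp = getblk(vp, blkno, size, 0, 0, 0);
	if ((bp->b_flags & B_CACHE) == 0) {
		bp->b_iocmd = BIO_READ;
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		vfs_busy_pages(bp, 0);		/* read, so clear_modify == 0 */
		bp->b_iooffset = dbtob(bp->b_blkno);
		bstrategy(bp);
		error = bufwait(bp);		/* sleeps until bufdone() */
		if (error != 0) {
			brelse(bp);
			*bpp = NULL;
			return (error);
		}
	}
	*bpp = bp;
	return (0);
}
#endif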
3700
3701/*
3702 * Callback from the completed struct bio back up to the owning struct buf.
3703 */
3704static void
3705bufdonebio(struct bio *bip)
3706{
3707	struct buf *bp;
3708
3709	bp = bip->bio_caller2;
3710	bp->b_resid = bip->bio_resid;
3711	bp->b_ioflags = bip->bio_flags;
3712	bp->b_error = bip->bio_error;
3713	if (bp->b_error)
3714		bp->b_ioflags |= BIO_ERROR;
3715	bufdone(bp);
3716	g_destroy_bio(bip);
3717}
3718
3719void
3720dev_strategy(struct cdev *dev, struct buf *bp)
3721{
3722	struct cdevsw *csw;
3723	int ref;
3724
3725	KASSERT(dev->si_refcount > 0,
3726	    ("dev_strategy on un-referenced struct cdev *(%s) %p",
3727	    devtoname(dev), dev));
3728
3729	csw = dev_refthread(dev, &ref);
3730	dev_strategy_csw(dev, csw, bp);
3731	dev_relthread(dev, ref);
3732}
3733
3734void
3735dev_strategy_csw(struct cdev *dev, struct cdevsw *csw, struct buf *bp)
3736{
3737	struct bio *bip;
3738
3739	KASSERT(bp->b_iocmd == BIO_READ || bp->b_iocmd == BIO_WRITE,
3740	    ("b_iocmd botch"));
3741	KASSERT(((dev->si_flags & SI_ETERNAL) != 0 && csw != NULL) ||
3742	    dev->si_threadcount > 0,
3743	    ("dev_strategy_csw threadcount cdev *(%s) %p", devtoname(dev),
3744	    dev));
3745	if (csw == NULL) {
3746		bp->b_error = ENXIO;
3747		bp->b_ioflags = BIO_ERROR;
3748		bufdone(bp);
3749		return;
3750	}
3751	for (;;) {
3752		bip = g_new_bio();
3753		if (bip != NULL)
3754			break;
3755		/* Try again later */
3756		tsleep(&bp, PRIBIO, "dev_strat", hz/10);
3757	}
3758	bip->bio_cmd = bp->b_iocmd;
3759	bip->bio_offset = bp->b_iooffset;
3760	bip->bio_length = bp->b_bcount;
3761	bip->bio_bcount = bp->b_bcount;	/* XXX: remove */
3762	bdata2bio(bp, bip);
3763	bip->bio_done = bufdonebio;
3764	bip->bio_caller2 = bp;
3765	bip->bio_dev = dev;
3766	(*csw->d_strategy)(bip);
3767}
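
/*
 * Illustrative sketch (compiled out): a caller handing a struct buf to
 * a character device goes through dev_strategy(), which builds the
 * struct bio above and routes completion back through bufdonebio() and
 * bufdone().  The helper name is hypothetical; note that b_iooffset,
 * not b_blkno, is what the bio inherits as its byte offset.
 */
#if 0
static int
example_raw_write(struct cdev *dev, struct buf *bp)
{

	bp->b_iocmd = BIO_WRITE;
	bp->b_iooffset = dbtob(bp->b_blkno);	/* byte offset on the device */
	dev_strategy(dev, bp);
	return (bufwait(bp));		/* bufdonebio() -> bufdone() wakes us */
}
#endif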
3768
3769/*
3770 *	bufdone:
3771 *
3772 *	Finish I/O on a buffer, optionally calling a completion function.
3773 *	This is usually called from an interrupt so process blocking is
3774 *	not allowed.
3775 *
3776 *	bufdone is also responsible for setting B_CACHE in a B_VMIO bp.
3777 *	In a non-VMIO bp, B_CACHE will be set on the next getblk()
3778 *	assuming B_INVAL is clear.
3779 *
3780 *	For the VMIO case, we set B_CACHE if the op was a read and no
3781 *	read error occurred, or if the op was a write.  B_CACHE is never
3782 *	set if the buffer is invalid or otherwise uncacheable.
3783 *
3784 *	bufdone does not mess with B_INVAL, allowing the I/O routine or the
3785 *	initiator to leave B_INVAL set to brelse the buffer out of existence
3786 *	in the completion routine.
3787 */
3788void
3789bufdone(struct buf *bp)
3790{
3791	struct bufobj *dropobj;
3792	void    (*biodone)(struct buf *);
3793
3794	CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
3795	dropobj = NULL;
3796
3797	KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
3798	BUF_ASSERT_HELD(bp);
3799
3800	runningbufwakeup(bp);
3801	if (bp->b_iocmd == BIO_WRITE)
3802		dropobj = bp->b_bufobj;
3803	/* call optional completion function if requested */
3804	if (bp->b_iodone != NULL) {
3805		biodone = bp->b_iodone;
3806		bp->b_iodone = NULL;
3807		(*biodone) (bp);
3808		if (dropobj)
3809			bufobj_wdrop(dropobj);
3810		return;
3811	}
3812
3813	bufdone_finish(bp);
3814
3815	if (dropobj)
3816		bufobj_wdrop(dropobj);
3817}
3818
3819void
3820bufdone_finish(struct buf *bp)
3821{
3822	BUF_ASSERT_HELD(bp);
3823
3824	if (!LIST_EMPTY(&bp->b_dep))
3825		buf_complete(bp);
3826
3827	if (bp->b_flags & B_VMIO) {
3828		vm_ooffset_t foff;
3829		vm_page_t m;
3830		vm_object_t obj;
3831		struct vnode *vp;
3832		int bogus, i, iosize;
3833
3834		obj = bp->b_bufobj->bo_object;
3835		KASSERT(obj->paging_in_progress >= bp->b_npages,
3836		    ("biodone_finish: paging in progress(%d) < b_npages(%d)",
3837		    obj->paging_in_progress, bp->b_npages));
3838
3839		vp = bp->b_vp;
3840		KASSERT(vp->v_holdcnt > 0,
3841		    ("biodone_finish: vnode %p has zero hold count", vp));
3842		KASSERT(vp->v_object != NULL,
3843		    ("biodone_finish: vnode %p has no vm_object", vp));
3844
3845		foff = bp->b_offset;
3846		KASSERT(bp->b_offset != NOOFFSET,
3847		    ("biodone_finish: bp %p has no buffer offset", bp));
3848
3849		/*
3850		 * Set B_CACHE if the op was a normal read and no error
3851		 * occurred.  B_CACHE is set for writes in the b*write()
3852		 * routines.
3853		 */
3854		iosize = bp->b_bcount - bp->b_resid;
3855		if (bp->b_iocmd == BIO_READ &&
3856		    !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
3857		    !(bp->b_ioflags & BIO_ERROR)) {
3858			bp->b_flags |= B_CACHE;
3859		}
3860		bogus = 0;
3861		VM_OBJECT_WLOCK(obj);
3862		for (i = 0; i < bp->b_npages; i++) {
3863			int bogusflag = 0;
3864			int resid;
3865
3866			resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;
3867			if (resid > iosize)
3868				resid = iosize;
3869
3870			/*
3871			 * cleanup bogus pages, restoring the originals
3872			 */
3873			m = bp->b_pages[i];
3874			if (m == bogus_page) {
3875				bogus = bogusflag = 1;
3876				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
3877				if (m == NULL)
3878					panic("biodone: page disappeared!");
3879				bp->b_pages[i] = m;
3880			}
3881			KASSERT(OFF_TO_IDX(foff) == m->pindex,
3882			    ("biodone_finish: foff(%jd)/pindex(%ju) mismatch",
3883			    (intmax_t)foff, (uintmax_t)m->pindex));
3884
3885			/*
3886			 * In the write case, the valid and clean bits are
3887			 * already changed correctly ( see bdwrite() ), so we
3888			 * only need to do this here in the read case.
3889			 */
3890			if ((bp->b_iocmd == BIO_READ) && !bogusflag && resid > 0) {
3891				KASSERT((m->dirty & vm_page_bits(foff &
3892				    PAGE_MASK, resid)) == 0, ("bufdone_finish:"
3893				    " page %p has unexpected dirty bits", m));
3894				vfs_page_set_valid(bp, foff, m);
3895			}
3896
3897			vm_page_sunbusy(m);
3898			vm_object_pip_subtract(obj, 1);
3899			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3900			iosize -= resid;
3901		}
3902		vm_object_pip_wakeupn(obj, 0);
3903		VM_OBJECT_WUNLOCK(obj);
3904		if (bogus && buf_mapped(bp)) {
3905			BUF_CHECK_MAPPED(bp);
3906			pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
3907			    bp->b_pages, bp->b_npages);
3908		}
3909	}
3910
3911	/*
3912	 * For asynchronous completions, release the buffer now. The brelse
3913	 * will do a wakeup there if necessary - so no need to do a wakeup
3914	 * here in the async case. The sync case always needs to do a wakeup.
3915	 */
3916
3917	if (bp->b_flags & B_ASYNC) {
3918		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) || (bp->b_ioflags & BIO_ERROR))
3919			brelse(bp);
3920		else
3921			bqrelse(bp);
3922	} else
3923		bdone(bp);
3924}
3925
3926/*
3927 * This routine is called in lieu of iodone in the case of
3928 * incomplete I/O.  This keeps the busy status for pages
3929 * consistent.
3930 */
3931void
3932vfs_unbusy_pages(struct buf *bp)
3933{
3934	int i;
3935	vm_object_t obj;
3936	vm_page_t m;
3937
3938	runningbufwakeup(bp);
3939	if (!(bp->b_flags & B_VMIO))
3940		return;
3941
3942	obj = bp->b_bufobj->bo_object;
3943	VM_OBJECT_WLOCK(obj);
3944	for (i = 0; i < bp->b_npages; i++) {
3945		m = bp->b_pages[i];
3946		if (m == bogus_page) {
3947			m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
3948			if (!m)
3949				panic("vfs_unbusy_pages: page missing\n");
3950			bp->b_pages[i] = m;
3951			if (buf_mapped(bp)) {
3952				BUF_CHECK_MAPPED(bp);
3953				pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
3954				    bp->b_pages, bp->b_npages);
3955			} else
3956				BUF_CHECK_UNMAPPED(bp);
3957		}
3958		vm_object_pip_subtract(obj, 1);
3959		vm_page_sunbusy(m);
3960	}
3961	vm_object_pip_wakeupn(obj, 0);
3962	VM_OBJECT_WUNLOCK(obj);
3963}
3964
3965/*
3966 * vfs_page_set_valid:
3967 *
3968 *	Set the valid bits in a page based on the supplied offset.   The
3969 *	range is restricted to the buffer's size.
3970 *
3971 *	This routine is typically called after a read completes.
3972 */
3973static void
3974vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m)
3975{
3976	vm_ooffset_t eoff;
3977
3978	/*
3979	 * Compute the end offset, eoff, such that [off, eoff) does not span a
3980	 * page boundary and eoff is not greater than the end of the buffer.
3981	 * The end of the buffer, in this case, is our file EOF, not the
3982	 * allocation size of the buffer.
3983	 */
3984	eoff = (off + PAGE_SIZE) & ~(vm_ooffset_t)PAGE_MASK;
3985	if (eoff > bp->b_offset + bp->b_bcount)
3986		eoff = bp->b_offset + bp->b_bcount;
3987
3988	/*
3989	 * Set valid range.  This is typically the entire buffer and thus the
3990	 * entire page.
3991	 */
3992	if (eoff > off)
3993		vm_page_set_valid_range(m, off & PAGE_MASK, eoff - off);
3994}
3995
3996/*
3997 * vfs_page_set_validclean:
3998 *
3999 *	Set the valid bits and clear the dirty bits in a page based on the
4000 *	supplied offset.   The range is restricted to the buffer's size.
4001 */
4002static void
4003vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, vm_page_t m)
4004{
4005	vm_ooffset_t soff, eoff;
4006
4007	/*
4008	 * Start and end offsets in buffer.  eoff - soff may not cross a
4009	 * page boundary or cross the end of the buffer.  The end of the
4010	 * buffer, in this case, is our file EOF, not the allocation size
4011	 * of the buffer.
4012	 */
4013	soff = off;
4014	eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
4015	if (eoff > bp->b_offset + bp->b_bcount)
4016		eoff = bp->b_offset + bp->b_bcount;
4017
4018	/*
4019	 * Set valid range.  This is typically the entire buffer and thus the
4020	 * entire page.
4021	 */
4022	if (eoff > soff) {
4023		vm_page_set_validclean(
4024		    m,
4025		   (vm_offset_t) (soff & PAGE_MASK),
4026		   (vm_offset_t) (eoff - soff)
4027		);
4028	}
4029}
4030
4031/*
4032 * Ensure that all buffer pages are not exclusive busied.  If any page is
4033 * exclusive busy, drain it.
4034 */
4035void
4036vfs_drain_busy_pages(struct buf *bp)
4037{
4038	vm_page_t m;
4039	int i, last_busied;
4040
4041	VM_OBJECT_ASSERT_WLOCKED(bp->b_bufobj->bo_object);
4042	last_busied = 0;
4043	for (i = 0; i < bp->b_npages; i++) {
4044		m = bp->b_pages[i];
4045		if (vm_page_xbusied(m)) {
4046			for (; last_busied < i; last_busied++)
4047				vm_page_sbusy(bp->b_pages[last_busied]);
4048			while (vm_page_xbusied(m)) {
4049				vm_page_lock(m);
4050				VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
4051				vm_page_busy_sleep(m, "vbpage");
4052				VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
4053			}
4054		}
4055	}
4056	for (i = 0; i < last_busied; i++)
4057		vm_page_sunbusy(bp->b_pages[i]);
4058}
4059
4060/*
4061 * This routine is called before a device strategy routine.
4062 * It is used to tell the VM system that paging I/O is in
4063 * progress, and treat the pages associated with the buffer
4064 * almost as being exclusive busy.  Also the object paging_in_progress
4065 * flag is handled to make sure that the object doesn't become
4066 * inconsistent.
4067 *
4068 * Since I/O has not been initiated yet, certain buffer flags
4069 * such as BIO_ERROR or B_INVAL may be in an inconsistent state
4070 * and should be ignored.
4071 */
4072void
4073vfs_busy_pages(struct buf *bp, int clear_modify)
4074{
4075	int i, bogus;
4076	vm_object_t obj;
4077	vm_ooffset_t foff;
4078	vm_page_t m;
4079
4080	if (!(bp->b_flags & B_VMIO))
4081		return;
4082
4083	obj = bp->b_bufobj->bo_object;
4084	foff = bp->b_offset;
4085	KASSERT(bp->b_offset != NOOFFSET,
4086	    ("vfs_busy_pages: no buffer offset"));
4087	VM_OBJECT_WLOCK(obj);
4088	vfs_drain_busy_pages(bp);
4089	if (bp->b_bufsize != 0)
4090		vfs_setdirty_locked_object(bp);
4091	bogus = 0;
4092	for (i = 0; i < bp->b_npages; i++) {
4093		m = bp->b_pages[i];
4094
4095		if ((bp->b_flags & B_CLUSTER) == 0) {
4096			vm_object_pip_add(obj, 1);
4097			vm_page_sbusy(m);
4098		}
4099		/*
4100		 * When readying a buffer for a read ( i.e.,
4101		 * clear_modify == 0 ), it is important to do
4102		 * bogus_page replacement for valid pages in
4103		 * partially instantiated buffers.  Partially
4104		 * instantiated buffers can, in turn, occur when
4105		 * reconstituting a buffer from its VM backing store
4106		 * base.  We only have to do this if B_CACHE is
4107		 * clear ( which causes the I/O to occur in the
4108		 * first place ).  The replacement prevents the read
4109		 * I/O from overwriting potentially dirty VM-backed
4110		 * pages.  XXX bogus page replacement is, uh, bogus.
4111		 * It may not work properly with small-block devices.
4112		 * We need to find a better way.
4113		 */
4114		if (clear_modify) {
4115			pmap_remove_write(m);
4116			vfs_page_set_validclean(bp, foff, m);
4117		} else if (m->valid == VM_PAGE_BITS_ALL &&
4118		    (bp->b_flags & B_CACHE) == 0) {
4119			bp->b_pages[i] = bogus_page;
4120			bogus++;
4121		}
4122		foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
4123	}
4124	VM_OBJECT_WUNLOCK(obj);
4125	if (bogus && buf_mapped(bp)) {
4126		BUF_CHECK_MAPPED(bp);
4127		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
4128		    bp->b_pages, bp->b_npages);
4129	}
4130}
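
/*
 * Illustrative sketch (compiled out): the standard pairing of
 * vfs_busy_pages() with the strategy call for a synchronous write,
 * roughly the core of what bufwrite() does (bundirty() and the barrier
 * and background-write handling are omitted).  The shared-busy and
 * paging-in-progress accounting taken here is released on completion in
 * bufdone_finish(), or by vfs_unbusy_pages() if the I/O never starts.
 */
#if 0
static int
example_write_buf(struct buf *bp)
{
	int error;

	bp->b_flags &= ~B_DONE;
	bp->b_ioflags &= ~BIO_ERROR;
	bp->b_flags |= B_CACHE;		/* writes leave the data cached */
	bp->b_iocmd = BIO_WRITE;
	bufobj_wref(bp->b_bufobj);	/* count the write against the bufobj */
	vfs_busy_pages(bp, 1);		/* write, so clear_modify == 1 */
	bp->b_iooffset = dbtob(bp->b_blkno);
	bstrategy(bp);
	error = bufwait(bp);		/* synchronous case only */
	brelse(bp);
	return (error);
}
#endif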
4131
4132/*
4133 *	vfs_bio_set_valid:
4134 *
4135 *	Set the range within the buffer to valid.  The range is
4136 *	relative to the beginning of the buffer, b_offset.  Note that
4137 *	b_offset itself may be offset from the beginning of the first
4138 *	page.
4139 */
4140void
4141vfs_bio_set_valid(struct buf *bp, int base, int size)
4142{
4143	int i, n;
4144	vm_page_t m;
4145
4146	if (!(bp->b_flags & B_VMIO))
4147		return;
4148
4149	/*
4150	 * Fixup base to be relative to beginning of first page.
4151	 * Set initial n to be the maximum number of bytes in the
4152	 * first page that can be validated.
4153	 */
4154	base += (bp->b_offset & PAGE_MASK);
4155	n = PAGE_SIZE - (base & PAGE_MASK);
4156
4157	VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
4158	for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
4159		m = bp->b_pages[i];
4160		if (n > size)
4161			n = size;
4162		vm_page_set_valid_range(m, base & PAGE_MASK, n);
4163		base += n;
4164		size -= n;
4165		n = PAGE_SIZE;
4166	}
4167	VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
4168}
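
/*
 * Illustrative worked example (not part of the code), assuming a 4096
 * byte PAGE_SIZE: with b_offset = 0x1200, vfs_bio_set_valid(bp, 0x100,
 * 0x2000) rebases base to 0x300 within page 0, so page 0 has its last
 * 0xd00 bytes marked valid, page 1 is marked valid in full, and the
 * remaining 0x300 bytes are marked valid at the start of page 2.
 */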
4169
4170/*
4171 *	vfs_bio_clrbuf:
4172 *
4173 *	If the specified buffer is a non-VMIO buffer, clear the entire
4174 *	buffer.  If the specified buffer is a VMIO buffer, clear and
4175 *	validate only the previously invalid portions of the buffer.
4176 *	This routine essentially fakes an I/O, so we need to clear
4177 *	BIO_ERROR and B_INVAL.
4178 *
4179 *	Note that while we only theoretically need to clear through b_bcount,
4180 *	we go ahead and clear through b_bufsize.
4181 */
4182void
4183vfs_bio_clrbuf(struct buf *bp)
4184{
4185	int i, j, mask, sa, ea, slide;
4186
4187	if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) {
4188		clrbuf(bp);
4189		return;
4190	}
4191	bp->b_flags &= ~B_INVAL;
4192	bp->b_ioflags &= ~BIO_ERROR;
4193	VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
4194	if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
4195	    (bp->b_offset & PAGE_MASK) == 0) {
4196		if (bp->b_pages[0] == bogus_page)
4197			goto unlock;
4198		mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
4199		VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[0]->object);
4200		if ((bp->b_pages[0]->valid & mask) == mask)
4201			goto unlock;
4202		if ((bp->b_pages[0]->valid & mask) == 0) {
4203			pmap_zero_page_area(bp->b_pages[0], 0, bp->b_bufsize);
4204			bp->b_pages[0]->valid |= mask;
4205			goto unlock;
4206		}
4207	}
4208	sa = bp->b_offset & PAGE_MASK;
4209	slide = 0;
4210	for (i = 0; i < bp->b_npages; i++, sa = 0) {
4211		slide = imin(slide + PAGE_SIZE, bp->b_offset + bp->b_bufsize);
4212		ea = slide & PAGE_MASK;
4213		if (ea == 0)
4214			ea = PAGE_SIZE;
4215		if (bp->b_pages[i] == bogus_page)
4216			continue;
4217		j = sa / DEV_BSIZE;
4218		mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
4219		VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[i]->object);
4220		if ((bp->b_pages[i]->valid & mask) == mask)
4221			continue;
4222		if ((bp->b_pages[i]->valid & mask) == 0)
4223			pmap_zero_page_area(bp->b_pages[i], sa, ea - sa);
4224		else {
4225			for (; sa < ea; sa += DEV_BSIZE, j++) {
4226				if ((bp->b_pages[i]->valid & (1 << j)) == 0) {
4227					pmap_zero_page_area(bp->b_pages[i],
4228					    sa, DEV_BSIZE);
4229				}
4230			}
4231		}
4232		bp->b_pages[i]->valid |= mask;
4233	}
4234unlock:
4235	VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
4236	bp->b_resid = 0;
4237}
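
/*
 * Illustrative worked example (not part of the code), assuming 4096
 * byte pages and DEV_BSIZE == 512: for a page where the buffer covers
 * sa = 0x400 through ea = 0x1000, j = sa / DEV_BSIZE = 2 and
 * mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j = 0x3f << 2 = 0xfc,
 * i.e. valid bits 2..7 of that page correspond to the covered range.
 */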
4238
4239void
4240vfs_bio_bzero_buf(struct buf *bp, int base, int size)
4241{
4242	vm_page_t m;
4243	int i, n;
4244
4245	if (buf_mapped(bp)) {
4246		BUF_CHECK_MAPPED(bp);
4247		bzero(bp->b_data + base, size);
4248	} else {
4249		BUF_CHECK_UNMAPPED(bp);
4250		n = PAGE_SIZE - (base & PAGE_MASK);
4251		for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
4252			m = bp->b_pages[i];
4253			if (n > size)
4254				n = size;
4255			pmap_zero_page_area(m, base & PAGE_MASK, n);
4256			base += n;
4257			size -= n;
4258			n = PAGE_SIZE;
4259		}
4260	}
4261}
4262
4263/*
4264 * vm_hold_load_pages and vm_hold_free_pages get pages into
4265 * a buffer's address space.  The pages are anonymous and are
4266 * not associated with a file object.
4267 */
4268static void
4269vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
4270{
4271	vm_offset_t pg;
4272	vm_page_t p;
4273	int index;
4274
4275	BUF_CHECK_MAPPED(bp);
4276
4277	to = round_page(to);
4278	from = round_page(from);
4279	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
4280
4281	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
4282tryagain:
4283		/*
4284		 * note: must allocate system pages since blocking here
4285		 * could interfere with paging I/O, no matter which
4286		 * process we are.
4287		 */
4288		p = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ |
4289		    VM_ALLOC_WIRED | VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT));
4290		if (p == NULL) {
4291			VM_WAIT;
4292			goto tryagain;
4293		}
4294		pmap_qenter(pg, &p, 1);
4295		bp->b_pages[index] = p;
4296	}
4297	bp->b_npages = index;
4298}
4299
4300/* Return pages associated with this buf to the vm system */
4301static void
4302vm_hold_free_pages(struct buf *bp, int newbsize)
4303{
4304	vm_offset_t from;
4305	vm_page_t p;
4306	int index, newnpages;
4307
4308	BUF_CHECK_MAPPED(bp);
4309
4310	from = round_page((vm_offset_t)bp->b_data + newbsize);
4311	newnpages = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
4312	if (bp->b_npages > newnpages)
4313		pmap_qremove(from, bp->b_npages - newnpages);
4314	for (index = newnpages; index < bp->b_npages; index++) {
4315		p = bp->b_pages[index];
4316		bp->b_pages[index] = NULL;
4317		if (vm_page_sbusied(p))
4318			printf("vm_hold_free_pages: blkno: %jd, lblkno: %jd\n",
4319			    (intmax_t)bp->b_blkno, (intmax_t)bp->b_lblkno);
4320		p->wire_count--;
4321		vm_page_free(p);
4322		atomic_subtract_int(&vm_cnt.v_wire_count, 1);
4323	}
4324	bp->b_npages = newnpages;
4325}
4326
4327/*
4328 * Map an IO request into kernel virtual address space.
4329 *
4330 * All requests are (re)mapped into kernel VA space.
4331 * Notice that we use b_bufsize for the size of the buffer
4332 * to be mapped.  b_bcount might be modified by the driver.
4333 *
4334 * Note that even if the caller determines that the address space should
4335 * be valid, a race or a smaller file mapped into a larger space may
4336 * actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST
4337 * check the return value.
4338 *
4339 * This function only works with pager buffers.
4340 */
4341int
4342vmapbuf(struct buf *bp, int mapbuf)
4343{
4344	vm_prot_t prot;
4345	int pidx;
4346
4347	if (bp->b_bufsize < 0)
4348		return (-1);
4349	prot = VM_PROT_READ;
4350	if (bp->b_iocmd == BIO_READ)
4351		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
4352	if ((pidx = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
4353	    (vm_offset_t)bp->b_data, bp->b_bufsize, prot, bp->b_pages,
4354	    btoc(MAXPHYS))) < 0)
4355		return (-1);
4356	bp->b_npages = pidx;
4357	bp->b_offset = ((vm_offset_t)bp->b_data) & PAGE_MASK;
4358	if (mapbuf || !unmapped_buf_allowed) {
4359		pmap_qenter((vm_offset_t)bp->b_kvabase, bp->b_pages, pidx);
4360		bp->b_data = bp->b_kvabase + bp->b_offset;
4361	} else
4362		bp->b_data = unmapped_buf;
4363	return(0);
4364}
4365
4366/*
4367 * Free the io map PTEs associated with this IO operation.
4368 * We also invalidate the TLB entries and restore the original b_addr.
4369 *
4370 * This function only works with pager buffers.
4371 */
4372void
4373vunmapbuf(struct buf *bp)
4374{
4375	int npages;
4376
4377	npages = bp->b_npages;
4378	if (buf_mapped(bp))
4379		pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
4380	vm_page_unhold_pages(bp->b_pages, npages);
4381
4382	bp->b_data = unmapped_buf;
4383}
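
/*
 * Illustrative sketch (compiled out): the pattern vmapbuf()/vunmapbuf()
 * support, wiring a user buffer into a pager buffer's KVA for the
 * duration of one device transfer, roughly what physio-style and CAM
 * pass-through code does.  The helper name is hypothetical and bp is
 * assumed to be a pager buffer (getpbuf()) with MAXPHYS worth of KVA.
 */
#if 0
static int
example_user_io(struct cdev *dev, struct buf *bp, void *udata, int len,
    int iocmd)
{
	int error;

	bp->b_iocmd = iocmd;			/* BIO_READ or BIO_WRITE */
	bp->b_data = udata;			/* user address, not yet mapped */
	bp->b_bufsize = bp->b_bcount = len;
	if (vmapbuf(bp, 1) < 0)			/* hold the pages and map them */
		return (EFAULT);
	bp->b_iooffset = dbtob(bp->b_blkno);
	dev_strategy(dev, bp);
	error = bufwait(bp);
	vunmapbuf(bp);				/* unmap and unhold the pages */
	return (error);
}
#endif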
4384
4385void
4386bdone(struct buf *bp)
4387{
4388	struct mtx *mtxp;
4389
4390	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4391	mtx_lock(mtxp);
4392	bp->b_flags |= B_DONE;
4393	wakeup(bp);
4394	mtx_unlock(mtxp);
4395}
4396
4397void
4398bwait(struct buf *bp, u_char pri, const char *wchan)
4399{
4400	struct mtx *mtxp;
4401
4402	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4403	mtx_lock(mtxp);
4404	while ((bp->b_flags & B_DONE) == 0)
4405		msleep(bp, mtxp, pri, wchan, 0);
4406	mtx_unlock(mtxp);
4407}
4408
4409int
4410bufsync(struct bufobj *bo, int waitfor)
4411{
4412
4413	return (VOP_FSYNC(bo->__bo_vnode, waitfor, curthread));
4414}
4415
4416void
4417bufstrategy(struct bufobj *bo, struct buf *bp)
4418{
4419	int i = 0;
4420	struct vnode *vp;
4421
4422	vp = bp->b_vp;
4423	KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy"));
4424	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
4425	    ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp));
4426	i = VOP_STRATEGY(vp, bp);
4427	KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp));
4428}
4429
4430void
4431bufobj_wrefl(struct bufobj *bo)
4432{
4433
4434	KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
4435	ASSERT_BO_WLOCKED(bo);
4436	bo->bo_numoutput++;
4437}
4438
4439void
4440bufobj_wref(struct bufobj *bo)
4441{
4442
4443	KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
4444	BO_LOCK(bo);
4445	bo->bo_numoutput++;
4446	BO_UNLOCK(bo);
4447}
4448
4449void
4450bufobj_wdrop(struct bufobj *bo)
4451{
4452
4453	KASSERT(bo != NULL, ("NULL bo in bufobj_wdrop"));
4454	BO_LOCK(bo);
4455	KASSERT(bo->bo_numoutput > 0, ("bufobj_wdrop non-positive count"));
4456	if ((--bo->bo_numoutput == 0) && (bo->bo_flag & BO_WWAIT)) {
4457		bo->bo_flag &= ~BO_WWAIT;
4458		wakeup(&bo->bo_numoutput);
4459	}
4460	BO_UNLOCK(bo);
4461}
4462
4463int
4464bufobj_wwait(struct bufobj *bo, int slpflag, int timeo)
4465{
4466	int error;
4467
4468	KASSERT(bo != NULL, ("NULL bo in bufobj_wwait"));
4469	ASSERT_BO_WLOCKED(bo);
4470	error = 0;
4471	while (bo->bo_numoutput) {
4472		bo->bo_flag |= BO_WWAIT;
4473		error = msleep(&bo->bo_numoutput, BO_LOCKPTR(bo),
4474		    slpflag | (PRIBIO + 1), "bo_wwait", timeo);
4475		if (error)
4476			break;
4477	}
4478	return (error);
4479}
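
/*
 * Illustrative sketch (compiled out): draining the writes in flight on
 * a bufobj, the usual way the bufobj_wref()/bufobj_wdrop() accounting
 * is consumed by the vnode sync paths.  The helper name is hypothetical.
 */
#if 0
static int
example_drain_writes(struct bufobj *bo)
{
	int error;

	BO_LOCK(bo);
	error = bufobj_wwait(bo, 0, 0);	/* sleep until bo_numoutput reaches 0 */
	BO_UNLOCK(bo);
	return (error);
}
#endif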
4480
4481void
4482bpin(struct buf *bp)
4483{
4484	struct mtx *mtxp;
4485
4486	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4487	mtx_lock(mtxp);
4488	bp->b_pin_count++;
4489	mtx_unlock(mtxp);
4490}
4491
4492void
4493bunpin(struct buf *bp)
4494{
4495	struct mtx *mtxp;
4496
4497	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4498	mtx_lock(mtxp);
4499	if (--bp->b_pin_count == 0)
4500		wakeup(bp);
4501	mtx_unlock(mtxp);
4502}
4503
4504void
4505bunpin_wait(struct buf *bp)
4506{
4507	struct mtx *mtxp;
4508
4509	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4510	mtx_lock(mtxp);
4511	while (bp->b_pin_count > 0)
4512		msleep(bp, mtxp, PRIBIO, "bwunpin", 0);
4513	mtx_unlock(mtxp);
4514}
4515
4516/*
4517 * Set bio_data or bio_ma for struct bio from the struct buf.
4518 */
4519void
4520bdata2bio(struct buf *bp, struct bio *bip)
4521{
4522
4523	if (!buf_mapped(bp)) {
4524		KASSERT(unmapped_buf_allowed, ("unmapped"));
4525		bip->bio_ma = bp->b_pages;
4526		bip->bio_ma_n = bp->b_npages;
4527		bip->bio_data = unmapped_buf;
4528		bip->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
4529		bip->bio_flags |= BIO_UNMAPPED;
4530		KASSERT(round_page(bip->bio_ma_offset + bip->bio_length) /
4531		    PAGE_SIZE == bp->b_npages,
4532		    ("Buffer %p too short: %d %lld %d", bp, bip->bio_ma_offset,
4533		    (long long)bip->bio_length, bip->bio_ma_n));
4534	} else {
4535		bip->bio_data = bp->b_data;
4536		bip->bio_ma = NULL;
4537	}
4538}
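
/*
 * Illustrative sketch (compiled out): a strategy routine consuming a
 * bio built by bdata2bio() must handle both representations.  The copy
 * helper for the unmapped case is hypothetical; real drivers typically
 * go through busdma or an sf_buf/pmap-based copy loop.
 */
#if 0
static void
example_consume_bio(struct bio *bp, char *dst)
{

	if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
		/* Page list only: bio_ma/bio_ma_offset describe the data. */
		example_copy_from_pages(bp->bio_ma, bp->bio_ma_offset, dst,
		    bp->bio_length);
	} else {
		/* Mapped into KVA: bio_data can be used directly. */
		bcopy(bp->bio_data, dst, bp->bio_length);
	}
	bp->bio_resid = 0;
	biodone(bp);
}
#endif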
4539
4540#include "opt_ddb.h"
4541#ifdef DDB
4542#include <ddb/ddb.h>
4543
4544/* DDB command to show buffer data */
4545DB_SHOW_COMMAND(buffer, db_show_buffer)
4546{
4547	/* get args */
4548	struct buf *bp = (struct buf *)addr;
4549
4550	if (!have_addr) {
4551		db_printf("usage: show buffer <addr>\n");
4552		return;
4553	}
4554
4555	db_printf("buf at %p\n", bp);
4556	db_printf("b_flags = 0x%b, b_xflags=0x%b, b_vflags=0x%b\n",
4557	    (u_int)bp->b_flags, PRINT_BUF_FLAGS, (u_int)bp->b_xflags,
4558	    PRINT_BUF_XFLAGS, (u_int)bp->b_vflags, PRINT_BUF_VFLAGS);
4559	db_printf(
4560	    "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n"
4561	    "b_bufobj = (%p), b_data = %p, b_blkno = %jd, b_lblkno = %jd, "
4562	    "b_dep = %p\n",
4563	    bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
4564	    bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno,
4565	    (intmax_t)bp->b_lblkno, bp->b_dep.lh_first);
4566	db_printf("b_kvabase = %p, b_kvasize = %d\n",
4567	    bp->b_kvabase, bp->b_kvasize);
4568	if (bp->b_npages) {
4569		int i;
4570		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
4571		for (i = 0; i < bp->b_npages; i++) {
4572			vm_page_t m;
4573			m = bp->b_pages[i];
4574			db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
4575			    (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
4576			if ((i + 1) < bp->b_npages)
4577				db_printf(",");
4578		}
4579		db_printf("\n");
4580	}
4581	db_printf(" ");
4582	BUF_LOCKPRINTINFO(bp);
4583}
4584
4585DB_SHOW_COMMAND(lockedbufs, lockedbufs)
4586{
4587	struct buf *bp;
4588	int i;
4589
4590	for (i = 0; i < nbuf; i++) {
4591		bp = &buf[i];
4592		if (BUF_ISLOCKED(bp)) {
4593			db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4594			db_printf("\n");
4595		}
4596	}
4597}
4598
4599DB_SHOW_COMMAND(vnodebufs, db_show_vnodebufs)
4600{
4601	struct vnode *vp;
4602	struct buf *bp;
4603
4604	if (!have_addr) {
4605		db_printf("usage: show vnodebufs <addr>\n");
4606		return;
4607	}
4608	vp = (struct vnode *)addr;
4609	db_printf("Clean buffers:\n");
4610	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_clean.bv_hd, b_bobufs) {
4611		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4612		db_printf("\n");
4613	}
4614	db_printf("Dirty buffers:\n");
4615	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
4616		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4617		db_printf("\n");
4618	}
4619}
4620
4621DB_COMMAND(countfreebufs, db_countfreebufs)
4622{
4623	struct buf *bp;
4624	int i, used = 0, nfree = 0;
4625
4626	if (have_addr) {
4627		db_printf("usage: countfreebufs\n");
4628		return;
4629	}
4630
4631	for (i = 0; i < nbuf; i++) {
4632		bp = &buf[i];
4633		if ((bp->b_flags & B_INFREECNT) != 0)
4634			nfree++;
4635		else
4636			used++;
4637	}
4638
4639	db_printf("Counted %d free, %d used (%d tot)\n", nfree, used,
4640	    nfree + used);
4641	db_printf("numfreebuffers is %d\n", numfreebuffers);
4642}
4643#endif /* DDB */
4644