1/*-
2 * Copyright (c) 2004 Poul-Henning Kamp
3 * Copyright (c) 1994,1997 John S. Dyson
4 * Copyright (c) 2013 The FreeBSD Foundation
5 * All rights reserved.
6 *
7 * Portions of this software were developed by Konstantin Belousov
8 * under sponsorship from the FreeBSD Foundation.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32/*
33 * this file contains a new buffer I/O scheme implementing a coherent
34 * VM object and buffer cache scheme.  Pains have been taken to make
35 * sure that the performance degradation associated with schemes such
36 * as this is not realized.
37 *
38 * Author:  John S. Dyson
39 * Significant help during the development and debugging phases
40 * had been provided by David Greenman, also of the FreeBSD core team.
41 *
42 * see man buf(9) for more info.
43 */
44
45#include <sys/cdefs.h>
46__FBSDID("$FreeBSD: stable/11/sys/kern/vfs_bio.c 367145 2020-10-29 22:00:15Z brooks $");
47
48#include <sys/param.h>
49#include <sys/systm.h>
50#include <sys/bio.h>
51#include <sys/conf.h>
52#include <sys/buf.h>
53#include <sys/devicestat.h>
54#include <sys/eventhandler.h>
55#include <sys/fail.h>
56#include <sys/limits.h>
57#include <sys/lock.h>
58#include <sys/malloc.h>
59#include <sys/mount.h>
60#include <sys/mutex.h>
61#include <sys/kernel.h>
62#include <sys/kthread.h>
63#include <sys/proc.h>
64#include <sys/racct.h>
65#include <sys/resourcevar.h>
66#include <sys/rwlock.h>
67#include <sys/smp.h>
68#include <sys/sysctl.h>
69#include <sys/sysproto.h>
70#include <sys/vmem.h>
71#include <sys/vmmeter.h>
72#include <sys/vnode.h>
73#include <sys/watchdog.h>
74#include <geom/geom.h>
75#include <vm/vm.h>
76#include <vm/vm_param.h>
77#include <vm/vm_kern.h>
78#include <vm/vm_object.h>
79#include <vm/vm_page.h>
80#include <vm/vm_pageout.h>
81#include <vm/vm_pager.h>
82#include <vm/vm_extern.h>
83#include <vm/vm_map.h>
84#include <vm/swap_pager.h>
85#include "opt_compat.h"
86#include "opt_swap.h"
87
88static MALLOC_DEFINE(M_BIOBUF, "biobuf", "BIO buffer");
89
90struct	bio_ops bioops;		/* I/O operation notification */
91
92struct	buf_ops buf_ops_bio = {
93	.bop_name	=	"buf_ops_bio",
94	.bop_write	=	bufwrite,
95	.bop_strategy	=	bufstrategy,
96	.bop_sync	=	bufsync,
97	.bop_bdflush	=	bufbdflush,
98};
99
100static struct buf *buf;		/* buffer header pool */
101extern struct buf *swbuf;	/* Swap buffer header pool. */
102caddr_t unmapped_buf;
103
104/* Used below and for softdep flushing threads in ufs/ffs/ffs_softdep.c */
105struct proc *bufdaemonproc;
106struct proc *bufspacedaemonproc;
107
108static int inmem(struct vnode *vp, daddr_t blkno);
109static void vm_hold_free_pages(struct buf *bp, int newbsize);
110static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
111		vm_offset_t to);
112static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m);
113static void vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off,
114		vm_page_t m);
115static void vfs_clean_pages_dirty_buf(struct buf *bp);
116static void vfs_setdirty_locked_object(struct buf *bp);
117static void vfs_vmio_invalidate(struct buf *bp);
118static void vfs_vmio_truncate(struct buf *bp, int npages);
119static void vfs_vmio_extend(struct buf *bp, int npages, int size);
120static int vfs_bio_clcheck(struct vnode *vp, int size,
121		daddr_t lblkno, daddr_t blkno);
122static int buf_flush(struct vnode *vp, int);
123static int buf_recycle(bool);
124static int buf_scan(bool);
125static int flushbufqueues(struct vnode *, int, int);
126static void buf_daemon(void);
127static void bremfreel(struct buf *bp);
128static __inline void bd_wakeup(void);
129static int sysctl_runningspace(SYSCTL_HANDLER_ARGS);
130static void bufkva_reclaim(vmem_t *, int);
131static void bufkva_free(struct buf *);
132static int buf_import(void *, void **, int, int);
133static void buf_release(void *, void **, int);
134static void maxbcachebuf_adjust(void);
135
136#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
137    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
138static int sysctl_bufspace(SYSCTL_HANDLER_ARGS);
139#endif
140
141int vmiodirenable = TRUE;
142SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
143    "Use the VM system for directory writes");
144long runningbufspace;
145SYSCTL_LONG(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
146    "Amount of presently outstanding async buffer io");
147static long bufspace;
148#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
149    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
150SYSCTL_PROC(_vfs, OID_AUTO, bufspace, CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RD,
151    &bufspace, 0, sysctl_bufspace, "L", "Virtual memory used for buffers");
152#else
153SYSCTL_LONG(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0,
154    "Physical memory used for buffers");
155#endif
156static long bufkvaspace;
157SYSCTL_LONG(_vfs, OID_AUTO, bufkvaspace, CTLFLAG_RD, &bufkvaspace, 0,
158    "Kernel virtual memory used for buffers");
159static long maxbufspace;
160SYSCTL_LONG(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RW, &maxbufspace, 0,
161    "Maximum allowed value of bufspace (including metadata)");
162static long bufmallocspace;
163SYSCTL_LONG(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
164    "Amount of malloced memory for buffers");
165static long maxbufmallocspace;
166SYSCTL_LONG(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace,
167    0, "Maximum amount of malloced memory for buffers");
168static long lobufspace;
169SYSCTL_LONG(_vfs, OID_AUTO, lobufspace, CTLFLAG_RW, &lobufspace, 0,
170    "Minimum amount of buffer space we want to have");
171long hibufspace;
172SYSCTL_LONG(_vfs, OID_AUTO, hibufspace, CTLFLAG_RW, &hibufspace, 0,
173    "Maximum allowed value of bufspace (excluding metadata)");
174long bufspacethresh;
175SYSCTL_LONG(_vfs, OID_AUTO, bufspacethresh, CTLFLAG_RW, &bufspacethresh,
176    0, "Bufspace consumed before waking the daemon to free some");
177static int buffreekvacnt;
178SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt, 0,
179    "Number of times we have freed the KVA space from some buffer");
180static int bufdefragcnt;
181SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt, 0,
182    "Number of times we have had to repeat buffer allocation to defragment");
183static long lorunningspace;
184SYSCTL_PROC(_vfs, OID_AUTO, lorunningspace, CTLTYPE_LONG | CTLFLAG_MPSAFE |
185    CTLFLAG_RW, &lorunningspace, 0, sysctl_runningspace, "L",
186    "Minimum preferred space used for in-progress I/O");
187static long hirunningspace;
188SYSCTL_PROC(_vfs, OID_AUTO, hirunningspace, CTLTYPE_LONG | CTLFLAG_MPSAFE |
189    CTLFLAG_RW, &hirunningspace, 0, sysctl_runningspace, "L",
190    "Maximum amount of space to use for in-progress I/O");
191int dirtybufferflushes;
192SYSCTL_INT(_vfs, OID_AUTO, dirtybufferflushes, CTLFLAG_RW, &dirtybufferflushes,
193    0, "Number of bdwrite to bawrite conversions to limit dirty buffers");
194int bdwriteskip;
195SYSCTL_INT(_vfs, OID_AUTO, bdwriteskip, CTLFLAG_RW, &bdwriteskip,
196    0, "Number of buffers supplied to bdwrite with snapshot deadlock risk");
197int altbufferflushes;
198SYSCTL_INT(_vfs, OID_AUTO, altbufferflushes, CTLFLAG_RW, &altbufferflushes,
199    0, "Number of fsync flushes to limit dirty buffers");
200static int recursiveflushes;
201SYSCTL_INT(_vfs, OID_AUTO, recursiveflushes, CTLFLAG_RW, &recursiveflushes,
202    0, "Number of flushes skipped due to being recursive");
203static int numdirtybuffers;
204SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD, &numdirtybuffers, 0,
205    "Number of buffers that are dirty (have unwritten changes) at the moment");
206static int lodirtybuffers;
207SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW, &lodirtybuffers, 0,
208    "How many buffers we want to have free before bufdaemon can sleep");
209static int hidirtybuffers;
210SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW, &hidirtybuffers, 0,
211    "When the number of dirty buffers is considered severe");
212int dirtybufthresh;
213SYSCTL_INT(_vfs, OID_AUTO, dirtybufthresh, CTLFLAG_RW, &dirtybufthresh,
214    0, "Number of bdwrite to bawrite conversions to clear dirty buffers");
215static int numfreebuffers;
216SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0,
217    "Number of free buffers");
218static int lofreebuffers;
219SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW, &lofreebuffers, 0,
220   "Target number of free buffers");
221static int hifreebuffers;
222SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW, &hifreebuffers, 0,
223   "Threshold for clean buffer recycling");
224static int getnewbufcalls;
225SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RW, &getnewbufcalls, 0,
226   "Number of calls to getnewbuf");
227static int getnewbufrestarts;
228SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RW, &getnewbufrestarts, 0,
229    "Number of times getnewbuf has had to restart a buffer acquisition");
230static int mappingrestarts;
231SYSCTL_INT(_vfs, OID_AUTO, mappingrestarts, CTLFLAG_RW, &mappingrestarts, 0,
232    "Number of times getblk has had to restart a buffer mapping for "
233    "unmapped buffer");
234static int numbufallocfails;
235SYSCTL_INT(_vfs, OID_AUTO, numbufallocfails, CTLFLAG_RW, &numbufallocfails, 0,
236    "Number of times buffer allocations failed");
237static int flushbufqtarget = 100;
238SYSCTL_INT(_vfs, OID_AUTO, flushbufqtarget, CTLFLAG_RW, &flushbufqtarget, 0,
239    "Amount of work to do in flushbufqueues when helping bufdaemon");
240static long notbufdflushes;
241SYSCTL_LONG(_vfs, OID_AUTO, notbufdflushes, CTLFLAG_RD, &notbufdflushes, 0,
242    "Number of dirty buffer flushes done by the bufdaemon helpers");
243static long barrierwrites;
244SYSCTL_LONG(_vfs, OID_AUTO, barrierwrites, CTLFLAG_RW, &barrierwrites, 0,
245    "Number of barrier writes");
246SYSCTL_INT(_vfs, OID_AUTO, unmapped_buf_allowed, CTLFLAG_RD,
247    &unmapped_buf_allowed, 0,
248    "Permit the use of unmapped I/O");
249int maxbcachebuf = MAXBCACHEBUF;
250SYSCTL_INT(_vfs, OID_AUTO, maxbcachebuf, CTLFLAG_RDTUN, &maxbcachebuf, 0,
251    "Maximum size of a buffer cache block");
252
253/*
254 * This lock synchronizes access to bd_request.
255 */
256static struct mtx_padalign __exclusive_cache_line bdlock;
257
258/*
259 * This lock protects the runningbufreq and synchronizes runningbufwakeup and
260 * waitrunningbufspace().
261 */
262static struct mtx_padalign __exclusive_cache_line rbreqlock;
263
264/*
265 * Lock that protects needsbuffer and the sleeps/wakeups surrounding it.
266 */
267static struct rwlock_padalign __exclusive_cache_line nblock;
268
269/*
270 * Lock that protects bdirtywait.
271 */
272static struct mtx_padalign __exclusive_cache_line bdirtylock;
273
274/*
275 * Wakeup point for bufdaemon, as well as indicator of whether it is already
276 * active.  Set to 1 when the bufdaemon is already "on" the queue, 0 when it
277 * is idling.
278 */
279static int bd_request;
280
281/*
282 * Request/wakeup point for the bufspace daemon.
283 */
284static int bufspace_request;
285
286/*
287 * Request for the buf daemon to write more buffers than is indicated by
288 * lodirtybuf.  This may be necessary to push out excess dependencies or
289 * defragment the address space where a simple count of the number of dirty
290 * buffers is insufficient to characterize the demand for flushing them.
291 */
292static int bd_speedupreq;
293
294/*
295 * bogus page -- for I/O to/from partially complete buffers
296 * this is a temporary solution to the problem, but it is not
297 * really that bad.  it would be better to split the buffer
298 * for input in the case of buffers partially already in memory,
299 * but the code is intricate enough already.
300 */
301vm_page_t bogus_page;
302
303/*
304 * Synchronization (sleep/wakeup) variable for active buffer space requests.
305 * Set when wait starts, cleared prior to wakeup().
306 * Used in runningbufwakeup() and waitrunningbufspace().
307 */
308static int runningbufreq;
309
310/*
311 * Synchronization (sleep/wakeup) variable for buffer requests.
312 * Can contain the VFS_BIO_NEED flags defined below; setting/clearing is done
313 * by and/or.
314 * Used in numdirtywakeup(), bufspace_wakeup(), bwillwrite(),
315 * getnewbuf(), and getblk().
316 */
317static volatile int needsbuffer;
318
319/*
320 * Synchronization for bwillwrite() waiters.
321 */
322static int bdirtywait;
323
324/*
325 * Definitions for the buffer free lists.
326 */
327#define QUEUE_NONE	0	/* on no queue */
328#define QUEUE_EMPTY	1	/* empty buffer headers */
329#define QUEUE_DIRTY	2	/* B_DELWRI buffers */
330#define QUEUE_CLEAN	3	/* non-B_DELWRI buffers */
331#define QUEUE_SENTINEL	1024	/* not a queue index, but a sentinel marker */
332
333/* Maximum number of clean buffer queues. */
334#define	CLEAN_QUEUES	16
335
336/* Configured number of clean queues. */
337static int clean_queues;
338
339/* Maximum number of buffer queues. */
340#define BUFFER_QUEUES	(QUEUE_CLEAN + CLEAN_QUEUES)
341
342/* Queues for free buffers with various properties */
343static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES] = { { 0 } };
344#ifdef INVARIANTS
345static int bq_len[BUFFER_QUEUES];
346#endif
347
348/*
349 * Lock for each bufqueue
350 */
351static struct mtx_padalign __exclusive_cache_line bqlocks[BUFFER_QUEUES];
352
353/*
354 * per-cpu empty buffer cache.
355 */
356uma_zone_t buf_zone;
357
358/*
359 * Single global constant for BUF_WMESG, to avoid getting multiple references.
360 * buf_wmesg is referenced from macros.
361 */
362const char *buf_wmesg = BUF_WMESG;
363
364static int
365sysctl_runningspace(SYSCTL_HANDLER_ARGS)
366{
367	long value;
368	int error;
369
370	value = *(long *)arg1;
371	error = sysctl_handle_long(oidp, &value, 0, req);
372	if (error != 0 || req->newptr == NULL)
373		return (error);
374	mtx_lock(&rbreqlock);
375	if (arg1 == &hirunningspace) {
376		if (value < lorunningspace)
377			error = EINVAL;
378		else
379			hirunningspace = value;
380	} else {
381		KASSERT(arg1 == &lorunningspace,
382		    ("%s: unknown arg1", __func__));
383		if (value > hirunningspace)
384			error = EINVAL;
385		else
386			lorunningspace = value;
387	}
388	mtx_unlock(&rbreqlock);
389	return (error);
390}
391
392#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
393    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
394static int
395sysctl_bufspace(SYSCTL_HANDLER_ARGS)
396{
397	long lvalue;
398	int ivalue;
399
400	if (sizeof(int) == sizeof(long) || req->oldlen >= sizeof(long))
401		return (sysctl_handle_long(oidp, arg1, arg2, req));
402	lvalue = *(long *)arg1;
403	if (lvalue > INT_MAX)
404		/* On overflow, still write out a long to trigger ENOMEM. */
405		return (sysctl_handle_long(oidp, &lvalue, 0, req));
406	ivalue = lvalue;
407	return (sysctl_handle_int(oidp, &ivalue, 0, req));
408}
409#endif
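
/*
 * Editor's note on the compat handler above (illustrative): binaries built
 * against the old ABIs covered by COMPAT_FREEBSD4 through COMPAT_FREEBSD7
 * read vfs.bufspace as an int.  While the counter still fits, the handler
 * narrows it for them; once bufspace exceeds INT_MAX a long is written
 * anyway, so a caller that supplied only a 4-byte buffer gets ENOMEM rather
 * than a silently truncated value.  A modern caller simply asks for a long,
 * e.g.:
 *
 *	long val;
 *	size_t len = sizeof(val);
 *
 *	sysctlbyname("vfs.bufspace", &val, &len, NULL, 0);
 */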
410
411static int
412bqcleanq(void)
413{
414	static int nextq;
415
416	return ((atomic_fetchadd_int(&nextq, 1) % clean_queues) + QUEUE_CLEAN);
417}
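
/*
 * Editor's note (illustrative): atomic_fetchadd_int() returns the value of
 * nextq before the increment, so concurrent callers are handed distinct
 * tickets.  With clean_queues == 4, successive calls map tickets 0, 1, 2,
 * 3, 4, ... to QUEUE_CLEAN + 0, 1, 2, 3, 0, ..., spreading buffers
 * round-robin across the clean queues.
 */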
418
419static int
420bqisclean(int qindex)
421{
422
423	return (qindex >= QUEUE_CLEAN && qindex < QUEUE_CLEAN + CLEAN_QUEUES);
424}
425
426/*
427 *	bqlock:
428 *
429 *	Return the appropriate queue lock based on the index.
430 */
431static inline struct mtx *
432bqlock(int qindex)
433{
434
435	return (struct mtx *)&bqlocks[qindex];
436}
437
438/*
439 *	bdirtywakeup:
440 *
441 *	Wakeup any bwillwrite() waiters.
442 */
443static void
444bdirtywakeup(void)
445{
446	mtx_lock(&bdirtylock);
447	if (bdirtywait) {
448		bdirtywait = 0;
449		wakeup(&bdirtywait);
450	}
451	mtx_unlock(&bdirtylock);
452}
453
454/*
455 *	bdirtysub:
456 *
457 *	Decrement the numdirtybuffers count by one and wakeup any
458 *	threads blocked in bwillwrite().
459 */
460static void
461bdirtysub(void)
462{
463
464	if (atomic_fetchadd_int(&numdirtybuffers, -1) ==
465	    (lodirtybuffers + hidirtybuffers) / 2)
466		bdirtywakeup();
467}
468
469/*
470 *	bdirtyadd:
471 *
472 *	Increment the numdirtybuffers count by one and wakeup the buf
473 *	daemon if needed.
474 */
475static void
476bdirtyadd(void)
477{
478
479	/*
480	 * Only do the wakeup once as we cross the boundary.  The
481	 * buf daemon will keep running until the condition clears.
482	 */
483	if (atomic_fetchadd_int(&numdirtybuffers, 1) ==
484	    (lodirtybuffers + hidirtybuffers) / 2)
485		bd_wakeup();
486}
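
/*
 * Editor's sketch of the transition logic above (hypothetical numbers):
 * with lodirtybuffers == 128 and hidirtybuffers == 512 the wakeup boundary
 * is (128 + 512) / 2 == 320.  Because atomic_fetchadd_int() returns the
 * pre-increment value, exactly one caller observes numdirtybuffers == 320
 * and issues the bd_wakeup(); later increments do not re-wake an already
 * running buf daemon, matching the "only once as we cross the boundary"
 * comment.  bdirtysub() uses the same boundary in the other direction to
 * release bwillwrite() waiters.
 */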
487
488/*
489 *	bufspace_wakeup:
490 *
491 *	Called when buffer space is potentially available for recovery.
492 *	getnewbuf() will block on this flag when it is unable to free
493 *	sufficient buffer space.  Buffer space becomes recoverable when
494 *	bp's get placed back in the queues.
495 */
496static void
497bufspace_wakeup(void)
498{
499
500	/*
501	 * If someone is waiting for bufspace, wake them up.
502	 *
503	 * Since needsbuffer is set prior to doing an additional queue
504	 * scan it is safe to check for the flag prior to acquiring the
505	 * lock.  The thread that is preparing to scan again before
506	 * blocking would discover the buf we released.
507	 */
508	if (needsbuffer) {
509		rw_rlock(&nblock);
510		if (atomic_cmpset_int(&needsbuffer, 1, 0) == 1)
511			wakeup(__DEVOLATILE(void *, &needsbuffer));
512		rw_runlock(&nblock);
513	}
514}
515
516/*
517 *	bufspace_daemonwakeup:
518 *
519 *	Wakeup the daemon responsible for freeing clean bufs.
520 */
521static void
522bufspace_daemonwakeup(void)
523{
524	rw_rlock(&nblock);
525	if (bufspace_request == 0) {
526		bufspace_request = 1;
527		wakeup(&bufspace_request);
528	}
529	rw_runlock(&nblock);
530}
531
532/*
533 *	bufspace_adjust:
534 *
535 *	Adjust the reported bufspace for a KVA managed buffer, possibly
536 * 	waking any waiters.
537 */
538static void
539bufspace_adjust(struct buf *bp, int bufsize)
540{
541	long space;
542	int diff;
543
544	KASSERT((bp->b_flags & B_MALLOC) == 0,
545	    ("bufspace_adjust: malloc buf %p", bp));
546	diff = bufsize - bp->b_bufsize;
547	if (diff < 0) {
548		atomic_subtract_long(&bufspace, -diff);
549		bufspace_wakeup();
550	} else {
551		space = atomic_fetchadd_long(&bufspace, diff);
552		/* Wake up the daemon on the transition. */
553		if (space < bufspacethresh && space + diff >= bufspacethresh)
554			bufspace_daemonwakeup();
555	}
556	bp->b_bufsize = bufsize;
557}
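
/*
 * Editor's sketch (hypothetical numbers): atomic_fetchadd_long() returns
 * the old bufspace, so the daemon wakeup fires only on the transition
 * where old < bufspacethresh <= old + diff.  For example, with
 * bufspacethresh at 100 MB, growing a buffer by 64 KB while bufspace sits
 * at 99.95 MB wakes the bufspace daemon; a later 64 KB growth at 100.1 MB
 * does not, since the daemon is already past the threshold and presumed
 * to be running.
 */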
558
559/*
560 *	bufspace_reserve:
561 *
562 *	Reserve bufspace before calling allocbuf().  metadata has a
563 *	different space limit than data.
564 */
565static int
566bufspace_reserve(int size, bool metadata)
567{
568	long limit;
569	long space;
570
571	if (metadata)
572		limit = maxbufspace;
573	else
574		limit = hibufspace;
575	do {
576		space = bufspace;
577		if (space + size > limit)
578			return (ENOSPC);
579	} while (atomic_cmpset_long(&bufspace, space, space + size) == 0);
580
581	/* Wake up the daemon on the transition. */
582	if (space < bufspacethresh && space + size >= bufspacethresh)
583		bufspace_daemonwakeup();
584
585	return (0);
586}
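
/*
 * Editor's note on the loop above: this is the usual lock-free reserve
 * idiom; re-read the counter, fail if the reservation would exceed the
 * limit, and retry the compare-and-set if another thread raced us.  A
 * generic sketch of the same pattern (hypothetical helper, not part of
 * this file):
 *
 *	static int
 *	reserve_long(long *counter, long amount, long limit)
 *	{
 *		long old;
 *
 *		do {
 *			old = *counter;
 *			if (old + amount > limit)
 *				return (ENOSPC);
 *		} while (atomic_cmpset_long(counter, old, old + amount) == 0);
 *		return (0);
 *	}
 */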
587
588/*
589 *	bufspace_release:
590 *
591 *	Release reserved bufspace after bufspace_adjust() has consumed it.
592 */
593static void
594bufspace_release(int size)
595{
596	atomic_subtract_long(&bufspace, size);
597	bufspace_wakeup();
598}
599
600/*
601 *	bufspace_wait:
602 *
603 *	Wait for bufspace, acting as the buf daemon if a locked vnode is
604 *	supplied.  needsbuffer must be set in a safe fashion prior to
605 *	polling for space.  The operation must be re-tried on return.
606 */
607static void
608bufspace_wait(struct vnode *vp, int gbflags, int slpflag, int slptimeo)
609{
610	struct thread *td;
611	int error, fl, norunbuf;
612
613	if ((gbflags & GB_NOWAIT_BD) != 0)
614		return;
615
616	td = curthread;
617	rw_wlock(&nblock);
618	while (needsbuffer != 0) {
619		if (vp != NULL && vp->v_type != VCHR &&
620		    (td->td_pflags & TDP_BUFNEED) == 0) {
621			rw_wunlock(&nblock);
622			/*
623			 * getblk() is called with the vnode locked, and
624			 * a majority of the dirty buffers may well
625			 * belong to that vnode.  Flushing them here
626			 * makes progress that the buf daemon cannot
627			 * achieve on its own, since it cannot lock
628			 * the vnode.
629			 */
630			norunbuf = ~(TDP_BUFNEED | TDP_NORUNNINGBUF) |
631			    (td->td_pflags & TDP_NORUNNINGBUF);
632
633			/*
634			 * Play bufdaemon.  The getnewbuf() function
635			 * may be called while the thread owns lock
636			 * for another dirty buffer for the same
637			 * vnode, which makes it impossible to use
638			 * VOP_FSYNC() there, due to the buffer lock
639			 * recursion.
640			 */
641			td->td_pflags |= TDP_BUFNEED | TDP_NORUNNINGBUF;
642			fl = buf_flush(vp, flushbufqtarget);
643			td->td_pflags &= norunbuf;
644			rw_wlock(&nblock);
645			if (fl != 0)
646				continue;
647			if (needsbuffer == 0)
648				break;
649		}
650		error = rw_sleep(__DEVOLATILE(void *, &needsbuffer), &nblock,
651		    (PRIBIO + 4) | slpflag, "newbuf", slptimeo);
652		if (error != 0)
653			break;
654	}
655	rw_wunlock(&nblock);
656}
657
658
659/*
660 *	bufspace_daemon:
661 *
662 *	buffer space management daemon.  Tries to maintain some marginal
663 *	amount of free buffer space so that requesting processes neither
664 *	block nor work to reclaim buffers.
665 */
666static void
667bufspace_daemon(void)
668{
669	for (;;) {
670		kproc_suspend_check(bufspacedaemonproc);
671
672		/*
673		 * Free buffers from the clean queue until we meet our
674		 * targets.
675		 *
676		 * Theory of operation:  The buffer cache is most efficient
677		 * when some free buffer headers and space are always
678		 * available to getnewbuf().  This daemon attempts to prevent
679		 * the excessive blocking and synchronization associated
680		 * with shortfall.  It goes through three phases according
681		 * to demand:
682		 *
683		 * 1)	The daemon wakes up voluntarily once per second
684		 *	during idle periods when the counters are below
685		 *	the wakeup thresholds (bufspacethresh, lofreebuffers).
686		 *
687		 * 2)	The daemon wakes up as we cross the thresholds
688		 *	ahead of any potential blocking.  This may bounce
689		 *	slightly according to the rate of consumption and
690		 *	release.
691		 *
692		 * 3)	The daemon and consumers are starved for working
693		 *	clean buffers.  This is the 'bufspace' sleep below
694		 *	which will inefficiently trade bufs with bqrelse
695		 *	until we return to condition 2.
696		 */
697		while (bufspace > lobufspace ||
698		    numfreebuffers < hifreebuffers) {
699			if (buf_recycle(false) != 0) {
700				atomic_set_int(&needsbuffer, 1);
701				if (buf_recycle(false) != 0) {
702					rw_wlock(&nblock);
703					if (needsbuffer)
704						rw_sleep(__DEVOLATILE(void *,
705						    &needsbuffer), &nblock,
706						    PRIBIO|PDROP, "bufspace",
707						    hz/10);
708					else
709						rw_wunlock(&nblock);
710				}
711			}
712			maybe_yield();
713		}
714
715		/*
716		 * Re-check our limits under the exclusive nblock.
717		 */
718		rw_wlock(&nblock);
719		if (bufspace < bufspacethresh &&
720		    numfreebuffers > lofreebuffers) {
721			bufspace_request = 0;
722			rw_sleep(&bufspace_request, &nblock, PRIBIO|PDROP,
723			    "-", hz);
724		} else
725			rw_wunlock(&nblock);
726	}
727}
728
729static struct kproc_desc bufspace_kp = {
730	"bufspacedaemon",
731	bufspace_daemon,
732	&bufspacedaemonproc
733};
734SYSINIT(bufspacedaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start,
735    &bufspace_kp);
736
737/*
738 *	bufmallocadjust:
739 *
740 *	Adjust the reported bufspace for a malloc managed buffer, possibly
741 *	waking any waiters.
742 */
743static void
744bufmallocadjust(struct buf *bp, int bufsize)
745{
746	int diff;
747
748	KASSERT((bp->b_flags & B_MALLOC) != 0,
749	    ("bufmallocadjust: non-malloc buf %p", bp));
750	diff = bufsize - bp->b_bufsize;
751	if (diff < 0)
752		atomic_subtract_long(&bufmallocspace, -diff);
753	else
754		atomic_add_long(&bufmallocspace, diff);
755	bp->b_bufsize = bufsize;
756}
757
758/*
759 *	runningwakeup:
760 *
761 *	Wake up processes that are waiting on asynchronous writes to fall
762 *	below lorunningspace.
763 */
764static void
765runningwakeup(void)
766{
767
768	mtx_lock(&rbreqlock);
769	if (runningbufreq) {
770		runningbufreq = 0;
771		wakeup(&runningbufreq);
772	}
773	mtx_unlock(&rbreqlock);
774}
775
776/*
777 *	runningbufwakeup:
778 *
779 *	Decrement the outstanding write count accordingly.
780 */
781void
782runningbufwakeup(struct buf *bp)
783{
784	long space, bspace;
785
786	bspace = bp->b_runningbufspace;
787	if (bspace == 0)
788		return;
789	space = atomic_fetchadd_long(&runningbufspace, -bspace);
790	KASSERT(space >= bspace, ("runningbufspace underflow %ld %ld",
791	    space, bspace));
792	bp->b_runningbufspace = 0;
793	/*
794	 * Only acquire the lock and wakeup on the transition from exceeding
795	 * the threshold to falling below it.
796	 */
797	if (space < lorunningspace)
798		return;
799	if (space - bspace > lorunningspace)
800		return;
801	runningwakeup();
802}
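
/*
 * Editor's sketch of the two early returns above (hypothetical numbers):
 * "space" is runningbufspace before this buffer's contribution was
 * subtracted, so the wakeup happens only when this completion crosses
 * lorunningspace from above.  With lorunningspace == 1 MB, completing a
 * 512 KB write while 1.2 MB is outstanding drops the total to 0.7 MB and
 * wakes "wdrain" sleepers; completing the same write at 4 MB outstanding
 * (still 3.5 MB afterwards) or at 0.9 MB outstanding (already below the
 * threshold beforehand) takes one of the early returns instead.
 */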
803
804/*
805 *	waitrunningbufspace()
806 *
807 *	runningbufspace is a measure of the amount of I/O currently
808 *	running.  This routine is used in async-write situations to
809 *	prevent creating huge backups of pending writes to a device.
810 *	Only asynchronous writes are governed by this function.
811 *
812 *	This does NOT turn an async write into a sync write.  It waits
813 *	for earlier writes to complete and generally returns before the
814 *	caller's write has reached the device.
815 */
816void
817waitrunningbufspace(void)
818{
819
820	mtx_lock(&rbreqlock);
821	while (runningbufspace > hirunningspace) {
822		runningbufreq = 1;
823		msleep(&runningbufreq, &rbreqlock, PVM, "wdrain", 0);
824	}
825	mtx_unlock(&rbreqlock);
826}
827
828
829/*
830 *	vfs_buf_test_cache:
831 *
832 *	Called when a buffer is extended.  This function clears the B_CACHE
833 *	bit if the newly extended portion of the buffer does not contain
834 *	valid data.
835 */
836static __inline void
837vfs_buf_test_cache(struct buf *bp, vm_ooffset_t foff, vm_offset_t off,
838    vm_offset_t size, vm_page_t m)
839{
840
841	VM_OBJECT_ASSERT_LOCKED(m->object);
842	if (bp->b_flags & B_CACHE) {
843		int base = (foff + off) & PAGE_MASK;
844		if (vm_page_is_valid(m, base, size) == 0)
845			bp->b_flags &= ~B_CACHE;
846	}
847}
848
849/* Wake up the buffer daemon if necessary */
850static __inline void
851bd_wakeup(void)
852{
853
854	mtx_lock(&bdlock);
855	if (bd_request == 0) {
856		bd_request = 1;
857		wakeup(&bd_request);
858	}
859	mtx_unlock(&bdlock);
860}
861
862/*
863 * Adjust the maxbcachebuf tunable.
864 */
865static void
866maxbcachebuf_adjust(void)
867{
868	int i;
869
870	/*
871	 * maxbcachebuf must be a power of 2 >= MAXBSIZE.
872	 */
873	i = 2;
874	while (i * 2 <= maxbcachebuf)
875		i *= 2;
876	maxbcachebuf = i;
877	if (maxbcachebuf < MAXBSIZE)
878		maxbcachebuf = MAXBSIZE;
879	if (maxbcachebuf > MAXPHYS)
880		maxbcachebuf = MAXPHYS;
881	if (bootverbose != 0 && maxbcachebuf != MAXBCACHEBUF)
882		printf("maxbcachebuf=%d\n", maxbcachebuf);
883}
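
/*
 * Editor's worked example (default values assumed, may differ per arch):
 * the loop computes the largest power of two that does not exceed the
 * tunable, and the result is then clamped to [MAXBSIZE, MAXPHYS].  With
 * MAXBSIZE == 64 KB and MAXPHYS == 128 KB, setting vfs.maxbcachebuf to
 * 96 KB yields 64 KB, 128 KB stays 128 KB, and anything at or below
 * 64 KB is raised back to MAXBSIZE.
 */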
884
885/*
886 * bd_speedup - speedup the buffer cache flushing code
887 */
888void
889bd_speedup(void)
890{
891	int needwake;
892
893	mtx_lock(&bdlock);
894	needwake = 0;
895	if (bd_speedupreq == 0 || bd_request == 0)
896		needwake = 1;
897	bd_speedupreq = 1;
898	bd_request = 1;
899	if (needwake)
900		wakeup(&bd_request);
901	mtx_unlock(&bdlock);
902}
903
904#ifndef NSWBUF_MIN
905#define	NSWBUF_MIN	16
906#endif
907
908#ifdef __i386__
909#define	TRANSIENT_DENOM	5
910#else
911#define	TRANSIENT_DENOM 10
912#endif
913
914/*
915 * Calculate buffer cache scaling values and reserve space for buffer
916 * headers.  This is called during low level kernel initialization and
917 * may be called more than once.  We CANNOT write to the memory area
918 * being reserved at this time.
919 */
920caddr_t
921kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
922{
923	int tuned_nbuf;
924	long maxbuf, maxbuf_sz, buf_sz,	biotmap_sz;
925
926	/*
927	 * physmem_est is in pages.  Convert it to kilobytes (assumes
928	 * PAGE_SIZE is >= 1K)
929	 */
930	physmem_est = physmem_est * (PAGE_SIZE / 1024);
931
932	maxbcachebuf_adjust();
933	/*
934	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
935	 * For the first 64MB of ram nominally allocate sufficient buffers to
936	 * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
937	 * buffers to cover 1/10 of our ram over 64MB.  When auto-sizing
938	 * the buffer cache we limit the eventual kva reservation to
939	 * maxbcache bytes.
940	 *
941	 * factor represents the 1/4 x ram conversion.
942	 */
943	if (nbuf == 0) {
944		int factor = 4 * BKVASIZE / 1024;
945
946		nbuf = 50;
947		if (physmem_est > 4096)
948			nbuf += min((physmem_est - 4096) / factor,
949			    65536 / factor);
950		if (physmem_est > 65536)
951			nbuf += min((physmem_est - 65536) * 2 / (factor * 5),
952			    32 * 1024 * 1024 / (factor * 5));
953
954		if (maxbcache && nbuf > maxbcache / BKVASIZE)
955			nbuf = maxbcache / BKVASIZE;
956		tuned_nbuf = 1;
957	} else
958		tuned_nbuf = 0;
959
960	/* XXX Avoid unsigned long overflows later on with maxbufspace. */
961	maxbuf = (LONG_MAX / 3) / BKVASIZE;
962	if (nbuf > maxbuf) {
963		if (!tuned_nbuf)
964			printf("Warning: nbufs lowered from %d to %ld\n", nbuf,
965			    maxbuf);
966		nbuf = maxbuf;
967	}
968
969	/*
970	 * Ideal allocation size for the transient bio submap is 10%
971	 * of the maximal space buffer map.  This roughly corresponds
972	 * to the amount of the buffer mapped for typical UFS load.
973	 *
974	 * Clip the buffer map to reserve space for the transient
975	 * BIOs, if its extent is bigger than 90% (80% on i386) of the
976	 * maximum buffer map extent on the platform.
977	 *
978	 * Falling back to maxbuf when maxbcache is unset allows us
979	 * to avoid trimming the buffer KVA on architectures with
980	 * ample KVA space.
981	 */
982	if (bio_transient_maxcnt == 0 && unmapped_buf_allowed) {
983		maxbuf_sz = maxbcache != 0 ? maxbcache : maxbuf * BKVASIZE;
984		buf_sz = (long)nbuf * BKVASIZE;
985		if (buf_sz < maxbuf_sz / TRANSIENT_DENOM *
986		    (TRANSIENT_DENOM - 1)) {
987			/*
988			 * There is more KVA than memory.  Do not
989			 * adjust buffer map size, and assign the rest
990			 * of maxbuf to transient map.
991			 */
992			biotmap_sz = maxbuf_sz - buf_sz;
993		} else {
994			/*
995			 * Buffer map spans all KVA we could afford on
996			 * this platform.  Give 10% (20% on i386) of
997			 * the buffer map to the transient bio map.
998			 */
999			biotmap_sz = buf_sz / TRANSIENT_DENOM;
1000			buf_sz -= biotmap_sz;
1001		}
1002		if (biotmap_sz / INT_MAX > MAXPHYS)
1003			bio_transient_maxcnt = INT_MAX;
1004		else
1005			bio_transient_maxcnt = biotmap_sz / MAXPHYS;
1006		/*
1007		 * Artificially limit to 1024 simultaneous in-flight I/Os
1008		 * using the transient mapping.
1009		 */
1010		if (bio_transient_maxcnt > 1024)
1011			bio_transient_maxcnt = 1024;
1012		if (tuned_nbuf)
1013			nbuf = buf_sz / BKVASIZE;
1014	}
1015
1016	/*
1017	 * swbufs are used as temporary holders for I/O, such as paging I/O.
1018	 * We have no fewer than 16 and no more than 256.
1019	 */
1020	nswbuf = min(nbuf / 4, 256);
1021	TUNABLE_INT_FETCH("kern.nswbuf", &nswbuf);
1022	if (nswbuf < NSWBUF_MIN)
1023		nswbuf = NSWBUF_MIN;
1024
1025	/*
1026	 * Reserve space for the buffer cache buffers
1027	 */
1028	swbuf = (void *)v;
1029	v = (caddr_t)(swbuf + nswbuf);
1030	buf = (void *)v;
1031	v = (caddr_t)(buf + nbuf);
1032
1033	return(v);
1034}
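
/*
 * Editor's worked example of the auto-sizing above (hypothetical machine,
 * BKVASIZE assumed to be 16 KB): with physmem_est of 1 GB (1048576 KB)
 * and nbuf not set by the administrator,
 *
 *	factor = 4 * 16384 / 1024 = 64
 *	nbuf   = 50
 *	       + min((1048576 - 4096) / 64, 65536 / 64)            = 1024
 *	       + min((1048576 - 65536) * 2 / 320, 33554432 / 320)  = 6144
 *	       = 7218
 *
 * i.e. roughly 16 MB of buffers for the first 64 MB of RAM (1/4) plus
 * about 96 MB for the remaining ~960 MB (1/10); the maxbcache cap and the
 * transient-map clipping elsewhere in the function then adjust this
 * further.
 */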
1035
1036/* Initialize the buffer subsystem.  Called before use of any buffers. */
1037void
1038bufinit(void)
1039{
1040	struct buf *bp;
1041	int i;
1042
1043	KASSERT(maxbcachebuf >= MAXBSIZE,
1044	    ("maxbcachebuf (%d) must be >= MAXBSIZE (%d)\n", maxbcachebuf,
1045	    MAXBSIZE));
1046	mtx_init(&bqlocks[QUEUE_DIRTY], "bufq dirty lock", NULL, MTX_DEF);
1047	mtx_init(&bqlocks[QUEUE_EMPTY], "bufq empty lock", NULL, MTX_DEF);
1048	for (i = QUEUE_CLEAN; i < QUEUE_CLEAN + CLEAN_QUEUES; i++)
1049		mtx_init(&bqlocks[i], "bufq clean lock", NULL, MTX_DEF);
1050	mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
1051	rw_init(&nblock, "needsbuffer lock");
1052	mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);
1053	mtx_init(&bdirtylock, "dirty buf lock", NULL, MTX_DEF);
1054
1055	/* next, make a null set of free lists */
1056	for (i = 0; i < BUFFER_QUEUES; i++)
1057		TAILQ_INIT(&bufqueues[i]);
1058
1059	unmapped_buf = (caddr_t)kva_alloc(MAXPHYS);
1060
1061	/* finally, initialize each buffer header and stick on empty q */
1062	for (i = 0; i < nbuf; i++) {
1063		bp = &buf[i];
1064		bzero(bp, sizeof *bp);
1065		bp->b_flags = B_INVAL;
1066		bp->b_rcred = NOCRED;
1067		bp->b_wcred = NOCRED;
1068		bp->b_qindex = QUEUE_EMPTY;
1069		bp->b_xflags = 0;
1070		bp->b_data = bp->b_kvabase = unmapped_buf;
1071		LIST_INIT(&bp->b_dep);
1072		BUF_LOCKINIT(bp);
1073		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
1074#ifdef INVARIANTS
1075		bq_len[QUEUE_EMPTY]++;
1076#endif
1077	}
1078
1079	/*
1080	 * maxbufspace is the absolute maximum amount of buffer space we are
1081	 * allowed to reserve in KVM and in real terms.  The absolute maximum
1082	 * is nominally used by metadata.  hibufspace is the nominal maximum
1083	 * used by most other requests.  The differential is required to
1084	 * ensure that metadata deadlocks don't occur.
1085	 *
1086	 * maxbufspace is based on BKVASIZE.  Allocating buffers larger than
1087	 * this may result in KVM fragmentation which is not handled optimally
1088	 * by the system. XXX This is less true with vmem.  We could use
1089	 * PAGE_SIZE.
1090	 */
1091	maxbufspace = (long)nbuf * BKVASIZE;
1092	hibufspace = lmax(3 * maxbufspace / 4, maxbufspace - maxbcachebuf * 10);
1093	lobufspace = (hibufspace / 20) * 19; /* 95% */
1094	bufspacethresh = lobufspace + (hibufspace - lobufspace) / 2;
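
	/*
	 * Editor's note (illustrative, continuing the hypothetical 1 GB
	 * example above with nbuf == 7218 and a 16 KB BKVASIZE): this works
	 * out to roughly 113 MiB of maxbufspace, roughly 112 MiB of
	 * hibufspace, lobufspace at 95% of that, and bufspacethresh halfway
	 * between lobufspace and hibufspace; that is the band within which
	 * the bufspace daemon tries to keep the cache.
	 */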
1095
1096	/*
1097	 * Note: The 16 MiB upper limit for hirunningspace was chosen
1098	 * arbitrarily and may need further tuning. It corresponds to
1099	 * 128 outstanding write IO requests (if IO size is 128 KiB),
1100	 * which fits with many RAID controllers' tagged queuing limits.
1101	 * The lower 1 MiB limit is the historical upper limit for
1102	 * hirunningspace.
1103	 */
1104	hirunningspace = lmax(lmin(roundup(hibufspace / 64, maxbcachebuf),
1105	    16 * 1024 * 1024), 1024 * 1024);
1106	lorunningspace = roundup((hirunningspace * 2) / 3, maxbcachebuf);
1107
1108	/*
1109	 * Limit the amount of malloc memory since it is wired permanently into
1110	 * the kernel space.  Even though this is accounted for in the buffer
1111	 * allocation, we don't want the malloced region to grow uncontrolled.
1112	 * The malloc scheme improves memory utilization significantly on
1113	 * average (small) directories.
1114	 */
1115	maxbufmallocspace = hibufspace / 20;
1116
1117	/*
1118	 * Reduce the chance of a deadlock occurring by limiting the number
1119	 * of delayed-write dirty buffers we allow to stack up.
1120	 */
1121	hidirtybuffers = nbuf / 4 + 20;
1122	dirtybufthresh = hidirtybuffers * 9 / 10;
1123	numdirtybuffers = 0;
1124	/*
1125	 * To support extreme low-memory systems, make sure hidirtybuffers
1126	 * cannot eat up all available buffer space.  This occurs when our
1127	 * minimum cannot be met.  We try to size hidirtybuffers to 3/4 our
1128	 * buffer space assuming BKVASIZE'd buffers.
1129	 */
1130	while ((long)hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
1131		hidirtybuffers >>= 1;
1132	}
1133	lodirtybuffers = hidirtybuffers / 2;
1134
1135	/*
1136	 * lofreebuffers should be sufficient to avoid stalling waiting on
1137	 * buf headers under heavy utilization.  The bufs in per-cpu caches
1138	 * are counted as free but will be unavailable to threads executing
1139	 * on other cpus.
1140	 *
1141	 * hifreebuffers is the free target for the bufspace daemon.  This
1142	 * should be set appropriately to limit work per-iteration.
1143	 */
1144	lofreebuffers = MIN((nbuf / 25) + (20 * mp_ncpus), 128 * mp_ncpus);
1145	hifreebuffers = (3 * lofreebuffers) / 2;
1146	numfreebuffers = nbuf;
1147
1148	bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
1149	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
1150
1151	/* Setup the kva and free list allocators. */
1152	vmem_set_reclaim(buffer_arena, bufkva_reclaim);
1153	buf_zone = uma_zcache_create("buf free cache", sizeof(struct buf),
1154	    NULL, NULL, NULL, NULL, buf_import, buf_release, NULL, 0);
1155
1156	/*
1157	 * Size the clean queue according to the amount of buffer space.
1158	 * One queue per 256MB up to the max.  More queues give better
1159	 * concurrency but less accurate LRU.
1160	 */
1161	clean_queues = MIN(howmany(maxbufspace, 256*1024*1024), CLEAN_QUEUES);
1162
1163}
1164
1165#ifdef INVARIANTS
1166static inline void
1167vfs_buf_check_mapped(struct buf *bp)
1168{
1169
1170	KASSERT(bp->b_kvabase != unmapped_buf,
1171	    ("mapped buf: b_kvabase was not updated %p", bp));
1172	KASSERT(bp->b_data != unmapped_buf,
1173	    ("mapped buf: b_data was not updated %p", bp));
1174	KASSERT(bp->b_data < unmapped_buf || bp->b_data >= unmapped_buf +
1175	    MAXPHYS, ("b_data + b_offset unmapped %p", bp));
1176}
1177
1178static inline void
1179vfs_buf_check_unmapped(struct buf *bp)
1180{
1181
1182	KASSERT(bp->b_data == unmapped_buf,
1183	    ("unmapped buf: corrupted b_data %p", bp));
1184}
1185
1186#define	BUF_CHECK_MAPPED(bp) vfs_buf_check_mapped(bp)
1187#define	BUF_CHECK_UNMAPPED(bp) vfs_buf_check_unmapped(bp)
1188#else
1189#define	BUF_CHECK_MAPPED(bp) do {} while (0)
1190#define	BUF_CHECK_UNMAPPED(bp) do {} while (0)
1191#endif
1192
1193static int
1194isbufbusy(struct buf *bp)
1195{
1196	if (((bp->b_flags & B_INVAL) == 0 && BUF_ISLOCKED(bp)) ||
1197	    ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI))
1198		return (1);
1199	return (0);
1200}
1201
1202/*
1203 * Shutdown the system cleanly to prepare for reboot, halt, or power off.
1204 */
1205void
1206bufshutdown(int show_busybufs)
1207{
1208	static int first_buf_printf = 1;
1209	struct buf *bp;
1210	int iter, nbusy, pbusy;
1211#ifndef PREEMPTION
1212	int subiter;
1213#endif
1214
1215	/*
1216	 * Sync filesystems for shutdown
1217	 */
1218	wdog_kern_pat(WD_LASTVAL);
1219	sys_sync(curthread, NULL);
1220
1221	/*
1222	 * With soft updates, some buffers that are
1223	 * written will be remarked as dirty until other
1224	 * buffers are written.
1225	 */
1226	for (iter = pbusy = 0; iter < 20; iter++) {
1227		nbusy = 0;
1228		for (bp = &buf[nbuf]; --bp >= buf; )
1229			if (isbufbusy(bp))
1230				nbusy++;
1231		if (nbusy == 0) {
1232			if (first_buf_printf)
1233				printf("All buffers synced.");
1234			break;
1235		}
1236		if (first_buf_printf) {
1237			printf("Syncing disks, buffers remaining... ");
1238			first_buf_printf = 0;
1239		}
1240		printf("%d ", nbusy);
1241		if (nbusy < pbusy)
1242			iter = 0;
1243		pbusy = nbusy;
1244
1245		wdog_kern_pat(WD_LASTVAL);
1246		sys_sync(curthread, NULL);
1247
1248#ifdef PREEMPTION
1249		/*
1250		 * Drop Giant and spin for a while to allow
1251		 * interrupt threads to run.
1252		 */
1253		DROP_GIANT();
1254		DELAY(50000 * iter);
1255		PICKUP_GIANT();
1256#else
1257		/*
1258		 * Drop Giant and context switch several times to
1259		 * allow interrupt threads to run.
1260		 */
1261		DROP_GIANT();
1262		for (subiter = 0; subiter < 50 * iter; subiter++) {
1263			thread_lock(curthread);
1264			mi_switch(SW_VOL, NULL);
1265			thread_unlock(curthread);
1266			DELAY(1000);
1267		}
1268		PICKUP_GIANT();
1269#endif
1270	}
1271	printf("\n");
1272	/*
1273	 * Count only busy local buffers to prevent forcing
1274	 * a fsck if we're just a client of a wedged NFS server
1275	 */
1276	nbusy = 0;
1277	for (bp = &buf[nbuf]; --bp >= buf; ) {
1278		if (isbufbusy(bp)) {
1279#if 0
1280/* XXX: This is bogus.  We should probably have a BO_REMOTE flag instead */
1281			if (bp->b_dev == NULL) {
1282				TAILQ_REMOVE(&mountlist,
1283				    bp->b_vp->v_mount, mnt_list);
1284				continue;
1285			}
1286#endif
1287			nbusy++;
1288			if (show_busybufs > 0) {
1289				printf(
1290	    "%d: buf:%p, vnode:%p, flags:%0x, blkno:%jd, lblkno:%jd, buflock:",
1291				    nbusy, bp, bp->b_vp, bp->b_flags,
1292				    (intmax_t)bp->b_blkno,
1293				    (intmax_t)bp->b_lblkno);
1294				BUF_LOCKPRINTINFO(bp);
1295				if (show_busybufs > 1)
1296					vn_printf(bp->b_vp,
1297					    "vnode content: ");
1298			}
1299		}
1300	}
1301	if (nbusy) {
1302		/*
1303		 * Failed to sync all blocks. Indicate this and don't
1304		 * unmount filesystems (thus forcing an fsck on reboot).
1305		 */
1306		printf("Giving up on %d buffers\n", nbusy);
1307		DELAY(5000000);	/* 5 seconds */
1308	} else {
1309		if (!first_buf_printf)
1310			printf("Final sync complete\n");
1311		/*
1312		 * Unmount filesystems
1313		 */
1314		if (panicstr == NULL)
1315			vfs_unmountall();
1316	}
1317	swapoff_all();
1318	DELAY(100000);		/* wait for console output to finish */
1319}
1320
1321static void
1322bpmap_qenter(struct buf *bp)
1323{
1324
1325	BUF_CHECK_MAPPED(bp);
1326
1327	/*
1328	 * bp->b_data is relative to bp->b_offset, but
1329	 * bp->b_offset may be offset into the first page.
1330	 */
1331	bp->b_data = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
1332	pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
1333	bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
1334	    (vm_offset_t)(bp->b_offset & PAGE_MASK));
1335}
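
/*
 * Editor's worked example (hypothetical values): if b_offset is 0x1a400,
 * the buffer's data begins 0x400 bytes into its first page.  The code
 * above truncates b_data to a page boundary so pmap_qenter() can map
 * whole pages at that KVA, then ORs the sub-page offset (b_offset &
 * PAGE_MASK, here 0x400) back in so b_data again points at the first
 * valid byte.
 */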
1336
1337/*
1338 *	binsfree:
1339 *
1340 *	Insert the buffer into the appropriate free list.
1341 */
1342static void
1343binsfree(struct buf *bp, int qindex)
1344{
1345	struct mtx *olock, *nlock;
1346
1347	if (qindex != QUEUE_EMPTY) {
1348		BUF_ASSERT_XLOCKED(bp);
1349	}
1350
1351	/*
1352	 * Stick to the same clean queue for the lifetime of the buf to
1353	 * limit locking below.  Otherwise pick one sequentially.
1354	 */
1355	if (qindex == QUEUE_CLEAN) {
1356		if (bqisclean(bp->b_qindex))
1357			qindex = bp->b_qindex;
1358		else
1359			qindex = bqcleanq();
1360	}
1361
1362	/*
1363	 * Handle delayed bremfree() processing.
1364	 */
1365	nlock = bqlock(qindex);
1366	if (bp->b_flags & B_REMFREE) {
1367		olock = bqlock(bp->b_qindex);
1368		mtx_lock(olock);
1369		bremfreel(bp);
1370		if (olock != nlock) {
1371			mtx_unlock(olock);
1372			mtx_lock(nlock);
1373		}
1374	} else
1375		mtx_lock(nlock);
1376
1377	if (bp->b_qindex != QUEUE_NONE)
1378		panic("binsfree: free buffer onto another queue???");
1379
1380	bp->b_qindex = qindex;
1381	if (bp->b_flags & B_AGE)
1382		TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
1383	else
1384		TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
1385#ifdef INVARIANTS
1386	bq_len[bp->b_qindex]++;
1387#endif
1388	mtx_unlock(nlock);
1389}
1390
1391/*
1392 * buf_free:
1393 *
1394 *	Free a buffer to the buf zone once it no longer has valid contents.
1395 */
1396static void
1397buf_free(struct buf *bp)
1398{
1399
1400	if (bp->b_flags & B_REMFREE)
1401		bremfreef(bp);
1402	if (bp->b_vflags & BV_BKGRDINPROG)
1403		panic("losing buffer 1");
1404	if (bp->b_rcred != NOCRED) {
1405		crfree(bp->b_rcred);
1406		bp->b_rcred = NOCRED;
1407	}
1408	if (bp->b_wcred != NOCRED) {
1409		crfree(bp->b_wcred);
1410		bp->b_wcred = NOCRED;
1411	}
1412	if (!LIST_EMPTY(&bp->b_dep))
1413		buf_deallocate(bp);
1414	bufkva_free(bp);
1415	BUF_UNLOCK(bp);
1416	uma_zfree(buf_zone, bp);
1417	atomic_add_int(&numfreebuffers, 1);
1418	bufspace_wakeup();
1419}
1420
1421/*
1422 * buf_import:
1423 *
1424 *	Import bufs into the uma cache from the buf list.  The system still
1425 *	expects a static array of bufs and much of the synchronization
1426 *	around bufs assumes type stable storage.  As a result, UMA is used
1427 *	only as a per-cpu cache of bufs still maintained on a global list.
1428 */
1429static int
1430buf_import(void *arg, void **store, int cnt, int flags)
1431{
1432	struct buf *bp;
1433	int i;
1434
1435	mtx_lock(&bqlocks[QUEUE_EMPTY]);
1436	for (i = 0; i < cnt; i++) {
1437		bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
1438		if (bp == NULL)
1439			break;
1440		bremfreel(bp);
1441		store[i] = bp;
1442	}
1443	mtx_unlock(&bqlocks[QUEUE_EMPTY]);
1444
1445	return (i);
1446}
1447
1448/*
1449 * buf_release:
1450 *
1451 *	Release bufs from the uma cache back to the buffer queues.
1452 */
1453static void
1454buf_release(void *arg, void **store, int cnt)
1455{
1456	int i;
1457
1458	for (i = 0; i < cnt; i++)
1459		binsfree(store[i], QUEUE_EMPTY);
1460}
1461
1462/*
1463 * buf_alloc:
1464 *
1465 *	Allocate an empty buffer header.
1466 */
1467static struct buf *
1468buf_alloc(void)
1469{
1470	struct buf *bp;
1471
1472	bp = uma_zalloc(buf_zone, M_NOWAIT);
1473	if (bp == NULL) {
1474		bufspace_daemonwakeup();
1475		atomic_add_int(&numbufallocfails, 1);
1476		return (NULL);
1477	}
1478
1479	/*
1480	 * Wake-up the bufspace daemon on transition.
1481	 */
1482	if (atomic_fetchadd_int(&numfreebuffers, -1) == lofreebuffers)
1483		bufspace_daemonwakeup();
1484
1485	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
1486		panic("getnewbuf_empty: Locked buf %p on free queue.", bp);
1487
1488	KASSERT(bp->b_vp == NULL,
1489	    ("bp: %p still has vnode %p.", bp, bp->b_vp));
1490	KASSERT((bp->b_flags & (B_DELWRI | B_NOREUSE)) == 0,
1491	    ("invalid buffer %p flags %#x", bp, bp->b_flags));
1492	KASSERT((bp->b_xflags & (BX_VNCLEAN|BX_VNDIRTY)) == 0,
1493	    ("bp: %p still on a buffer list. xflags %X", bp, bp->b_xflags));
1494	KASSERT(bp->b_npages == 0,
1495	    ("bp: %p still has %d vm pages\n", bp, bp->b_npages));
1496	KASSERT(bp->b_kvasize == 0, ("bp: %p still has kva\n", bp));
1497	KASSERT(bp->b_bufsize == 0, ("bp: %p still has bufspace\n", bp));
1498
1499	bp->b_flags = 0;
1500	bp->b_ioflags = 0;
1501	bp->b_xflags = 0;
1502	bp->b_vflags = 0;
1503	bp->b_vp = NULL;
1504	bp->b_blkno = bp->b_lblkno = 0;
1505	bp->b_offset = NOOFFSET;
1506	bp->b_iodone = 0;
1507	bp->b_error = 0;
1508	bp->b_resid = 0;
1509	bp->b_bcount = 0;
1510	bp->b_npages = 0;
1511	bp->b_dirtyoff = bp->b_dirtyend = 0;
1512	bp->b_bufobj = NULL;
1513	bp->b_pin_count = 0;
1514	bp->b_data = bp->b_kvabase = unmapped_buf;
1515	bp->b_fsprivate1 = NULL;
1516	bp->b_fsprivate2 = NULL;
1517	bp->b_fsprivate3 = NULL;
1518	LIST_INIT(&bp->b_dep);
1519
1520	return (bp);
1521}
1522
1523/*
1524 *	buf_qrecycle:
1525 *
1526 *	Free a buffer from the given bufqueue.  kva controls whether the
1527 *	freed buf must own some kva resources.  This is used for
1528 *	defragmenting.
1529 */
1530static int
1531buf_qrecycle(int qindex, bool kva)
1532{
1533	struct buf *bp, *nbp;
1534
1535	if (kva)
1536		atomic_add_int(&bufdefragcnt, 1);
1537	nbp = NULL;
1538	mtx_lock(&bqlocks[qindex]);
1539	nbp = TAILQ_FIRST(&bufqueues[qindex]);
1540
1541	/*
1542	 * Run the scan, possibly freeing data and/or kva mappings on the
1543	 * fly, depending on whether we are called to defragment (kva).
1544	 */
1545	while ((bp = nbp) != NULL) {
1546		/*
1547		 * Calculate next bp (we can only use it if we do not
1548		 * release the bqlock).
1549		 */
1550		nbp = TAILQ_NEXT(bp, b_freelist);
1551
1552		/*
1553		 * If we are defragging then we need a buffer with
1554		 * some kva to reclaim.
1555		 */
1556		if (kva && bp->b_kvasize == 0)
1557			continue;
1558
1559		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
1560			continue;
1561
1562		/*
1563		 * Skip buffers with background writes in progress.
1564		 */
1565		if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
1566			BUF_UNLOCK(bp);
1567			continue;
1568		}
1569
1570		KASSERT(bp->b_qindex == qindex,
1571		    ("getnewbuf: inconsistent queue %d bp %p", qindex, bp));
1572		/*
1573		 * NOTE:  nbp is now entirely invalid.  We can only restart
1574		 * the scan from this point on.
1575		 */
1576		bremfreel(bp);
1577		mtx_unlock(&bqlocks[qindex]);
1578
1579		/*
1580		 * Requeue the background write buffer with error and
1581		 * restart the scan.
1582		 */
1583		if ((bp->b_vflags & BV_BKGRDERR) != 0) {
1584			bqrelse(bp);
1585			mtx_lock(&bqlocks[qindex]);
1586			nbp = TAILQ_FIRST(&bufqueues[qindex]);
1587			continue;
1588		}
1589		bp->b_flags |= B_INVAL;
1590		brelse(bp);
1591		return (0);
1592	}
1593	mtx_unlock(&bqlocks[qindex]);
1594
1595	return (ENOBUFS);
1596}
1597
1598/*
1599 *	buf_recycle:
1600 *
1601 *	Iterate through all clean queues until we find a buf to recycle or
1602 *	exhaust the search.
1603 */
1604static int
1605buf_recycle(bool kva)
1606{
1607	int qindex, first_qindex;
1608
1609	qindex = first_qindex = bqcleanq();
1610	do {
1611		if (buf_qrecycle(qindex, kva) == 0)
1612			return (0);
1613		if (++qindex == QUEUE_CLEAN + clean_queues)
1614			qindex = QUEUE_CLEAN;
1615	} while (qindex != first_qindex);
1616
1617	return (ENOBUFS);
1618}
1619
1620/*
1621 *	buf_scan:
1622 *
1623 *	Scan the clean queues looking for a buffer to recycle.  needsbuffer
1624 *	is set on failure so that the caller may optionally bufspace_wait()
1625 *	in a race-free fashion.
1626 */
1627static int
1628buf_scan(bool defrag)
1629{
1630	int error;
1631
1632	/*
1633	 * To avoid heavy synchronization and wakeup races we set
1634	 * needsbuffer and re-poll before failing.  This ensures that
1635	 * no frees can be missed between an unsuccessful poll and
1636	 * going to sleep in a synchronized fashion.
1637	 */
1638	if ((error = buf_recycle(defrag)) != 0) {
1639		atomic_set_int(&needsbuffer, 1);
1640		bufspace_daemonwakeup();
1641		error = buf_recycle(defrag);
1642	}
1643	if (error == 0)
1644		atomic_add_int(&getnewbufrestarts, 1);
1645	return (error);
1646}
1647
1648/*
1649 *	bremfree:
1650 *
1651 *	Mark the buffer for removal from the appropriate free list.
1652 *
1653 */
1654void
1655bremfree(struct buf *bp)
1656{
1657
1658	CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1659	KASSERT((bp->b_flags & B_REMFREE) == 0,
1660	    ("bremfree: buffer %p already marked for delayed removal.", bp));
1661	KASSERT(bp->b_qindex != QUEUE_NONE,
1662	    ("bremfree: buffer %p not on a queue.", bp));
1663	BUF_ASSERT_XLOCKED(bp);
1664
1665	bp->b_flags |= B_REMFREE;
1666}
1667
1668/*
1669 *	bremfreef:
1670 *
1671 *	Force an immediate removal from a free list.  Used only in nfs when
1672 *	it abuses the b_freelist pointer.
1673 */
1674void
1675bremfreef(struct buf *bp)
1676{
1677	struct mtx *qlock;
1678
1679	qlock = bqlock(bp->b_qindex);
1680	mtx_lock(qlock);
1681	bremfreel(bp);
1682	mtx_unlock(qlock);
1683}
1684
1685/*
1686 *	bremfreel:
1687 *
1688 *	Removes a buffer from the free list, must be called with the
1689 *	correct qlock held.
1690 */
1691static void
1692bremfreel(struct buf *bp)
1693{
1694
1695	CTR3(KTR_BUF, "bremfreel(%p) vp %p flags %X",
1696	    bp, bp->b_vp, bp->b_flags);
1697	KASSERT(bp->b_qindex != QUEUE_NONE,
1698	    ("bremfreel: buffer %p not on a queue.", bp));
1699	if (bp->b_qindex != QUEUE_EMPTY) {
1700		BUF_ASSERT_XLOCKED(bp);
1701	}
1702	mtx_assert(bqlock(bp->b_qindex), MA_OWNED);
1703
1704	TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
1705#ifdef INVARIANTS
1706	KASSERT(bq_len[bp->b_qindex] >= 1, ("queue %d underflow",
1707	    bp->b_qindex));
1708	bq_len[bp->b_qindex]--;
1709#endif
1710	bp->b_qindex = QUEUE_NONE;
1711	bp->b_flags &= ~B_REMFREE;
1712}
1713
1714/*
1715 *	bufkva_free:
1716 *
1717 *	Free the kva allocation for a buffer.
1718 *
1719 */
1720static void
1721bufkva_free(struct buf *bp)
1722{
1723
1724#ifdef INVARIANTS
1725	if (bp->b_kvasize == 0) {
1726		KASSERT(bp->b_kvabase == unmapped_buf &&
1727		    bp->b_data == unmapped_buf,
1728		    ("Leaked KVA space on %p", bp));
1729	} else if (buf_mapped(bp))
1730		BUF_CHECK_MAPPED(bp);
1731	else
1732		BUF_CHECK_UNMAPPED(bp);
1733#endif
1734	if (bp->b_kvasize == 0)
1735		return;
1736
1737	vmem_free(buffer_arena, (vm_offset_t)bp->b_kvabase, bp->b_kvasize);
1738	atomic_subtract_long(&bufkvaspace, bp->b_kvasize);
1739	atomic_add_int(&buffreekvacnt, 1);
1740	bp->b_data = bp->b_kvabase = unmapped_buf;
1741	bp->b_kvasize = 0;
1742}
1743
1744/*
1745 *	bufkva_alloc:
1746 *
1747 *	Allocate the buffer KVA and set b_kvasize and b_kvabase.
1748 */
1749static int
1750bufkva_alloc(struct buf *bp, int maxsize, int gbflags)
1751{
1752	vm_offset_t addr;
1753	int error;
1754
1755	KASSERT((gbflags & GB_UNMAPPED) == 0 || (gbflags & GB_KVAALLOC) != 0,
1756	    ("Invalid gbflags 0x%x in %s", gbflags, __func__));
1757
1758	bufkva_free(bp);
1759
1760	addr = 0;
1761	error = vmem_alloc(buffer_arena, maxsize, M_BESTFIT | M_NOWAIT, &addr);
1762	if (error != 0) {
1763		/*
1764		 * Buffer map is too fragmented.  Request the caller
1765		 * to defragment the map.
1766		 */
1767		return (error);
1768	}
1769	bp->b_kvabase = (caddr_t)addr;
1770	bp->b_kvasize = maxsize;
1771	atomic_add_long(&bufkvaspace, bp->b_kvasize);
1772	if ((gbflags & GB_UNMAPPED) != 0) {
1773		bp->b_data = unmapped_buf;
1774		BUF_CHECK_UNMAPPED(bp);
1775	} else {
1776		bp->b_data = bp->b_kvabase;
1777		BUF_CHECK_MAPPED(bp);
1778	}
1779	return (0);
1780}
1781
1782/*
1783 *	bufkva_reclaim:
1784 *
1785 *	Reclaim buffer kva by freeing buffers holding kva.  This is a vmem
1786 *	callback that fires to avoid returning failure.
1787 */
1788static void
1789bufkva_reclaim(vmem_t *vmem, int flags)
1790{
1791	int i;
1792
1793	for (i = 0; i < 5; i++)
1794		if (buf_scan(true) != 0)
1795			break;
1796	return;
1797}
1798
1799
1800/*
1801 * Attempt to initiate asynchronous I/O on read-ahead blocks.  We must
1802 * clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE is set,
1803 * the buffer is valid and we do not have to do anything.
1804 */
1805void
1806breada(struct vnode * vp, daddr_t * rablkno, int * rabsize,
1807    int cnt, struct ucred * cred)
1808{
1809	struct buf *rabp;
1810	int i;
1811
1812	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
1813		if (inmem(vp, *rablkno))
1814			continue;
1815		rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0);
1816
1817		if ((rabp->b_flags & B_CACHE) == 0) {
1818			if (!TD_IS_IDLETHREAD(curthread)) {
1819#ifdef RACCT
1820				if (racct_enable) {
1821					PROC_LOCK(curproc);
1822					racct_add_buf(curproc, rabp, 0);
1823					PROC_UNLOCK(curproc);
1824				}
1825#endif /* RACCT */
1826				curthread->td_ru.ru_inblock++;
1827			}
1828			rabp->b_flags |= B_ASYNC;
1829			rabp->b_flags &= ~B_INVAL;
1830			rabp->b_ioflags &= ~BIO_ERROR;
1831			rabp->b_iocmd = BIO_READ;
1832			if (rabp->b_rcred == NOCRED && cred != NOCRED)
1833				rabp->b_rcred = crhold(cred);
1834			vfs_busy_pages(rabp, 0);
1835			BUF_KERNPROC(rabp);
1836			rabp->b_iooffset = dbtob(rabp->b_blkno);
1837			bstrategy(rabp);
1838		} else {
1839			brelse(rabp);
1840		}
1841	}
1842}
1843
1844/*
1845 * Entry point for bread() and breadn() via #defines in sys/buf.h.
1846 *
1847 * Get a buffer with the specified data.  Look in the cache first.  We
1848 * must clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
1849 * is set, the buffer is valid and we do not have to do anything, see
1850 * getblk(). Also starts asynchronous I/O on read-ahead blocks.
1851 *
1852 * Always return a NULL buffer pointer (in bpp) when returning an error.
1853 */
1854int
1855breadn_flags(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablkno,
1856    int *rabsize, int cnt, struct ucred *cred, int flags, struct buf **bpp)
1857{
1858	struct buf *bp;
1859	int rv = 0, readwait = 0;
1860
1861	CTR3(KTR_BUF, "breadn(%p, %jd, %d)", vp, blkno, size);
1862	/*
1863	 * Can only return NULL if GB_LOCK_NOWAIT flag is specified.
1864	 */
1865	*bpp = bp = getblk(vp, blkno, size, 0, 0, flags);
1866	if (bp == NULL)
1867		return (EBUSY);
1868
1869	/* if not found in cache, do some I/O */
1870	if ((bp->b_flags & B_CACHE) == 0) {
1871		if (!TD_IS_IDLETHREAD(curthread)) {
1872#ifdef RACCT
1873			if (racct_enable) {
1874				PROC_LOCK(curproc);
1875				racct_add_buf(curproc, bp, 0);
1876				PROC_UNLOCK(curproc);
1877			}
1878#endif /* RACCT */
1879			curthread->td_ru.ru_inblock++;
1880		}
1881		bp->b_iocmd = BIO_READ;
1882		bp->b_flags &= ~B_INVAL;
1883		bp->b_ioflags &= ~BIO_ERROR;
1884		if (bp->b_rcred == NOCRED && cred != NOCRED)
1885			bp->b_rcred = crhold(cred);
1886		vfs_busy_pages(bp, 0);
1887		bp->b_iooffset = dbtob(bp->b_blkno);
1888		bstrategy(bp);
1889		++readwait;
1890	}
1891
1892	breada(vp, rablkno, rabsize, cnt, cred);
1893
1894	if (readwait) {
1895		rv = bufwait(bp);
1896		if (rv != 0) {
1897			brelse(bp);
1898			*bpp = NULL;
1899		}
1900	}
1901	return (rv);
1902}
1903
1904/*
1905 * Write, release buffer on completion.  (Done by iodone
1906 * if async).  Do not bother writing anything if the buffer
1907 * is invalid.
1908 *
1909 * Note that we set B_CACHE here, indicating that buffer is
1910 * fully valid and thus cacheable.  This is true even of NFS
1911 * now so we set it generally.  This could be set either here
1912 * or in biodone() since the I/O is synchronous.  We put it
1913 * here.
1914 */
1915int
1916bufwrite(struct buf *bp)
1917{
1918	int oldflags;
1919	struct vnode *vp;
1920	long space;
1921	int vp_md;
1922
1923	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1924	if ((bp->b_bufobj->bo_flag & BO_DEAD) != 0) {
1925		bp->b_flags |= B_INVAL | B_RELBUF;
1926		bp->b_flags &= ~B_CACHE;
1927		brelse(bp);
1928		return (ENXIO);
1929	}
1930	if (bp->b_flags & B_INVAL) {
1931		brelse(bp);
1932		return (0);
1933	}
1934
1935	if (bp->b_flags & B_BARRIER)
1936		atomic_add_long(&barrierwrites, 1);
1937
1938	oldflags = bp->b_flags;
1939
1940	BUF_ASSERT_HELD(bp);
1941
1942	if (bp->b_pin_count > 0)
1943		bunpin_wait(bp);
1944
1945	KASSERT(!(bp->b_vflags & BV_BKGRDINPROG),
1946	    ("FFS background buffer should not get here %p", bp));
1947
1948	vp = bp->b_vp;
1949	if (vp)
1950		vp_md = vp->v_vflag & VV_MD;
1951	else
1952		vp_md = 0;
1953
1954	/*
1955	 * Mark the buffer clean.  Increment the bufobj write count
	 * before the bundirty() call, to prevent another thread from seeing
	 * an empty dirty list and a zero counter for writes in progress,
	 * which would falsely indicate that the bufobj is clean.
1959	 */
1960	bufobj_wref(bp->b_bufobj);
1961	bundirty(bp);
1962
1963	bp->b_flags &= ~B_DONE;
1964	bp->b_ioflags &= ~BIO_ERROR;
1965	bp->b_flags |= B_CACHE;
1966	bp->b_iocmd = BIO_WRITE;
1967
1968	vfs_busy_pages(bp, 1);
1969
1970	/*
1971	 * Normal bwrites pipeline writes
1972	 */
1973	bp->b_runningbufspace = bp->b_bufsize;
1974	space = atomic_fetchadd_long(&runningbufspace, bp->b_runningbufspace);
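	/*
	 * atomic_fetchadd_long() returned the old total, so "space" is the
	 * amount of running I/O before this buffer was added; it is checked
	 * against hirunningspace below to throttle async writers.
	 */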
1975
1976	if (!TD_IS_IDLETHREAD(curthread)) {
1977#ifdef RACCT
1978		if (racct_enable) {
1979			PROC_LOCK(curproc);
1980			racct_add_buf(curproc, bp, 1);
1981			PROC_UNLOCK(curproc);
1982		}
1983#endif /* RACCT */
1984		curthread->td_ru.ru_oublock++;
1985	}
1986	if (oldflags & B_ASYNC)
1987		BUF_KERNPROC(bp);
1988	bp->b_iooffset = dbtob(bp->b_blkno);
1989	bstrategy(bp);
1990
1991	if ((oldflags & B_ASYNC) == 0) {
1992		int rtval = bufwait(bp);
1993		brelse(bp);
1994		return (rtval);
1995	} else if (space > hirunningspace) {
1996		/*
		 * Don't allow the async write to saturate the I/O
		 * system.  We will not deadlock here because we are
		 * blocking waiting for I/O that is already in progress
		 * to complete.  We do not block here if it is the
		 * update or syncer daemon trying to clean up, as that
		 * can lead to deadlock.
2003		 */
2004		if ((curthread->td_pflags & TDP_NORUNNINGBUF) == 0 && !vp_md)
2005			waitrunningbufspace();
2006	}
2007
2008	return (0);
2009}
2010
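/*
 * bufbdflush() is the bop_bdflush method used by buf_ops_bio.  Once a
 * bufobj accumulates more than dirtybufthresh delayed-write buffers, try
 * to flush one of them; if the count is well past the threshold, start an
 * fsync of the whole vnode instead, so that no single vnode monopolizes
 * the dirty buffer space.
 */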
2011void
2012bufbdflush(struct bufobj *bo, struct buf *bp)
2013{
2014	struct buf *nbp;
2015
2016	if (bo->bo_dirty.bv_cnt > dirtybufthresh + 10) {
2017		(void) VOP_FSYNC(bp->b_vp, MNT_NOWAIT, curthread);
2018		altbufferflushes++;
2019	} else if (bo->bo_dirty.bv_cnt > dirtybufthresh) {
2020		BO_LOCK(bo);
2021		/*
2022		 * Try to find a buffer to flush.
2023		 */
2024		TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) {
2025			if ((nbp->b_vflags & BV_BKGRDINPROG) ||
2026			    BUF_LOCK(nbp,
2027				     LK_EXCLUSIVE | LK_NOWAIT, NULL))
2028				continue;
2029			if (bp == nbp)
2030				panic("bdwrite: found ourselves");
2031			BO_UNLOCK(bo);
			/* Don't call buf_countdeps() with the bo lock held. */
2033			if (buf_countdeps(nbp, 0)) {
2034				BO_LOCK(bo);
2035				BUF_UNLOCK(nbp);
2036				continue;
2037			}
2038			if (nbp->b_flags & B_CLUSTEROK) {
2039				vfs_bio_awrite(nbp);
2040			} else {
2041				bremfree(nbp);
2042				bawrite(nbp);
2043			}
2044			dirtybufferflushes++;
2045			break;
2046		}
2047		if (nbp == NULL)
2048			BO_UNLOCK(bo);
2049	}
2050}
2051
2052/*
2053 * Delayed write. (Buffer is marked dirty).  Do not bother writing
2054 * anything if the buffer is marked invalid.
2055 *
2056 * Note that since the buffer must be completely valid, we can safely
 * set B_CACHE.  In fact, we have to set B_CACHE here rather than in
2058 * biodone() in order to prevent getblk from writing the buffer
2059 * out synchronously.
2060 */
2061void
2062bdwrite(struct buf *bp)
2063{
2064	struct thread *td = curthread;
2065	struct vnode *vp;
2066	struct bufobj *bo;
2067
2068	CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2069	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2070	KASSERT((bp->b_flags & B_BARRIER) == 0,
2071	    ("Barrier request in delayed write %p", bp));
2072	BUF_ASSERT_HELD(bp);
2073
2074	if (bp->b_flags & B_INVAL) {
2075		brelse(bp);
2076		return;
2077	}
2078
2079	/*
2080	 * If we have too many dirty buffers, don't create any more.
2081	 * If we are wildly over our limit, then force a complete
2082	 * cleanup. Otherwise, just keep the situation from getting
2083	 * out of control. Note that we have to avoid a recursive
2084	 * disaster and not try to clean up after our own cleanup!
2085	 */
2086	vp = bp->b_vp;
2087	bo = bp->b_bufobj;
2088	if ((td->td_pflags & (TDP_COWINPROGRESS|TDP_INBDFLUSH)) == 0) {
2089		td->td_pflags |= TDP_INBDFLUSH;
2090		BO_BDFLUSH(bo, bp);
2091		td->td_pflags &= ~TDP_INBDFLUSH;
2092	} else
2093		recursiveflushes++;
2094
2095	bdirty(bp);
2096	/*
2097	 * Set B_CACHE, indicating that the buffer is fully valid.  This is
2098	 * true even of NFS now.
2099	 */
2100	bp->b_flags |= B_CACHE;
2101
2102	/*
	 * Doing the bmap now keeps the system from having to do it later,
	 * perhaps when it is attempting to do a sync.  Since the indirect
	 * block -- or whatever other data structure the filesystem needs --
	 * is likely still in memory now, this is a good time to do it.
	 * Note also that if the pageout daemon is requesting a sync, there
	 * might not be enough memory to do the bmap then, so doing it now
	 * is important.
2110	 */
2111	if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) {
2112		VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
2113	}
2114
2115	/*
2116	 * Set the *dirty* buffer range based upon the VM system dirty
2117	 * pages.
2118	 *
	 * Mark the buffer pages as clean.  We need to do this here to
	 * satisfy the vnode_pager and the pageout daemon, so that they
	 * think the pages have been "cleaned".  Note that since the
	 * pages are in a delayed write buffer, the VFS layer will see
	 * that the pages get written out on the next sync, or perhaps
	 * the cluster will be completed.
2125	 */
2126	vfs_clean_pages_dirty_buf(bp);
2127	bqrelse(bp);
2128
2129	/*
2130	 * note: we cannot initiate I/O from a bdwrite even if we wanted to,
2131	 * due to the softdep code.
2132	 */
2133}
2134
2135/*
2136 *	bdirty:
2137 *
2138 *	Turn buffer into delayed write request.  We must clear BIO_READ and
2139 *	B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to
2140 *	itself to properly update it in the dirty/clean lists.  We mark it
2141 *	B_DONE to ensure that any asynchronization of the buffer properly
2142 *	clears B_DONE ( else a panic will occur later ).
2143 *
2144 *	bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
2145 *	might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
2146 *	should only be called if the buffer is known-good.
2147 *
2148 *	Since the buffer is not on a queue, we do not update the numfreebuffers
2149 *	count.
2150 *
2151 *	The buffer must be on QUEUE_NONE.
2152 */
2153void
2154bdirty(struct buf *bp)
2155{
2156
2157	CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X",
2158	    bp, bp->b_vp, bp->b_flags);
2159	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2160	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
2161	    ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
2162	BUF_ASSERT_HELD(bp);
2163	bp->b_flags &= ~(B_RELBUF);
2164	bp->b_iocmd = BIO_WRITE;
2165
2166	if ((bp->b_flags & B_DELWRI) == 0) {
2167		bp->b_flags |= /* XXX B_DONE | */ B_DELWRI;
2168		reassignbuf(bp);
2169		bdirtyadd();
2170	}
2171}
2172
2173/*
2174 *	bundirty:
2175 *
2176 *	Clear B_DELWRI for buffer.
2177 *
2178 *	Since the buffer is not on a queue, we do not update the numfreebuffers
2179 *	count.
2180 *
2181 *	The buffer must be on QUEUE_NONE.
2182 */
2183
2184void
2185bundirty(struct buf *bp)
2186{
2187
2188	CTR3(KTR_BUF, "bundirty(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2189	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2190	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
2191	    ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
2192	BUF_ASSERT_HELD(bp);
2193
2194	if (bp->b_flags & B_DELWRI) {
2195		bp->b_flags &= ~B_DELWRI;
2196		reassignbuf(bp);
2197		bdirtysub();
2198	}
2199	/*
2200	 * Since it is now being written, we can clear its deferred write flag.
2201	 */
2202	bp->b_flags &= ~B_DEFERRED;
2203}
2204
2205/*
2206 *	bawrite:
2207 *
2208 *	Asynchronous write.  Start output on a buffer, but do not wait for
2209 *	it to complete.  The buffer is released when the output completes.
2210 *
2211 *	bwrite() ( or the VOP routine anyway ) is responsible for handling
2212 *	B_INVAL buffers.  Not us.
2213 */
2214void
2215bawrite(struct buf *bp)
2216{
2217
2218	bp->b_flags |= B_ASYNC;
2219	(void) bwrite(bp);
2220}
2221
2222/*
2223 *	babarrierwrite:
2224 *
2225 *	Asynchronous barrier write.  Start output on a buffer, but do not
2226 *	wait for it to complete.  Place a write barrier after this write so
2227 *	that this buffer and all buffers written before it are committed to
2228 *	the disk before any buffers written after this write are committed
2229 *	to the disk.  The buffer is released when the output completes.
2230 */
2231void
2232babarrierwrite(struct buf *bp)
2233{
2234
2235	bp->b_flags |= B_ASYNC | B_BARRIER;
2236	(void) bwrite(bp);
2237}
2238
2239/*
2240 *	bbarrierwrite:
2241 *
2242 *	Synchronous barrier write.  Start output on a buffer and wait for
2243 *	it to complete.  Place a write barrier after this write so that
2244 *	this buffer and all buffers written before it are committed to
2245 *	the disk before any buffers written after this write are committed
2246 *	to the disk.  The buffer is released when the output completes.
2247 */
2248int
2249bbarrierwrite(struct buf *bp)
2250{
2251
2252	bp->b_flags |= B_BARRIER;
2253	return (bwrite(bp));
2254}
2255
2256/*
2257 *	bwillwrite:
2258 *
2259 *	Called prior to the locking of any vnodes when we are expecting to
2260 *	write.  We do not want to starve the buffer cache with too many
2261 *	dirty buffers so we block here.  By blocking prior to the locking
2262 *	of any vnodes we attempt to avoid the situation where a locked vnode
2263 *	prevents the various system daemons from flushing related buffers.
2264 */
2265void
2266bwillwrite(void)
2267{
2268
2269	if (numdirtybuffers >= hidirtybuffers) {
2270		mtx_lock(&bdirtylock);
2271		while (numdirtybuffers >= hidirtybuffers) {
2272			bdirtywait = 1;
2273			msleep(&bdirtywait, &bdirtylock, (PRIBIO + 4),
2274			    "flswai", 0);
2275		}
2276		mtx_unlock(&bdirtylock);
2277	}
2278}
2279
2280/*
2281 * Return true if we have too many dirty buffers.
2282 */
2283int
2284buf_dirty_count_severe(void)
2285{
2286
2287	return(numdirtybuffers >= hidirtybuffers);
2288}
2289
2290/*
2291 *	brelse:
2292 *
2293 *	Release a busy buffer and, if requested, free its resources.  The
2294 *	buffer will be stashed in the appropriate bufqueue[] allowing it
2295 *	to be accessed later as a cache entity or reused for other purposes.
2296 */
2297void
2298brelse(struct buf *bp)
2299{
2300	int qindex;
2301
2302	/*
2303	 * Many functions erroneously call brelse with a NULL bp under rare
2304	 * error conditions. Simply return when called with a NULL bp.
2305	 */
2306	if (bp == NULL)
2307		return;
2308	CTR3(KTR_BUF, "brelse(%p) vp %p flags %X",
2309	    bp, bp->b_vp, bp->b_flags);
2310	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
2311	    ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
2312	KASSERT((bp->b_flags & B_VMIO) != 0 || (bp->b_flags & B_NOREUSE) == 0,
2313	    ("brelse: non-VMIO buffer marked NOREUSE"));
2314
2315	if (BUF_LOCKRECURSED(bp)) {
2316		/*
2317		 * Do not process, in particular, do not handle the
2318		 * B_INVAL/B_RELBUF and do not release to free list.
2319		 */
2320		BUF_UNLOCK(bp);
2321		return;
2322	}
2323
2324	if (bp->b_flags & B_MANAGED) {
2325		bqrelse(bp);
2326		return;
2327	}
2328
2329	if ((bp->b_vflags & (BV_BKGRDINPROG | BV_BKGRDERR)) == BV_BKGRDERR) {
2330		BO_LOCK(bp->b_bufobj);
2331		bp->b_vflags &= ~BV_BKGRDERR;
2332		BO_UNLOCK(bp->b_bufobj);
2333		bdirty(bp);
2334	}
2335	if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) &&
2336	    (bp->b_error != ENXIO || !LIST_EMPTY(&bp->b_dep)) &&
2337	    !(bp->b_flags & B_INVAL)) {
2338		/*
2339		 * Failed write, redirty.  All errors except ENXIO (which
2340		 * means the device is gone) are expected to be potentially
2341		 * transient - underlying media might work if tried again
2342		 * after EIO, and memory might be available after an ENOMEM.
2343		 *
2344		 * Do this also for buffers that failed with ENXIO, but have
2345		 * non-empty dependencies - the soft updates code might need
2346		 * to access the buffer to untangle them.
2347		 *
2348		 * Must clear BIO_ERROR to prevent pages from being scrapped.
2349		 */
2350		bp->b_ioflags &= ~BIO_ERROR;
2351		bdirty(bp);
2352	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
2353	    (bp->b_ioflags & BIO_ERROR) || (bp->b_bufsize <= 0)) {
2354		/*
2355		 * Either a failed read I/O, or we were asked to free or not
2356		 * cache the buffer, or we failed to write to a device that's
2357		 * no longer present.
2358		 */
2359		bp->b_flags |= B_INVAL;
2360		if (!LIST_EMPTY(&bp->b_dep))
2361			buf_deallocate(bp);
2362		if (bp->b_flags & B_DELWRI)
2363			bdirtysub();
2364		bp->b_flags &= ~(B_DELWRI | B_CACHE);
2365		if ((bp->b_flags & B_VMIO) == 0) {
2366			allocbuf(bp, 0);
2367			if (bp->b_vp)
2368				brelvp(bp);
2369		}
2370	}
2371
2372	/*
2373	 * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_truncate()
2374	 * is called with B_DELWRI set, the underlying pages may wind up
2375	 * getting freed causing a previous write (bdwrite()) to get 'lost'
2376	 * because pages associated with a B_DELWRI bp are marked clean.
2377	 *
2378	 * We still allow the B_INVAL case to call vfs_vmio_truncate(), even
2379	 * if B_DELWRI is set.
2380	 */
2381	if (bp->b_flags & B_DELWRI)
2382		bp->b_flags &= ~B_RELBUF;
2383
2384	/*
2385	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
 * constituted, not even NFS buffers now.  Two flags affect this.  If
2387	 * B_INVAL, the struct buf is invalidated but the VM object is kept
2388	 * around ( i.e. so it is trivial to reconstitute the buffer later ).
2389	 *
2390	 * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
2391	 * invalidated.  BIO_ERROR cannot be set for a failed write unless the
2392	 * buffer is also B_INVAL because it hits the re-dirtying code above.
2393	 *
2394	 * Normally we can do this whether a buffer is B_DELWRI or not.  If
2395	 * the buffer is an NFS buffer, it is tracking piecemeal writes or
2396	 * the commit state and we cannot afford to lose the buffer. If the
2397	 * buffer has a background write in progress, we need to keep it
2398	 * around to prevent it from being reconstituted and starting a second
2399	 * background write.
2400	 */
2401	if ((bp->b_flags & B_VMIO) && (bp->b_flags & B_NOCACHE ||
2402	    (bp->b_ioflags & BIO_ERROR && bp->b_iocmd == BIO_READ)) &&
2403	    !(bp->b_vp->v_mount != NULL &&
2404	    (bp->b_vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
2405	    !vn_isdisk(bp->b_vp, NULL) && (bp->b_flags & B_DELWRI))) {
2406		vfs_vmio_invalidate(bp);
2407		allocbuf(bp, 0);
2408	}
2409
2410	if ((bp->b_flags & (B_INVAL | B_RELBUF)) != 0 ||
2411	    (bp->b_flags & (B_DELWRI | B_NOREUSE)) == B_NOREUSE) {
2412		allocbuf(bp, 0);
2413		bp->b_flags &= ~B_NOREUSE;
2414		if (bp->b_vp != NULL)
2415			brelvp(bp);
2416	}
2417
2418	/*
	 * If the buffer has junk contents, signal it and eventually
	 * clean up B_DELWRI and disassociate the vnode so that gbincore()
2421	 * doesn't find it.
2422	 */
2423	if (bp->b_bufsize == 0 || (bp->b_ioflags & BIO_ERROR) != 0 ||
2424	    (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) != 0)
2425		bp->b_flags |= B_INVAL;
2426	if (bp->b_flags & B_INVAL) {
2427		if (bp->b_flags & B_DELWRI)
2428			bundirty(bp);
2429		if (bp->b_vp)
2430			brelvp(bp);
2431	}
2432
2433	/* buffers with no memory */
2434	if (bp->b_bufsize == 0) {
2435		buf_free(bp);
2436		return;
2437	}
2438	/* buffers with junk contents */
2439	if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
2440	    (bp->b_ioflags & BIO_ERROR)) {
2441		bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
2442		if (bp->b_vflags & BV_BKGRDINPROG)
2443			panic("losing buffer 2");
2444		qindex = QUEUE_CLEAN;
2445		bp->b_flags |= B_AGE;
2446	/* remaining buffers */
2447	} else if (bp->b_flags & B_DELWRI)
2448		qindex = QUEUE_DIRTY;
2449	else
2450		qindex = QUEUE_CLEAN;
2451
2452	binsfree(bp, qindex);
2453
2454	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF | B_DIRECT);
2455	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
2456		panic("brelse: not dirty");
2457	/* unlock */
2458	BUF_UNLOCK(bp);
2459	if (qindex == QUEUE_CLEAN)
2460		bufspace_wakeup();
2461}
2462
2463/*
2464 * Release a buffer back to the appropriate queue but do not try to free
2465 * it.  The buffer is expected to be used again soon.
2466 *
2467 * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
2468 * biodone() to requeue an async I/O on completion.  It is also used when
2469 * known good buffers need to be requeued but we think we may need the data
2470 * again soon.
2471 *
2472 * XXX we should be able to leave the B_RELBUF hint set on completion.
2473 */
2474void
2475bqrelse(struct buf *bp)
2476{
2477	int qindex;
2478
2479	CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2480	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
2481	    ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
2482
2483	qindex = QUEUE_NONE;
2484	if (BUF_LOCKRECURSED(bp)) {
2485		/* do not release to free list */
2486		BUF_UNLOCK(bp);
2487		return;
2488	}
2489	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
2490
2491	if (bp->b_flags & B_MANAGED) {
2492		if (bp->b_flags & B_REMFREE)
2493			bremfreef(bp);
2494		goto out;
2495	}
2496
2497	/* buffers with stale but valid contents */
2498	if ((bp->b_flags & B_DELWRI) != 0 || (bp->b_vflags & (BV_BKGRDINPROG |
2499	    BV_BKGRDERR)) == BV_BKGRDERR) {
2500		BO_LOCK(bp->b_bufobj);
2501		bp->b_vflags &= ~BV_BKGRDERR;
2502		BO_UNLOCK(bp->b_bufobj);
2503		qindex = QUEUE_DIRTY;
2504	} else {
2505		if ((bp->b_flags & B_DELWRI) == 0 &&
2506		    (bp->b_xflags & BX_VNDIRTY))
2507			panic("bqrelse: not dirty");
2508		if ((bp->b_flags & B_NOREUSE) != 0) {
2509			brelse(bp);
2510			return;
2511		}
2512		qindex = QUEUE_CLEAN;
2513	}
2514	binsfree(bp, qindex);
2515
2516out:
2517	/* unlock */
2518	BUF_UNLOCK(bp);
2519	if (qindex == QUEUE_CLEAN)
2520		bufspace_wakeup();
2521}
2522
2523/*
2524 * Complete I/O to a VMIO backed page.  Validate the pages as appropriate,
2525 * restore bogus pages.
2526 */
2527static void
2528vfs_vmio_iodone(struct buf *bp)
2529{
2530	vm_ooffset_t foff;
2531	vm_page_t m;
2532	vm_object_t obj;
2533	struct vnode *vp;
2534	int i, iosize, resid;
2535	bool bogus;
2536
2537	obj = bp->b_bufobj->bo_object;
2538	KASSERT(obj->paging_in_progress >= bp->b_npages,
2539	    ("vfs_vmio_iodone: paging in progress(%d) < b_npages(%d)",
2540	    obj->paging_in_progress, bp->b_npages));
2541
2542	vp = bp->b_vp;
2543	KASSERT(vp->v_holdcnt > 0,
2544	    ("vfs_vmio_iodone: vnode %p has zero hold count", vp));
2545	KASSERT(vp->v_object != NULL,
2546	    ("vfs_vmio_iodone: vnode %p has no vm_object", vp));
2547
2548	foff = bp->b_offset;
2549	KASSERT(bp->b_offset != NOOFFSET,
2550	    ("vfs_vmio_iodone: bp %p has no buffer offset", bp));
2551
2552	bogus = false;
2553	iosize = bp->b_bcount - bp->b_resid;
2554	VM_OBJECT_WLOCK(obj);
2555	for (i = 0; i < bp->b_npages; i++) {
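		/*
		 * resid is the number of bytes from foff to the end of
		 * the current page, clamped to the remaining I/O size.
		 */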
2556		resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;
2557		if (resid > iosize)
2558			resid = iosize;
2559
2560		/*
2561		 * cleanup bogus pages, restoring the originals
2562		 */
2563		m = bp->b_pages[i];
2564		if (m == bogus_page) {
2565			bogus = true;
2566			m = vm_page_lookup(obj, OFF_TO_IDX(foff));
2567			if (m == NULL)
2568				panic("biodone: page disappeared!");
2569			bp->b_pages[i] = m;
2570		} else if ((bp->b_iocmd == BIO_READ) && resid > 0) {
2571			/*
2572			 * In the write case, the valid and clean bits are
2573			 * already changed correctly ( see bdwrite() ), so we
2574			 * only need to do this here in the read case.
2575			 */
2576			KASSERT((m->dirty & vm_page_bits(foff & PAGE_MASK,
2577			    resid)) == 0, ("vfs_vmio_iodone: page %p "
2578			    "has unexpected dirty bits", m));
2579			vfs_page_set_valid(bp, foff, m);
2580		}
2581		KASSERT(OFF_TO_IDX(foff) == m->pindex,
2582		    ("vfs_vmio_iodone: foff(%jd)/pindex(%ju) mismatch",
2583		    (intmax_t)foff, (uintmax_t)m->pindex));
2584
2585		vm_page_sunbusy(m);
2586		foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
2587		iosize -= resid;
2588	}
2589	vm_object_pip_wakeupn(obj, bp->b_npages);
2590	VM_OBJECT_WUNLOCK(obj);
2591	if (bogus && buf_mapped(bp)) {
2592		BUF_CHECK_MAPPED(bp);
2593		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
2594		    bp->b_pages, bp->b_npages);
2595	}
2596}
2597
2598/*
2599 * Unwire a page held by a buf and place it on the appropriate vm queue.
2600 */
2601static void
2602vfs_vmio_unwire(struct buf *bp, vm_page_t m)
2603{
2604	bool freed;
2605
2606	vm_page_lock(m);
2607	if (vm_page_unwire(m, PQ_NONE)) {
2608		/*
2609		 * Determine if the page should be freed before adding
2610		 * it to the inactive queue.
2611		 */
2612		if (m->valid == 0) {
2613			freed = !vm_page_busied(m);
2614			if (freed)
2615				vm_page_free(m);
2616		} else if ((bp->b_flags & B_DIRECT) != 0)
2617			freed = vm_page_try_to_free(m);
2618		else
2619			freed = false;
2620		if (!freed) {
2621			/*
2622			 * If the page is unlikely to be reused, let the
2623			 * VM know.  Otherwise, maintain LRU page
2624			 * ordering and put the page at the tail of the
2625			 * inactive queue.
2626			 */
2627			if ((bp->b_flags & B_NOREUSE) != 0)
2628				vm_page_deactivate_noreuse(m);
2629			else
2630				vm_page_deactivate(m);
2631		}
2632	}
2633	vm_page_unlock(m);
2634}
2635
2636/*
2637 * Perform page invalidation when a buffer is released.  The fully invalid
2638 * pages will be reclaimed later in vfs_vmio_truncate().
2639 */
2640static void
2641vfs_vmio_invalidate(struct buf *bp)
2642{
2643	vm_object_t obj;
2644	vm_page_t m;
2645	int i, resid, poffset, presid;
2646
2647	if (buf_mapped(bp)) {
2648		BUF_CHECK_MAPPED(bp);
2649		pmap_qremove(trunc_page((vm_offset_t)bp->b_data), bp->b_npages);
2650	} else
2651		BUF_CHECK_UNMAPPED(bp);
2652	/*
2653	 * Get the base offset and length of the buffer.  Note that
2654	 * in the VMIO case if the buffer block size is not
2655	 * page-aligned then b_data pointer may not be page-aligned.
2656	 * But our b_pages[] array *IS* page aligned.
2657	 *
	 * block sizes less than DEV_BSIZE (usually 512) are not
2659	 * supported due to the page granularity bits (m->valid,
2660	 * m->dirty, etc...).
2661	 *
2662	 * See man buf(9) for more information
2663	 */
2664	obj = bp->b_bufobj->bo_object;
2665	resid = bp->b_bufsize;
2666	poffset = bp->b_offset & PAGE_MASK;
2667	VM_OBJECT_WLOCK(obj);
2668	for (i = 0; i < bp->b_npages; i++) {
2669		m = bp->b_pages[i];
2670		if (m == bogus_page)
2671			panic("vfs_vmio_invalidate: Unexpected bogus page.");
2672		bp->b_pages[i] = NULL;
2673
2674		presid = resid > (PAGE_SIZE - poffset) ?
2675		    (PAGE_SIZE - poffset) : resid;
2676		KASSERT(presid >= 0, ("brelse: extra page"));
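		/*
		 * Wait for any exclusive busy to drain before invalidating
		 * the page; the object lock is dropped while sleeping and
		 * reacquired afterwards.
		 */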
2677		while (vm_page_xbusied(m)) {
2678			vm_page_lock(m);
2679			VM_OBJECT_WUNLOCK(obj);
2680			vm_page_busy_sleep(m, "mbncsh", true);
2681			VM_OBJECT_WLOCK(obj);
2682		}
2683		if (pmap_page_wired_mappings(m) == 0)
2684			vm_page_set_invalid(m, poffset, presid);
2685		vfs_vmio_unwire(bp, m);
2686		resid -= presid;
2687		poffset = 0;
2688	}
2689	VM_OBJECT_WUNLOCK(obj);
2690	bp->b_npages = 0;
2691}
2692
2693/*
2694 * Page-granular truncation of an existing VMIO buffer.
2695 */
2696static void
2697vfs_vmio_truncate(struct buf *bp, int desiredpages)
2698{
2699	vm_object_t obj;
2700	vm_page_t m;
2701	int i;
2702
2703	if (bp->b_npages == desiredpages)
2704		return;
2705
2706	if (buf_mapped(bp)) {
2707		BUF_CHECK_MAPPED(bp);
2708		pmap_qremove((vm_offset_t)trunc_page((vm_offset_t)bp->b_data) +
2709		    (desiredpages << PAGE_SHIFT), bp->b_npages - desiredpages);
2710	} else
2711		BUF_CHECK_UNMAPPED(bp);
2712	obj = bp->b_bufobj->bo_object;
2713	if (obj != NULL)
2714		VM_OBJECT_WLOCK(obj);
2715	for (i = desiredpages; i < bp->b_npages; i++) {
2716		m = bp->b_pages[i];
2717		KASSERT(m != bogus_page, ("allocbuf: bogus page found"));
2718		bp->b_pages[i] = NULL;
2719		vfs_vmio_unwire(bp, m);
2720	}
2721	if (obj != NULL)
2722		VM_OBJECT_WUNLOCK(obj);
2723	bp->b_npages = desiredpages;
2724}
2725
2726/*
2727 * Byte granular extension of VMIO buffers.
2728 */
2729static void
2730vfs_vmio_extend(struct buf *bp, int desiredpages, int size)
2731{
2732	/*
2733	 * We are growing the buffer, possibly in a
2734	 * byte-granular fashion.
2735	 */
2736	vm_object_t obj;
2737	vm_offset_t toff;
2738	vm_offset_t tinc;
2739	vm_page_t m;
2740
2741	/*
2742	 * Step 1, bring in the VM pages from the object, allocating
2743	 * them if necessary.  We must clear B_CACHE if these pages
2744	 * are not valid for the range covered by the buffer.
2745	 */
2746	obj = bp->b_bufobj->bo_object;
2747	VM_OBJECT_WLOCK(obj);
2748	if (bp->b_npages < desiredpages) {
2749		/*
2750		 * We must allocate system pages since blocking
2751		 * here could interfere with paging I/O, no
2752		 * matter which process we are.
2753		 *
2754		 * Only exclusive busy can be tested here.
2755		 * Blocking on shared busy might lead to
2756		 * deadlocks once allocbuf() is called after
2757		 * pages are vfs_busy_pages().
2758		 */
2759		(void)vm_page_grab_pages(obj,
2760		    OFF_TO_IDX(bp->b_offset) + bp->b_npages,
2761		    VM_ALLOC_SYSTEM | VM_ALLOC_IGN_SBUSY |
2762		    VM_ALLOC_NOBUSY | VM_ALLOC_WIRED,
2763		    &bp->b_pages[bp->b_npages], desiredpages - bp->b_npages);
2764		bp->b_npages = desiredpages;
2765	}
2766
2767	/*
2768	 * Step 2.  We've loaded the pages into the buffer,
2769	 * we have to figure out if we can still have B_CACHE
2770	 * set.  Note that B_CACHE is set according to the
2771	 * byte-granular range ( bcount and size ), not the
2772	 * aligned range ( newbsize ).
2773	 *
2774	 * The VM test is against m->valid, which is DEV_BSIZE
2775	 * aligned.  Needless to say, the validity of the data
2776	 * needs to also be DEV_BSIZE aligned.  Note that this
2777	 * fails with NFS if the server or some other client
2778	 * extends the file's EOF.  If our buffer is resized,
2779	 * B_CACHE may remain set! XXX
2780	 */
2781	toff = bp->b_bcount;
2782	tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
2783	while ((bp->b_flags & B_CACHE) && toff < size) {
2784		vm_pindex_t pi;
2785
2786		if (tinc > (size - toff))
2787			tinc = size - toff;
2788		pi = ((bp->b_offset & PAGE_MASK) + toff) >> PAGE_SHIFT;
2789		m = bp->b_pages[pi];
2790		vfs_buf_test_cache(bp, bp->b_offset, toff, tinc, m);
2791		toff += tinc;
2792		tinc = PAGE_SIZE;
2793	}
2794	VM_OBJECT_WUNLOCK(obj);
2795
2796	/*
2797	 * Step 3, fixup the KVA pmap.
2798	 */
2799	if (buf_mapped(bp))
2800		bpmap_qenter(bp);
2801	else
2802		BUF_CHECK_UNMAPPED(bp);
2803}
2804
2805/*
2806 * Check to see if a block at a particular lbn is available for a clustered
2807 * write.
2808 */
2809static int
2810vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno)
2811{
2812	struct buf *bpa;
2813	int match;
2814
2815	match = 0;
2816
2817	/* If the buf isn't in core skip it */
2818	if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL)
2819		return (0);
2820
2821	/* If the buf is busy we don't want to wait for it */
2822	if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
2823		return (0);
2824
2825	/* Only cluster with valid clusterable delayed write buffers */
2826	if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) !=
2827	    (B_DELWRI | B_CLUSTEROK))
2828		goto done;
2829
2830	if (bpa->b_bufsize != size)
2831		goto done;
2832
2833	/*
2834	 * Check to see if it is in the expected place on disk and that the
2835	 * block has been mapped.
2836	 */
2837	if ((bpa->b_blkno != bpa->b_lblkno) && (bpa->b_blkno == blkno))
2838		match = 1;
2839done:
2840	BUF_UNLOCK(bpa);
2841	return (match);
2842}
2843
2844/*
2845 *	vfs_bio_awrite:
2846 *
2847 *	Implement clustered async writes for clearing out B_DELWRI buffers.
2848 *	This is much better then the old way of writing only one buffer at
2849 *	a time.  Note that we may not be presented with the buffers in the
2850 *	correct order, so we search for the cluster in both directions.
2851 */
2852int
2853vfs_bio_awrite(struct buf *bp)
2854{
2855	struct bufobj *bo;
2856	int i;
2857	int j;
2858	daddr_t lblkno = bp->b_lblkno;
2859	struct vnode *vp = bp->b_vp;
2860	int ncl;
2861	int nwritten;
2862	int size;
2863	int maxcl;
2864	int gbflags;
2865
2866	bo = &vp->v_bufobj;
2867	gbflags = (bp->b_data == unmapped_buf) ? GB_UNMAPPED : 0;
2868	/*
2869	 * right now we support clustered writing only to regular files.  If
2870	 * we find a clusterable block we could be in the middle of a cluster
 * rather than at the beginning.
2872	 */
2873	if ((vp->v_type == VREG) &&
2874	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
2875	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
2876
2877		size = vp->v_mount->mnt_stat.f_iosize;
2878		maxcl = MAXPHYS / size;
2879
2880		BO_RLOCK(bo);
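		/*
		 * Count the contiguous clusterable delayed-write buffers,
		 * scanning forward from lblkno and then backward, so the
		 * cluster may start before the buffer we were handed.
		 */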
2881		for (i = 1; i < maxcl; i++)
2882			if (vfs_bio_clcheck(vp, size, lblkno + i,
2883			    bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0)
2884				break;
2885
2886		for (j = 1; i + j <= maxcl && j <= lblkno; j++)
2887			if (vfs_bio_clcheck(vp, size, lblkno - j,
2888			    bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0)
2889				break;
2890		BO_RUNLOCK(bo);
2891		--j;
2892		ncl = i + j;
2893		/*
2894		 * this is a possible cluster write
2895		 */
2896		if (ncl != 1) {
2897			BUF_UNLOCK(bp);
2898			nwritten = cluster_wbuild(vp, size, lblkno - j, ncl,
2899			    gbflags);
2900			return (nwritten);
2901		}
2902	}
2903	bremfree(bp);
2904	bp->b_flags |= B_ASYNC;
2905	/*
2906	 * default (old) behavior, writing out only one block
2907	 *
2908	 * XXX returns b_bufsize instead of b_bcount for nwritten?
2909	 */
2910	nwritten = bp->b_bufsize;
2911	(void) bwrite(bp);
2912
2913	return (nwritten);
2914}
2915
2916/*
2917 *	getnewbuf_kva:
2918 *
2919 *	Allocate KVA for an empty buf header according to gbflags.
2920 */
2921static int
2922getnewbuf_kva(struct buf *bp, int gbflags, int maxsize)
2923{
2924
2925	if ((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_UNMAPPED) {
2926		/*
2927		 * In order to keep fragmentation sane we only allocate kva
2928		 * in BKVASIZE chunks.  XXX with vmem we can do page size.
2929		 */
2930		maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
2931
2932		if (maxsize != bp->b_kvasize &&
2933		    bufkva_alloc(bp, maxsize, gbflags))
2934			return (ENOSPC);
2935	}
2936	return (0);
2937}
2938
2939/*
2940 *	getnewbuf:
2941 *
2942 *	Find and initialize a new buffer header, freeing up existing buffers
2943 *	in the bufqueues as necessary.  The new buffer is returned locked.
2944 *
2945 *	We block if:
2946 *		We have insufficient buffer headers
2947 *		We have insufficient buffer space
2948 *		buffer_arena is too fragmented ( space reservation fails )
2949 *		If we have to flush dirty buffers ( but we try to avoid this )
2950 *
2951 *	The caller is responsible for releasing the reserved bufspace after
2952 *	allocbuf() is called.
2953 */
2954static struct buf *
2955getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int maxsize, int gbflags)
2956{
2957	struct buf *bp;
2958	bool metadata, reserved;
2959
2960	bp = NULL;
2961	KASSERT((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
2962	    ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
2963	if (!unmapped_buf_allowed)
2964		gbflags &= ~(GB_UNMAPPED | GB_KVAALLOC);
2965
2966	if (vp == NULL || (vp->v_vflag & (VV_MD | VV_SYSTEM)) != 0 ||
2967	    vp->v_type == VCHR)
2968		metadata = true;
2969	else
2970		metadata = false;
2971	atomic_add_int(&getnewbufcalls, 1);
2972	reserved = false;
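	/*
	 * Reserve bufspace, then try to allocate a buf header and KVA for
	 * it.  If the reservation or header allocation fails, let
	 * buf_scan() recycle existing buffers and retry; if KVA allocation
	 * fails or scanning makes no progress, fall through, release what
	 * was taken, and sleep in bufspace_wait() below.
	 */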
2973	do {
2974		if (reserved == false &&
2975		    bufspace_reserve(maxsize, metadata) != 0)
2976			continue;
2977		reserved = true;
2978		if ((bp = buf_alloc()) == NULL)
2979			continue;
2980		if (getnewbuf_kva(bp, gbflags, maxsize) == 0)
2981			return (bp);
2982		break;
2983	} while(buf_scan(false) == 0);
2984
2985	if (reserved)
2986		atomic_subtract_long(&bufspace, maxsize);
2987	if (bp != NULL) {
2988		bp->b_flags |= B_INVAL;
2989		brelse(bp);
2990	}
2991	bufspace_wait(vp, gbflags, slpflag, slptimeo);
2992
2993	return (NULL);
2994}
2995
2996/*
2997 *	buf_daemon:
2998 *
2999 *	buffer flushing daemon.  Buffers are normally flushed by the
3000 *	update daemon but if it cannot keep up this process starts to
3001 *	take the load in an attempt to prevent getnewbuf() from blocking.
3002 */
3003static struct kproc_desc buf_kp = {
3004	"bufdaemon",
3005	buf_daemon,
3006	&bufdaemonproc
3007};
3008SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp);
3009
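/*
 * Flush up to "target" dirty buffers, optionally restricted to vnode "vp".
 * If nothing could be written without triggering rollbacks, retry once
 * while permitting buffers with rollback dependencies so that some
 * progress is made.
 */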
3010static int
3011buf_flush(struct vnode *vp, int target)
3012{
3013	int flushed;
3014
3015	flushed = flushbufqueues(vp, target, 0);
3016	if (flushed == 0) {
3017		/*
3018		 * Could not find any buffers without rollback
3019		 * dependencies, so just write the first one
3020		 * in the hopes of eventually making progress.
3021		 */
3022		if (vp != NULL && target > 2)
3023			target /= 2;
3024		flushbufqueues(vp, target, 1);
3025	}
3026	return (flushed);
3027}
3028
3029static void
buf_daemon(void)
3031{
3032	int lodirty;
3033
3034	/*
3035	 * This process needs to be suspended prior to shutdown sync.
3036	 */
3037	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, bufdaemonproc,
3038	    SHUTDOWN_PRI_LAST);
3039
3040	/*
3041	 * This process is allowed to take the buffer cache to the limit
3042	 */
3043	curthread->td_pflags |= TDP_NORUNNINGBUF | TDP_BUFNEED;
3044	mtx_lock(&bdlock);
3045	for (;;) {
3046		bd_request = 0;
3047		mtx_unlock(&bdlock);
3048
3049		kproc_suspend_check(bufdaemonproc);
3050		lodirty = lodirtybuffers;
3051		if (bd_speedupreq) {
3052			lodirty = numdirtybuffers / 2;
3053			bd_speedupreq = 0;
3054		}
3055		/*
3056		 * Do the flush.  Limit the amount of in-transit I/O we
3057		 * allow to build up, otherwise we would completely saturate
3058		 * the I/O system.
3059		 */
3060		while (numdirtybuffers > lodirty) {
3061			if (buf_flush(NULL, numdirtybuffers - lodirty) == 0)
3062				break;
3063			kern_yield(PRI_USER);
3064		}
3065
3066		/*
3067		 * Only clear bd_request if we have reached our low water
3068		 * mark.  The buf_daemon normally waits 1 second and
3069		 * then incrementally flushes any dirty buffers that have
3070		 * built up, within reason.
3071		 *
3072		 * If we were unable to hit our low water mark and couldn't
3073		 * find any flushable buffers, we sleep for a short period
3074		 * to avoid endless loops on unlockable buffers.
3075		 */
3076		mtx_lock(&bdlock);
3077		if (numdirtybuffers <= lodirtybuffers) {
3078			/*
3079			 * We reached our low water mark, reset the
3080			 * request and sleep until we are needed again.
3081			 * The sleep is just so the suspend code works.
3082			 */
3083			bd_request = 0;
3084			/*
3085			 * Do an extra wakeup in case dirty threshold
3086			 * changed via sysctl and the explicit transition
3087			 * out of shortfall was missed.
3088			 */
3089			bdirtywakeup();
3090			if (runningbufspace <= lorunningspace)
3091				runningwakeup();
3092			msleep(&bd_request, &bdlock, PVM, "psleep", hz);
3093		} else {
3094			/*
3095			 * We couldn't find any flushable dirty buffers but
3096			 * still have too many dirty buffers, we
3097			 * have to sleep and try again.  (rare)
3098			 */
3099			msleep(&bd_request, &bdlock, PVM, "qsleep", hz / 10);
3100		}
3101	}
3102}
3103
3104/*
3105 *	flushbufqueues:
3106 *
3107 *	Try to flush a buffer in the dirty queue.  We must be careful to
3108 *	free up B_INVAL buffers instead of write them, which NFS is
3109 *	particularly sensitive to.
3110 */
3111static int flushwithdeps = 0;
3112SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW, &flushwithdeps,
3113    0, "Number of buffers flushed with dependecies that require rollbacks");
3114
3115static int
3116flushbufqueues(struct vnode *lvp, int target, int flushdeps)
3117{
3118	struct buf *sentinel;
3119	struct vnode *vp;
3120	struct mount *mp;
3121	struct buf *bp;
3122	int hasdeps;
3123	int flushed;
3124	int queue;
3125	int error;
3126	bool unlock;
3127
3128	flushed = 0;
3129	queue = QUEUE_DIRTY;
3130	bp = NULL;
3131	sentinel = malloc(sizeof(struct buf), M_TEMP, M_WAITOK | M_ZERO);
3132	sentinel->b_qindex = QUEUE_SENTINEL;
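	/*
	 * The sentinel marks our position in the dirty queue; each pass
	 * advances it just past the buffer being examined so the queue
	 * lock can be dropped while that buffer is flushed.
	 */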
3133	mtx_lock(&bqlocks[queue]);
3134	TAILQ_INSERT_HEAD(&bufqueues[queue], sentinel, b_freelist);
3135	mtx_unlock(&bqlocks[queue]);
3136	while (flushed != target) {
3137		maybe_yield();
3138		mtx_lock(&bqlocks[queue]);
3139		bp = TAILQ_NEXT(sentinel, b_freelist);
3140		if (bp != NULL) {
3141			TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist);
3142			TAILQ_INSERT_AFTER(&bufqueues[queue], bp, sentinel,
3143			    b_freelist);
3144		} else {
3145			mtx_unlock(&bqlocks[queue]);
3146			break;
3147		}
3148		/*
		 * Skip sentinels inserted by other invocations of
		 * flushbufqueues(), taking care not to reorder them.
3151		 *
3152		 * Only flush the buffers that belong to the
3153		 * vnode locked by the curthread.
3154		 */
3155		if (bp->b_qindex == QUEUE_SENTINEL || (lvp != NULL &&
3156		    bp->b_vp != lvp)) {
3157			mtx_unlock(&bqlocks[queue]);
			continue;
3159		}
3160		error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL);
3161		mtx_unlock(&bqlocks[queue]);
3162		if (error != 0)
3163			continue;
3164		if (bp->b_pin_count > 0) {
3165			BUF_UNLOCK(bp);
3166			continue;
3167		}
3168		/*
3169		 * BKGRDINPROG can only be set with the buf and bufobj
3170		 * locks both held.  We tolerate a race to clear it here.
3171		 */
3172		if ((bp->b_vflags & BV_BKGRDINPROG) != 0 ||
3173		    (bp->b_flags & B_DELWRI) == 0) {
3174			BUF_UNLOCK(bp);
3175			continue;
3176		}
3177		if (bp->b_flags & B_INVAL) {
3178			bremfreef(bp);
3179			brelse(bp);
3180			flushed++;
3181			continue;
3182		}
3183
3184		if (!LIST_EMPTY(&bp->b_dep) && buf_countdeps(bp, 0)) {
3185			if (flushdeps == 0) {
3186				BUF_UNLOCK(bp);
3187				continue;
3188			}
3189			hasdeps = 1;
3190		} else
3191			hasdeps = 0;
3192		/*
3193		 * We must hold the lock on a vnode before writing
3194		 * one of its buffers. Otherwise we may confuse, or
3195		 * in the case of a snapshot vnode, deadlock the
3196		 * system.
3197		 *
		 * The lock order here is the reverse of the normal
		 * order of vnode lock followed by buf lock.  This is
		 * ok because the NOWAIT will prevent deadlock.
3201		 */
3202		vp = bp->b_vp;
3203		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
3204			BUF_UNLOCK(bp);
3205			continue;
3206		}
3207		if (lvp == NULL) {
3208			unlock = true;
3209			error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
3210		} else {
3211			ASSERT_VOP_LOCKED(vp, "getbuf");
3212			unlock = false;
3213			error = VOP_ISLOCKED(vp) == LK_EXCLUSIVE ? 0 :
3214			    vn_lock(vp, LK_TRYUPGRADE);
3215		}
3216		if (error == 0) {
3217			CTR3(KTR_BUF, "flushbufqueue(%p) vp %p flags %X",
3218			    bp, bp->b_vp, bp->b_flags);
3219			if (curproc == bufdaemonproc) {
3220				vfs_bio_awrite(bp);
3221			} else {
3222				bremfree(bp);
3223				bwrite(bp);
3224				notbufdflushes++;
3225			}
3226			vn_finished_write(mp);
3227			if (unlock)
3228				VOP_UNLOCK(vp, 0);
3229			flushwithdeps += hasdeps;
3230			flushed++;
3231
3232			/*
3233			 * Sleeping on runningbufspace while holding
3234			 * vnode lock leads to deadlock.
3235			 */
3236			if (curproc == bufdaemonproc &&
3237			    runningbufspace > hirunningspace)
3238				waitrunningbufspace();
3239			continue;
3240		}
3241		vn_finished_write(mp);
3242		BUF_UNLOCK(bp);
3243	}
3244	mtx_lock(&bqlocks[queue]);
3245	TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist);
3246	mtx_unlock(&bqlocks[queue]);
3247	free(sentinel, M_TEMP);
3248	return (flushed);
3249}
3250
3251/*
3252 * Check to see if a block is currently memory resident.
3253 */
3254struct buf *
3255incore(struct bufobj *bo, daddr_t blkno)
3256{
3257	struct buf *bp;
3258
3259	BO_RLOCK(bo);
3260	bp = gbincore(bo, blkno);
3261	BO_RUNLOCK(bo);
3262	return (bp);
3263}
3264
3265/*
3266 * Returns true if no I/O is needed to access the
3267 * associated VM object.  This is like incore except
3268 * it also hunts around in the VM system for the data.
3269 */
3270
3271static int
3272inmem(struct vnode * vp, daddr_t blkno)
3273{
3274	vm_object_t obj;
3275	vm_offset_t toff, tinc, size;
3276	vm_page_t m;
3277	vm_ooffset_t off;
3278
3279	ASSERT_VOP_LOCKED(vp, "inmem");
3280
	if (incore(&vp->v_bufobj, blkno))
		return (1);
	if (vp->v_mount == NULL)
		return (0);
3285	obj = vp->v_object;
3286	if (obj == NULL)
3287		return (0);
3288
3289	size = PAGE_SIZE;
3290	if (size > vp->v_mount->mnt_stat.f_iosize)
3291		size = vp->v_mount->mnt_stat.f_iosize;
3292	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
3293
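	/*
	 * Walk the pages backing one filesystem block; every piece must be
	 * resident and valid in the VM object, otherwise report that the
	 * block is not in memory.
	 */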
3294	VM_OBJECT_RLOCK(obj);
3295	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
3296		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
3297		if (!m)
3298			goto notinmem;
3299		tinc = size;
3300		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
3301			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
3302		if (vm_page_is_valid(m,
3303		    (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
3304			goto notinmem;
3305	}
3306	VM_OBJECT_RUNLOCK(obj);
	return (1);
3308
3309notinmem:
3310	VM_OBJECT_RUNLOCK(obj);
3311	return (0);
3312}
3313
3314/*
3315 * Set the dirty range for a buffer based on the status of the dirty
3316 * bits in the pages comprising the buffer.  The range is limited
3317 * to the size of the buffer.
3318 *
3319 * Tell the VM system that the pages associated with this buffer
3320 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
3322 *
3323 * Note that while we only really need to clean through to b_bcount, we
3324 * just go ahead and clean through to b_bufsize.
3325 */
3326static void
3327vfs_clean_pages_dirty_buf(struct buf *bp)
3328{
3329	vm_ooffset_t foff, noff, eoff;
3330	vm_page_t m;
3331	int i;
3332
3333	if ((bp->b_flags & B_VMIO) == 0 || bp->b_bufsize == 0)
3334		return;
3335
3336	foff = bp->b_offset;
3337	KASSERT(bp->b_offset != NOOFFSET,
3338	    ("vfs_clean_pages_dirty_buf: no buffer offset"));
3339
3340	VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
3341	vfs_drain_busy_pages(bp);
3342	vfs_setdirty_locked_object(bp);
3343	for (i = 0; i < bp->b_npages; i++) {
3344		noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3345		eoff = noff;
3346		if (eoff > bp->b_offset + bp->b_bufsize)
3347			eoff = bp->b_offset + bp->b_bufsize;
3348		m = bp->b_pages[i];
3349		vfs_page_set_validclean(bp, foff, m);
3350		/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
3351		foff = noff;
3352	}
3353	VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
3354}
3355
3356static void
3357vfs_setdirty_locked_object(struct buf *bp)
3358{
3359	vm_object_t object;
3360	int i;
3361
3362	object = bp->b_bufobj->bo_object;
3363	VM_OBJECT_ASSERT_WLOCKED(object);
3364
3365	/*
3366	 * We qualify the scan for modified pages on whether the
3367	 * object has been flushed yet.
3368	 */
3369	if ((object->flags & OBJ_MIGHTBEDIRTY) != 0) {
3370		vm_offset_t boffset;
3371		vm_offset_t eoffset;
3372
3373		/*
3374		 * test the pages to see if they have been modified directly
3375		 * by users through the VM system.
3376		 */
3377		for (i = 0; i < bp->b_npages; i++)
3378			vm_page_test_dirty(bp->b_pages[i]);
3379
3380		/*
3381		 * Calculate the encompassing dirty range, boffset and eoffset,
3382		 * (eoffset - boffset) bytes.
3383		 */
3384
3385		for (i = 0; i < bp->b_npages; i++) {
3386			if (bp->b_pages[i]->dirty)
3387				break;
3388		}
3389		boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
3390
3391		for (i = bp->b_npages - 1; i >= 0; --i) {
3392			if (bp->b_pages[i]->dirty) {
3393				break;
3394			}
3395		}
3396		eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
3397
3398		/*
3399		 * Fit it to the buffer.
3400		 */
3401
3402		if (eoffset > bp->b_bcount)
3403			eoffset = bp->b_bcount;
3404
3405		/*
3406		 * If we have a good dirty range, merge with the existing
3407		 * dirty range.
3408		 */
3409
3410		if (boffset < eoffset) {
3411			if (bp->b_dirtyoff > boffset)
3412				bp->b_dirtyoff = boffset;
3413			if (bp->b_dirtyend < eoffset)
3414				bp->b_dirtyend = eoffset;
3415		}
3416	}
3417}
3418
3419/*
3420 * Allocate the KVA mapping for an existing buffer.
3421 * If an unmapped buffer is provided but a mapped buffer is requested, take
3422 * also care to properly setup mappings between pages and KVA.
3423 */
3424static void
3425bp_unmapped_get_kva(struct buf *bp, daddr_t blkno, int size, int gbflags)
3426{
3427	int bsize, maxsize, need_mapping, need_kva;
3428	off_t offset;
3429
3430	need_mapping = bp->b_data == unmapped_buf &&
3431	    (gbflags & GB_UNMAPPED) == 0;
3432	need_kva = bp->b_kvabase == unmapped_buf &&
3433	    bp->b_data == unmapped_buf &&
3434	    (gbflags & GB_KVAALLOC) != 0;
3435	if (!need_mapping && !need_kva)
3436		return;
3437
3438	BUF_CHECK_UNMAPPED(bp);
3439
3440	if (need_mapping && bp->b_kvabase != unmapped_buf) {
3441		/*
3442		 * Buffer is not mapped, but the KVA was already
3443		 * reserved at the time of the instantiation.  Use the
3444		 * allocated space.
3445		 */
3446		goto has_addr;
3447	}
3448
3449	/*
3450	 * Calculate the amount of the address space we would reserve
3451	 * if the buffer was mapped.
3452	 */
3453	bsize = vn_isdisk(bp->b_vp, NULL) ? DEV_BSIZE : bp->b_bufobj->bo_bsize;
3454	KASSERT(bsize != 0, ("bsize == 0, check bo->bo_bsize"));
3455	offset = blkno * bsize;
3456	maxsize = size + (offset & PAGE_MASK);
3457	maxsize = imax(maxsize, bsize);
3458
3459	while (bufkva_alloc(bp, maxsize, gbflags) != 0) {
3460		if ((gbflags & GB_NOWAIT_BD) != 0) {
3461			/*
3462			 * XXXKIB: defragmentation cannot
3463			 * succeed, not sure what else to do.
3464			 */
3465			panic("GB_NOWAIT_BD and GB_UNMAPPED %p", bp);
3466		}
3467		atomic_add_int(&mappingrestarts, 1);
3468		bufspace_wait(bp->b_vp, gbflags, 0, 0);
3469	}
3470has_addr:
3471	if (need_mapping) {
3472		/* b_offset is handled by bpmap_qenter. */
3473		bp->b_data = bp->b_kvabase;
3474		BUF_CHECK_MAPPED(bp);
3475		bpmap_qenter(bp);
3476	}
3477}
3478
3479/*
3480 *	getblk:
3481 *
3482 *	Get a block given a specified block and offset into a file/device.
3483 *	The buffers B_DONE bit will be cleared on return, making it almost
3484 * 	ready for an I/O initiation.  B_INVAL may or may not be set on
3485 *	return.  The caller should clear B_INVAL prior to initiating a
3486 *	READ.
3487 *
3488 *	For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
3489 *	an existing buffer.
3490 *
3491 *	For a VMIO buffer, B_CACHE is modified according to the backing VM.
3492 *	If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
3493 *	and then cleared based on the backing VM.  If the previous buffer is
3494 *	non-0-sized but invalid, B_CACHE will be cleared.
3495 *
3496 *	If getblk() must create a new buffer, the new buffer is returned with
3497 *	both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
3498 *	case it is returned with B_INVAL clear and B_CACHE set based on the
3499 *	backing VM.
3500 *
3501 *	getblk() also forces a bwrite() for any B_DELWRI buffer whos
3502 *	B_CACHE bit is clear.
3503 *
3504 *	What this means, basically, is that the caller should use B_CACHE to
3505 *	determine whether the buffer is fully valid or not and should clear
3506 *	B_INVAL prior to issuing a read.  If the caller intends to validate
3507 *	the buffer by loading its data area with something, the caller needs
3508 *	to clear B_INVAL.  If the caller does this without issuing an I/O,
3509 *	the caller should set B_CACHE ( as an optimization ), else the caller
3510 *	should issue the I/O and biodone() will set B_CACHE if the I/O was
3511 *	a write attempt or if it was a successful read.  If the caller
3512 *	intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
3513 *	prior to issuing the READ.  biodone() will *not* clear B_INVAL.
3514 */
3515struct buf *
3516getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo,
3517    int flags)
3518{
3519	struct buf *bp;
3520	struct bufobj *bo;
3521	int bsize, error, maxsize, vmio;
3522	off_t offset;
3523
3524	CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size);
3525	KASSERT((flags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
3526	    ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
3527	ASSERT_VOP_LOCKED(vp, "getblk");
3528	if (size > maxbcachebuf)
3529		panic("getblk: size(%d) > maxbcachebuf(%d)\n", size,
3530		    maxbcachebuf);
3531	if (!unmapped_buf_allowed)
3532		flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
3533
3534	bo = &vp->v_bufobj;
3535loop:
3536	BO_RLOCK(bo);
3537	bp = gbincore(bo, blkno);
3538	if (bp != NULL) {
3539		int lockflags;
3540		/*
3541		 * Buffer is in-core.  If the buffer is not busy nor managed,
3542		 * it must be on a queue.
3543		 */
3544		lockflags = LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK;
3545
3546		if (flags & GB_LOCK_NOWAIT)
3547			lockflags |= LK_NOWAIT;
3548
3549		error = BUF_TIMELOCK(bp, lockflags,
3550		    BO_LOCKPTR(bo), "getblk", slpflag, slptimeo);
3551
3552		/*
3553		 * If we slept and got the lock we have to restart in case
3554		 * the buffer changed identities.
3555		 */
3556		if (error == ENOLCK)
3557			goto loop;
3558		/* We timed out or were interrupted. */
3559		else if (error)
3560			return (NULL);
3561		/* If recursed, assume caller knows the rules. */
3562		else if (BUF_LOCKRECURSED(bp))
3563			goto end;
3564
3565		/*
3566		 * The buffer is locked.  B_CACHE is cleared if the buffer is
3567		 * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
3568		 * and for a VMIO buffer B_CACHE is adjusted according to the
3569		 * backing VM cache.
3570		 */
3571		if (bp->b_flags & B_INVAL)
3572			bp->b_flags &= ~B_CACHE;
3573		else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
3574			bp->b_flags |= B_CACHE;
3575		if (bp->b_flags & B_MANAGED)
3576			MPASS(bp->b_qindex == QUEUE_NONE);
3577		else
3578			bremfree(bp);
3579
3580		/*
3581		 * check for size inconsistencies for non-VMIO case.
3582		 */
3583		if (bp->b_bcount != size) {
3584			if ((bp->b_flags & B_VMIO) == 0 ||
3585			    (size > bp->b_kvasize)) {
3586				if (bp->b_flags & B_DELWRI) {
3587					/*
3588					 * If buffer is pinned and caller does
3589					 * not want sleep  waiting for it to be
3590					 * unpinned, bail out
3591					 * */
3592					if (bp->b_pin_count > 0) {
3593						if (flags & GB_LOCK_NOWAIT) {
3594							bqrelse(bp);
3595							return (NULL);
3596						} else {
3597							bunpin_wait(bp);
3598						}
3599					}
3600					bp->b_flags |= B_NOCACHE;
3601					bwrite(bp);
3602				} else {
3603					if (LIST_EMPTY(&bp->b_dep)) {
3604						bp->b_flags |= B_RELBUF;
3605						brelse(bp);
3606					} else {
3607						bp->b_flags |= B_NOCACHE;
3608						bwrite(bp);
3609					}
3610				}
3611				goto loop;
3612			}
3613		}
3614
3615		/*
3616		 * Handle the case of unmapped buffer which should
3617		 * become mapped, or the buffer for which KVA
3618		 * reservation is requested.
3619		 */
3620		bp_unmapped_get_kva(bp, blkno, size, flags);
3621
3622		/*
3623		 * If the size is inconsistent in the VMIO case, we can resize
3624		 * the buffer.  This might lead to B_CACHE getting set or
3625		 * cleared.  If the size has not changed, B_CACHE remains
3626		 * unchanged from its previous state.
3627		 */
3628		allocbuf(bp, size);
3629
3630		KASSERT(bp->b_offset != NOOFFSET,
3631		    ("getblk: no buffer offset"));
3632
3633		/*
3634		 * A buffer with B_DELWRI set and B_CACHE clear must
3635		 * be committed before we can return the buffer in
3636		 * order to prevent the caller from issuing a read
3637		 * ( due to B_CACHE not being set ) and overwriting
3638		 * it.
3639		 *
3640		 * Most callers, including NFS and FFS, need this to
3641		 * operate properly either because they assume they
3642		 * can issue a read if B_CACHE is not set, or because
3643		 * ( for example ) an uncached B_DELWRI might loop due
3644		 * to softupdates re-dirtying the buffer.  In the latter
3645		 * case, B_CACHE is set after the first write completes,
3646		 * preventing further loops.
3647		 * NOTE!  b*write() sets B_CACHE.  If we cleared B_CACHE
3648		 * above while extending the buffer, we cannot allow the
3649		 * buffer to remain with B_CACHE set after the write
3650		 * completes or it will represent a corrupt state.  To
3651		 * deal with this we set B_NOCACHE to scrap the buffer
3652		 * after the write.
3653		 *
3654		 * We might be able to do something fancy, like setting
3655		 * B_CACHE in bwrite() except if B_DELWRI is already set,
3656		 * so the below call doesn't set B_CACHE, but that gets real
3657		 * confusing.  This is much easier.
3658		 */
3659
3660		if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
3661			bp->b_flags |= B_NOCACHE;
3662			bwrite(bp);
3663			goto loop;
3664		}
3665		bp->b_flags &= ~B_DONE;
3666	} else {
3667		/*
3668		 * Buffer is not in-core, create new buffer.  The buffer
3669		 * returned by getnewbuf() is locked.  Note that the returned
3670		 * buffer is also considered valid (not marked B_INVAL).
3671		 */
3672		BO_RUNLOCK(bo);
3673		/*
3674		 * If the user does not want us to create the buffer, bail out
3675		 * here.
3676		 */
		if (flags & GB_NOCREAT)
			return (NULL);
		if (numfreebuffers == 0 && TD_IS_IDLETHREAD(curthread))
			return (NULL);
3681
3682		bsize = vn_isdisk(vp, NULL) ? DEV_BSIZE : bo->bo_bsize;
3683		KASSERT(bsize != 0, ("bsize == 0, check bo->bo_bsize"));
3684		offset = blkno * bsize;
3685		vmio = vp->v_object != NULL;
3686		if (vmio) {
3687			maxsize = size + (offset & PAGE_MASK);
3688		} else {
3689			maxsize = size;
			/* Do not allow non-VMIO unmapped buffers. */
3691			flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
3692		}
3693		maxsize = imax(maxsize, bsize);
3694
3695		bp = getnewbuf(vp, slpflag, slptimeo, maxsize, flags);
3696		if (bp == NULL) {
			if (slpflag || slptimeo)
				return (NULL);
3699			/*
3700			 * XXX This is here until the sleep path is diagnosed
3701			 * enough to work under very low memory conditions.
3702			 *
3703			 * There's an issue on low memory, 4BSD+non-preempt
3704			 * systems (eg MIPS routers with 32MB RAM) where buffer
3705			 * exhaustion occurs without sleeping for buffer
			 * reclamation.  This just sticks in a loop and
3707			 * constantly attempts to allocate a buffer, which
3708			 * hits exhaustion and tries to wakeup bufdaemon.
3709			 * This never happens because we never yield.
3710			 *
3711			 * The real solution is to identify and fix these cases
3712			 * so we aren't effectively busy-waiting in a loop
			 * until the reclamation path has cycles to run.
3714			 */
3715			kern_yield(PRI_USER);
3716			goto loop;
3717		}
3718
3719		/*
3720		 * This code is used to make sure that a buffer is not
3721		 * created while the getnewbuf routine is blocked.
3722		 * This can be a problem whether the vnode is locked or not.
3723		 * If the buffer is created out from under us, we have to
3724		 * throw away the one we just created.
3725		 *
3726		 * Note: this must occur before we associate the buffer
3727		 * with the vp especially considering limitations in
3728		 * the splay tree implementation when dealing with duplicate
3729		 * lblkno's.
3730		 */
3731		BO_LOCK(bo);
3732		if (gbincore(bo, blkno)) {
3733			BO_UNLOCK(bo);
3734			bp->b_flags |= B_INVAL;
3735			brelse(bp);
3736			bufspace_release(maxsize);
3737			goto loop;
3738		}
3739
3740		/*
3741		 * Insert the buffer into the hash, so that it can
3742		 * be found by incore.
3743		 */
3744		bp->b_blkno = bp->b_lblkno = blkno;
3745		bp->b_offset = offset;
3746		bgetvp(vp, bp);
3747		BO_UNLOCK(bo);
3748
3749		/*
3750		 * Set the B_VMIO bit and let allocbuf() grow the buffer.
3751		 * Since the buffer size starts out as 0, B_CACHE will be set by
3752		 * allocbuf() for the VMIO case prior to it testing the
3753		 * backing store for validity.
3754		 */
3755
3756		if (vmio) {
3757			bp->b_flags |= B_VMIO;
3758			KASSERT(vp->v_object == bp->b_bufobj->bo_object,
3759			    ("ARGH! different b_bufobj->bo_object %p %p %p\n",
3760			    bp, vp->v_object, bp->b_bufobj->bo_object));
3761		} else {
3762			bp->b_flags &= ~B_VMIO;
3763			KASSERT(bp->b_bufobj->bo_object == NULL,
3764			    ("ARGH! has b_bufobj->bo_object %p %p\n",
3765			    bp, bp->b_bufobj->bo_object));
3766			BUF_CHECK_MAPPED(bp);
3767		}
3768
3769		allocbuf(bp, size);
3770		bufspace_release(maxsize);
3771		bp->b_flags &= ~B_DONE;
3772	}
3773	CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
3774	BUF_ASSERT_HELD(bp);
3775end:
3776	KASSERT(bp->b_bufobj == bo,
3777	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
3778	return (bp);
3779}
3780
3781/*
3782 * Get an empty, disassociated buffer of given size.  The buffer is initially
3783 * set to B_INVAL.
3784 */
3785struct buf *
3786geteblk(int size, int flags)
3787{
3788	struct buf *bp;
3789	int maxsize;
3790
3791	maxsize = (size + BKVAMASK) & ~BKVAMASK;
3792	while ((bp = getnewbuf(NULL, 0, 0, maxsize, flags)) == NULL) {
3793		if ((flags & GB_NOWAIT_BD) &&
3794		    (curthread->td_pflags & TDP_BUFNEED) != 0)
3795			return (NULL);
3796	}
3797	allocbuf(bp, size);
3798	bufspace_release(maxsize);
3799	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
3800	BUF_ASSERT_HELD(bp);
3801	return (bp);
3802}
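
/*
 * Illustrative sketch (not part of this file): a caller needing a
 * temporary, vnode-independent scratch buffer might use geteblk()
 * roughly as below and release it with brelse(); because the buffer is
 * marked B_INVAL, brelse() discards it instead of caching it.
 *
 *	struct buf *bp;
 *
 *	bp = geteblk(MAXBSIZE, 0);
 *	bzero(bp->b_data, MAXBSIZE);
 *	... fill in bp->b_data ...
 *	brelse(bp);
 */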
3803
3804/*
3805 * Truncate the backing store for a non-vmio buffer.
3806 */
3807static void
3808vfs_nonvmio_truncate(struct buf *bp, int newbsize)
3809{
3810
3811	if (bp->b_flags & B_MALLOC) {
3812		/*
3813		 * malloced buffers are not shrunk
3814		 */
3815		if (newbsize == 0) {
3816			bufmallocadjust(bp, 0);
3817			free(bp->b_data, M_BIOBUF);
3818			bp->b_data = bp->b_kvabase;
3819			bp->b_flags &= ~B_MALLOC;
3820		}
3821		return;
3822	}
3823	vm_hold_free_pages(bp, newbsize);
3824	bufspace_adjust(bp, newbsize);
3825}
3826
3827/*
3828 * Extend the backing for a non-VMIO buffer.
3829 */
3830static void
3831vfs_nonvmio_extend(struct buf *bp, int newbsize)
3832{
3833	caddr_t origbuf;
3834	int origbufsize;
3835
3836	/*
3837	 * We only use malloced memory on the first allocation,
3838	 * and revert to page-allocated memory when the buffer
3839	 * grows.
3840	 *
3841	 * There is a potential SMP race here that could lead
3842	 * to bufmallocspace slightly exceeding the max.  It
3843	 * is probably extremely rare and not worth worrying
3844	 * over.
3845	 */
3846	if (bp->b_bufsize == 0 && newbsize <= PAGE_SIZE/2 &&
3847	    bufmallocspace < maxbufmallocspace) {
3848		bp->b_data = malloc(newbsize, M_BIOBUF, M_WAITOK);
3849		bp->b_flags |= B_MALLOC;
3850		bufmallocadjust(bp, newbsize);
3851		return;
3852	}
3853
3854	/*
3855	 * If the buffer is growing on an allocation other than its
3856	 * first, revert to the page-allocation scheme and copy the
3857	 * existing malloced data over.
3858	 */
3859	origbuf = NULL;
3860	origbufsize = 0;
3861	if (bp->b_flags & B_MALLOC) {
3862		origbuf = bp->b_data;
3863		origbufsize = bp->b_bufsize;
3864		bp->b_data = bp->b_kvabase;
3865		bufmallocadjust(bp, 0);
3866		bp->b_flags &= ~B_MALLOC;
3867		newbsize = round_page(newbsize);
3868	}
3869	vm_hold_load_pages(bp, (vm_offset_t) bp->b_data + bp->b_bufsize,
3870	    (vm_offset_t) bp->b_data + newbsize);
3871	if (origbuf != NULL) {
3872		bcopy(origbuf, bp->b_data, origbufsize);
3873		free(origbuf, M_BIOBUF);
3874	}
3875	bufspace_adjust(bp, newbsize);
3876}
3877
3878/*
3879 * This code constitutes the buffer memory from either anonymous system
3880 * memory (in the case of non-VMIO operations) or from an associated
3881 * VM object (in the case of VMIO operations).  This code is able to
3882 * resize a buffer up or down.
3883 *
3884 * Note that this code is tricky, and has many complications to resolve
3885 * deadlock or inconsistent data situations.  Tread lightly!!!
3886 * There are B_CACHE and B_DELWRI interactions that must be dealt with by
3887 * the caller.  Calling this code willy nilly can result in the loss of data.
3888 *
3889 * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
3890 * B_CACHE for the non-VMIO case.
3891 */
3892int
3893allocbuf(struct buf *bp, int size)
3894{
3895	int newbsize;
3896
3897	BUF_ASSERT_HELD(bp);
3898
3899	if (bp->b_bcount == size)
3900		return (1);
3901
3902	if (bp->b_kvasize != 0 && bp->b_kvasize < size)
3903		panic("allocbuf: buffer too small");
3904
3905	newbsize = roundup2(size, DEV_BSIZE);
3906	if ((bp->b_flags & B_VMIO) == 0) {
3907		if ((bp->b_flags & B_MALLOC) == 0)
3908			newbsize = round_page(newbsize);
3909		/*
3910		 * Just get anonymous memory from the kernel.  Don't
3911		 * mess with B_CACHE.
3912		 */
3913		if (newbsize < bp->b_bufsize)
3914			vfs_nonvmio_truncate(bp, newbsize);
3915		else if (newbsize > bp->b_bufsize)
3916			vfs_nonvmio_extend(bp, newbsize);
3917	} else {
3918		int desiredpages;
3919
3920		desiredpages = (size == 0) ? 0 :
3921		    num_pages((bp->b_offset & PAGE_MASK) + newbsize);
3922
3923		if (bp->b_flags & B_MALLOC)
3924			panic("allocbuf: VMIO buffer can't be malloced");
3925		/*
3926		 * Set B_CACHE initially if buffer is 0 length or will become
3927		 * 0-length.
3928		 */
3929		if (size == 0 || bp->b_bufsize == 0)
3930			bp->b_flags |= B_CACHE;
3931
3932		if (newbsize < bp->b_bufsize)
3933			vfs_vmio_truncate(bp, desiredpages);
3934		/* XXX This looks as if it should be newbsize > b_bufsize */
3935		else if (size > bp->b_bcount)
3936			vfs_vmio_extend(bp, desiredpages, size);
3937		bufspace_adjust(bp, newbsize);
3938	}
3939	bp->b_bcount = size;		/* requested buffer size. */
3940	return (1);
3941}
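
/*
 * Worked example of the VMIO sizing above (illustrative, assuming 4K
 * pages and a 512-byte DEV_BSIZE): for a request of size 6144 on a
 * buffer whose b_offset is 512 bytes into its first page, newbsize is
 * roundup2(6144, 512) == 6144 and desiredpages is
 * num_pages(512 + 6144) == num_pages(6656) == 2, so the buffer spans
 * two VM pages even though it holds only a page and a half of data.
 */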
3942
3943extern int inflight_transient_maps;
3944
3945static struct bio_queue nondump_bios;
3946
3947void
3948biodone(struct bio *bp)
3949{
3950	struct mtx *mtxp;
3951	void (*done)(struct bio *);
3952	vm_offset_t start, end;
3953
3955	/*
3956	 * Avoid completing I/O when dumping after a panic since that may
3957	 * result in a deadlock in the filesystem or pager code.  Note that
3958	 * this doesn't affect dumps that were started manually since we aim
3959	 * to keep the system usable after it has been resumed.
3960	 */
3961	if (__predict_false(dumping && SCHEDULER_STOPPED())) {
3962		TAILQ_INSERT_HEAD(&nondump_bios, bp, bio_queue);
3963		return;
3964	}
3965	if ((bp->bio_flags & BIO_TRANSIENT_MAPPING) != 0) {
3966		bp->bio_flags &= ~BIO_TRANSIENT_MAPPING;
3967		bp->bio_flags |= BIO_UNMAPPED;
3968		start = trunc_page((vm_offset_t)bp->bio_data);
3969		end = round_page((vm_offset_t)bp->bio_data + bp->bio_length);
3970		bp->bio_data = unmapped_buf;
3971		pmap_qremove(start, atop(end - start));
3972		vmem_free(transient_arena, start, end - start);
3973		atomic_add_int(&inflight_transient_maps, -1);
3974	}
3975	done = bp->bio_done;
3976	if (done == NULL) {
3977		mtxp = mtx_pool_find(mtxpool_sleep, bp);
3978		mtx_lock(mtxp);
3979		bp->bio_flags |= BIO_DONE;
3980		wakeup(bp);
3981		mtx_unlock(mtxp);
3982	} else
3983		done(bp);
3984}
3985
3986/*
3987 * Wait for a BIO to finish.
3988 */
3989int
3990biowait(struct bio *bp, const char *wchan)
3991{
3992	struct mtx *mtxp;
3993
3994	mtxp = mtx_pool_find(mtxpool_sleep, bp);
3995	mtx_lock(mtxp);
3996	while ((bp->bio_flags & BIO_DONE) == 0)
3997		msleep(bp, mtxp, PRIBIO, wchan, 0);
3998	mtx_unlock(mtxp);
3999	if (bp->bio_error != 0)
4000		return (bp->bio_error);
4001	if (!(bp->bio_flags & BIO_ERROR))
4002		return (0);
4003	return (EIO);
4004}
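
/*
 * Illustrative sketch (not part of this file): a GEOM consumer doing a
 * synchronous read leaves bio_done NULL so that biodone() takes the
 * wakeup path above and biowait() can sleep on the bio, roughly in the
 * style of g_read_data().  Here "cp" stands for an already-attached
 * struct g_consumer and is an assumption of the sketch:
 *
 *	struct bio *bip;
 *	int error;
 *
 *	bip = g_alloc_bio();
 *	bip->bio_cmd = BIO_READ;
 *	bip->bio_done = NULL;
 *	bip->bio_offset = offset;
 *	bip->bio_length = length;
 *	bip->bio_data = ptr;
 *	g_io_request(bip, cp);
 *	error = biowait(bip, "gread");
 *	g_destroy_bio(bip);
 */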
4005
4006void
4007biofinish(struct bio *bp, struct devstat *stat, int error)
4008{
4009
4010	if (error) {
4011		bp->bio_error = error;
4012		bp->bio_flags |= BIO_ERROR;
4013	}
4014	if (stat != NULL)
4015		devstat_end_transaction_bio(stat, bp);
4016	biodone(bp);
4017}
4018
4019/*
4020 *	bufwait:
4021 *
4022 *	Wait for buffer I/O completion, returning error status.  The buffer
4023 *	is left locked and B_DONE on return.  B_EINTR is converted into an EINTR
4024 *	error and cleared.
4025 */
4026int
4027bufwait(struct buf *bp)
4028{
4029	if (bp->b_iocmd == BIO_READ)
4030		bwait(bp, PRIBIO, "biord");
4031	else
4032		bwait(bp, PRIBIO, "biowr");
4033	if (bp->b_flags & B_EINTR) {
4034		bp->b_flags &= ~B_EINTR;
4035		return (EINTR);
4036	}
4037	if (bp->b_ioflags & BIO_ERROR) {
4038		return (bp->b_error ? bp->b_error : EIO);
4039	} else {
4040		return (0);
4041	}
4042}
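
/*
 * Illustrative sketch (not part of this file): bufwait() pairs with a
 * strategy call in the same way the bread*() helpers in this file use
 * it.  A hand-rolled read of a block that is not already cached looks
 * roughly like this (vp, blkno, size and error are placeholders):
 *
 *	bp = getblk(vp, blkno, size, 0, 0, 0);
 *	if ((bp->b_flags & B_CACHE) == 0) {
 *		bp->b_iocmd = BIO_READ;
 *		bp->b_flags &= ~B_INVAL;
 *		bp->b_ioflags &= ~BIO_ERROR;
 *		vfs_busy_pages(bp, 0);
 *		bp->b_iooffset = dbtob(bp->b_blkno);
 *		bstrategy(bp);
 *		error = bufwait(bp);
 *	}
 */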
4043
4044/*
4045 *	bufdone:
4046 *
4047 *	Finish I/O on a buffer, optionally calling a completion function.
4048 *	This is usually called from an interrupt so process blocking is
4049 *	not allowed.
4050 *
4051 *	bufdone() is also responsible for setting B_CACHE in a B_VMIO bp.
4052 *	In a non-VMIO bp, B_CACHE will be set on the next getblk()
4053 *	assuming B_INVAL is clear.
4054 *
4055 *	For the VMIO case, we set B_CACHE if the op was a read and no
4056 *	read error occurred, or if the op was a write.  B_CACHE is never
4057 *	set if the buffer is invalid or otherwise uncacheable.
4058 *
4059 *	bufdone() does not mess with B_INVAL, allowing the I/O routine or the
4060 *	initiator to leave B_INVAL set so that the buffer is brelse()d out of
4061 *	existence from within the completion path.
4062 */
4063void
4064bufdone(struct buf *bp)
4065{
4066	struct bufobj *dropobj;
4067	void    (*biodone)(struct buf *);
4068
4069	CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
4070	dropobj = NULL;
4071
4072	KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
4073	BUF_ASSERT_HELD(bp);
4074
4075	runningbufwakeup(bp);
4076	if (bp->b_iocmd == BIO_WRITE)
4077		dropobj = bp->b_bufobj;
4078	/* call optional completion function if requested */
4079	if (bp->b_iodone != NULL) {
4080		biodone = bp->b_iodone;
4081		bp->b_iodone = NULL;
4082		(*biodone) (bp);
4083		if (dropobj)
4084			bufobj_wdrop(dropobj);
4085		return;
4086	}
4087
4088	bufdone_finish(bp);
4089
4090	if (dropobj)
4091		bufobj_wdrop(dropobj);
4092}
4093
4094void
4095bufdone_finish(struct buf *bp)
4096{
4097	BUF_ASSERT_HELD(bp);
4098
4099	if (!LIST_EMPTY(&bp->b_dep))
4100		buf_complete(bp);
4101
4102	if (bp->b_flags & B_VMIO) {
4103		/*
4104		 * Set B_CACHE if the op was a normal read and no error
4105		 * occurred.  B_CACHE is set for writes in the b*write()
4106		 * routines.
4107		 */
4108		if (bp->b_iocmd == BIO_READ &&
4109		    !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
4110		    !(bp->b_ioflags & BIO_ERROR))
4111			bp->b_flags |= B_CACHE;
4112		vfs_vmio_iodone(bp);
4113	}
4114
4115	/*
4116	 * For asynchronous completions, release the buffer now. The brelse
4117	 * will do a wakeup there if necessary - so no need to do a wakeup
4118	 * here in the async case. The sync case always needs to do a wakeup.
4119	 */
4120	if (bp->b_flags & B_ASYNC) {
4121		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) ||
4122		    (bp->b_ioflags & BIO_ERROR))
4123			brelse(bp);
4124		else
4125			bqrelse(bp);
4126	} else
4127		bdone(bp);
4128}
4129
4130/*
4131 * This routine is called in lieu of iodone in the case of
4132 * incomplete I/O.  This keeps the busy status for pages
4133 * consistent.
4134 */
4135void
4136vfs_unbusy_pages(struct buf *bp)
4137{
4138	int i;
4139	vm_object_t obj;
4140	vm_page_t m;
4141
4142	runningbufwakeup(bp);
4143	if (!(bp->b_flags & B_VMIO))
4144		return;
4145
4146	obj = bp->b_bufobj->bo_object;
4147	VM_OBJECT_WLOCK(obj);
4148	for (i = 0; i < bp->b_npages; i++) {
4149		m = bp->b_pages[i];
4150		if (m == bogus_page) {
4151			m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
4152			if (!m)
4153				panic("vfs_unbusy_pages: page missing\n");
4154			bp->b_pages[i] = m;
4155			if (buf_mapped(bp)) {
4156				BUF_CHECK_MAPPED(bp);
4157				pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
4158				    bp->b_pages, bp->b_npages);
4159			} else
4160				BUF_CHECK_UNMAPPED(bp);
4161		}
4162		vm_page_sunbusy(m);
4163	}
4164	vm_object_pip_wakeupn(obj, bp->b_npages);
4165	VM_OBJECT_WUNLOCK(obj);
4166}
4167
4168/*
4169 * vfs_page_set_valid:
4170 *
4171 *	Set the valid bits in a page based on the supplied offset.   The
4172 *	range is restricted to the buffer's size.
4173 *
4174 *	This routine is typically called after a read completes.
4175 */
4176static void
4177vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m)
4178{
4179	vm_ooffset_t eoff;
4180
4181	/*
4182	 * Compute the end offset, eoff, such that [off, eoff) does not span a
4183	 * page boundary and eoff is not greater than the end of the buffer.
4184	 * The end of the buffer, in this case, is our file EOF, not the
4185	 * allocation size of the buffer.
4186	 */
4187	eoff = (off + PAGE_SIZE) & ~(vm_ooffset_t)PAGE_MASK;
4188	if (eoff > bp->b_offset + bp->b_bcount)
4189		eoff = bp->b_offset + bp->b_bcount;
4190
4191	/*
4192	 * Set valid range.  This is typically the entire buffer and thus the
4193	 * entire page.
4194	 */
4195	if (eoff > off)
4196		vm_page_set_valid_range(m, off & PAGE_MASK, eoff - off);
4197}
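
/*
 * Worked example for vfs_page_set_valid() (illustrative, assuming 4K
 * pages): with off == 0x11200 and a buffer whose b_offset + b_bcount
 * ends at 0x11a00, eoff is first rounded up to the next page boundary
 * (0x12000) and then clipped to 0x11a00.  The range marked valid in
 * the page is therefore [0x200, 0xa00), i.e. only the part of the page
 * actually covered by the buffer.
 */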
4198
4199/*
4200 * vfs_page_set_validclean:
4201 *
4202 *	Set the valid bits and clear the dirty bits in a page based on the
4203 *	supplied offset.   The range is restricted to the buffer's size.
4204 */
4205static void
4206vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, vm_page_t m)
4207{
4208	vm_ooffset_t soff, eoff;
4209
4210	/*
4211	 * Start and end offsets in buffer.  eoff - soff may not cross a
4212	 * page boundary or cross the end of the buffer.  The end of the
4213	 * buffer, in this case, is our file EOF, not the allocation size
4214	 * of the buffer.
4215	 */
4216	soff = off;
4217	eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
4218	if (eoff > bp->b_offset + bp->b_bcount)
4219		eoff = bp->b_offset + bp->b_bcount;
4220
4221	/*
4222	 * Set valid range.  This is typically the entire buffer and thus the
4223	 * entire page.
4224	 */
4225	if (eoff > soff) {
4226		vm_page_set_validclean(
4227		    m,
4228		   (vm_offset_t) (soff & PAGE_MASK),
4229		   (vm_offset_t) (eoff - soff)
4230		);
4231	}
4232}
4233
4234/*
4235 * Ensure that all buffer pages are not exclusive busied.  If any page is
4236 * exclusive busy, drain it.
4237 */
4238void
4239vfs_drain_busy_pages(struct buf *bp)
4240{
4241	vm_page_t m;
4242	int i, last_busied;
4243
4244	VM_OBJECT_ASSERT_WLOCKED(bp->b_bufobj->bo_object);
4245	last_busied = 0;
4246	for (i = 0; i < bp->b_npages; i++) {
4247		m = bp->b_pages[i];
4248		if (vm_page_xbusied(m)) {
4249			for (; last_busied < i; last_busied++)
4250				vm_page_sbusy(bp->b_pages[last_busied]);
4251			while (vm_page_xbusied(m)) {
4252				vm_page_lock(m);
4253				VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
4254				vm_page_busy_sleep(m, "vbpage", true);
4255				VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
4256			}
4257		}
4258	}
4259	for (i = 0; i < last_busied; i++)
4260		vm_page_sunbusy(bp->b_pages[i]);
4261}
4262
4263/*
4264 * This routine is called before a device strategy routine.
4265 * It is used to tell the VM system that paging I/O is in
4266 * progress, and treat the pages associated with the buffer
4267 * almost as being exclusive busy.  Also the object paging_in_progress
4268 * flag is handled to make sure that the object doesn't become
4269 * inconsistent.
4270 *
4271 * Since I/O has not been initiated yet, certain buffer flags
4272 * such as BIO_ERROR or B_INVAL may be in an inconsistent state
4273 * and should be ignored.
4274 */
4275void
4276vfs_busy_pages(struct buf *bp, int clear_modify)
4277{
4278	vm_object_t obj;
4279	vm_ooffset_t foff;
4280	vm_page_t m;
4281	int i;
4282	bool bogus;
4283
4284	if (!(bp->b_flags & B_VMIO))
4285		return;
4286
4287	obj = bp->b_bufobj->bo_object;
4288	foff = bp->b_offset;
4289	KASSERT(bp->b_offset != NOOFFSET,
4290	    ("vfs_busy_pages: no buffer offset"));
4291	VM_OBJECT_WLOCK(obj);
4292	vfs_drain_busy_pages(bp);
4293	if (bp->b_bufsize != 0)
4294		vfs_setdirty_locked_object(bp);
4295	bogus = false;
4296	for (i = 0; i < bp->b_npages; i++) {
4297		m = bp->b_pages[i];
4298
4299		if ((bp->b_flags & B_CLUSTER) == 0) {
4300			vm_object_pip_add(obj, 1);
4301			vm_page_sbusy(m);
4302		}
4303		/*
4304		 * When readying a buffer for a read (i.e.,
4305		 * clear_modify == 0), it is important to do
4306		 * bogus_page replacement for valid pages in
4307		 * partially instantiated buffers.  Partially
4308		 * instantiated buffers can, in turn, occur when
4309		 * reconstituting a buffer from its VM backing store
4310		 * base.  We only have to do this if B_CACHE is
4311		 * clear ( which causes the I/O to occur in the
4312		 * first place ).  The replacement prevents the read
4313		 * I/O from overwriting potentially dirty VM-backed
4314		 * pages.  XXX bogus page replacement is, uh, bogus.
4315		 * It may not work properly with small-block devices.
4316		 * We need to find a better way.
4317		 */
4318		if (clear_modify) {
4319			pmap_remove_write(m);
4320			vfs_page_set_validclean(bp, foff, m);
4321		} else if (m->valid == VM_PAGE_BITS_ALL &&
4322		    (bp->b_flags & B_CACHE) == 0) {
4323			bp->b_pages[i] = bogus_page;
4324			bogus = true;
4325		}
4326		foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
4327	}
4328	VM_OBJECT_WUNLOCK(obj);
4329	if (bogus && buf_mapped(bp)) {
4330		BUF_CHECK_MAPPED(bp);
4331		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
4332		    bp->b_pages, bp->b_npages);
4333	}
4334}
4335
4336/*
4337 *	vfs_bio_set_valid:
4338 *
4339 *	Set the range within the buffer to valid.  The range is
4340 *	relative to the beginning of the buffer, b_offset.  Note that
4341 *	b_offset itself may be offset from the beginning of the first
4342 *	page.
4343 */
4344void
4345vfs_bio_set_valid(struct buf *bp, int base, int size)
4346{
4347	int i, n;
4348	vm_page_t m;
4349
4350	if (!(bp->b_flags & B_VMIO))
4351		return;
4352
4353	/*
4354	 * Fixup base to be relative to beginning of first page.
4355	 * Set initial n to be the maximum number of bytes in the
4356	 * first page that can be validated.
4357	 */
4358	base += (bp->b_offset & PAGE_MASK);
4359	n = PAGE_SIZE - (base & PAGE_MASK);
4360
4361	VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
4362	for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
4363		m = bp->b_pages[i];
4364		if (n > size)
4365			n = size;
4366		vm_page_set_valid_range(m, base & PAGE_MASK, n);
4367		base += n;
4368		size -= n;
4369		n = PAGE_SIZE;
4370	}
4371	VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
4372}
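
/*
 * Worked example for vfs_bio_set_valid() (illustrative, assuming 4K
 * pages): with b_offset == 0x11200, base == 0x100 and size == 0x2000,
 * base is fixed up to 0x300 relative to the first page.  The first
 * iteration then validates [0x300, 0x1000) of page 0 (0xd00 bytes),
 * the second validates all of page 1, and the last validates the
 * remaining 0x300 bytes at the start of page 2.
 */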
4373
4374/*
4375 *	vfs_bio_clrbuf:
4376 *
4377 *	If the specified buffer is a non-VMIO buffer, clear the entire
4378 *	buffer.  If the specified buffer is a VMIO buffer, clear and
4379 *	validate only the previously invalid portions of the buffer.
4380 *	This routine essentially fakes an I/O, so we need to clear
4381 *	BIO_ERROR and B_INVAL.
4382 *
4383 *	Note that while we only theoretically need to clear through b_bcount,
4384 *	we go ahead and clear through b_bufsize.
4385 */
4386void
4387vfs_bio_clrbuf(struct buf *bp)
4388{
4389	int i, j, mask, sa, ea, slide;
4390
4391	if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) {
4392		clrbuf(bp);
4393		return;
4394	}
4395	bp->b_flags &= ~B_INVAL;
4396	bp->b_ioflags &= ~BIO_ERROR;
4397	VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
4398	if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
4399	    (bp->b_offset & PAGE_MASK) == 0) {
4400		if (bp->b_pages[0] == bogus_page)
4401			goto unlock;
4402		mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
4403		VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[0]->object);
4404		if ((bp->b_pages[0]->valid & mask) == mask)
4405			goto unlock;
4406		if ((bp->b_pages[0]->valid & mask) == 0) {
4407			pmap_zero_page_area(bp->b_pages[0], 0, bp->b_bufsize);
4408			bp->b_pages[0]->valid |= mask;
4409			goto unlock;
4410		}
4411	}
4412	sa = bp->b_offset & PAGE_MASK;
4413	slide = 0;
4414	for (i = 0; i < bp->b_npages; i++, sa = 0) {
4415		slide = imin(slide + PAGE_SIZE, bp->b_offset + bp->b_bufsize);
4416		ea = slide & PAGE_MASK;
4417		if (ea == 0)
4418			ea = PAGE_SIZE;
4419		if (bp->b_pages[i] == bogus_page)
4420			continue;
4421		j = sa / DEV_BSIZE;
4422		mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
4423		VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[i]->object);
4424		if ((bp->b_pages[i]->valid & mask) == mask)
4425			continue;
4426		if ((bp->b_pages[i]->valid & mask) == 0)
4427			pmap_zero_page_area(bp->b_pages[i], sa, ea - sa);
4428		else {
4429			for (; sa < ea; sa += DEV_BSIZE, j++) {
4430				if ((bp->b_pages[i]->valid & (1 << j)) == 0) {
4431					pmap_zero_page_area(bp->b_pages[i],
4432					    sa, DEV_BSIZE);
4433				}
4434			}
4435		}
4436		bp->b_pages[i]->valid |= mask;
4437	}
4438unlock:
4439	VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
4440	bp->b_resid = 0;
4441}
4442
4443void
4444vfs_bio_bzero_buf(struct buf *bp, int base, int size)
4445{
4446	vm_page_t m;
4447	int i, n;
4448
4449	if (buf_mapped(bp)) {
4450		BUF_CHECK_MAPPED(bp);
4451		bzero(bp->b_data + base, size);
4452	} else {
4453		BUF_CHECK_UNMAPPED(bp);
4454		n = PAGE_SIZE - (base & PAGE_MASK);
4455		for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
4456			m = bp->b_pages[i];
4457			if (n > size)
4458				n = size;
4459			pmap_zero_page_area(m, base & PAGE_MASK, n);
4460			base += n;
4461			size -= n;
4462			n = PAGE_SIZE;
4463		}
4464	}
4465}
4466
4467/*
4468 * Update buffer flags based on I/O request parameters, optionally releasing the
4469 * buffer.  If it's VMIO or direct I/O, the buffer pages are released to the VM,
4470 * where they may be placed on a page queue (VMIO) or freed immediately (direct
4471 * I/O).  Otherwise the buffer is released to the cache.
4472 */
4473static void
4474b_io_dismiss(struct buf *bp, int ioflag, bool release)
4475{
4476
4477	KASSERT((ioflag & IO_NOREUSE) == 0 || (ioflag & IO_VMIO) != 0,
4478	    ("buf %p non-VMIO noreuse", bp));
4479
4480	if ((ioflag & IO_DIRECT) != 0)
4481		bp->b_flags |= B_DIRECT;
4482	if ((ioflag & IO_EXT) != 0)
4483		bp->b_xflags |= BX_ALTDATA;
4484	if ((ioflag & (IO_VMIO | IO_DIRECT)) != 0 && LIST_EMPTY(&bp->b_dep)) {
4485		bp->b_flags |= B_RELBUF;
4486		if ((ioflag & IO_NOREUSE) != 0)
4487			bp->b_flags |= B_NOREUSE;
4488		if (release)
4489			brelse(bp);
4490	} else if (release)
4491		bqrelse(bp);
4492}
4493
4494void
4495vfs_bio_brelse(struct buf *bp, int ioflag)
4496{
4497
4498	b_io_dismiss(bp, ioflag, true);
4499}
4500
4501void
4502vfs_bio_set_flags(struct buf *bp, int ioflag)
4503{
4504
4505	b_io_dismiss(bp, ioflag, false);
4506}
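
/*
 * Illustrative sketch (not part of this file): a filesystem read loop
 * typically finishes each block with vfs_bio_brelse(), letting the
 * ioflag decide whether the buffer is recycled aggressively (IO_VMIO,
 * IO_DIRECT, IO_NOREUSE) or simply requeued.  Roughly, with lbn,
 * blkoffset, xfersize and ioflag as placeholders:
 *
 *	error = bread(vp, lbn, size, NOCRED, &bp);
 *	if (error != 0)
 *		return (error);
 *	error = uiomove((char *)bp->b_data + blkoffset, xfersize, uio);
 *	vfs_bio_brelse(bp, ioflag);
 */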
4507
4508/*
4509 * vm_hold_load_pages and vm_hold_free_pages get pages into
4510 * a buffer's address space.  The pages are anonymous and are
4511 * not associated with a file object.
4512 */
4513static void
4514vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
4515{
4516	vm_offset_t pg;
4517	vm_page_t p;
4518	int index;
4519
4520	BUF_CHECK_MAPPED(bp);
4521
4522	to = round_page(to);
4523	from = round_page(from);
4524	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
4525
4526	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
4527		/*
4528		 * note: must allocate system pages since blocking here
4529		 * could interfere with paging I/O, regardless of which
4530		 * process context we are running in.
4531		 */
4532		p = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ |
4533		    VM_ALLOC_WIRED | VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT) |
4534		    VM_ALLOC_WAITOK);
4535		pmap_qenter(pg, &p, 1);
4536		bp->b_pages[index] = p;
4537	}
4538	bp->b_npages = index;
4539}
4540
4541/* Return pages associated with this buf to the vm system */
4542static void
4543vm_hold_free_pages(struct buf *bp, int newbsize)
4544{
4545	vm_offset_t from;
4546	vm_page_t p;
4547	int index, newnpages;
4548
4549	BUF_CHECK_MAPPED(bp);
4550
4551	from = round_page((vm_offset_t)bp->b_data + newbsize);
4552	newnpages = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
4553	if (bp->b_npages > newnpages)
4554		pmap_qremove(from, bp->b_npages - newnpages);
4555	for (index = newnpages; index < bp->b_npages; index++) {
4556		p = bp->b_pages[index];
4557		bp->b_pages[index] = NULL;
4558		p->wire_count--;
4559		vm_page_free(p);
4560	}
4561	atomic_subtract_int(&vm_cnt.v_wire_count, bp->b_npages - newnpages);
4562	bp->b_npages = newnpages;
4563}
4564
4565/*
4566 * Map an I/O request into kernel virtual address space.
4567 *
4568 * All requests are (re)mapped into kernel VA space.
4569 * Notice that we use b_bufsize for the size of the buffer
4570 * to be mapped.  b_bcount might be modified by the driver.
4571 *
4572 * Note that even if the caller determines that the address space should
4573 * be valid, a race or a smaller file mapped into a larger space may
4574 * actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST
4575 * check the return value.
4576 *
4577 * This function only works with pager buffers.
4578 */
4579int
4580vmapbuf(struct buf *bp, void *uaddr, size_t len, int mapbuf)
4581{
4582	vm_prot_t prot;
4583	int pidx;
4584
4585	prot = VM_PROT_READ;
4586	if (bp->b_iocmd == BIO_READ)
4587		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
4588	if ((pidx = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
4589	    (vm_offset_t)uaddr, len, prot, bp->b_pages,
4590	    btoc(MAXPHYS))) < 0)
4591		return (-1);
4592	bp->b_bufsize = len;
4593	bp->b_npages = pidx;
4594	bp->b_offset = ((vm_offset_t)uaddr) & PAGE_MASK;
4595	if (mapbuf || !unmapped_buf_allowed) {
4596		pmap_qenter((vm_offset_t)bp->b_kvabase, bp->b_pages, pidx);
4597		bp->b_data = bp->b_kvabase + bp->b_offset;
4598	} else
4599		bp->b_data = unmapped_buf;
4600	return(0);
4601}
4602
4603/*
4604 * Free the I/O map PTEs associated with this I/O operation.
4605 * We also invalidate the TLB entries and reset b_data to unmapped_buf.
4606 *
4607 * This function only works with pager buffers.
4608 */
4609void
4610vunmapbuf(struct buf *bp)
4611{
4612	int npages;
4613
4614	npages = bp->b_npages;
4615	if (buf_mapped(bp))
4616		pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
4617	vm_page_unhold_pages(bp->b_pages, npages);
4618
4619	bp->b_data = unmapped_buf;
4620}
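
/*
 * Illustrative sketch (not part of this file): vmapbuf() and
 * vunmapbuf() are paired around raw I/O on user memory, physio()-style,
 * with bp being a pager buffer.  The last argument of vmapbuf() selects
 * whether a KVA mapping is forced (1) or an unmapped buffer is allowed
 * when the system permits it (0), and the return value must be checked
 * because wiring the user pages can fail:
 *
 *	if (vmapbuf(bp, udata, len, 1) < 0) {
 *		error = EFAULT;
 *		goto done;
 *	}
 *	... issue the I/O on bp and wait for it to complete ...
 *	vunmapbuf(bp);
 */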
4621
4622void
4623bdone(struct buf *bp)
4624{
4625	struct mtx *mtxp;
4626
4627	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4628	mtx_lock(mtxp);
4629	bp->b_flags |= B_DONE;
4630	wakeup(bp);
4631	mtx_unlock(mtxp);
4632}
4633
4634void
4635bwait(struct buf *bp, u_char pri, const char *wchan)
4636{
4637	struct mtx *mtxp;
4638
4639	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4640	mtx_lock(mtxp);
4641	while ((bp->b_flags & B_DONE) == 0)
4642		msleep(bp, mtxp, pri, wchan, 0);
4643	mtx_unlock(mtxp);
4644}
4645
4646int
4647bufsync(struct bufobj *bo, int waitfor)
4648{
4649
4650	return (VOP_FSYNC(bo->__bo_vnode, waitfor, curthread));
4651}
4652
4653void
4654bufstrategy(struct bufobj *bo, struct buf *bp)
4655{
4656	int i = 0;
4657	struct vnode *vp;
4658
4659	vp = bp->b_vp;
4660	KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy"));
4661	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
4662	    ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp));
4663	i = VOP_STRATEGY(vp, bp);
4664	KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp));
4665}
4666
4667void
4668bufobj_wrefl(struct bufobj *bo)
4669{
4670
4671	KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
4672	ASSERT_BO_WLOCKED(bo);
4673	bo->bo_numoutput++;
4674}
4675
4676void
4677bufobj_wref(struct bufobj *bo)
4678{
4679
4680	KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
4681	BO_LOCK(bo);
4682	bo->bo_numoutput++;
4683	BO_UNLOCK(bo);
4684}
4685
4686void
4687bufobj_wdrop(struct bufobj *bo)
4688{
4689
4690	KASSERT(bo != NULL, ("NULL bo in bufobj_wdrop"));
4691	BO_LOCK(bo);
4692	KASSERT(bo->bo_numoutput > 0, ("bufobj_wdrop non-positive count"));
4693	if ((--bo->bo_numoutput == 0) && (bo->bo_flag & BO_WWAIT)) {
4694		bo->bo_flag &= ~BO_WWAIT;
4695		wakeup(&bo->bo_numoutput);
4696	}
4697	BO_UNLOCK(bo);
4698}
4699
4700int
4701bufobj_wwait(struct bufobj *bo, int slpflag, int timeo)
4702{
4703	int error;
4704
4705	KASSERT(bo != NULL, ("NULL bo in bufobj_wwait"));
4706	ASSERT_BO_WLOCKED(bo);
4707	error = 0;
4708	while (bo->bo_numoutput) {
4709		bo->bo_flag |= BO_WWAIT;
4710		error = msleep(&bo->bo_numoutput, BO_LOCKPTR(bo),
4711		    slpflag | (PRIBIO + 1), "bo_wwait", timeo);
4712		if (error)
4713			break;
4714	}
4715	return (error);
4716}
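
/*
 * Illustrative sketch (not part of this file): fsync-style callers
 * drain pending writes on a buffer object by calling bufobj_wwait()
 * with the bufobj lock held; the function itself sleeps until
 * bo_numoutput drops to zero:
 *
 *	BO_LOCK(bo);
 *	error = bufobj_wwait(bo, 0, 0);
 *	BO_UNLOCK(bo);
 */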
4717
4718void
4719bpin(struct buf *bp)
4720{
4721	struct mtx *mtxp;
4722
4723	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4724	mtx_lock(mtxp);
4725	bp->b_pin_count++;
4726	mtx_unlock(mtxp);
4727}
4728
4729void
4730bunpin(struct buf *bp)
4731{
4732	struct mtx *mtxp;
4733
4734	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4735	mtx_lock(mtxp);
4736	if (--bp->b_pin_count == 0)
4737		wakeup(bp);
4738	mtx_unlock(mtxp);
4739}
4740
4741void
4742bunpin_wait(struct buf *bp)
4743{
4744	struct mtx *mtxp;
4745
4746	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4747	mtx_lock(mtxp);
4748	while (bp->b_pin_count > 0)
4749		msleep(bp, mtxp, PRIBIO, "bwunpin", 0);
4750	mtx_unlock(mtxp);
4751}
4752
4753/*
4754 * Set bio_data or bio_ma for struct bio from the struct buf.
4755 */
4756void
4757bdata2bio(struct buf *bp, struct bio *bip)
4758{
4759
4760	if (!buf_mapped(bp)) {
4761		KASSERT(unmapped_buf_allowed, ("unmapped"));
4762		bip->bio_ma = bp->b_pages;
4763		bip->bio_ma_n = bp->b_npages;
4764		bip->bio_data = unmapped_buf;
4765		bip->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
4766		bip->bio_flags |= BIO_UNMAPPED;
4767		KASSERT(round_page(bip->bio_ma_offset + bip->bio_length) /
4768		    PAGE_SIZE == bp->b_npages,
4769		    ("Buffer %p too short: %d %lld %d", bp, bip->bio_ma_offset,
4770		    (long long)bip->bio_length, bip->bio_ma_n));
4771	} else {
4772		bip->bio_data = bp->b_data;
4773		bip->bio_ma = NULL;
4774	}
4775}
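
/*
 * Illustrative sketch (not part of this file): a strategy layer that
 * converts a struct buf into a struct bio, in the style of what the
 * geom_vfs glue does, uses bdata2bio() so that unmapped buffers are
 * handed down as page lists instead of forcing a KVA mapping.  Here
 * "cp" and "my_done" are placeholders for the consumer and completion
 * callback:
 *
 *	bip = g_alloc_bio();
 *	bip->bio_cmd = bp->b_iocmd;
 *	bip->bio_offset = bp->b_iooffset;
 *	bip->bio_length = bp->b_bcount;
 *	bdata2bio(bp, bip);
 *	bip->bio_done = my_done;
 *	g_io_request(bip, cp);
 */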
4776
4777static int buf_pager_relbuf;
4778SYSCTL_INT(_vfs, OID_AUTO, buf_pager_relbuf, CTLFLAG_RWTUN,
4779    &buf_pager_relbuf, 0,
4780    "Make buffer pager release buffers after reading");
4781
4782/*
4783 * The buffer pager.  It uses buffer reads to validate pages.
4784 *
4785 * In contrast to the generic local pager from vm/vnode_pager.c, this
4786 * pager correctly and easily handles volumes where the underlying
4787 * device block size is greater than the machine page size.  The
4788 * buffer cache transparently extends the requested page run to be
4789 * aligned at the block boundary, and does the necessary bogus page
4790 * replacements in the addends to avoid obliterating already valid
4791 * pages.
4792 *
4793 * The only non-trivial issue is that the exclusive busy state for
4794 * pages, which is assumed by the vm_pager_getpages() interface, is
4795 * incompatible with the VMIO buffer cache's desire to share-busy the
4796 * pages.  This function performs a trivial downgrade of the pages'
4797 * state before reading buffers, and a less trivial upgrade from the
4798 * shared-busy to excl-busy state after the read.
4799 */
4800int
4801vfs_bio_getpages(struct vnode *vp, vm_page_t *ma, int count,
4802    int *rbehind, int *rahead, vbg_get_lblkno_t get_lblkno,
4803    vbg_get_blksize_t get_blksize)
4804{
4805	vm_page_t m;
4806	vm_object_t object;
4807	struct buf *bp;
4808	struct mount *mp;
4809	daddr_t lbn, lbnp;
4810	vm_ooffset_t la, lb, poff, poffe;
4811	long bsize;
4812	int bo_bs, br_flags, error, i, pgsin, pgsin_a, pgsin_b;
4813	bool redo, lpart;
4814
4815	object = vp->v_object;
4816	mp = vp->v_mount;
4817	la = IDX_TO_OFF(ma[count - 1]->pindex);
4818	if (la >= object->un_pager.vnp.vnp_size)
4819		return (VM_PAGER_BAD);
4820
4821	/*
4822	 * Change the meaning of la from where the last requested page starts
4823	 * to where it ends, because that's the end of the requested region
4824	 * and the start of the potential read-ahead region.
4825	 */
4826	la += PAGE_SIZE;
4827	lpart = la > object->un_pager.vnp.vnp_size;
4828	bo_bs = get_blksize(vp, get_lblkno(vp, IDX_TO_OFF(ma[0]->pindex)));
4829
4830	/*
4831	 * Calculate read-ahead, behind and total pages.
4832	 */
4833	pgsin = count;
4834	lb = IDX_TO_OFF(ma[0]->pindex);
4835	pgsin_b = OFF_TO_IDX(lb - rounddown2(lb, bo_bs));
4836	pgsin += pgsin_b;
4837	if (rbehind != NULL)
4838		*rbehind = pgsin_b;
4839	pgsin_a = OFF_TO_IDX(roundup2(la, bo_bs) - la);
4840	if (la + IDX_TO_OFF(pgsin_a) >= object->un_pager.vnp.vnp_size)
4841		pgsin_a = OFF_TO_IDX(roundup2(object->un_pager.vnp.vnp_size,
4842		    PAGE_SIZE) - la);
4843	pgsin += pgsin_a;
4844	if (rahead != NULL)
4845		*rahead = pgsin_a;
4846	PCPU_INC(cnt.v_vnodein);
4847	PCPU_ADD(cnt.v_vnodepgsin, pgsin);
4848
4849	br_flags = (mp != NULL && (mp->mnt_kern_flag & MNTK_UNMAPPED_BUFS)
4850	    != 0) ? GB_UNMAPPED : 0;
4851	VM_OBJECT_WLOCK(object);
4852again:
4853	for (i = 0; i < count; i++)
4854		vm_page_busy_downgrade(ma[i]);
4855	VM_OBJECT_WUNLOCK(object);
4856
4857	lbnp = -1;
4858	for (i = 0; i < count; i++) {
4859		m = ma[i];
4860
4861		/*
4862		 * Pages are shared busy and the object lock is not
4863		 * owned, which together allow for the pages'
4864		 * invalidation.  The racy test for validity avoids
4865		 * useless creation of the buffer for the most typical
4866		 * case when invalidation is not used in redo or for
4867		 * parallel read.  The shared->excl upgrade loop at
4868		 * the end of the function catches the race in a
4869		 * reliable way (protected by the object lock).
4870		 */
4871		if (m->valid == VM_PAGE_BITS_ALL)
4872			continue;
4873
4874		poff = IDX_TO_OFF(m->pindex);
4875		poffe = MIN(poff + PAGE_SIZE, object->un_pager.vnp.vnp_size);
4876		for (; poff < poffe; poff += bsize) {
4877			lbn = get_lblkno(vp, poff);
4878			if (lbn == lbnp)
4879				goto next_page;
4880			lbnp = lbn;
4881
4882			bsize = get_blksize(vp, lbn);
4883			error = bread_gb(vp, lbn, bsize, curthread->td_ucred,
4884			    br_flags, &bp);
4885			if (error != 0)
4886				goto end_pages;
4887			if (LIST_EMPTY(&bp->b_dep)) {
4888				/*
4889				 * Invalidation clears m->valid, but
4890				 * may leave B_CACHE flag if the
4891				 * buffer existed at the invalidation
4892				 * time.  In this case, recycle the
4893				 * buffer to do real read on next
4894				 * bread() after redo.
4895				 *
4896				 * Otherwise B_RELBUF is not strictly
4897				 * necessary, enable to reduce buf
4898				 * cache pressure.
4899				 */
4900				if (buf_pager_relbuf ||
4901				    m->valid != VM_PAGE_BITS_ALL)
4902					bp->b_flags |= B_RELBUF;
4903
4904				bp->b_flags &= ~B_NOCACHE;
4905				brelse(bp);
4906			} else {
4907				bqrelse(bp);
4908			}
4909		}
4910		KASSERT(1 /* racy, enable for debugging */ ||
4911		    m->valid == VM_PAGE_BITS_ALL || i == count - 1,
4912		    ("buf %d %p invalid", i, m));
4913		if (i == count - 1 && lpart) {
4914			VM_OBJECT_WLOCK(object);
4915			if (m->valid != 0 &&
4916			    m->valid != VM_PAGE_BITS_ALL)
4917				vm_page_zero_invalid(m, TRUE);
4918			VM_OBJECT_WUNLOCK(object);
4919		}
4920next_page:;
4921	}
4922end_pages:
4923
4924	VM_OBJECT_WLOCK(object);
4925	redo = false;
4926	for (i = 0; i < count; i++) {
4927		vm_page_sunbusy(ma[i]);
4928		ma[i] = vm_page_grab(object, ma[i]->pindex, VM_ALLOC_NORMAL);
4929
4930		/*
4931		 * Since the pages were only sbusy while neither the
4932		 * buffer nor the object lock was held by us, or
4933		 * reallocated while vm_page_grab() slept for busy
4934		 * relinquish, they could have been invalidated.
4935		 * Recheck the valid bits and re-read as needed.
4936		 *
4937		 * Note that the last page is made fully valid in the
4938		 * read loop, and partial validity for the page at
4939		 * index count - 1 could mean that the page was
4940		 * invalidated or removed, so we must restart for
4941		 * safety as well.
4942		 */
4943		if (ma[i]->valid != VM_PAGE_BITS_ALL)
4944			redo = true;
4945	}
4946	if (redo && error == 0)
4947		goto again;
4948	VM_OBJECT_WUNLOCK(object);
4949	return (error != 0 ? VM_PAGER_ERROR : VM_PAGER_OK);
4950}
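
/*
 * Illustrative sketch (not part of this file): a filesystem can
 * delegate its VOP_GETPAGES implementation to vfs_bio_getpages() by
 * supplying two small callbacks that translate a byte offset to a
 * logical block number and report the block size of a given block.
 * The xx_* names and XX_BLOCKSIZE() are placeholders; FFS wires its
 * getpages up in essentially this shape:
 *
 *	static daddr_t
 *	xx_gbp_getblkno(struct vnode *vp, vm_ooffset_t off)
 *	{
 *
 *		return (off / XX_BLOCKSIZE(vp));
 *	}
 *
 *	static int
 *	xx_gbp_getblksz(struct vnode *vp, daddr_t lbn)
 *	{
 *
 *		return (XX_BLOCKSIZE(vp));
 *	}
 *
 *	static int
 *	xx_getpages(struct vop_getpages_args *ap)
 *	{
 *
 *		return (vfs_bio_getpages(ap->a_vp, ap->a_m, ap->a_count,
 *		    ap->a_rbehind, ap->a_rahead, xx_gbp_getblkno,
 *		    xx_gbp_getblksz));
 *	}
 */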
4951
4952#include "opt_ddb.h"
4953#ifdef DDB
4954#include <ddb/ddb.h>
4955
4956/* DDB command to show buffer data */
4957DB_SHOW_COMMAND(buffer, db_show_buffer)
4958{
4959	/* get args */
4960	struct buf *bp = (struct buf *)addr;
4961
4962	if (!have_addr) {
4963		db_printf("usage: show buffer <addr>\n");
4964		return;
4965	}
4966
4967	db_printf("buf at %p\n", bp);
4968	db_printf("b_flags = 0x%b, b_xflags=0x%b, b_vflags=0x%b\n",
4969	    (u_int)bp->b_flags, PRINT_BUF_FLAGS, (u_int)bp->b_xflags,
4970	    PRINT_BUF_XFLAGS, (u_int)bp->b_vflags, PRINT_BUF_VFLAGS);
4971	db_printf(
4972	    "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n"
4973	    "b_bufobj = (%p), b_data = %p, b_blkno = %jd, b_lblkno = %jd, "
4974	    "b_dep = %p\n",
4975	    bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
4976	    bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno,
4977	    (intmax_t)bp->b_lblkno, bp->b_dep.lh_first);
4978	db_printf("b_kvabase = %p, b_kvasize = %d\n",
4979	    bp->b_kvabase, bp->b_kvasize);
4980	if (bp->b_npages) {
4981		int i;
4982		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
4983		for (i = 0; i < bp->b_npages; i++) {
4984			vm_page_t m;
4985			m = bp->b_pages[i];
4986			db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
4987			    (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
4988			if ((i + 1) < bp->b_npages)
4989				db_printf(",");
4990		}
4991		db_printf("\n");
4992	}
4993	db_printf(" ");
4994	BUF_LOCKPRINTINFO(bp);
4995}
4996
4997DB_SHOW_COMMAND(lockedbufs, lockedbufs)
4998{
4999	struct buf *bp;
5000	int i;
5001
5002	for (i = 0; i < nbuf; i++) {
5003		bp = &buf[i];
5004		if (BUF_ISLOCKED(bp)) {
5005			db_show_buffer((uintptr_t)bp, 1, 0, NULL);
5006			db_printf("\n");
5007			if (db_pager_quit)
5008				break;
5009		}
5010	}
5011}
5012
5013DB_SHOW_COMMAND(vnodebufs, db_show_vnodebufs)
5014{
5015	struct vnode *vp;
5016	struct buf *bp;
5017
5018	if (!have_addr) {
5019		db_printf("usage: show vnodebufs <addr>\n");
5020		return;
5021	}
5022	vp = (struct vnode *)addr;
5023	db_printf("Clean buffers:\n");
5024	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_clean.bv_hd, b_bobufs) {
5025		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
5026		db_printf("\n");
5027	}
5028	db_printf("Dirty buffers:\n");
5029	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
5030		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
5031		db_printf("\n");
5032	}
5033}
5034
5035DB_COMMAND(countfreebufs, db_countfreebufs)
5036{
5037	struct buf *bp;
5038	int i, used = 0, nfree = 0;
5039
5040	if (have_addr) {
5041		db_printf("usage: countfreebufs\n");
5042		return;
5043	}
5044
5045	for (i = 0; i < nbuf; i++) {
5046		bp = &buf[i];
5047		if (bp->b_qindex == QUEUE_EMPTY)
5048			nfree++;
5049		else
5050			used++;
5051	}
5052
5053	db_printf("Counted %d free, %d used (%d tot)\n", nfree, used,
5054	    nfree + used);
5055	db_printf("numfreebuffers is %d\n", numfreebuffers);
5056}
5057#endif /* DDB */
5058