vfs_bio.c revision 316073
1/*-
2 * Copyright (c) 2004 Poul-Henning Kamp
3 * Copyright (c) 1994,1997 John S. Dyson
4 * Copyright (c) 2013 The FreeBSD Foundation
5 * All rights reserved.
6 *
7 * Portions of this software were developed by Konstantin Belousov
8 * under sponsorship from the FreeBSD Foundation.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32/*
33 * this file contains a new buffer I/O scheme implementing a coherent
34 * VM object and buffer cache scheme.  Pains have been taken to make
35 * sure that the performance degradation associated with schemes such
36 * as this is not realized.
37 *
38 * Author:  John S. Dyson
39 * Significant help during the development and debugging phases
40 * had been provided by David Greenman, also of the FreeBSD core team.
41 *
42 * see man buf(9) for more info.
43 */
44
45#include <sys/cdefs.h>
46__FBSDID("$FreeBSD: stable/11/sys/kern/vfs_bio.c 316073 2017-03-28 06:07:59Z kib $");
47
48#include <sys/param.h>
49#include <sys/systm.h>
50#include <sys/bio.h>
51#include <sys/conf.h>
52#include <sys/buf.h>
53#include <sys/devicestat.h>
54#include <sys/eventhandler.h>
55#include <sys/fail.h>
56#include <sys/limits.h>
57#include <sys/lock.h>
58#include <sys/malloc.h>
59#include <sys/mount.h>
60#include <sys/mutex.h>
61#include <sys/kernel.h>
62#include <sys/kthread.h>
63#include <sys/proc.h>
64#include <sys/racct.h>
65#include <sys/resourcevar.h>
66#include <sys/rwlock.h>
67#include <sys/smp.h>
68#include <sys/sysctl.h>
69#include <sys/sysproto.h>
70#include <sys/vmem.h>
71#include <sys/vmmeter.h>
72#include <sys/vnode.h>
73#include <sys/watchdog.h>
74#include <geom/geom.h>
75#include <vm/vm.h>
76#include <vm/vm_param.h>
77#include <vm/vm_kern.h>
78#include <vm/vm_object.h>
79#include <vm/vm_page.h>
80#include <vm/vm_pageout.h>
81#include <vm/vm_pager.h>
82#include <vm/vm_extern.h>
83#include <vm/vm_map.h>
84#include <vm/swap_pager.h>
85#include "opt_compat.h"
86#include "opt_swap.h"
87
88static MALLOC_DEFINE(M_BIOBUF, "biobuf", "BIO buffer");
89
90struct	bio_ops bioops;		/* I/O operation notification */
91
92struct	buf_ops buf_ops_bio = {
93	.bop_name	=	"buf_ops_bio",
94	.bop_write	=	bufwrite,
95	.bop_strategy	=	bufstrategy,
96	.bop_sync	=	bufsync,
97	.bop_bdflush	=	bufbdflush,
98};
99
100static struct buf *buf;		/* buffer header pool */
101extern struct buf *swbuf;	/* Swap buffer header pool. */
102caddr_t unmapped_buf;
103
104/* Used below and for softdep flushing threads in ufs/ffs/ffs_softdep.c */
105struct proc *bufdaemonproc;
106struct proc *bufspacedaemonproc;
107
108static int inmem(struct vnode *vp, daddr_t blkno);
109static void vm_hold_free_pages(struct buf *bp, int newbsize);
110static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
111		vm_offset_t to);
112static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m);
113static void vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off,
114		vm_page_t m);
115static void vfs_clean_pages_dirty_buf(struct buf *bp);
116static void vfs_setdirty_locked_object(struct buf *bp);
117static void vfs_vmio_invalidate(struct buf *bp);
118static void vfs_vmio_truncate(struct buf *bp, int npages);
119static void vfs_vmio_extend(struct buf *bp, int npages, int size);
120static int vfs_bio_clcheck(struct vnode *vp, int size,
121		daddr_t lblkno, daddr_t blkno);
122static int buf_flush(struct vnode *vp, int);
123static int buf_recycle(bool);
124static int buf_scan(bool);
125static int flushbufqueues(struct vnode *, int, int);
126static void buf_daemon(void);
127static void bremfreel(struct buf *bp);
128static __inline void bd_wakeup(void);
129static int sysctl_runningspace(SYSCTL_HANDLER_ARGS);
130static void bufkva_reclaim(vmem_t *, int);
131static void bufkva_free(struct buf *);
132static int buf_import(void *, void **, int, int);
133static void buf_release(void *, void **, int);
134
135#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
136    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
137static int sysctl_bufspace(SYSCTL_HANDLER_ARGS);
138#endif
139
140int vmiodirenable = TRUE;
141SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
142    "Use the VM system for directory writes");
143long runningbufspace;
144SYSCTL_LONG(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
145    "Amount of presently outstanding async buffer io");
146static long bufspace;
147#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
148    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
149SYSCTL_PROC(_vfs, OID_AUTO, bufspace, CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RD,
150    &bufspace, 0, sysctl_bufspace, "L", "Virtual memory used for buffers");
151#else
152SYSCTL_LONG(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0,
153    "Physical memory used for buffers");
154#endif
155static long bufkvaspace;
156SYSCTL_LONG(_vfs, OID_AUTO, bufkvaspace, CTLFLAG_RD, &bufkvaspace, 0,
157    "Kernel virtual memory used for buffers");
158static long maxbufspace;
159SYSCTL_LONG(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RW, &maxbufspace, 0,
160    "Maximum allowed value of bufspace (including metadata)");
161static long bufmallocspace;
162SYSCTL_LONG(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
163    "Amount of malloced memory for buffers");
164static long maxbufmallocspace;
165SYSCTL_LONG(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace,
166    0, "Maximum amount of malloced memory for buffers");
167static long lobufspace;
168SYSCTL_LONG(_vfs, OID_AUTO, lobufspace, CTLFLAG_RW, &lobufspace, 0,
169    "Minimum amount of buffers we want to have");
170long hibufspace;
171SYSCTL_LONG(_vfs, OID_AUTO, hibufspace, CTLFLAG_RW, &hibufspace, 0,
172    "Maximum allowed value of bufspace (excluding metadata)");
173long bufspacethresh;
174SYSCTL_LONG(_vfs, OID_AUTO, bufspacethresh, CTLFLAG_RW, &bufspacethresh,
175    0, "Bufspace consumed before waking the daemon to free some");
176static int buffreekvacnt;
177SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt, 0,
178    "Number of times we have freed the KVA space from some buffer");
179static int bufdefragcnt;
180SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt, 0,
181    "Number of times we have had to repeat buffer allocation to defragment");
182static long lorunningspace;
183SYSCTL_PROC(_vfs, OID_AUTO, lorunningspace, CTLTYPE_LONG | CTLFLAG_MPSAFE |
184    CTLFLAG_RW, &lorunningspace, 0, sysctl_runningspace, "L",
185    "Minimum preferred space used for in-progress I/O");
186static long hirunningspace;
187SYSCTL_PROC(_vfs, OID_AUTO, hirunningspace, CTLTYPE_LONG | CTLFLAG_MPSAFE |
188    CTLFLAG_RW, &hirunningspace, 0, sysctl_runningspace, "L",
189    "Maximum amount of space to use for in-progress I/O");
190int dirtybufferflushes;
191SYSCTL_INT(_vfs, OID_AUTO, dirtybufferflushes, CTLFLAG_RW, &dirtybufferflushes,
192    0, "Number of bdwrite to bawrite conversions to limit dirty buffers");
193int bdwriteskip;
194SYSCTL_INT(_vfs, OID_AUTO, bdwriteskip, CTLFLAG_RW, &bdwriteskip,
195    0, "Number of buffers supplied to bdwrite with snapshot deadlock risk");
196int altbufferflushes;
197SYSCTL_INT(_vfs, OID_AUTO, altbufferflushes, CTLFLAG_RW, &altbufferflushes,
198    0, "Number of fsync flushes to limit dirty buffers");
199static int recursiveflushes;
200SYSCTL_INT(_vfs, OID_AUTO, recursiveflushes, CTLFLAG_RW, &recursiveflushes,
201    0, "Number of flushes skipped due to being recursive");
202static int numdirtybuffers;
203SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD, &numdirtybuffers, 0,
204    "Number of buffers that are dirty (has unwritten changes) at the moment");
205static int lodirtybuffers;
206SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW, &lodirtybuffers, 0,
207    "How many buffers we want to have free before bufdaemon can sleep");
208static int hidirtybuffers;
209SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW, &hidirtybuffers, 0,
210    "When the number of dirty buffers is considered severe");
211int dirtybufthresh;
212SYSCTL_INT(_vfs, OID_AUTO, dirtybufthresh, CTLFLAG_RW, &dirtybufthresh,
213    0, "Number of bdwrite to bawrite conversions to clear dirty buffers");
214static int numfreebuffers;
215SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0,
216    "Number of free buffers");
217static int lofreebuffers;
218SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW, &lofreebuffers, 0,
219   "Target number of free buffers");
220static int hifreebuffers;
221SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW, &hifreebuffers, 0,
222   "Threshold for clean buffer recycling");
223static int getnewbufcalls;
224SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RW, &getnewbufcalls, 0,
225   "Number of calls to getnewbuf");
226static int getnewbufrestarts;
227SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RW, &getnewbufrestarts, 0,
228    "Number of times getnewbuf has had to restart a buffer acquisition");
229static int mappingrestarts;
230SYSCTL_INT(_vfs, OID_AUTO, mappingrestarts, CTLFLAG_RW, &mappingrestarts, 0,
231    "Number of times getblk has had to restart a buffer mapping for "
232    "unmapped buffer");
233static int numbufallocfails;
234SYSCTL_INT(_vfs, OID_AUTO, numbufallocfails, CTLFLAG_RW, &numbufallocfails, 0,
235    "Number of times buffer allocations failed");
236static int flushbufqtarget = 100;
237SYSCTL_INT(_vfs, OID_AUTO, flushbufqtarget, CTLFLAG_RW, &flushbufqtarget, 0,
238    "Amount of work to do in flushbufqueues when helping bufdaemon");
239static long notbufdflushes;
240SYSCTL_LONG(_vfs, OID_AUTO, notbufdflushes, CTLFLAG_RD, &notbufdflushes, 0,
241    "Number of dirty buffer flushes done by the bufdaemon helpers");
242static long barrierwrites;
243SYSCTL_LONG(_vfs, OID_AUTO, barrierwrites, CTLFLAG_RW, &barrierwrites, 0,
244    "Number of barrier writes");
245SYSCTL_INT(_vfs, OID_AUTO, unmapped_buf_allowed, CTLFLAG_RD,
246    &unmapped_buf_allowed, 0,
247    "Permit the use of the unmapped i/o");
248
249/*
250 * This lock synchronizes access to bd_request.
251 */
252static struct mtx_padalign bdlock;
253
254/*
255 * This lock protects the runningbufreq and synchronizes runningbufwakeup and
256 * waitrunningbufspace().
257 */
258static struct mtx_padalign rbreqlock;
259
260/*
261 * Lock that protects needsbuffer and the sleeps/wakeups surrounding it.
262 */
263static struct rwlock_padalign nblock;
264
265/*
266 * Lock that protects bdirtywait.
267 */
268static struct mtx_padalign bdirtylock;
269
270/*
271 * Wakeup point for bufdaemon, as well as indicator of whether it is already
272 * active.  Set to 1 when the bufdaemon is already "on" the queue, 0 when it
273 * is idling.
274 */
275static int bd_request;
276
277/*
278 * Request/wakeup point for the bufspace daemon.
279 */
280static int bufspace_request;
281
282/*
283 * Request for the buf daemon to write more buffers than is indicated by
284 * lodirtybuf.  This may be necessary to push out excess dependencies or
285 * defragment the address space where a simple count of the number of dirty
286 * buffers is insufficient to characterize the demand for flushing them.
287 */
288static int bd_speedupreq;
289
290/*
291 * bogus page -- for I/O to/from partially complete buffers
292 * this is a temporary solution to the problem, but it is not
293 * really that bad.  it would be better to split the buffer
294 * for input in the case of buffers partially already in memory,
295 * but the code is intricate enough already.
296 */
297vm_page_t bogus_page;
298
299/*
300 * Synchronization (sleep/wakeup) variable for active buffer space requests.
301 * Set when wait starts, cleared prior to wakeup().
302 * Used in runningbufwakeup() and waitrunningbufspace().
303 */
304static int runningbufreq;
305
306/*
307 * Synchronization (sleep/wakeup) variable for buffer requests.
308 * Can contain the VFS_BIO_NEED flags defined below; setting/clearing is done
309 * by and/or.
310 * Used in numdirtywakeup(), bufspace_wakeup(), bwillwrite(),
311 * getnewbuf(), and getblk().
312 */
313static volatile int needsbuffer;
314
315/*
316 * Synchronization for bwillwrite() waiters.
317 */
318static int bdirtywait;
319
320/*
321 * Definitions for the buffer free lists.
322 */
323#define QUEUE_NONE	0	/* on no queue */
324#define QUEUE_EMPTY	1	/* empty buffer headers */
325#define QUEUE_DIRTY	2	/* B_DELWRI buffers */
326#define QUEUE_CLEAN	3	/* non-B_DELWRI buffers */
327#define QUEUE_SENTINEL	1024	/* not an queue index, but mark for sentinel */
328
329/* Maximum number of clean buffer queues. */
330#define	CLEAN_QUEUES	16
331
332/* Configured number of clean queues. */
333static int clean_queues;
334
335/* Maximum number of buffer queues. */
336#define BUFFER_QUEUES	(QUEUE_CLEAN + CLEAN_QUEUES)
337
338/* Queues for free buffers with various properties */
339static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES] = { { 0 } };
340#ifdef INVARIANTS
341static int bq_len[BUFFER_QUEUES];
342#endif
343
344/*
345 * Lock for each bufqueue
346 */
347static struct mtx_padalign bqlocks[BUFFER_QUEUES];
348
349/*
350 * per-cpu empty buffer cache.
351 */
352uma_zone_t buf_zone;
353
354/*
355 * Single global constant for BUF_WMESG, to avoid getting multiple references.
356 * buf_wmesg is referred from macros.
357 */
358const char *buf_wmesg = BUF_WMESG;
359
360static int
361sysctl_runningspace(SYSCTL_HANDLER_ARGS)
362{
363	long value;
364	int error;
365
366	value = *(long *)arg1;
367	error = sysctl_handle_long(oidp, &value, 0, req);
368	if (error != 0 || req->newptr == NULL)
369		return (error);
370	mtx_lock(&rbreqlock);
371	if (arg1 == &hirunningspace) {
372		if (value < lorunningspace)
373			error = EINVAL;
374		else
375			hirunningspace = value;
376	} else {
377		KASSERT(arg1 == &lorunningspace,
378		    ("%s: unknown arg1", __func__));
379		if (value > hirunningspace)
380			error = EINVAL;
381		else
382			lorunningspace = value;
383	}
384	mtx_unlock(&rbreqlock);
385	return (error);
386}
387
388#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
389    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
390static int
391sysctl_bufspace(SYSCTL_HANDLER_ARGS)
392{
393	long lvalue;
394	int ivalue;
395
396	if (sizeof(int) == sizeof(long) || req->oldlen >= sizeof(long))
397		return (sysctl_handle_long(oidp, arg1, arg2, req));
398	lvalue = *(long *)arg1;
399	if (lvalue > INT_MAX)
400		/* On overflow, still write out a long to trigger ENOMEM. */
401		return (sysctl_handle_long(oidp, &lvalue, 0, req));
402	ivalue = lvalue;
403	return (sysctl_handle_int(oidp, &ivalue, 0, req));
404}
405#endif
406
407static int
408bqcleanq(void)
409{
410	static int nextq;
411
412	return ((atomic_fetchadd_int(&nextq, 1) % clean_queues) + QUEUE_CLEAN);
413}
414
415static int
416bqisclean(int qindex)
417{
418
419	return (qindex >= QUEUE_CLEAN && qindex < QUEUE_CLEAN + CLEAN_QUEUES);
420}
421
422/*
423 *	bqlock:
424 *
425 *	Return the appropriate queue lock based on the index.
426 */
427static inline struct mtx *
428bqlock(int qindex)
429{
430
431	return (struct mtx *)&bqlocks[qindex];
432}
433
434/*
435 *	bdirtywakeup:
436 *
437 *	Wakeup any bwillwrite() waiters.
438 */
439static void
440bdirtywakeup(void)
441{
442	mtx_lock(&bdirtylock);
443	if (bdirtywait) {
444		bdirtywait = 0;
445		wakeup(&bdirtywait);
446	}
447	mtx_unlock(&bdirtylock);
448}
449
450/*
451 *	bdirtysub:
452 *
453 *	Decrement the numdirtybuffers count by one and wakeup any
454 *	threads blocked in bwillwrite().
455 */
456static void
457bdirtysub(void)
458{
459
460	if (atomic_fetchadd_int(&numdirtybuffers, -1) ==
461	    (lodirtybuffers + hidirtybuffers) / 2)
462		bdirtywakeup();
463}
464
465/*
466 *	bdirtyadd:
467 *
468 *	Increment the numdirtybuffers count by one and wakeup the buf
469 *	daemon if needed.
470 */
471static void
472bdirtyadd(void)
473{
474
475	/*
476	 * Only do the wakeup once as we cross the boundary.  The
477	 * buf daemon will keep running until the condition clears.
478	 */
479	if (atomic_fetchadd_int(&numdirtybuffers, 1) ==
480	    (lodirtybuffers + hidirtybuffers) / 2)
481		bd_wakeup();
482}
483
484/*
485 *	bufspace_wakeup:
486 *
487 *	Called when buffer space is potentially available for recovery.
488 *	getnewbuf() will block on this flag when it is unable to free
489 *	sufficient buffer space.  Buffer space becomes recoverable when
490 *	bp's get placed back in the queues.
491 */
492static void
493bufspace_wakeup(void)
494{
495
496	/*
497	 * If someone is waiting for bufspace, wake them up.
498	 *
499	 * Since needsbuffer is set prior to doing an additional queue
500	 * scan it is safe to check for the flag prior to acquiring the
501	 * lock.  The thread that is preparing to scan again before
502	 * blocking would discover the buf we released.
503	 */
504	if (needsbuffer) {
505		rw_rlock(&nblock);
506		if (atomic_cmpset_int(&needsbuffer, 1, 0) == 1)
507			wakeup(__DEVOLATILE(void *, &needsbuffer));
508		rw_runlock(&nblock);
509	}
510}
511
512/*
513 *	bufspace_daemonwakeup:
514 *
515 *	Wakeup the daemon responsible for freeing clean bufs.
516 */
517static void
518bufspace_daemonwakeup(void)
519{
520	rw_rlock(&nblock);
521	if (bufspace_request == 0) {
522		bufspace_request = 1;
523		wakeup(&bufspace_request);
524	}
525	rw_runlock(&nblock);
526}
527
528/*
529 *	bufspace_adjust:
530 *
531 *	Adjust the reported bufspace for a KVA managed buffer, possibly
532 * 	waking any waiters.
533 */
534static void
535bufspace_adjust(struct buf *bp, int bufsize)
536{
537	long space;
538	int diff;
539
540	KASSERT((bp->b_flags & B_MALLOC) == 0,
541	    ("bufspace_adjust: malloc buf %p", bp));
542	diff = bufsize - bp->b_bufsize;
543	if (diff < 0) {
544		atomic_subtract_long(&bufspace, -diff);
545		bufspace_wakeup();
546	} else {
547		space = atomic_fetchadd_long(&bufspace, diff);
548		/* Wake up the daemon on the transition. */
549		if (space < bufspacethresh && space + diff >= bufspacethresh)
550			bufspace_daemonwakeup();
551	}
552	bp->b_bufsize = bufsize;
553}
554
555/*
556 *	bufspace_reserve:
557 *
558 *	Reserve bufspace before calling allocbuf().  metadata has a
559 *	different space limit than data.
560 */
561static int
562bufspace_reserve(int size, bool metadata)
563{
564	long limit;
565	long space;
566
567	if (metadata)
568		limit = maxbufspace;
569	else
570		limit = hibufspace;
571	do {
572		space = bufspace;
573		if (space + size > limit)
574			return (ENOSPC);
575	} while (atomic_cmpset_long(&bufspace, space, space + size) == 0);
576
577	/* Wake up the daemon on the transition. */
578	if (space < bufspacethresh && space + size >= bufspacethresh)
579		bufspace_daemonwakeup();
580
581	return (0);
582}
583
584/*
585 *	bufspace_release:
586 *
587 *	Release reserved bufspace after bufspace_adjust() has consumed it.
588 */
589static void
590bufspace_release(int size)
591{
592	atomic_subtract_long(&bufspace, size);
593	bufspace_wakeup();
594}
595
596/*
597 *	bufspace_wait:
598 *
599 *	Wait for bufspace, acting as the buf daemon if a locked vnode is
600 *	supplied.  needsbuffer must be set in a safe fashion prior to
601 *	polling for space.  The operation must be re-tried on return.
602 */
603static void
604bufspace_wait(struct vnode *vp, int gbflags, int slpflag, int slptimeo)
605{
606	struct thread *td;
607	int error, fl, norunbuf;
608
609	if ((gbflags & GB_NOWAIT_BD) != 0)
610		return;
611
612	td = curthread;
613	rw_wlock(&nblock);
614	while (needsbuffer != 0) {
615		if (vp != NULL && vp->v_type != VCHR &&
616		    (td->td_pflags & TDP_BUFNEED) == 0) {
617			rw_wunlock(&nblock);
618			/*
619			 * getblk() is called with a vnode locked, and
620			 * some majority of the dirty buffers may as
621			 * well belong to the vnode.  Flushing the
622			 * buffers there would make a progress that
623			 * cannot be achieved by the buf_daemon, that
624			 * cannot lock the vnode.
625			 */
626			norunbuf = ~(TDP_BUFNEED | TDP_NORUNNINGBUF) |
627			    (td->td_pflags & TDP_NORUNNINGBUF);
628
629			/*
630			 * Play bufdaemon.  The getnewbuf() function
631			 * may be called while the thread owns lock
632			 * for another dirty buffer for the same
633			 * vnode, which makes it impossible to use
634			 * VOP_FSYNC() there, due to the buffer lock
635			 * recursion.
636			 */
637			td->td_pflags |= TDP_BUFNEED | TDP_NORUNNINGBUF;
638			fl = buf_flush(vp, flushbufqtarget);
639			td->td_pflags &= norunbuf;
640			rw_wlock(&nblock);
641			if (fl != 0)
642				continue;
643			if (needsbuffer == 0)
644				break;
645		}
646		error = rw_sleep(__DEVOLATILE(void *, &needsbuffer), &nblock,
647		    (PRIBIO + 4) | slpflag, "newbuf", slptimeo);
648		if (error != 0)
649			break;
650	}
651	rw_wunlock(&nblock);
652}
653
654
655/*
656 *	bufspace_daemon:
657 *
658 *	buffer space management daemon.  Tries to maintain some marginal
659 *	amount of free buffer space so that requesting processes neither
660 *	block nor work to reclaim buffers.
661 */
662static void
663bufspace_daemon(void)
664{
665	for (;;) {
666		kproc_suspend_check(bufspacedaemonproc);
667
668		/*
669		 * Free buffers from the clean queue until we meet our
670		 * targets.
671		 *
672		 * Theory of operation:  The buffer cache is most efficient
673		 * when some free buffer headers and space are always
674		 * available to getnewbuf().  This daemon attempts to prevent
675		 * the excessive blocking and synchronization associated
676		 * with shortfall.  It goes through three phases according
677		 * demand:
678		 *
679		 * 1)	The daemon wakes up voluntarily once per-second
680		 *	during idle periods when the counters are below
681		 *	the wakeup thresholds (bufspacethresh, lofreebuffers).
682		 *
683		 * 2)	The daemon wakes up as we cross the thresholds
684		 *	ahead of any potential blocking.  This may bounce
685		 *	slightly according to the rate of consumption and
686		 *	release.
687		 *
688		 * 3)	The daemon and consumers are starved for working
689		 *	clean buffers.  This is the 'bufspace' sleep below
690		 *	which will inefficiently trade bufs with bqrelse
691		 *	until we return to condition 2.
692		 */
693		while (bufspace > lobufspace ||
694		    numfreebuffers < hifreebuffers) {
695			if (buf_recycle(false) != 0) {
696				atomic_set_int(&needsbuffer, 1);
697				if (buf_recycle(false) != 0) {
698					rw_wlock(&nblock);
699					if (needsbuffer)
700						rw_sleep(__DEVOLATILE(void *,
701						    &needsbuffer), &nblock,
702						    PRIBIO|PDROP, "bufspace",
703						    hz/10);
704					else
705						rw_wunlock(&nblock);
706				}
707			}
708			maybe_yield();
709		}
710
711		/*
712		 * Re-check our limits under the exclusive nblock.
713		 */
714		rw_wlock(&nblock);
715		if (bufspace < bufspacethresh &&
716		    numfreebuffers > lofreebuffers) {
717			bufspace_request = 0;
718			rw_sleep(&bufspace_request, &nblock, PRIBIO|PDROP,
719			    "-", hz);
720		} else
721			rw_wunlock(&nblock);
722	}
723}
724
725static struct kproc_desc bufspace_kp = {
726	"bufspacedaemon",
727	bufspace_daemon,
728	&bufspacedaemonproc
729};
730SYSINIT(bufspacedaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start,
731    &bufspace_kp);
732
733/*
734 *	bufmallocadjust:
735 *
736 *	Adjust the reported bufspace for a malloc managed buffer, possibly
737 *	waking any waiters.
738 */
739static void
740bufmallocadjust(struct buf *bp, int bufsize)
741{
742	int diff;
743
744	KASSERT((bp->b_flags & B_MALLOC) != 0,
745	    ("bufmallocadjust: non-malloc buf %p", bp));
746	diff = bufsize - bp->b_bufsize;
747	if (diff < 0)
748		atomic_subtract_long(&bufmallocspace, -diff);
749	else
750		atomic_add_long(&bufmallocspace, diff);
751	bp->b_bufsize = bufsize;
752}
753
754/*
755 *	runningwakeup:
756 *
757 *	Wake up processes that are waiting on asynchronous writes to fall
758 *	below lorunningspace.
759 */
760static void
761runningwakeup(void)
762{
763
764	mtx_lock(&rbreqlock);
765	if (runningbufreq) {
766		runningbufreq = 0;
767		wakeup(&runningbufreq);
768	}
769	mtx_unlock(&rbreqlock);
770}
771
772/*
773 *	runningbufwakeup:
774 *
775 *	Decrement the outstanding write count according.
776 */
777void
778runningbufwakeup(struct buf *bp)
779{
780	long space, bspace;
781
782	bspace = bp->b_runningbufspace;
783	if (bspace == 0)
784		return;
785	space = atomic_fetchadd_long(&runningbufspace, -bspace);
786	KASSERT(space >= bspace, ("runningbufspace underflow %ld %ld",
787	    space, bspace));
788	bp->b_runningbufspace = 0;
789	/*
790	 * Only acquire the lock and wakeup on the transition from exceeding
791	 * the threshold to falling below it.
792	 */
793	if (space < lorunningspace)
794		return;
795	if (space - bspace > lorunningspace)
796		return;
797	runningwakeup();
798}
799
800/*
801 *	waitrunningbufspace()
802 *
803 *	runningbufspace is a measure of the amount of I/O currently
804 *	running.  This routine is used in async-write situations to
805 *	prevent creating huge backups of pending writes to a device.
806 *	Only asynchronous writes are governed by this function.
807 *
808 *	This does NOT turn an async write into a sync write.  It waits
809 *	for earlier writes to complete and generally returns before the
810 *	caller's write has reached the device.
811 */
812void
813waitrunningbufspace(void)
814{
815
816	mtx_lock(&rbreqlock);
817	while (runningbufspace > hirunningspace) {
818		runningbufreq = 1;
819		msleep(&runningbufreq, &rbreqlock, PVM, "wdrain", 0);
820	}
821	mtx_unlock(&rbreqlock);
822}
823
824
825/*
826 *	vfs_buf_test_cache:
827 *
828 *	Called when a buffer is extended.  This function clears the B_CACHE
829 *	bit if the newly extended portion of the buffer does not contain
830 *	valid data.
831 */
832static __inline void
833vfs_buf_test_cache(struct buf *bp, vm_ooffset_t foff, vm_offset_t off,
834    vm_offset_t size, vm_page_t m)
835{
836
837	VM_OBJECT_ASSERT_LOCKED(m->object);
838	if (bp->b_flags & B_CACHE) {
839		int base = (foff + off) & PAGE_MASK;
840		if (vm_page_is_valid(m, base, size) == 0)
841			bp->b_flags &= ~B_CACHE;
842	}
843}
844
845/* Wake up the buffer daemon if necessary */
846static __inline void
847bd_wakeup(void)
848{
849
850	mtx_lock(&bdlock);
851	if (bd_request == 0) {
852		bd_request = 1;
853		wakeup(&bd_request);
854	}
855	mtx_unlock(&bdlock);
856}
857
858/*
859 * bd_speedup - speedup the buffer cache flushing code
860 */
861void
862bd_speedup(void)
863{
864	int needwake;
865
866	mtx_lock(&bdlock);
867	needwake = 0;
868	if (bd_speedupreq == 0 || bd_request == 0)
869		needwake = 1;
870	bd_speedupreq = 1;
871	bd_request = 1;
872	if (needwake)
873		wakeup(&bd_request);
874	mtx_unlock(&bdlock);
875}
876
877#ifndef NSWBUF_MIN
878#define	NSWBUF_MIN	16
879#endif
880
881#ifdef __i386__
882#define	TRANSIENT_DENOM	5
883#else
884#define	TRANSIENT_DENOM 10
885#endif
886
887/*
888 * Calculating buffer cache scaling values and reserve space for buffer
889 * headers.  This is called during low level kernel initialization and
890 * may be called more then once.  We CANNOT write to the memory area
891 * being reserved at this time.
892 */
893caddr_t
894kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
895{
896	int tuned_nbuf;
897	long maxbuf, maxbuf_sz, buf_sz,	biotmap_sz;
898
899	/*
900	 * physmem_est is in pages.  Convert it to kilobytes (assumes
901	 * PAGE_SIZE is >= 1K)
902	 */
903	physmem_est = physmem_est * (PAGE_SIZE / 1024);
904
905	/*
906	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
907	 * For the first 64MB of ram nominally allocate sufficient buffers to
908	 * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
909	 * buffers to cover 1/10 of our ram over 64MB.  When auto-sizing
910	 * the buffer cache we limit the eventual kva reservation to
911	 * maxbcache bytes.
912	 *
913	 * factor represents the 1/4 x ram conversion.
914	 */
915	if (nbuf == 0) {
916		int factor = 4 * BKVASIZE / 1024;
917
918		nbuf = 50;
919		if (physmem_est > 4096)
920			nbuf += min((physmem_est - 4096) / factor,
921			    65536 / factor);
922		if (physmem_est > 65536)
923			nbuf += min((physmem_est - 65536) * 2 / (factor * 5),
924			    32 * 1024 * 1024 / (factor * 5));
925
926		if (maxbcache && nbuf > maxbcache / BKVASIZE)
927			nbuf = maxbcache / BKVASIZE;
928		tuned_nbuf = 1;
929	} else
930		tuned_nbuf = 0;
931
932	/* XXX Avoid unsigned long overflows later on with maxbufspace. */
933	maxbuf = (LONG_MAX / 3) / BKVASIZE;
934	if (nbuf > maxbuf) {
935		if (!tuned_nbuf)
936			printf("Warning: nbufs lowered from %d to %ld\n", nbuf,
937			    maxbuf);
938		nbuf = maxbuf;
939	}
940
941	/*
942	 * Ideal allocation size for the transient bio submap is 10%
943	 * of the maximal space buffer map.  This roughly corresponds
944	 * to the amount of the buffer mapped for typical UFS load.
945	 *
946	 * Clip the buffer map to reserve space for the transient
947	 * BIOs, if its extent is bigger than 90% (80% on i386) of the
948	 * maximum buffer map extent on the platform.
949	 *
950	 * The fall-back to the maxbuf in case of maxbcache unset,
951	 * allows to not trim the buffer KVA for the architectures
952	 * with ample KVA space.
953	 */
954	if (bio_transient_maxcnt == 0 && unmapped_buf_allowed) {
955		maxbuf_sz = maxbcache != 0 ? maxbcache : maxbuf * BKVASIZE;
956		buf_sz = (long)nbuf * BKVASIZE;
957		if (buf_sz < maxbuf_sz / TRANSIENT_DENOM *
958		    (TRANSIENT_DENOM - 1)) {
959			/*
960			 * There is more KVA than memory.  Do not
961			 * adjust buffer map size, and assign the rest
962			 * of maxbuf to transient map.
963			 */
964			biotmap_sz = maxbuf_sz - buf_sz;
965		} else {
966			/*
967			 * Buffer map spans all KVA we could afford on
968			 * this platform.  Give 10% (20% on i386) of
969			 * the buffer map to the transient bio map.
970			 */
971			biotmap_sz = buf_sz / TRANSIENT_DENOM;
972			buf_sz -= biotmap_sz;
973		}
974		if (biotmap_sz / INT_MAX > MAXPHYS)
975			bio_transient_maxcnt = INT_MAX;
976		else
977			bio_transient_maxcnt = biotmap_sz / MAXPHYS;
978		/*
979		 * Artificially limit to 1024 simultaneous in-flight I/Os
980		 * using the transient mapping.
981		 */
982		if (bio_transient_maxcnt > 1024)
983			bio_transient_maxcnt = 1024;
984		if (tuned_nbuf)
985			nbuf = buf_sz / BKVASIZE;
986	}
987
988	/*
989	 * swbufs are used as temporary holders for I/O, such as paging I/O.
990	 * We have no less then 16 and no more then 256.
991	 */
992	nswbuf = min(nbuf / 4, 256);
993	TUNABLE_INT_FETCH("kern.nswbuf", &nswbuf);
994	if (nswbuf < NSWBUF_MIN)
995		nswbuf = NSWBUF_MIN;
996
997	/*
998	 * Reserve space for the buffer cache buffers
999	 */
1000	swbuf = (void *)v;
1001	v = (caddr_t)(swbuf + nswbuf);
1002	buf = (void *)v;
1003	v = (caddr_t)(buf + nbuf);
1004
1005	return(v);
1006}
1007
1008/* Initialize the buffer subsystem.  Called before use of any buffers. */
1009void
1010bufinit(void)
1011{
1012	struct buf *bp;
1013	int i;
1014
1015	CTASSERT(MAXBCACHEBUF >= MAXBSIZE);
1016	mtx_init(&bqlocks[QUEUE_DIRTY], "bufq dirty lock", NULL, MTX_DEF);
1017	mtx_init(&bqlocks[QUEUE_EMPTY], "bufq empty lock", NULL, MTX_DEF);
1018	for (i = QUEUE_CLEAN; i < QUEUE_CLEAN + CLEAN_QUEUES; i++)
1019		mtx_init(&bqlocks[i], "bufq clean lock", NULL, MTX_DEF);
1020	mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
1021	rw_init(&nblock, "needsbuffer lock");
1022	mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);
1023	mtx_init(&bdirtylock, "dirty buf lock", NULL, MTX_DEF);
1024
1025	/* next, make a null set of free lists */
1026	for (i = 0; i < BUFFER_QUEUES; i++)
1027		TAILQ_INIT(&bufqueues[i]);
1028
1029	unmapped_buf = (caddr_t)kva_alloc(MAXPHYS);
1030
1031	/* finally, initialize each buffer header and stick on empty q */
1032	for (i = 0; i < nbuf; i++) {
1033		bp = &buf[i];
1034		bzero(bp, sizeof *bp);
1035		bp->b_flags = B_INVAL;
1036		bp->b_rcred = NOCRED;
1037		bp->b_wcred = NOCRED;
1038		bp->b_qindex = QUEUE_EMPTY;
1039		bp->b_xflags = 0;
1040		bp->b_data = bp->b_kvabase = unmapped_buf;
1041		LIST_INIT(&bp->b_dep);
1042		BUF_LOCKINIT(bp);
1043		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
1044#ifdef INVARIANTS
1045		bq_len[QUEUE_EMPTY]++;
1046#endif
1047	}
1048
1049	/*
1050	 * maxbufspace is the absolute maximum amount of buffer space we are
1051	 * allowed to reserve in KVM and in real terms.  The absolute maximum
1052	 * is nominally used by metadata.  hibufspace is the nominal maximum
1053	 * used by most other requests.  The differential is required to
1054	 * ensure that metadata deadlocks don't occur.
1055	 *
1056	 * maxbufspace is based on BKVASIZE.  Allocating buffers larger then
1057	 * this may result in KVM fragmentation which is not handled optimally
1058	 * by the system. XXX This is less true with vmem.  We could use
1059	 * PAGE_SIZE.
1060	 */
1061	maxbufspace = (long)nbuf * BKVASIZE;
1062	hibufspace = lmax(3 * maxbufspace / 4, maxbufspace - MAXBCACHEBUF * 10);
1063	lobufspace = (hibufspace / 20) * 19; /* 95% */
1064	bufspacethresh = lobufspace + (hibufspace - lobufspace) / 2;
1065
1066	/*
1067	 * Note: The 16 MiB upper limit for hirunningspace was chosen
1068	 * arbitrarily and may need further tuning. It corresponds to
1069	 * 128 outstanding write IO requests (if IO size is 128 KiB),
1070	 * which fits with many RAID controllers' tagged queuing limits.
1071	 * The lower 1 MiB limit is the historical upper limit for
1072	 * hirunningspace.
1073	 */
1074	hirunningspace = lmax(lmin(roundup(hibufspace / 64, MAXBCACHEBUF),
1075	    16 * 1024 * 1024), 1024 * 1024);
1076	lorunningspace = roundup((hirunningspace * 2) / 3, MAXBCACHEBUF);
1077
1078	/*
1079	 * Limit the amount of malloc memory since it is wired permanently into
1080	 * the kernel space.  Even though this is accounted for in the buffer
1081	 * allocation, we don't want the malloced region to grow uncontrolled.
1082	 * The malloc scheme improves memory utilization significantly on
1083	 * average (small) directories.
1084	 */
1085	maxbufmallocspace = hibufspace / 20;
1086
1087	/*
1088	 * Reduce the chance of a deadlock occurring by limiting the number
1089	 * of delayed-write dirty buffers we allow to stack up.
1090	 */
1091	hidirtybuffers = nbuf / 4 + 20;
1092	dirtybufthresh = hidirtybuffers * 9 / 10;
1093	numdirtybuffers = 0;
1094	/*
1095	 * To support extreme low-memory systems, make sure hidirtybuffers
1096	 * cannot eat up all available buffer space.  This occurs when our
1097	 * minimum cannot be met.  We try to size hidirtybuffers to 3/4 our
1098	 * buffer space assuming BKVASIZE'd buffers.
1099	 */
1100	while ((long)hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
1101		hidirtybuffers >>= 1;
1102	}
1103	lodirtybuffers = hidirtybuffers / 2;
1104
1105	/*
1106	 * lofreebuffers should be sufficient to avoid stalling waiting on
1107	 * buf headers under heavy utilization.  The bufs in per-cpu caches
1108	 * are counted as free but will be unavailable to threads executing
1109	 * on other cpus.
1110	 *
1111	 * hifreebuffers is the free target for the bufspace daemon.  This
1112	 * should be set appropriately to limit work per-iteration.
1113	 */
1114	lofreebuffers = MIN((nbuf / 25) + (20 * mp_ncpus), 128 * mp_ncpus);
1115	hifreebuffers = (3 * lofreebuffers) / 2;
1116	numfreebuffers = nbuf;
1117
1118	bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
1119	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
1120
1121	/* Setup the kva and free list allocators. */
1122	vmem_set_reclaim(buffer_arena, bufkva_reclaim);
1123	buf_zone = uma_zcache_create("buf free cache", sizeof(struct buf),
1124	    NULL, NULL, NULL, NULL, buf_import, buf_release, NULL, 0);
1125
1126	/*
1127	 * Size the clean queue according to the amount of buffer space.
1128	 * One queue per-256mb up to the max.  More queues gives better
1129	 * concurrency but less accurate LRU.
1130	 */
1131	clean_queues = MIN(howmany(maxbufspace, 256*1024*1024), CLEAN_QUEUES);
1132
1133}
1134
1135#ifdef INVARIANTS
1136static inline void
1137vfs_buf_check_mapped(struct buf *bp)
1138{
1139
1140	KASSERT(bp->b_kvabase != unmapped_buf,
1141	    ("mapped buf: b_kvabase was not updated %p", bp));
1142	KASSERT(bp->b_data != unmapped_buf,
1143	    ("mapped buf: b_data was not updated %p", bp));
1144	KASSERT(bp->b_data < unmapped_buf || bp->b_data >= unmapped_buf +
1145	    MAXPHYS, ("b_data + b_offset unmapped %p", bp));
1146}
1147
1148static inline void
1149vfs_buf_check_unmapped(struct buf *bp)
1150{
1151
1152	KASSERT(bp->b_data == unmapped_buf,
1153	    ("unmapped buf: corrupted b_data %p", bp));
1154}
1155
1156#define	BUF_CHECK_MAPPED(bp) vfs_buf_check_mapped(bp)
1157#define	BUF_CHECK_UNMAPPED(bp) vfs_buf_check_unmapped(bp)
1158#else
1159#define	BUF_CHECK_MAPPED(bp) do {} while (0)
1160#define	BUF_CHECK_UNMAPPED(bp) do {} while (0)
1161#endif
1162
1163static int
1164isbufbusy(struct buf *bp)
1165{
1166	if (((bp->b_flags & B_INVAL) == 0 && BUF_ISLOCKED(bp)) ||
1167	    ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI))
1168		return (1);
1169	return (0);
1170}
1171
1172/*
1173 * Shutdown the system cleanly to prepare for reboot, halt, or power off.
1174 */
1175void
1176bufshutdown(int show_busybufs)
1177{
1178	static int first_buf_printf = 1;
1179	struct buf *bp;
1180	int iter, nbusy, pbusy;
1181#ifndef PREEMPTION
1182	int subiter;
1183#endif
1184
1185	/*
1186	 * Sync filesystems for shutdown
1187	 */
1188	wdog_kern_pat(WD_LASTVAL);
1189	sys_sync(curthread, NULL);
1190
1191	/*
1192	 * With soft updates, some buffers that are
1193	 * written will be remarked as dirty until other
1194	 * buffers are written.
1195	 */
1196	for (iter = pbusy = 0; iter < 20; iter++) {
1197		nbusy = 0;
1198		for (bp = &buf[nbuf]; --bp >= buf; )
1199			if (isbufbusy(bp))
1200				nbusy++;
1201		if (nbusy == 0) {
1202			if (first_buf_printf)
1203				printf("All buffers synced.");
1204			break;
1205		}
1206		if (first_buf_printf) {
1207			printf("Syncing disks, buffers remaining... ");
1208			first_buf_printf = 0;
1209		}
1210		printf("%d ", nbusy);
1211		if (nbusy < pbusy)
1212			iter = 0;
1213		pbusy = nbusy;
1214
1215		wdog_kern_pat(WD_LASTVAL);
1216		sys_sync(curthread, NULL);
1217
1218#ifdef PREEMPTION
1219		/*
1220		 * Drop Giant and spin for a while to allow
1221		 * interrupt threads to run.
1222		 */
1223		DROP_GIANT();
1224		DELAY(50000 * iter);
1225		PICKUP_GIANT();
1226#else
1227		/*
1228		 * Drop Giant and context switch several times to
1229		 * allow interrupt threads to run.
1230		 */
1231		DROP_GIANT();
1232		for (subiter = 0; subiter < 50 * iter; subiter++) {
1233			thread_lock(curthread);
1234			mi_switch(SW_VOL, NULL);
1235			thread_unlock(curthread);
1236			DELAY(1000);
1237		}
1238		PICKUP_GIANT();
1239#endif
1240	}
1241	printf("\n");
1242	/*
1243	 * Count only busy local buffers to prevent forcing
1244	 * a fsck if we're just a client of a wedged NFS server
1245	 */
1246	nbusy = 0;
1247	for (bp = &buf[nbuf]; --bp >= buf; ) {
1248		if (isbufbusy(bp)) {
1249#if 0
1250/* XXX: This is bogus.  We should probably have a BO_REMOTE flag instead */
1251			if (bp->b_dev == NULL) {
1252				TAILQ_REMOVE(&mountlist,
1253				    bp->b_vp->v_mount, mnt_list);
1254				continue;
1255			}
1256#endif
1257			nbusy++;
1258			if (show_busybufs > 0) {
1259				printf(
1260	    "%d: buf:%p, vnode:%p, flags:%0x, blkno:%jd, lblkno:%jd, buflock:",
1261				    nbusy, bp, bp->b_vp, bp->b_flags,
1262				    (intmax_t)bp->b_blkno,
1263				    (intmax_t)bp->b_lblkno);
1264				BUF_LOCKPRINTINFO(bp);
1265				if (show_busybufs > 1)
1266					vn_printf(bp->b_vp,
1267					    "vnode content: ");
1268			}
1269		}
1270	}
1271	if (nbusy) {
1272		/*
1273		 * Failed to sync all blocks. Indicate this and don't
1274		 * unmount filesystems (thus forcing an fsck on reboot).
1275		 */
1276		printf("Giving up on %d buffers\n", nbusy);
1277		DELAY(5000000);	/* 5 seconds */
1278	} else {
1279		if (!first_buf_printf)
1280			printf("Final sync complete\n");
1281		/*
1282		 * Unmount filesystems
1283		 */
1284		if (panicstr == NULL)
1285			vfs_unmountall();
1286	}
1287	swapoff_all();
1288	DELAY(100000);		/* wait for console output to finish */
1289}
1290
1291static void
1292bpmap_qenter(struct buf *bp)
1293{
1294
1295	BUF_CHECK_MAPPED(bp);
1296
1297	/*
1298	 * bp->b_data is relative to bp->b_offset, but
1299	 * bp->b_offset may be offset into the first page.
1300	 */
1301	bp->b_data = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
1302	pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
1303	bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
1304	    (vm_offset_t)(bp->b_offset & PAGE_MASK));
1305}
1306
1307/*
1308 *	binsfree:
1309 *
1310 *	Insert the buffer into the appropriate free list.
1311 */
1312static void
1313binsfree(struct buf *bp, int qindex)
1314{
1315	struct mtx *olock, *nlock;
1316
1317	if (qindex != QUEUE_EMPTY) {
1318		BUF_ASSERT_XLOCKED(bp);
1319	}
1320
1321	/*
1322	 * Stick to the same clean queue for the lifetime of the buf to
1323	 * limit locking below.  Otherwise pick ont sequentially.
1324	 */
1325	if (qindex == QUEUE_CLEAN) {
1326		if (bqisclean(bp->b_qindex))
1327			qindex = bp->b_qindex;
1328		else
1329			qindex = bqcleanq();
1330	}
1331
1332	/*
1333	 * Handle delayed bremfree() processing.
1334	 */
1335	nlock = bqlock(qindex);
1336	if (bp->b_flags & B_REMFREE) {
1337		olock = bqlock(bp->b_qindex);
1338		mtx_lock(olock);
1339		bremfreel(bp);
1340		if (olock != nlock) {
1341			mtx_unlock(olock);
1342			mtx_lock(nlock);
1343		}
1344	} else
1345		mtx_lock(nlock);
1346
1347	if (bp->b_qindex != QUEUE_NONE)
1348		panic("binsfree: free buffer onto another queue???");
1349
1350	bp->b_qindex = qindex;
1351	if (bp->b_flags & B_AGE)
1352		TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
1353	else
1354		TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
1355#ifdef INVARIANTS
1356	bq_len[bp->b_qindex]++;
1357#endif
1358	mtx_unlock(nlock);
1359}
1360
1361/*
1362 * buf_free:
1363 *
1364 *	Free a buffer to the buf zone once it no longer has valid contents.
1365 */
1366static void
1367buf_free(struct buf *bp)
1368{
1369
1370	if (bp->b_flags & B_REMFREE)
1371		bremfreef(bp);
1372	if (bp->b_vflags & BV_BKGRDINPROG)
1373		panic("losing buffer 1");
1374	if (bp->b_rcred != NOCRED) {
1375		crfree(bp->b_rcred);
1376		bp->b_rcred = NOCRED;
1377	}
1378	if (bp->b_wcred != NOCRED) {
1379		crfree(bp->b_wcred);
1380		bp->b_wcred = NOCRED;
1381	}
1382	if (!LIST_EMPTY(&bp->b_dep))
1383		buf_deallocate(bp);
1384	bufkva_free(bp);
1385	BUF_UNLOCK(bp);
1386	uma_zfree(buf_zone, bp);
1387	atomic_add_int(&numfreebuffers, 1);
1388	bufspace_wakeup();
1389}
1390
1391/*
1392 * buf_import:
1393 *
1394 *	Import bufs into the uma cache from the buf list.  The system still
1395 *	expects a static array of bufs and much of the synchronization
1396 *	around bufs assumes type stable storage.  As a result, UMA is used
1397 *	only as a per-cpu cache of bufs still maintained on a global list.
1398 */
1399static int
1400buf_import(void *arg, void **store, int cnt, int flags)
1401{
1402	struct buf *bp;
1403	int i;
1404
1405	mtx_lock(&bqlocks[QUEUE_EMPTY]);
1406	for (i = 0; i < cnt; i++) {
1407		bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
1408		if (bp == NULL)
1409			break;
1410		bremfreel(bp);
1411		store[i] = bp;
1412	}
1413	mtx_unlock(&bqlocks[QUEUE_EMPTY]);
1414
1415	return (i);
1416}
1417
1418/*
1419 * buf_release:
1420 *
1421 *	Release bufs from the uma cache back to the buffer queues.
1422 */
1423static void
1424buf_release(void *arg, void **store, int cnt)
1425{
1426        int i;
1427
1428        for (i = 0; i < cnt; i++)
1429		binsfree(store[i], QUEUE_EMPTY);
1430}
1431
1432/*
1433 * buf_alloc:
1434 *
1435 *	Allocate an empty buffer header.
1436 */
1437static struct buf *
1438buf_alloc(void)
1439{
1440	struct buf *bp;
1441
1442	bp = uma_zalloc(buf_zone, M_NOWAIT);
1443	if (bp == NULL) {
1444		bufspace_daemonwakeup();
1445		atomic_add_int(&numbufallocfails, 1);
1446		return (NULL);
1447	}
1448
1449	/*
1450	 * Wake-up the bufspace daemon on transition.
1451	 */
1452	if (atomic_fetchadd_int(&numfreebuffers, -1) == lofreebuffers)
1453		bufspace_daemonwakeup();
1454
1455	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
1456		panic("getnewbuf_empty: Locked buf %p on free queue.", bp);
1457
1458	KASSERT(bp->b_vp == NULL,
1459	    ("bp: %p still has vnode %p.", bp, bp->b_vp));
1460	KASSERT((bp->b_flags & (B_DELWRI | B_NOREUSE)) == 0,
1461	    ("invalid buffer %p flags %#x", bp, bp->b_flags));
1462	KASSERT((bp->b_xflags & (BX_VNCLEAN|BX_VNDIRTY)) == 0,
1463	    ("bp: %p still on a buffer list. xflags %X", bp, bp->b_xflags));
1464	KASSERT(bp->b_npages == 0,
1465	    ("bp: %p still has %d vm pages\n", bp, bp->b_npages));
1466	KASSERT(bp->b_kvasize == 0, ("bp: %p still has kva\n", bp));
1467	KASSERT(bp->b_bufsize == 0, ("bp: %p still has bufspace\n", bp));
1468
1469	bp->b_flags = 0;
1470	bp->b_ioflags = 0;
1471	bp->b_xflags = 0;
1472	bp->b_vflags = 0;
1473	bp->b_vp = NULL;
1474	bp->b_blkno = bp->b_lblkno = 0;
1475	bp->b_offset = NOOFFSET;
1476	bp->b_iodone = 0;
1477	bp->b_error = 0;
1478	bp->b_resid = 0;
1479	bp->b_bcount = 0;
1480	bp->b_npages = 0;
1481	bp->b_dirtyoff = bp->b_dirtyend = 0;
1482	bp->b_bufobj = NULL;
1483	bp->b_pin_count = 0;
1484	bp->b_data = bp->b_kvabase = unmapped_buf;
1485	bp->b_fsprivate1 = NULL;
1486	bp->b_fsprivate2 = NULL;
1487	bp->b_fsprivate3 = NULL;
1488	LIST_INIT(&bp->b_dep);
1489
1490	return (bp);
1491}
1492
1493/*
1494 *	buf_qrecycle:
1495 *
1496 *	Free a buffer from the given bufqueue.  kva controls whether the
1497 *	freed buf must own some kva resources.  This is used for
1498 *	defragmenting.
1499 */
1500static int
1501buf_qrecycle(int qindex, bool kva)
1502{
1503	struct buf *bp, *nbp;
1504
1505	if (kva)
1506		atomic_add_int(&bufdefragcnt, 1);
1507	nbp = NULL;
1508	mtx_lock(&bqlocks[qindex]);
1509	nbp = TAILQ_FIRST(&bufqueues[qindex]);
1510
1511	/*
1512	 * Run scan, possibly freeing data and/or kva mappings on the fly
1513	 * depending.
1514	 */
1515	while ((bp = nbp) != NULL) {
1516		/*
1517		 * Calculate next bp (we can only use it if we do not
1518		 * release the bqlock).
1519		 */
1520		nbp = TAILQ_NEXT(bp, b_freelist);
1521
1522		/*
1523		 * If we are defragging then we need a buffer with
1524		 * some kva to reclaim.
1525		 */
1526		if (kva && bp->b_kvasize == 0)
1527			continue;
1528
1529		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
1530			continue;
1531
1532		/*
1533		 * Skip buffers with background writes in progress.
1534		 */
1535		if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
1536			BUF_UNLOCK(bp);
1537			continue;
1538		}
1539
1540		KASSERT(bp->b_qindex == qindex,
1541		    ("getnewbuf: inconsistent queue %d bp %p", qindex, bp));
1542		/*
1543		 * NOTE:  nbp is now entirely invalid.  We can only restart
1544		 * the scan from this point on.
1545		 */
1546		bremfreel(bp);
1547		mtx_unlock(&bqlocks[qindex]);
1548
1549		/*
1550		 * Requeue the background write buffer with error and
1551		 * restart the scan.
1552		 */
1553		if ((bp->b_vflags & BV_BKGRDERR) != 0) {
1554			bqrelse(bp);
1555			mtx_lock(&bqlocks[qindex]);
1556			nbp = TAILQ_FIRST(&bufqueues[qindex]);
1557			continue;
1558		}
1559		bp->b_flags |= B_INVAL;
1560		brelse(bp);
1561		return (0);
1562	}
1563	mtx_unlock(&bqlocks[qindex]);
1564
1565	return (ENOBUFS);
1566}
1567
1568/*
1569 *	buf_recycle:
1570 *
1571 *	Iterate through all clean queues until we find a buf to recycle or
1572 *	exhaust the search.
1573 */
1574static int
1575buf_recycle(bool kva)
1576{
1577	int qindex, first_qindex;
1578
1579	qindex = first_qindex = bqcleanq();
1580	do {
1581		if (buf_qrecycle(qindex, kva) == 0)
1582			return (0);
1583		if (++qindex == QUEUE_CLEAN + clean_queues)
1584			qindex = QUEUE_CLEAN;
1585	} while (qindex != first_qindex);
1586
1587	return (ENOBUFS);
1588}
1589
1590/*
1591 *	buf_scan:
1592 *
1593 *	Scan the clean queues looking for a buffer to recycle.  needsbuffer
1594 *	is set on failure so that the caller may optionally bufspace_wait()
1595 *	in a race-free fashion.
1596 */
1597static int
1598buf_scan(bool defrag)
1599{
1600	int error;
1601
1602	/*
1603	 * To avoid heavy synchronization and wakeup races we set
1604	 * needsbuffer and re-poll before failing.  This ensures that
1605	 * no frees can be missed between an unsuccessful poll and
1606	 * going to sleep in a synchronized fashion.
1607	 */
1608	if ((error = buf_recycle(defrag)) != 0) {
1609		atomic_set_int(&needsbuffer, 1);
1610		bufspace_daemonwakeup();
1611		error = buf_recycle(defrag);
1612	}
1613	if (error == 0)
1614		atomic_add_int(&getnewbufrestarts, 1);
1615	return (error);
1616}
1617
1618/*
1619 *	bremfree:
1620 *
1621 *	Mark the buffer for removal from the appropriate free list.
1622 *
1623 */
1624void
1625bremfree(struct buf *bp)
1626{
1627
1628	CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1629	KASSERT((bp->b_flags & B_REMFREE) == 0,
1630	    ("bremfree: buffer %p already marked for delayed removal.", bp));
1631	KASSERT(bp->b_qindex != QUEUE_NONE,
1632	    ("bremfree: buffer %p not on a queue.", bp));
1633	BUF_ASSERT_XLOCKED(bp);
1634
1635	bp->b_flags |= B_REMFREE;
1636}
1637
1638/*
1639 *	bremfreef:
1640 *
1641 *	Force an immediate removal from a free list.  Used only in nfs when
1642 *	it abuses the b_freelist pointer.
1643 */
1644void
1645bremfreef(struct buf *bp)
1646{
1647	struct mtx *qlock;
1648
1649	qlock = bqlock(bp->b_qindex);
1650	mtx_lock(qlock);
1651	bremfreel(bp);
1652	mtx_unlock(qlock);
1653}
1654
1655/*
1656 *	bremfreel:
1657 *
1658 *	Removes a buffer from the free list, must be called with the
1659 *	correct qlock held.
1660 */
1661static void
1662bremfreel(struct buf *bp)
1663{
1664
1665	CTR3(KTR_BUF, "bremfreel(%p) vp %p flags %X",
1666	    bp, bp->b_vp, bp->b_flags);
1667	KASSERT(bp->b_qindex != QUEUE_NONE,
1668	    ("bremfreel: buffer %p not on a queue.", bp));
1669	if (bp->b_qindex != QUEUE_EMPTY) {
1670		BUF_ASSERT_XLOCKED(bp);
1671	}
1672	mtx_assert(bqlock(bp->b_qindex), MA_OWNED);
1673
1674	TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
1675#ifdef INVARIANTS
1676	KASSERT(bq_len[bp->b_qindex] >= 1, ("queue %d underflow",
1677	    bp->b_qindex));
1678	bq_len[bp->b_qindex]--;
1679#endif
1680	bp->b_qindex = QUEUE_NONE;
1681	bp->b_flags &= ~B_REMFREE;
1682}
1683
1684/*
1685 *	bufkva_free:
1686 *
1687 *	Free the kva allocation for a buffer.
1688 *
1689 */
1690static void
1691bufkva_free(struct buf *bp)
1692{
1693
1694#ifdef INVARIANTS
1695	if (bp->b_kvasize == 0) {
1696		KASSERT(bp->b_kvabase == unmapped_buf &&
1697		    bp->b_data == unmapped_buf,
1698		    ("Leaked KVA space on %p", bp));
1699	} else if (buf_mapped(bp))
1700		BUF_CHECK_MAPPED(bp);
1701	else
1702		BUF_CHECK_UNMAPPED(bp);
1703#endif
1704	if (bp->b_kvasize == 0)
1705		return;
1706
1707	vmem_free(buffer_arena, (vm_offset_t)bp->b_kvabase, bp->b_kvasize);
1708	atomic_subtract_long(&bufkvaspace, bp->b_kvasize);
1709	atomic_add_int(&buffreekvacnt, 1);
1710	bp->b_data = bp->b_kvabase = unmapped_buf;
1711	bp->b_kvasize = 0;
1712}
1713
1714/*
1715 *	bufkva_alloc:
1716 *
1717 *	Allocate the buffer KVA and set b_kvasize and b_kvabase.
1718 */
1719static int
1720bufkva_alloc(struct buf *bp, int maxsize, int gbflags)
1721{
1722	vm_offset_t addr;
1723	int error;
1724
1725	KASSERT((gbflags & GB_UNMAPPED) == 0 || (gbflags & GB_KVAALLOC) != 0,
1726	    ("Invalid gbflags 0x%x in %s", gbflags, __func__));
1727
1728	bufkva_free(bp);
1729
1730	addr = 0;
1731	error = vmem_alloc(buffer_arena, maxsize, M_BESTFIT | M_NOWAIT, &addr);
1732	if (error != 0) {
1733		/*
1734		 * Buffer map is too fragmented.  Request the caller
1735		 * to defragment the map.
1736		 */
1737		return (error);
1738	}
1739	bp->b_kvabase = (caddr_t)addr;
1740	bp->b_kvasize = maxsize;
1741	atomic_add_long(&bufkvaspace, bp->b_kvasize);
1742	if ((gbflags & GB_UNMAPPED) != 0) {
1743		bp->b_data = unmapped_buf;
1744		BUF_CHECK_UNMAPPED(bp);
1745	} else {
1746		bp->b_data = bp->b_kvabase;
1747		BUF_CHECK_MAPPED(bp);
1748	}
1749	return (0);
1750}
1751
1752/*
1753 *	bufkva_reclaim:
1754 *
1755 *	Reclaim buffer kva by freeing buffers holding kva.  This is a vmem
1756 *	callback that fires to avoid returning failure.
1757 */
1758static void
1759bufkva_reclaim(vmem_t *vmem, int flags)
1760{
1761	int i;
1762
1763	for (i = 0; i < 5; i++)
1764		if (buf_scan(true) != 0)
1765			break;
1766	return;
1767}
1768
1769
1770/*
1771 * Attempt to initiate asynchronous I/O on read-ahead blocks.  We must
1772 * clear BIO_ERROR and B_INVAL prior to initiating I/O . If B_CACHE is set,
1773 * the buffer is valid and we do not have to do anything.
1774 */
1775void
1776breada(struct vnode * vp, daddr_t * rablkno, int * rabsize,
1777    int cnt, struct ucred * cred)
1778{
1779	struct buf *rabp;
1780	int i;
1781
1782	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
1783		if (inmem(vp, *rablkno))
1784			continue;
1785		rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0);
1786
1787		if ((rabp->b_flags & B_CACHE) == 0) {
1788			if (!TD_IS_IDLETHREAD(curthread)) {
1789#ifdef RACCT
1790				if (racct_enable) {
1791					PROC_LOCK(curproc);
1792					racct_add_buf(curproc, rabp, 0);
1793					PROC_UNLOCK(curproc);
1794				}
1795#endif /* RACCT */
1796				curthread->td_ru.ru_inblock++;
1797			}
1798			rabp->b_flags |= B_ASYNC;
1799			rabp->b_flags &= ~B_INVAL;
1800			rabp->b_ioflags &= ~BIO_ERROR;
1801			rabp->b_iocmd = BIO_READ;
1802			if (rabp->b_rcred == NOCRED && cred != NOCRED)
1803				rabp->b_rcred = crhold(cred);
1804			vfs_busy_pages(rabp, 0);
1805			BUF_KERNPROC(rabp);
1806			rabp->b_iooffset = dbtob(rabp->b_blkno);
1807			bstrategy(rabp);
1808		} else {
1809			brelse(rabp);
1810		}
1811	}
1812}
1813
1814/*
1815 * Entry point for bread() and breadn() via #defines in sys/buf.h.
1816 *
1817 * Get a buffer with the specified data.  Look in the cache first.  We
1818 * must clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
1819 * is set, the buffer is valid and we do not have to do anything, see
1820 * getblk(). Also starts asynchronous I/O on read-ahead blocks.
1821 *
1822 * Always return a NULL buffer pointer (in bpp) when returning an error.
1823 */
1824int
1825breadn_flags(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablkno,
1826    int *rabsize, int cnt, struct ucred *cred, int flags, struct buf **bpp)
1827{
1828	struct buf *bp;
1829	int rv = 0, readwait = 0;
1830
1831	CTR3(KTR_BUF, "breadn(%p, %jd, %d)", vp, blkno, size);
1832	/*
1833	 * Can only return NULL if GB_LOCK_NOWAIT flag is specified.
1834	 */
1835	*bpp = bp = getblk(vp, blkno, size, 0, 0, flags);
1836	if (bp == NULL)
1837		return (EBUSY);
1838
1839	/* if not found in cache, do some I/O */
1840	if ((bp->b_flags & B_CACHE) == 0) {
1841		if (!TD_IS_IDLETHREAD(curthread)) {
1842#ifdef RACCT
1843			if (racct_enable) {
1844				PROC_LOCK(curproc);
1845				racct_add_buf(curproc, bp, 0);
1846				PROC_UNLOCK(curproc);
1847			}
1848#endif /* RACCT */
1849			curthread->td_ru.ru_inblock++;
1850		}
1851		bp->b_iocmd = BIO_READ;
1852		bp->b_flags &= ~B_INVAL;
1853		bp->b_ioflags &= ~BIO_ERROR;
1854		if (bp->b_rcred == NOCRED && cred != NOCRED)
1855			bp->b_rcred = crhold(cred);
1856		vfs_busy_pages(bp, 0);
1857		bp->b_iooffset = dbtob(bp->b_blkno);
1858		bstrategy(bp);
1859		++readwait;
1860	}
1861
1862	breada(vp, rablkno, rabsize, cnt, cred);
1863
1864	if (readwait) {
1865		rv = bufwait(bp);
1866		if (rv != 0) {
1867			brelse(bp);
1868			*bpp = NULL;
1869		}
1870	}
1871	return (rv);
1872}
1873
1874/*
1875 * Write, release buffer on completion.  (Done by iodone
1876 * if async).  Do not bother writing anything if the buffer
1877 * is invalid.
1878 *
1879 * Note that we set B_CACHE here, indicating that buffer is
1880 * fully valid and thus cacheable.  This is true even of NFS
1881 * now so we set it generally.  This could be set either here
1882 * or in biodone() since the I/O is synchronous.  We put it
1883 * here.
1884 */
1885int
1886bufwrite(struct buf *bp)
1887{
1888	int oldflags;
1889	struct vnode *vp;
1890	long space;
1891	int vp_md;
1892
1893	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1894	if ((bp->b_bufobj->bo_flag & BO_DEAD) != 0) {
1895		bp->b_flags |= B_INVAL | B_RELBUF;
1896		bp->b_flags &= ~B_CACHE;
1897		brelse(bp);
1898		return (ENXIO);
1899	}
1900	if (bp->b_flags & B_INVAL) {
1901		brelse(bp);
1902		return (0);
1903	}
1904
1905	if (bp->b_flags & B_BARRIER)
1906		barrierwrites++;
1907
1908	oldflags = bp->b_flags;
1909
1910	BUF_ASSERT_HELD(bp);
1911
1912	if (bp->b_pin_count > 0)
1913		bunpin_wait(bp);
1914
1915	KASSERT(!(bp->b_vflags & BV_BKGRDINPROG),
1916	    ("FFS background buffer should not get here %p", bp));
1917
1918	vp = bp->b_vp;
1919	if (vp)
1920		vp_md = vp->v_vflag & VV_MD;
1921	else
1922		vp_md = 0;
1923
1924	/*
1925	 * Mark the buffer clean.  Increment the bufobj write count
1926	 * before bundirty() call, to prevent other thread from seeing
1927	 * empty dirty list and zero counter for writes in progress,
1928	 * falsely indicating that the bufobj is clean.
1929	 */
1930	bufobj_wref(bp->b_bufobj);
1931	bundirty(bp);
1932
1933	bp->b_flags &= ~B_DONE;
1934	bp->b_ioflags &= ~BIO_ERROR;
1935	bp->b_flags |= B_CACHE;
1936	bp->b_iocmd = BIO_WRITE;
1937
1938	vfs_busy_pages(bp, 1);
1939
1940	/*
1941	 * Normal bwrites pipeline writes
1942	 */
1943	bp->b_runningbufspace = bp->b_bufsize;
1944	space = atomic_fetchadd_long(&runningbufspace, bp->b_runningbufspace);
1945
1946	if (!TD_IS_IDLETHREAD(curthread)) {
1947#ifdef RACCT
1948		if (racct_enable) {
1949			PROC_LOCK(curproc);
1950			racct_add_buf(curproc, bp, 1);
1951			PROC_UNLOCK(curproc);
1952		}
1953#endif /* RACCT */
1954		curthread->td_ru.ru_oublock++;
1955	}
1956	if (oldflags & B_ASYNC)
1957		BUF_KERNPROC(bp);
1958	bp->b_iooffset = dbtob(bp->b_blkno);
1959	bstrategy(bp);
1960
1961	if ((oldflags & B_ASYNC) == 0) {
1962		int rtval = bufwait(bp);
1963		brelse(bp);
1964		return (rtval);
1965	} else if (space > hirunningspace) {
1966		/*
1967		 * don't allow the async write to saturate the I/O
1968		 * system.  We will not deadlock here because
1969		 * we are blocking waiting for I/O that is already in-progress
1970		 * to complete. We do not block here if it is the update
1971		 * or syncer daemon trying to clean up as that can lead
1972		 * to deadlock.
1973		 */
1974		if ((curthread->td_pflags & TDP_NORUNNINGBUF) == 0 && !vp_md)
1975			waitrunningbufspace();
1976	}
1977
1978	return (0);
1979}
1980
1981void
1982bufbdflush(struct bufobj *bo, struct buf *bp)
1983{
1984	struct buf *nbp;
1985
1986	if (bo->bo_dirty.bv_cnt > dirtybufthresh + 10) {
1987		(void) VOP_FSYNC(bp->b_vp, MNT_NOWAIT, curthread);
1988		altbufferflushes++;
1989	} else if (bo->bo_dirty.bv_cnt > dirtybufthresh) {
1990		BO_LOCK(bo);
1991		/*
1992		 * Try to find a buffer to flush.
1993		 */
1994		TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) {
1995			if ((nbp->b_vflags & BV_BKGRDINPROG) ||
1996			    BUF_LOCK(nbp,
1997				     LK_EXCLUSIVE | LK_NOWAIT, NULL))
1998				continue;
1999			if (bp == nbp)
2000				panic("bdwrite: found ourselves");
2001			BO_UNLOCK(bo);
2002			/* Don't countdeps with the bo lock held. */
2003			if (buf_countdeps(nbp, 0)) {
2004				BO_LOCK(bo);
2005				BUF_UNLOCK(nbp);
2006				continue;
2007			}
2008			if (nbp->b_flags & B_CLUSTEROK) {
2009				vfs_bio_awrite(nbp);
2010			} else {
2011				bremfree(nbp);
2012				bawrite(nbp);
2013			}
2014			dirtybufferflushes++;
2015			break;
2016		}
2017		if (nbp == NULL)
2018			BO_UNLOCK(bo);
2019	}
2020}
2021
2022/*
2023 * Delayed write. (Buffer is marked dirty).  Do not bother writing
2024 * anything if the buffer is marked invalid.
2025 *
2026 * Note that since the buffer must be completely valid, we can safely
2027 * set B_CACHE.  In fact, we have to set B_CACHE here rather then in
2028 * biodone() in order to prevent getblk from writing the buffer
2029 * out synchronously.
2030 */
2031void
2032bdwrite(struct buf *bp)
2033{
2034	struct thread *td = curthread;
2035	struct vnode *vp;
2036	struct bufobj *bo;
2037
2038	CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2039	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2040	KASSERT((bp->b_flags & B_BARRIER) == 0,
2041	    ("Barrier request in delayed write %p", bp));
2042	BUF_ASSERT_HELD(bp);
2043
2044	if (bp->b_flags & B_INVAL) {
2045		brelse(bp);
2046		return;
2047	}
2048
2049	/*
2050	 * If we have too many dirty buffers, don't create any more.
2051	 * If we are wildly over our limit, then force a complete
2052	 * cleanup. Otherwise, just keep the situation from getting
2053	 * out of control. Note that we have to avoid a recursive
2054	 * disaster and not try to clean up after our own cleanup!
2055	 */
2056	vp = bp->b_vp;
2057	bo = bp->b_bufobj;
2058	if ((td->td_pflags & (TDP_COWINPROGRESS|TDP_INBDFLUSH)) == 0) {
2059		td->td_pflags |= TDP_INBDFLUSH;
2060		BO_BDFLUSH(bo, bp);
2061		td->td_pflags &= ~TDP_INBDFLUSH;
2062	} else
2063		recursiveflushes++;
2064
2065	bdirty(bp);
2066	/*
2067	 * Set B_CACHE, indicating that the buffer is fully valid.  This is
2068	 * true even of NFS now.
2069	 */
2070	bp->b_flags |= B_CACHE;
2071
2072	/*
2073	 * This bmap keeps the system from needing to do the bmap later,
2074	 * perhaps when the system is attempting to do a sync.  Since it
2075	 * is likely that the indirect block -- or whatever other datastructure
2076	 * that the filesystem needs is still in memory now, it is a good
2077	 * thing to do this.  Note also, that if the pageout daemon is
2078	 * requesting a sync -- there might not be enough memory to do
2079	 * the bmap then...  So, this is important to do.
2080	 */
2081	if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) {
2082		VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
2083	}
2084
2085	/*
2086	 * Set the *dirty* buffer range based upon the VM system dirty
2087	 * pages.
2088	 *
2089	 * Mark the buffer pages as clean.  We need to do this here to
2090	 * satisfy the vnode_pager and the pageout daemon, so that it
2091	 * thinks that the pages have been "cleaned".  Note that since
2092	 * the pages are in a delayed write buffer -- the VFS layer
2093	 * "will" see that the pages get written out on the next sync,
2094	 * or perhaps the cluster will be completed.
2095	 */
2096	vfs_clean_pages_dirty_buf(bp);
2097	bqrelse(bp);
2098
2099	/*
2100	 * note: we cannot initiate I/O from a bdwrite even if we wanted to,
2101	 * due to the softdep code.
2102	 */
2103}
2104
2105/*
2106 *	bdirty:
2107 *
2108 *	Turn buffer into delayed write request.  We must clear BIO_READ and
2109 *	B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to
2110 *	itself to properly update it in the dirty/clean lists.  We mark it
2111 *	B_DONE to ensure that any asynchronization of the buffer properly
2112 *	clears B_DONE ( else a panic will occur later ).
2113 *
2114 *	bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
2115 *	might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
2116 *	should only be called if the buffer is known-good.
2117 *
2118 *	Since the buffer is not on a queue, we do not update the numfreebuffers
2119 *	count.
2120 *
2121 *	The buffer must be on QUEUE_NONE.
2122 */
2123void
2124bdirty(struct buf *bp)
2125{
2126
2127	CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X",
2128	    bp, bp->b_vp, bp->b_flags);
2129	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2130	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
2131	    ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
2132	BUF_ASSERT_HELD(bp);
2133	bp->b_flags &= ~(B_RELBUF);
2134	bp->b_iocmd = BIO_WRITE;
2135
2136	if ((bp->b_flags & B_DELWRI) == 0) {
2137		bp->b_flags |= /* XXX B_DONE | */ B_DELWRI;
2138		reassignbuf(bp);
2139		bdirtyadd();
2140	}
2141}
2142
2143/*
2144 *	bundirty:
2145 *
2146 *	Clear B_DELWRI for buffer.
2147 *
2148 *	Since the buffer is not on a queue, we do not update the numfreebuffers
2149 *	count.
2150 *
2151 *	The buffer must be on QUEUE_NONE.
2152 */
2153
2154void
2155bundirty(struct buf *bp)
2156{
2157
2158	CTR3(KTR_BUF, "bundirty(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2159	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2160	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
2161	    ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
2162	BUF_ASSERT_HELD(bp);
2163
2164	if (bp->b_flags & B_DELWRI) {
2165		bp->b_flags &= ~B_DELWRI;
2166		reassignbuf(bp);
2167		bdirtysub();
2168	}
2169	/*
2170	 * Since it is now being written, we can clear its deferred write flag.
2171	 */
2172	bp->b_flags &= ~B_DEFERRED;
2173}
2174
2175/*
2176 *	bawrite:
2177 *
2178 *	Asynchronous write.  Start output on a buffer, but do not wait for
2179 *	it to complete.  The buffer is released when the output completes.
2180 *
2181 *	bwrite() ( or the VOP routine anyway ) is responsible for handling
2182 *	B_INVAL buffers.  Not us.
2183 */
2184void
2185bawrite(struct buf *bp)
2186{
2187
2188	bp->b_flags |= B_ASYNC;
2189	(void) bwrite(bp);
2190}
2191
2192/*
2193 *	babarrierwrite:
2194 *
2195 *	Asynchronous barrier write.  Start output on a buffer, but do not
2196 *	wait for it to complete.  Place a write barrier after this write so
2197 *	that this buffer and all buffers written before it are committed to
2198 *	the disk before any buffers written after this write are committed
2199 *	to the disk.  The buffer is released when the output completes.
2200 */
2201void
2202babarrierwrite(struct buf *bp)
2203{
2204
2205	bp->b_flags |= B_ASYNC | B_BARRIER;
2206	(void) bwrite(bp);
2207}
2208
2209/*
2210 *	bbarrierwrite:
2211 *
2212 *	Synchronous barrier write.  Start output on a buffer and wait for
2213 *	it to complete.  Place a write barrier after this write so that
2214 *	this buffer and all buffers written before it are committed to
2215 *	the disk before any buffers written after this write are committed
2216 *	to the disk.  The buffer is released when the output completes.
2217 */
2218int
2219bbarrierwrite(struct buf *bp)
2220{
2221
2222	bp->b_flags |= B_BARRIER;
2223	return (bwrite(bp));
2224}
2225
2226/*
2227 *	bwillwrite:
2228 *
2229 *	Called prior to the locking of any vnodes when we are expecting to
2230 *	write.  We do not want to starve the buffer cache with too many
2231 *	dirty buffers so we block here.  By blocking prior to the locking
2232 *	of any vnodes we attempt to avoid the situation where a locked vnode
2233 *	prevents the various system daemons from flushing related buffers.
2234 */
2235void
2236bwillwrite(void)
2237{
2238
2239	if (numdirtybuffers >= hidirtybuffers) {
2240		mtx_lock(&bdirtylock);
2241		while (numdirtybuffers >= hidirtybuffers) {
2242			bdirtywait = 1;
2243			msleep(&bdirtywait, &bdirtylock, (PRIBIO + 4),
2244			    "flswai", 0);
2245		}
2246		mtx_unlock(&bdirtylock);
2247	}
2248}
2249
2250/*
2251 * Return true if we have too many dirty buffers.
2252 */
2253int
2254buf_dirty_count_severe(void)
2255{
2256
2257	return(numdirtybuffers >= hidirtybuffers);
2258}
2259
2260/*
2261 *	brelse:
2262 *
2263 *	Release a busy buffer and, if requested, free its resources.  The
2264 *	buffer will be stashed in the appropriate bufqueue[] allowing it
2265 *	to be accessed later as a cache entity or reused for other purposes.
2266 */
2267void
2268brelse(struct buf *bp)
2269{
2270	int qindex;
2271
2272	/*
2273	 * Many functions erroneously call brelse with a NULL bp under rare
2274	 * error conditions. Simply return when called with a NULL bp.
2275	 */
2276	if (bp == NULL)
2277		return;
2278	CTR3(KTR_BUF, "brelse(%p) vp %p flags %X",
2279	    bp, bp->b_vp, bp->b_flags);
2280	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
2281	    ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
2282	KASSERT((bp->b_flags & B_VMIO) != 0 || (bp->b_flags & B_NOREUSE) == 0,
2283	    ("brelse: non-VMIO buffer marked NOREUSE"));
2284
2285	if (BUF_LOCKRECURSED(bp)) {
2286		/*
2287		 * Do not process, in particular, do not handle the
2288		 * B_INVAL/B_RELBUF and do not release to free list.
2289		 */
2290		BUF_UNLOCK(bp);
2291		return;
2292	}
2293
2294	if (bp->b_flags & B_MANAGED) {
2295		bqrelse(bp);
2296		return;
2297	}
2298
2299	if ((bp->b_vflags & (BV_BKGRDINPROG | BV_BKGRDERR)) == BV_BKGRDERR) {
2300		BO_LOCK(bp->b_bufobj);
2301		bp->b_vflags &= ~BV_BKGRDERR;
2302		BO_UNLOCK(bp->b_bufobj);
2303		bdirty(bp);
2304	}
2305	if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) &&
2306	    !(bp->b_flags & B_INVAL)) {
2307		/*
2308		 * Failed write, redirty.  Must clear BIO_ERROR to prevent
2309		 * pages from being scrapped.
2310		 */
2311		bp->b_ioflags &= ~BIO_ERROR;
2312		bdirty(bp);
2313	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
2314	    (bp->b_ioflags & BIO_ERROR) || (bp->b_bufsize <= 0)) {
2315		/*
2316		 * Either a failed read I/O or we were asked to free or not
2317		 * cache the buffer.
2318		 */
2319		bp->b_flags |= B_INVAL;
2320		if (!LIST_EMPTY(&bp->b_dep))
2321			buf_deallocate(bp);
2322		if (bp->b_flags & B_DELWRI)
2323			bdirtysub();
2324		bp->b_flags &= ~(B_DELWRI | B_CACHE);
2325		if ((bp->b_flags & B_VMIO) == 0) {
2326			allocbuf(bp, 0);
2327			if (bp->b_vp)
2328				brelvp(bp);
2329		}
2330	}
2331
2332	/*
2333	 * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_truncate()
2334	 * is called with B_DELWRI set, the underlying pages may wind up
2335	 * getting freed causing a previous write (bdwrite()) to get 'lost'
2336	 * because pages associated with a B_DELWRI bp are marked clean.
2337	 *
2338	 * We still allow the B_INVAL case to call vfs_vmio_truncate(), even
2339	 * if B_DELWRI is set.
2340	 */
2341	if (bp->b_flags & B_DELWRI)
2342		bp->b_flags &= ~B_RELBUF;
2343
2344	/*
2345	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
2346	 * constituted, not even NFS buffers now.  Two flags effect this.  If
2347	 * B_INVAL, the struct buf is invalidated but the VM object is kept
2348	 * around ( i.e. so it is trivial to reconstitute the buffer later ).
2349	 *
2350	 * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
2351	 * invalidated.  BIO_ERROR cannot be set for a failed write unless the
2352	 * buffer is also B_INVAL because it hits the re-dirtying code above.
2353	 *
2354	 * Normally we can do this whether a buffer is B_DELWRI or not.  If
2355	 * the buffer is an NFS buffer, it is tracking piecemeal writes or
2356	 * the commit state and we cannot afford to lose the buffer. If the
2357	 * buffer has a background write in progress, we need to keep it
2358	 * around to prevent it from being reconstituted and starting a second
2359	 * background write.
2360	 */
2361	if ((bp->b_flags & B_VMIO) && (bp->b_flags & B_NOCACHE ||
2362	    (bp->b_ioflags & BIO_ERROR && bp->b_iocmd == BIO_READ)) &&
2363	    !(bp->b_vp->v_mount != NULL &&
2364	    (bp->b_vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
2365	    !vn_isdisk(bp->b_vp, NULL) && (bp->b_flags & B_DELWRI))) {
2366		vfs_vmio_invalidate(bp);
2367		allocbuf(bp, 0);
2368	}
2369
2370	if ((bp->b_flags & (B_INVAL | B_RELBUF)) != 0 ||
2371	    (bp->b_flags & (B_DELWRI | B_NOREUSE)) == B_NOREUSE) {
2372		allocbuf(bp, 0);
2373		bp->b_flags &= ~B_NOREUSE;
2374		if (bp->b_vp != NULL)
2375			brelvp(bp);
2376	}
2377
2378	/*
2379	 * If the buffer has junk contents signal it and eventually
2380	 * clean up B_DELWRI and diassociate the vnode so that gbincore()
2381	 * doesn't find it.
2382	 */
2383	if (bp->b_bufsize == 0 || (bp->b_ioflags & BIO_ERROR) != 0 ||
2384	    (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) != 0)
2385		bp->b_flags |= B_INVAL;
2386	if (bp->b_flags & B_INVAL) {
2387		if (bp->b_flags & B_DELWRI)
2388			bundirty(bp);
2389		if (bp->b_vp)
2390			brelvp(bp);
2391	}
2392
2393	/* buffers with no memory */
2394	if (bp->b_bufsize == 0) {
2395		buf_free(bp);
2396		return;
2397	}
2398	/* buffers with junk contents */
2399	if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
2400	    (bp->b_ioflags & BIO_ERROR)) {
2401		bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
2402		if (bp->b_vflags & BV_BKGRDINPROG)
2403			panic("losing buffer 2");
2404		qindex = QUEUE_CLEAN;
2405		bp->b_flags |= B_AGE;
2406	/* remaining buffers */
2407	} else if (bp->b_flags & B_DELWRI)
2408		qindex = QUEUE_DIRTY;
2409	else
2410		qindex = QUEUE_CLEAN;
2411
2412	binsfree(bp, qindex);
2413
2414	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF | B_DIRECT);
2415	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
2416		panic("brelse: not dirty");
2417	/* unlock */
2418	BUF_UNLOCK(bp);
2419	if (qindex == QUEUE_CLEAN)
2420		bufspace_wakeup();
2421}
2422
2423/*
2424 * Release a buffer back to the appropriate queue but do not try to free
2425 * it.  The buffer is expected to be used again soon.
2426 *
2427 * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
2428 * biodone() to requeue an async I/O on completion.  It is also used when
2429 * known good buffers need to be requeued but we think we may need the data
2430 * again soon.
2431 *
2432 * XXX we should be able to leave the B_RELBUF hint set on completion.
2433 */
2434void
2435bqrelse(struct buf *bp)
2436{
2437	int qindex;
2438
2439	CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2440	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
2441	    ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
2442
2443	qindex = QUEUE_NONE;
2444	if (BUF_LOCKRECURSED(bp)) {
2445		/* do not release to free list */
2446		BUF_UNLOCK(bp);
2447		return;
2448	}
2449	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
2450
2451	if (bp->b_flags & B_MANAGED) {
2452		if (bp->b_flags & B_REMFREE)
2453			bremfreef(bp);
2454		goto out;
2455	}
2456
2457	/* buffers with stale but valid contents */
2458	if ((bp->b_flags & B_DELWRI) != 0 || (bp->b_vflags & (BV_BKGRDINPROG |
2459	    BV_BKGRDERR)) == BV_BKGRDERR) {
2460		BO_LOCK(bp->b_bufobj);
2461		bp->b_vflags &= ~BV_BKGRDERR;
2462		BO_UNLOCK(bp->b_bufobj);
2463		qindex = QUEUE_DIRTY;
2464	} else {
2465		if ((bp->b_flags & B_DELWRI) == 0 &&
2466		    (bp->b_xflags & BX_VNDIRTY))
2467			panic("bqrelse: not dirty");
2468		if ((bp->b_flags & B_NOREUSE) != 0) {
2469			brelse(bp);
2470			return;
2471		}
2472		qindex = QUEUE_CLEAN;
2473	}
2474	binsfree(bp, qindex);
2475
2476out:
2477	/* unlock */
2478	BUF_UNLOCK(bp);
2479	if (qindex == QUEUE_CLEAN)
2480		bufspace_wakeup();
2481}
2482
2483/*
2484 * Complete I/O to a VMIO backed page.  Validate the pages as appropriate,
2485 * restore bogus pages.
2486 */
2487static void
2488vfs_vmio_iodone(struct buf *bp)
2489{
2490	vm_ooffset_t foff;
2491	vm_page_t m;
2492	vm_object_t obj;
2493	struct vnode *vp;
2494	int bogus, i, iosize;
2495
2496	obj = bp->b_bufobj->bo_object;
2497	KASSERT(obj->paging_in_progress >= bp->b_npages,
2498	    ("vfs_vmio_iodone: paging in progress(%d) < b_npages(%d)",
2499	    obj->paging_in_progress, bp->b_npages));
2500
2501	vp = bp->b_vp;
2502	KASSERT(vp->v_holdcnt > 0,
2503	    ("vfs_vmio_iodone: vnode %p has zero hold count", vp));
2504	KASSERT(vp->v_object != NULL,
2505	    ("vfs_vmio_iodone: vnode %p has no vm_object", vp));
2506
2507	foff = bp->b_offset;
2508	KASSERT(bp->b_offset != NOOFFSET,
2509	    ("vfs_vmio_iodone: bp %p has no buffer offset", bp));
2510
2511	bogus = 0;
2512	iosize = bp->b_bcount - bp->b_resid;
2513	VM_OBJECT_WLOCK(obj);
2514	for (i = 0; i < bp->b_npages; i++) {
2515		int resid;
2516
2517		resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;
2518		if (resid > iosize)
2519			resid = iosize;
2520
2521		/*
2522		 * cleanup bogus pages, restoring the originals
2523		 */
2524		m = bp->b_pages[i];
2525		if (m == bogus_page) {
2526			bogus = 1;
2527			m = vm_page_lookup(obj, OFF_TO_IDX(foff));
2528			if (m == NULL)
2529				panic("biodone: page disappeared!");
2530			bp->b_pages[i] = m;
2531		} else if ((bp->b_iocmd == BIO_READ) && resid > 0) {
2532			/*
2533			 * In the write case, the valid and clean bits are
2534			 * already changed correctly ( see bdwrite() ), so we
2535			 * only need to do this here in the read case.
2536			 */
2537			KASSERT((m->dirty & vm_page_bits(foff & PAGE_MASK,
2538			    resid)) == 0, ("vfs_vmio_iodone: page %p "
2539			    "has unexpected dirty bits", m));
2540			vfs_page_set_valid(bp, foff, m);
2541		}
2542		KASSERT(OFF_TO_IDX(foff) == m->pindex,
2543		    ("vfs_vmio_iodone: foff(%jd)/pindex(%ju) mismatch",
2544		    (intmax_t)foff, (uintmax_t)m->pindex));
2545
2546		vm_page_sunbusy(m);
2547		foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
2548		iosize -= resid;
2549	}
2550	vm_object_pip_wakeupn(obj, bp->b_npages);
2551	VM_OBJECT_WUNLOCK(obj);
2552	if (bogus && buf_mapped(bp)) {
2553		BUF_CHECK_MAPPED(bp);
2554		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
2555		    bp->b_pages, bp->b_npages);
2556	}
2557}
2558
2559/*
2560 * Unwire a page held by a buf and place it on the appropriate vm queue.
2561 */
2562static void
2563vfs_vmio_unwire(struct buf *bp, vm_page_t m)
2564{
2565	bool freed;
2566
2567	vm_page_lock(m);
2568	if (vm_page_unwire(m, PQ_NONE)) {
2569		/*
2570		 * Determine if the page should be freed before adding
2571		 * it to the inactive queue.
2572		 */
2573		if (m->valid == 0) {
2574			freed = !vm_page_busied(m);
2575			if (freed)
2576				vm_page_free(m);
2577		} else if ((bp->b_flags & B_DIRECT) != 0)
2578			freed = vm_page_try_to_free(m);
2579		else
2580			freed = false;
2581		if (!freed) {
2582			/*
2583			 * If the page is unlikely to be reused, let the
2584			 * VM know.  Otherwise, maintain LRU page
2585			 * ordering and put the page at the tail of the
2586			 * inactive queue.
2587			 */
2588			if ((bp->b_flags & B_NOREUSE) != 0)
2589				vm_page_deactivate_noreuse(m);
2590			else
2591				vm_page_deactivate(m);
2592		}
2593	}
2594	vm_page_unlock(m);
2595}
2596
2597/*
2598 * Perform page invalidation when a buffer is released.  The fully invalid
2599 * pages will be reclaimed later in vfs_vmio_truncate().
2600 */
2601static void
2602vfs_vmio_invalidate(struct buf *bp)
2603{
2604	vm_object_t obj;
2605	vm_page_t m;
2606	int i, resid, poffset, presid;
2607
2608	if (buf_mapped(bp)) {
2609		BUF_CHECK_MAPPED(bp);
2610		pmap_qremove(trunc_page((vm_offset_t)bp->b_data), bp->b_npages);
2611	} else
2612		BUF_CHECK_UNMAPPED(bp);
2613	/*
2614	 * Get the base offset and length of the buffer.  Note that
2615	 * in the VMIO case if the buffer block size is not
2616	 * page-aligned then b_data pointer may not be page-aligned.
2617	 * But our b_pages[] array *IS* page aligned.
2618	 *
2619	 * block sizes less then DEV_BSIZE (usually 512) are not
2620	 * supported due to the page granularity bits (m->valid,
2621	 * m->dirty, etc...).
2622	 *
2623	 * See man buf(9) for more information
2624	 */
2625	obj = bp->b_bufobj->bo_object;
2626	resid = bp->b_bufsize;
2627	poffset = bp->b_offset & PAGE_MASK;
2628	VM_OBJECT_WLOCK(obj);
2629	for (i = 0; i < bp->b_npages; i++) {
2630		m = bp->b_pages[i];
2631		if (m == bogus_page)
2632			panic("vfs_vmio_invalidate: Unexpected bogus page.");
2633		bp->b_pages[i] = NULL;
2634
2635		presid = resid > (PAGE_SIZE - poffset) ?
2636		    (PAGE_SIZE - poffset) : resid;
2637		KASSERT(presid >= 0, ("brelse: extra page"));
2638		while (vm_page_xbusied(m)) {
2639			vm_page_lock(m);
2640			VM_OBJECT_WUNLOCK(obj);
2641			vm_page_busy_sleep(m, "mbncsh", true);
2642			VM_OBJECT_WLOCK(obj);
2643		}
2644		if (pmap_page_wired_mappings(m) == 0)
2645			vm_page_set_invalid(m, poffset, presid);
2646		vfs_vmio_unwire(bp, m);
2647		resid -= presid;
2648		poffset = 0;
2649	}
2650	VM_OBJECT_WUNLOCK(obj);
2651	bp->b_npages = 0;
2652}
2653
2654/*
2655 * Page-granular truncation of an existing VMIO buffer.
2656 */
2657static void
2658vfs_vmio_truncate(struct buf *bp, int desiredpages)
2659{
2660	vm_object_t obj;
2661	vm_page_t m;
2662	int i;
2663
2664	if (bp->b_npages == desiredpages)
2665		return;
2666
2667	if (buf_mapped(bp)) {
2668		BUF_CHECK_MAPPED(bp);
2669		pmap_qremove((vm_offset_t)trunc_page((vm_offset_t)bp->b_data) +
2670		    (desiredpages << PAGE_SHIFT), bp->b_npages - desiredpages);
2671	} else
2672		BUF_CHECK_UNMAPPED(bp);
2673	obj = bp->b_bufobj->bo_object;
2674	if (obj != NULL)
2675		VM_OBJECT_WLOCK(obj);
2676	for (i = desiredpages; i < bp->b_npages; i++) {
2677		m = bp->b_pages[i];
2678		KASSERT(m != bogus_page, ("allocbuf: bogus page found"));
2679		bp->b_pages[i] = NULL;
2680		vfs_vmio_unwire(bp, m);
2681	}
2682	if (obj != NULL)
2683		VM_OBJECT_WUNLOCK(obj);
2684	bp->b_npages = desiredpages;
2685}
2686
2687/*
2688 * Byte granular extension of VMIO buffers.
2689 */
2690static void
2691vfs_vmio_extend(struct buf *bp, int desiredpages, int size)
2692{
2693	/*
2694	 * We are growing the buffer, possibly in a
2695	 * byte-granular fashion.
2696	 */
2697	vm_object_t obj;
2698	vm_offset_t toff;
2699	vm_offset_t tinc;
2700	vm_page_t m;
2701
2702	/*
2703	 * Step 1, bring in the VM pages from the object, allocating
2704	 * them if necessary.  We must clear B_CACHE if these pages
2705	 * are not valid for the range covered by the buffer.
2706	 */
2707	obj = bp->b_bufobj->bo_object;
2708	VM_OBJECT_WLOCK(obj);
2709	while (bp->b_npages < desiredpages) {
2710		/*
2711		 * We must allocate system pages since blocking
2712		 * here could interfere with paging I/O, no
2713		 * matter which process we are.
2714		 *
2715		 * Only exclusive busy can be tested here.
2716		 * Blocking on shared busy might lead to
2717		 * deadlocks once allocbuf() is called after
2718		 * pages are vfs_busy_pages().
2719		 */
2720		m = vm_page_grab(obj, OFF_TO_IDX(bp->b_offset) + bp->b_npages,
2721		    VM_ALLOC_NOBUSY | VM_ALLOC_SYSTEM |
2722		    VM_ALLOC_WIRED | VM_ALLOC_IGN_SBUSY |
2723		    VM_ALLOC_COUNT(desiredpages - bp->b_npages));
2724		if (m->valid == 0)
2725			bp->b_flags &= ~B_CACHE;
2726		bp->b_pages[bp->b_npages] = m;
2727		++bp->b_npages;
2728	}
2729
2730	/*
2731	 * Step 2.  We've loaded the pages into the buffer,
2732	 * we have to figure out if we can still have B_CACHE
2733	 * set.  Note that B_CACHE is set according to the
2734	 * byte-granular range ( bcount and size ), not the
2735	 * aligned range ( newbsize ).
2736	 *
2737	 * The VM test is against m->valid, which is DEV_BSIZE
2738	 * aligned.  Needless to say, the validity of the data
2739	 * needs to also be DEV_BSIZE aligned.  Note that this
2740	 * fails with NFS if the server or some other client
2741	 * extends the file's EOF.  If our buffer is resized,
2742	 * B_CACHE may remain set! XXX
2743	 */
2744	toff = bp->b_bcount;
2745	tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
2746	while ((bp->b_flags & B_CACHE) && toff < size) {
2747		vm_pindex_t pi;
2748
2749		if (tinc > (size - toff))
2750			tinc = size - toff;
2751		pi = ((bp->b_offset & PAGE_MASK) + toff) >> PAGE_SHIFT;
2752		m = bp->b_pages[pi];
2753		vfs_buf_test_cache(bp, bp->b_offset, toff, tinc, m);
2754		toff += tinc;
2755		tinc = PAGE_SIZE;
2756	}
2757	VM_OBJECT_WUNLOCK(obj);
2758
2759	/*
2760	 * Step 3, fixup the KVA pmap.
2761	 */
2762	if (buf_mapped(bp))
2763		bpmap_qenter(bp);
2764	else
2765		BUF_CHECK_UNMAPPED(bp);
2766}
2767
2768/*
2769 * Check to see if a block at a particular lbn is available for a clustered
2770 * write.
2771 */
2772static int
2773vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno)
2774{
2775	struct buf *bpa;
2776	int match;
2777
2778	match = 0;
2779
2780	/* If the buf isn't in core skip it */
2781	if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL)
2782		return (0);
2783
2784	/* If the buf is busy we don't want to wait for it */
2785	if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
2786		return (0);
2787
2788	/* Only cluster with valid clusterable delayed write buffers */
2789	if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) !=
2790	    (B_DELWRI | B_CLUSTEROK))
2791		goto done;
2792
2793	if (bpa->b_bufsize != size)
2794		goto done;
2795
2796	/*
2797	 * Check to see if it is in the expected place on disk and that the
2798	 * block has been mapped.
2799	 */
2800	if ((bpa->b_blkno != bpa->b_lblkno) && (bpa->b_blkno == blkno))
2801		match = 1;
2802done:
2803	BUF_UNLOCK(bpa);
2804	return (match);
2805}
2806
2807/*
2808 *	vfs_bio_awrite:
2809 *
2810 *	Implement clustered async writes for clearing out B_DELWRI buffers.
2811 *	This is much better then the old way of writing only one buffer at
2812 *	a time.  Note that we may not be presented with the buffers in the
2813 *	correct order, so we search for the cluster in both directions.
2814 */
2815int
2816vfs_bio_awrite(struct buf *bp)
2817{
2818	struct bufobj *bo;
2819	int i;
2820	int j;
2821	daddr_t lblkno = bp->b_lblkno;
2822	struct vnode *vp = bp->b_vp;
2823	int ncl;
2824	int nwritten;
2825	int size;
2826	int maxcl;
2827	int gbflags;
2828
2829	bo = &vp->v_bufobj;
2830	gbflags = (bp->b_data == unmapped_buf) ? GB_UNMAPPED : 0;
2831	/*
2832	 * right now we support clustered writing only to regular files.  If
2833	 * we find a clusterable block we could be in the middle of a cluster
2834	 * rather then at the beginning.
2835	 */
2836	if ((vp->v_type == VREG) &&
2837	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
2838	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
2839
2840		size = vp->v_mount->mnt_stat.f_iosize;
2841		maxcl = MAXPHYS / size;
2842
2843		BO_RLOCK(bo);
2844		for (i = 1; i < maxcl; i++)
2845			if (vfs_bio_clcheck(vp, size, lblkno + i,
2846			    bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0)
2847				break;
2848
2849		for (j = 1; i + j <= maxcl && j <= lblkno; j++)
2850			if (vfs_bio_clcheck(vp, size, lblkno - j,
2851			    bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0)
2852				break;
2853		BO_RUNLOCK(bo);
2854		--j;
2855		ncl = i + j;
2856		/*
2857		 * this is a possible cluster write
2858		 */
2859		if (ncl != 1) {
2860			BUF_UNLOCK(bp);
2861			nwritten = cluster_wbuild(vp, size, lblkno - j, ncl,
2862			    gbflags);
2863			return (nwritten);
2864		}
2865	}
2866	bremfree(bp);
2867	bp->b_flags |= B_ASYNC;
2868	/*
2869	 * default (old) behavior, writing out only one block
2870	 *
2871	 * XXX returns b_bufsize instead of b_bcount for nwritten?
2872	 */
2873	nwritten = bp->b_bufsize;
2874	(void) bwrite(bp);
2875
2876	return (nwritten);
2877}
2878
2879/*
2880 *	getnewbuf_kva:
2881 *
2882 *	Allocate KVA for an empty buf header according to gbflags.
2883 */
2884static int
2885getnewbuf_kva(struct buf *bp, int gbflags, int maxsize)
2886{
2887
2888	if ((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_UNMAPPED) {
2889		/*
2890		 * In order to keep fragmentation sane we only allocate kva
2891		 * in BKVASIZE chunks.  XXX with vmem we can do page size.
2892		 */
2893		maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
2894
2895		if (maxsize != bp->b_kvasize &&
2896		    bufkva_alloc(bp, maxsize, gbflags))
2897			return (ENOSPC);
2898	}
2899	return (0);
2900}
2901
2902/*
2903 *	getnewbuf:
2904 *
2905 *	Find and initialize a new buffer header, freeing up existing buffers
2906 *	in the bufqueues as necessary.  The new buffer is returned locked.
2907 *
2908 *	We block if:
2909 *		We have insufficient buffer headers
2910 *		We have insufficient buffer space
2911 *		buffer_arena is too fragmented ( space reservation fails )
2912 *		If we have to flush dirty buffers ( but we try to avoid this )
2913 *
2914 *	The caller is responsible for releasing the reserved bufspace after
2915 *	allocbuf() is called.
2916 */
2917static struct buf *
2918getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int maxsize, int gbflags)
2919{
2920	struct buf *bp;
2921	bool metadata, reserved;
2922
2923	bp = NULL;
2924	KASSERT((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
2925	    ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
2926	if (!unmapped_buf_allowed)
2927		gbflags &= ~(GB_UNMAPPED | GB_KVAALLOC);
2928
2929	if (vp == NULL || (vp->v_vflag & (VV_MD | VV_SYSTEM)) != 0 ||
2930	    vp->v_type == VCHR)
2931		metadata = true;
2932	else
2933		metadata = false;
2934	atomic_add_int(&getnewbufcalls, 1);
2935	reserved = false;
2936	do {
2937		if (reserved == false &&
2938		    bufspace_reserve(maxsize, metadata) != 0)
2939			continue;
2940		reserved = true;
2941		if ((bp = buf_alloc()) == NULL)
2942			continue;
2943		if (getnewbuf_kva(bp, gbflags, maxsize) == 0)
2944			return (bp);
2945		break;
2946	} while(buf_scan(false) == 0);
2947
2948	if (reserved)
2949		atomic_subtract_long(&bufspace, maxsize);
2950	if (bp != NULL) {
2951		bp->b_flags |= B_INVAL;
2952		brelse(bp);
2953	}
2954	bufspace_wait(vp, gbflags, slpflag, slptimeo);
2955
2956	return (NULL);
2957}
2958
2959/*
2960 *	buf_daemon:
2961 *
2962 *	buffer flushing daemon.  Buffers are normally flushed by the
2963 *	update daemon but if it cannot keep up this process starts to
2964 *	take the load in an attempt to prevent getnewbuf() from blocking.
2965 */
2966static struct kproc_desc buf_kp = {
2967	"bufdaemon",
2968	buf_daemon,
2969	&bufdaemonproc
2970};
2971SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp);
2972
2973static int
2974buf_flush(struct vnode *vp, int target)
2975{
2976	int flushed;
2977
2978	flushed = flushbufqueues(vp, target, 0);
2979	if (flushed == 0) {
2980		/*
2981		 * Could not find any buffers without rollback
2982		 * dependencies, so just write the first one
2983		 * in the hopes of eventually making progress.
2984		 */
2985		if (vp != NULL && target > 2)
2986			target /= 2;
2987		flushbufqueues(vp, target, 1);
2988	}
2989	return (flushed);
2990}
2991
2992static void
2993buf_daemon()
2994{
2995	int lodirty;
2996
2997	/*
2998	 * This process needs to be suspended prior to shutdown sync.
2999	 */
3000	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, bufdaemonproc,
3001	    SHUTDOWN_PRI_LAST);
3002
3003	/*
3004	 * This process is allowed to take the buffer cache to the limit
3005	 */
3006	curthread->td_pflags |= TDP_NORUNNINGBUF | TDP_BUFNEED;
3007	mtx_lock(&bdlock);
3008	for (;;) {
3009		bd_request = 0;
3010		mtx_unlock(&bdlock);
3011
3012		kproc_suspend_check(bufdaemonproc);
3013		lodirty = lodirtybuffers;
3014		if (bd_speedupreq) {
3015			lodirty = numdirtybuffers / 2;
3016			bd_speedupreq = 0;
3017		}
3018		/*
3019		 * Do the flush.  Limit the amount of in-transit I/O we
3020		 * allow to build up, otherwise we would completely saturate
3021		 * the I/O system.
3022		 */
3023		while (numdirtybuffers > lodirty) {
3024			if (buf_flush(NULL, numdirtybuffers - lodirty) == 0)
3025				break;
3026			kern_yield(PRI_USER);
3027		}
3028
3029		/*
3030		 * Only clear bd_request if we have reached our low water
3031		 * mark.  The buf_daemon normally waits 1 second and
3032		 * then incrementally flushes any dirty buffers that have
3033		 * built up, within reason.
3034		 *
3035		 * If we were unable to hit our low water mark and couldn't
3036		 * find any flushable buffers, we sleep for a short period
3037		 * to avoid endless loops on unlockable buffers.
3038		 */
3039		mtx_lock(&bdlock);
3040		if (numdirtybuffers <= lodirtybuffers) {
3041			/*
3042			 * We reached our low water mark, reset the
3043			 * request and sleep until we are needed again.
3044			 * The sleep is just so the suspend code works.
3045			 */
3046			bd_request = 0;
3047			/*
3048			 * Do an extra wakeup in case dirty threshold
3049			 * changed via sysctl and the explicit transition
3050			 * out of shortfall was missed.
3051			 */
3052			bdirtywakeup();
3053			if (runningbufspace <= lorunningspace)
3054				runningwakeup();
3055			msleep(&bd_request, &bdlock, PVM, "psleep", hz);
3056		} else {
3057			/*
3058			 * We couldn't find any flushable dirty buffers but
3059			 * still have too many dirty buffers, we
3060			 * have to sleep and try again.  (rare)
3061			 */
3062			msleep(&bd_request, &bdlock, PVM, "qsleep", hz / 10);
3063		}
3064	}
3065}
3066
3067/*
3068 *	flushbufqueues:
3069 *
3070 *	Try to flush a buffer in the dirty queue.  We must be careful to
3071 *	free up B_INVAL buffers instead of write them, which NFS is
3072 *	particularly sensitive to.
3073 */
3074static int flushwithdeps = 0;
3075SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW, &flushwithdeps,
3076    0, "Number of buffers flushed with dependecies that require rollbacks");
3077
3078static int
3079flushbufqueues(struct vnode *lvp, int target, int flushdeps)
3080{
3081	struct buf *sentinel;
3082	struct vnode *vp;
3083	struct mount *mp;
3084	struct buf *bp;
3085	int hasdeps;
3086	int flushed;
3087	int queue;
3088	int error;
3089	bool unlock;
3090
3091	flushed = 0;
3092	queue = QUEUE_DIRTY;
3093	bp = NULL;
3094	sentinel = malloc(sizeof(struct buf), M_TEMP, M_WAITOK | M_ZERO);
3095	sentinel->b_qindex = QUEUE_SENTINEL;
3096	mtx_lock(&bqlocks[queue]);
3097	TAILQ_INSERT_HEAD(&bufqueues[queue], sentinel, b_freelist);
3098	mtx_unlock(&bqlocks[queue]);
3099	while (flushed != target) {
3100		maybe_yield();
3101		mtx_lock(&bqlocks[queue]);
3102		bp = TAILQ_NEXT(sentinel, b_freelist);
3103		if (bp != NULL) {
3104			TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist);
3105			TAILQ_INSERT_AFTER(&bufqueues[queue], bp, sentinel,
3106			    b_freelist);
3107		} else {
3108			mtx_unlock(&bqlocks[queue]);
3109			break;
3110		}
3111		/*
3112		 * Skip sentinels inserted by other invocations of the
3113		 * flushbufqueues(), taking care to not reorder them.
3114		 *
3115		 * Only flush the buffers that belong to the
3116		 * vnode locked by the curthread.
3117		 */
3118		if (bp->b_qindex == QUEUE_SENTINEL || (lvp != NULL &&
3119		    bp->b_vp != lvp)) {
3120			mtx_unlock(&bqlocks[queue]);
3121 			continue;
3122		}
3123		error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL);
3124		mtx_unlock(&bqlocks[queue]);
3125		if (error != 0)
3126			continue;
3127		if (bp->b_pin_count > 0) {
3128			BUF_UNLOCK(bp);
3129			continue;
3130		}
3131		/*
3132		 * BKGRDINPROG can only be set with the buf and bufobj
3133		 * locks both held.  We tolerate a race to clear it here.
3134		 */
3135		if ((bp->b_vflags & BV_BKGRDINPROG) != 0 ||
3136		    (bp->b_flags & B_DELWRI) == 0) {
3137			BUF_UNLOCK(bp);
3138			continue;
3139		}
3140		if (bp->b_flags & B_INVAL) {
3141			bremfreef(bp);
3142			brelse(bp);
3143			flushed++;
3144			continue;
3145		}
3146
3147		if (!LIST_EMPTY(&bp->b_dep) && buf_countdeps(bp, 0)) {
3148			if (flushdeps == 0) {
3149				BUF_UNLOCK(bp);
3150				continue;
3151			}
3152			hasdeps = 1;
3153		} else
3154			hasdeps = 0;
3155		/*
3156		 * We must hold the lock on a vnode before writing
3157		 * one of its buffers. Otherwise we may confuse, or
3158		 * in the case of a snapshot vnode, deadlock the
3159		 * system.
3160		 *
3161		 * The lock order here is the reverse of the normal
3162		 * of vnode followed by buf lock.  This is ok because
3163		 * the NOWAIT will prevent deadlock.
3164		 */
3165		vp = bp->b_vp;
3166		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
3167			BUF_UNLOCK(bp);
3168			continue;
3169		}
3170		if (lvp == NULL) {
3171			unlock = true;
3172			error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
3173		} else {
3174			ASSERT_VOP_LOCKED(vp, "getbuf");
3175			unlock = false;
3176			error = VOP_ISLOCKED(vp) == LK_EXCLUSIVE ? 0 :
3177			    vn_lock(vp, LK_TRYUPGRADE);
3178		}
3179		if (error == 0) {
3180			CTR3(KTR_BUF, "flushbufqueue(%p) vp %p flags %X",
3181			    bp, bp->b_vp, bp->b_flags);
3182			if (curproc == bufdaemonproc) {
3183				vfs_bio_awrite(bp);
3184			} else {
3185				bremfree(bp);
3186				bwrite(bp);
3187				notbufdflushes++;
3188			}
3189			vn_finished_write(mp);
3190			if (unlock)
3191				VOP_UNLOCK(vp, 0);
3192			flushwithdeps += hasdeps;
3193			flushed++;
3194
3195			/*
3196			 * Sleeping on runningbufspace while holding
3197			 * vnode lock leads to deadlock.
3198			 */
3199			if (curproc == bufdaemonproc &&
3200			    runningbufspace > hirunningspace)
3201				waitrunningbufspace();
3202			continue;
3203		}
3204		vn_finished_write(mp);
3205		BUF_UNLOCK(bp);
3206	}
3207	mtx_lock(&bqlocks[queue]);
3208	TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist);
3209	mtx_unlock(&bqlocks[queue]);
3210	free(sentinel, M_TEMP);
3211	return (flushed);
3212}
3213
3214/*
3215 * Check to see if a block is currently memory resident.
3216 */
3217struct buf *
3218incore(struct bufobj *bo, daddr_t blkno)
3219{
3220	struct buf *bp;
3221
3222	BO_RLOCK(bo);
3223	bp = gbincore(bo, blkno);
3224	BO_RUNLOCK(bo);
3225	return (bp);
3226}
3227
3228/*
3229 * Returns true if no I/O is needed to access the
3230 * associated VM object.  This is like incore except
3231 * it also hunts around in the VM system for the data.
3232 */
3233
3234static int
3235inmem(struct vnode * vp, daddr_t blkno)
3236{
3237	vm_object_t obj;
3238	vm_offset_t toff, tinc, size;
3239	vm_page_t m;
3240	vm_ooffset_t off;
3241
3242	ASSERT_VOP_LOCKED(vp, "inmem");
3243
3244	if (incore(&vp->v_bufobj, blkno))
3245		return 1;
3246	if (vp->v_mount == NULL)
3247		return 0;
3248	obj = vp->v_object;
3249	if (obj == NULL)
3250		return (0);
3251
3252	size = PAGE_SIZE;
3253	if (size > vp->v_mount->mnt_stat.f_iosize)
3254		size = vp->v_mount->mnt_stat.f_iosize;
3255	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
3256
3257	VM_OBJECT_RLOCK(obj);
3258	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
3259		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
3260		if (!m)
3261			goto notinmem;
3262		tinc = size;
3263		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
3264			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
3265		if (vm_page_is_valid(m,
3266		    (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
3267			goto notinmem;
3268	}
3269	VM_OBJECT_RUNLOCK(obj);
3270	return 1;
3271
3272notinmem:
3273	VM_OBJECT_RUNLOCK(obj);
3274	return (0);
3275}
3276
3277/*
3278 * Set the dirty range for a buffer based on the status of the dirty
3279 * bits in the pages comprising the buffer.  The range is limited
3280 * to the size of the buffer.
3281 *
3282 * Tell the VM system that the pages associated with this buffer
3283 * are clean.  This is used for delayed writes where the data is
3284 * going to go to disk eventually without additional VM intevention.
3285 *
3286 * Note that while we only really need to clean through to b_bcount, we
3287 * just go ahead and clean through to b_bufsize.
3288 */
3289static void
3290vfs_clean_pages_dirty_buf(struct buf *bp)
3291{
3292	vm_ooffset_t foff, noff, eoff;
3293	vm_page_t m;
3294	int i;
3295
3296	if ((bp->b_flags & B_VMIO) == 0 || bp->b_bufsize == 0)
3297		return;
3298
3299	foff = bp->b_offset;
3300	KASSERT(bp->b_offset != NOOFFSET,
3301	    ("vfs_clean_pages_dirty_buf: no buffer offset"));
3302
3303	VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
3304	vfs_drain_busy_pages(bp);
3305	vfs_setdirty_locked_object(bp);
3306	for (i = 0; i < bp->b_npages; i++) {
3307		noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3308		eoff = noff;
3309		if (eoff > bp->b_offset + bp->b_bufsize)
3310			eoff = bp->b_offset + bp->b_bufsize;
3311		m = bp->b_pages[i];
3312		vfs_page_set_validclean(bp, foff, m);
3313		/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
3314		foff = noff;
3315	}
3316	VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
3317}
3318
3319static void
3320vfs_setdirty_locked_object(struct buf *bp)
3321{
3322	vm_object_t object;
3323	int i;
3324
3325	object = bp->b_bufobj->bo_object;
3326	VM_OBJECT_ASSERT_WLOCKED(object);
3327
3328	/*
3329	 * We qualify the scan for modified pages on whether the
3330	 * object has been flushed yet.
3331	 */
3332	if ((object->flags & OBJ_MIGHTBEDIRTY) != 0) {
3333		vm_offset_t boffset;
3334		vm_offset_t eoffset;
3335
3336		/*
3337		 * test the pages to see if they have been modified directly
3338		 * by users through the VM system.
3339		 */
3340		for (i = 0; i < bp->b_npages; i++)
3341			vm_page_test_dirty(bp->b_pages[i]);
3342
3343		/*
3344		 * Calculate the encompassing dirty range, boffset and eoffset,
3345		 * (eoffset - boffset) bytes.
3346		 */
3347
3348		for (i = 0; i < bp->b_npages; i++) {
3349			if (bp->b_pages[i]->dirty)
3350				break;
3351		}
3352		boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
3353
3354		for (i = bp->b_npages - 1; i >= 0; --i) {
3355			if (bp->b_pages[i]->dirty) {
3356				break;
3357			}
3358		}
3359		eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
3360
3361		/*
3362		 * Fit it to the buffer.
3363		 */
3364
3365		if (eoffset > bp->b_bcount)
3366			eoffset = bp->b_bcount;
3367
3368		/*
3369		 * If we have a good dirty range, merge with the existing
3370		 * dirty range.
3371		 */
3372
3373		if (boffset < eoffset) {
3374			if (bp->b_dirtyoff > boffset)
3375				bp->b_dirtyoff = boffset;
3376			if (bp->b_dirtyend < eoffset)
3377				bp->b_dirtyend = eoffset;
3378		}
3379	}
3380}
3381
3382/*
3383 * Allocate the KVA mapping for an existing buffer.
3384 * If an unmapped buffer is provided but a mapped buffer is requested, take
3385 * also care to properly setup mappings between pages and KVA.
3386 */
3387static void
3388bp_unmapped_get_kva(struct buf *bp, daddr_t blkno, int size, int gbflags)
3389{
3390	int bsize, maxsize, need_mapping, need_kva;
3391	off_t offset;
3392
3393	need_mapping = bp->b_data == unmapped_buf &&
3394	    (gbflags & GB_UNMAPPED) == 0;
3395	need_kva = bp->b_kvabase == unmapped_buf &&
3396	    bp->b_data == unmapped_buf &&
3397	    (gbflags & GB_KVAALLOC) != 0;
3398	if (!need_mapping && !need_kva)
3399		return;
3400
3401	BUF_CHECK_UNMAPPED(bp);
3402
3403	if (need_mapping && bp->b_kvabase != unmapped_buf) {
3404		/*
3405		 * Buffer is not mapped, but the KVA was already
3406		 * reserved at the time of the instantiation.  Use the
3407		 * allocated space.
3408		 */
3409		goto has_addr;
3410	}
3411
3412	/*
3413	 * Calculate the amount of the address space we would reserve
3414	 * if the buffer was mapped.
3415	 */
3416	bsize = vn_isdisk(bp->b_vp, NULL) ? DEV_BSIZE : bp->b_bufobj->bo_bsize;
3417	KASSERT(bsize != 0, ("bsize == 0, check bo->bo_bsize"));
3418	offset = blkno * bsize;
3419	maxsize = size + (offset & PAGE_MASK);
3420	maxsize = imax(maxsize, bsize);
3421
3422	while (bufkva_alloc(bp, maxsize, gbflags) != 0) {
3423		if ((gbflags & GB_NOWAIT_BD) != 0) {
3424			/*
3425			 * XXXKIB: defragmentation cannot
3426			 * succeed, not sure what else to do.
3427			 */
3428			panic("GB_NOWAIT_BD and GB_UNMAPPED %p", bp);
3429		}
3430		atomic_add_int(&mappingrestarts, 1);
3431		bufspace_wait(bp->b_vp, gbflags, 0, 0);
3432	}
3433has_addr:
3434	if (need_mapping) {
3435		/* b_offset is handled by bpmap_qenter. */
3436		bp->b_data = bp->b_kvabase;
3437		BUF_CHECK_MAPPED(bp);
3438		bpmap_qenter(bp);
3439	}
3440}
3441
3442/*
3443 *	getblk:
3444 *
3445 *	Get a block given a specified block and offset into a file/device.
3446 *	The buffers B_DONE bit will be cleared on return, making it almost
3447 * 	ready for an I/O initiation.  B_INVAL may or may not be set on
3448 *	return.  The caller should clear B_INVAL prior to initiating a
3449 *	READ.
3450 *
3451 *	For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
3452 *	an existing buffer.
3453 *
3454 *	For a VMIO buffer, B_CACHE is modified according to the backing VM.
3455 *	If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
3456 *	and then cleared based on the backing VM.  If the previous buffer is
3457 *	non-0-sized but invalid, B_CACHE will be cleared.
3458 *
3459 *	If getblk() must create a new buffer, the new buffer is returned with
3460 *	both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
3461 *	case it is returned with B_INVAL clear and B_CACHE set based on the
3462 *	backing VM.
3463 *
3464 *	getblk() also forces a bwrite() for any B_DELWRI buffer whos
3465 *	B_CACHE bit is clear.
3466 *
3467 *	What this means, basically, is that the caller should use B_CACHE to
3468 *	determine whether the buffer is fully valid or not and should clear
3469 *	B_INVAL prior to issuing a read.  If the caller intends to validate
3470 *	the buffer by loading its data area with something, the caller needs
3471 *	to clear B_INVAL.  If the caller does this without issuing an I/O,
3472 *	the caller should set B_CACHE ( as an optimization ), else the caller
3473 *	should issue the I/O and biodone() will set B_CACHE if the I/O was
3474 *	a write attempt or if it was a successful read.  If the caller
3475 *	intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
3476 *	prior to issuing the READ.  biodone() will *not* clear B_INVAL.
3477 */
3478struct buf *
3479getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo,
3480    int flags)
3481{
3482	struct buf *bp;
3483	struct bufobj *bo;
3484	int bsize, error, maxsize, vmio;
3485	off_t offset;
3486
3487	CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size);
3488	KASSERT((flags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
3489	    ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
3490	ASSERT_VOP_LOCKED(vp, "getblk");
3491	if (size > MAXBCACHEBUF)
3492		panic("getblk: size(%d) > MAXBCACHEBUF(%d)\n", size,
3493		    MAXBCACHEBUF);
3494	if (!unmapped_buf_allowed)
3495		flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
3496
3497	bo = &vp->v_bufobj;
3498loop:
3499	BO_RLOCK(bo);
3500	bp = gbincore(bo, blkno);
3501	if (bp != NULL) {
3502		int lockflags;
3503		/*
3504		 * Buffer is in-core.  If the buffer is not busy nor managed,
3505		 * it must be on a queue.
3506		 */
3507		lockflags = LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK;
3508
3509		if (flags & GB_LOCK_NOWAIT)
3510			lockflags |= LK_NOWAIT;
3511
3512		error = BUF_TIMELOCK(bp, lockflags,
3513		    BO_LOCKPTR(bo), "getblk", slpflag, slptimeo);
3514
3515		/*
3516		 * If we slept and got the lock we have to restart in case
3517		 * the buffer changed identities.
3518		 */
3519		if (error == ENOLCK)
3520			goto loop;
3521		/* We timed out or were interrupted. */
3522		else if (error)
3523			return (NULL);
3524		/* If recursed, assume caller knows the rules. */
3525		else if (BUF_LOCKRECURSED(bp))
3526			goto end;
3527
3528		/*
3529		 * The buffer is locked.  B_CACHE is cleared if the buffer is
3530		 * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
3531		 * and for a VMIO buffer B_CACHE is adjusted according to the
3532		 * backing VM cache.
3533		 */
3534		if (bp->b_flags & B_INVAL)
3535			bp->b_flags &= ~B_CACHE;
3536		else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
3537			bp->b_flags |= B_CACHE;
3538		if (bp->b_flags & B_MANAGED)
3539			MPASS(bp->b_qindex == QUEUE_NONE);
3540		else
3541			bremfree(bp);
3542
3543		/*
3544		 * check for size inconsistencies for non-VMIO case.
3545		 */
3546		if (bp->b_bcount != size) {
3547			if ((bp->b_flags & B_VMIO) == 0 ||
3548			    (size > bp->b_kvasize)) {
3549				if (bp->b_flags & B_DELWRI) {
3550					/*
3551					 * If buffer is pinned and caller does
3552					 * not want sleep  waiting for it to be
3553					 * unpinned, bail out
3554					 * */
3555					if (bp->b_pin_count > 0) {
3556						if (flags & GB_LOCK_NOWAIT) {
3557							bqrelse(bp);
3558							return (NULL);
3559						} else {
3560							bunpin_wait(bp);
3561						}
3562					}
3563					bp->b_flags |= B_NOCACHE;
3564					bwrite(bp);
3565				} else {
3566					if (LIST_EMPTY(&bp->b_dep)) {
3567						bp->b_flags |= B_RELBUF;
3568						brelse(bp);
3569					} else {
3570						bp->b_flags |= B_NOCACHE;
3571						bwrite(bp);
3572					}
3573				}
3574				goto loop;
3575			}
3576		}
3577
3578		/*
3579		 * Handle the case of unmapped buffer which should
3580		 * become mapped, or the buffer for which KVA
3581		 * reservation is requested.
3582		 */
3583		bp_unmapped_get_kva(bp, blkno, size, flags);
3584
3585		/*
3586		 * If the size is inconsistent in the VMIO case, we can resize
3587		 * the buffer.  This might lead to B_CACHE getting set or
3588		 * cleared.  If the size has not changed, B_CACHE remains
3589		 * unchanged from its previous state.
3590		 */
3591		allocbuf(bp, size);
3592
3593		KASSERT(bp->b_offset != NOOFFSET,
3594		    ("getblk: no buffer offset"));
3595
3596		/*
3597		 * A buffer with B_DELWRI set and B_CACHE clear must
3598		 * be committed before we can return the buffer in
3599		 * order to prevent the caller from issuing a read
3600		 * ( due to B_CACHE not being set ) and overwriting
3601		 * it.
3602		 *
3603		 * Most callers, including NFS and FFS, need this to
3604		 * operate properly either because they assume they
3605		 * can issue a read if B_CACHE is not set, or because
3606		 * ( for example ) an uncached B_DELWRI might loop due
3607		 * to softupdates re-dirtying the buffer.  In the latter
3608		 * case, B_CACHE is set after the first write completes,
3609		 * preventing further loops.
3610		 * NOTE!  b*write() sets B_CACHE.  If we cleared B_CACHE
3611		 * above while extending the buffer, we cannot allow the
3612		 * buffer to remain with B_CACHE set after the write
3613		 * completes or it will represent a corrupt state.  To
3614		 * deal with this we set B_NOCACHE to scrap the buffer
3615		 * after the write.
3616		 *
3617		 * We might be able to do something fancy, like setting
3618		 * B_CACHE in bwrite() except if B_DELWRI is already set,
3619		 * so the below call doesn't set B_CACHE, but that gets real
3620		 * confusing.  This is much easier.
3621		 */
3622
3623		if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
3624			bp->b_flags |= B_NOCACHE;
3625			bwrite(bp);
3626			goto loop;
3627		}
3628		bp->b_flags &= ~B_DONE;
3629	} else {
3630		/*
3631		 * Buffer is not in-core, create new buffer.  The buffer
3632		 * returned by getnewbuf() is locked.  Note that the returned
3633		 * buffer is also considered valid (not marked B_INVAL).
3634		 */
3635		BO_RUNLOCK(bo);
3636		/*
3637		 * If the user does not want us to create the buffer, bail out
3638		 * here.
3639		 */
3640		if (flags & GB_NOCREAT)
3641			return NULL;
3642		if (numfreebuffers == 0 && TD_IS_IDLETHREAD(curthread))
3643			return NULL;
3644
3645		bsize = vn_isdisk(vp, NULL) ? DEV_BSIZE : bo->bo_bsize;
3646		KASSERT(bsize != 0, ("bsize == 0, check bo->bo_bsize"));
3647		offset = blkno * bsize;
3648		vmio = vp->v_object != NULL;
3649		if (vmio) {
3650			maxsize = size + (offset & PAGE_MASK);
3651		} else {
3652			maxsize = size;
3653			/* Do not allow non-VMIO notmapped buffers. */
3654			flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
3655		}
3656		maxsize = imax(maxsize, bsize);
3657
3658		bp = getnewbuf(vp, slpflag, slptimeo, maxsize, flags);
3659		if (bp == NULL) {
3660			if (slpflag || slptimeo)
3661				return NULL;
3662			/*
3663			 * XXX This is here until the sleep path is diagnosed
3664			 * enough to work under very low memory conditions.
3665			 *
3666			 * There's an issue on low memory, 4BSD+non-preempt
3667			 * systems (eg MIPS routers with 32MB RAM) where buffer
3668			 * exhaustion occurs without sleeping for buffer
3669			 * reclaimation.  This just sticks in a loop and
3670			 * constantly attempts to allocate a buffer, which
3671			 * hits exhaustion and tries to wakeup bufdaemon.
3672			 * This never happens because we never yield.
3673			 *
3674			 * The real solution is to identify and fix these cases
3675			 * so we aren't effectively busy-waiting in a loop
3676			 * until the reclaimation path has cycles to run.
3677			 */
3678			kern_yield(PRI_USER);
3679			goto loop;
3680		}
3681
3682		/*
3683		 * This code is used to make sure that a buffer is not
3684		 * created while the getnewbuf routine is blocked.
3685		 * This can be a problem whether the vnode is locked or not.
3686		 * If the buffer is created out from under us, we have to
3687		 * throw away the one we just created.
3688		 *
3689		 * Note: this must occur before we associate the buffer
3690		 * with the vp especially considering limitations in
3691		 * the splay tree implementation when dealing with duplicate
3692		 * lblkno's.
3693		 */
3694		BO_LOCK(bo);
3695		if (gbincore(bo, blkno)) {
3696			BO_UNLOCK(bo);
3697			bp->b_flags |= B_INVAL;
3698			brelse(bp);
3699			bufspace_release(maxsize);
3700			goto loop;
3701		}
3702
3703		/*
3704		 * Insert the buffer into the hash, so that it can
3705		 * be found by incore.
3706		 */
3707		bp->b_blkno = bp->b_lblkno = blkno;
3708		bp->b_offset = offset;
3709		bgetvp(vp, bp);
3710		BO_UNLOCK(bo);
3711
3712		/*
3713		 * set B_VMIO bit.  allocbuf() the buffer bigger.  Since the
3714		 * buffer size starts out as 0, B_CACHE will be set by
3715		 * allocbuf() for the VMIO case prior to it testing the
3716		 * backing store for validity.
3717		 */
3718
3719		if (vmio) {
3720			bp->b_flags |= B_VMIO;
3721			KASSERT(vp->v_object == bp->b_bufobj->bo_object,
3722			    ("ARGH! different b_bufobj->bo_object %p %p %p\n",
3723			    bp, vp->v_object, bp->b_bufobj->bo_object));
3724		} else {
3725			bp->b_flags &= ~B_VMIO;
3726			KASSERT(bp->b_bufobj->bo_object == NULL,
3727			    ("ARGH! has b_bufobj->bo_object %p %p\n",
3728			    bp, bp->b_bufobj->bo_object));
3729			BUF_CHECK_MAPPED(bp);
3730		}
3731
3732		allocbuf(bp, size);
3733		bufspace_release(maxsize);
3734		bp->b_flags &= ~B_DONE;
3735	}
3736	CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
3737	BUF_ASSERT_HELD(bp);
3738end:
3739	KASSERT(bp->b_bufobj == bo,
3740	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
3741	return (bp);
3742}
3743
3744/*
3745 * Get an empty, disassociated buffer of given size.  The buffer is initially
3746 * set to B_INVAL.
3747 */
3748struct buf *
3749geteblk(int size, int flags)
3750{
3751	struct buf *bp;
3752	int maxsize;
3753
3754	maxsize = (size + BKVAMASK) & ~BKVAMASK;
3755	while ((bp = getnewbuf(NULL, 0, 0, maxsize, flags)) == NULL) {
3756		if ((flags & GB_NOWAIT_BD) &&
3757		    (curthread->td_pflags & TDP_BUFNEED) != 0)
3758			return (NULL);
3759	}
3760	allocbuf(bp, size);
3761	bufspace_release(maxsize);
3762	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
3763	BUF_ASSERT_HELD(bp);
3764	return (bp);
3765}
3766
3767/*
3768 * Truncate the backing store for a non-vmio buffer.
3769 */
3770static void
3771vfs_nonvmio_truncate(struct buf *bp, int newbsize)
3772{
3773
3774	if (bp->b_flags & B_MALLOC) {
3775		/*
3776		 * malloced buffers are not shrunk
3777		 */
3778		if (newbsize == 0) {
3779			bufmallocadjust(bp, 0);
3780			free(bp->b_data, M_BIOBUF);
3781			bp->b_data = bp->b_kvabase;
3782			bp->b_flags &= ~B_MALLOC;
3783		}
3784		return;
3785	}
3786	vm_hold_free_pages(bp, newbsize);
3787	bufspace_adjust(bp, newbsize);
3788}
3789
3790/*
3791 * Extend the backing for a non-VMIO buffer.
3792 */
3793static void
3794vfs_nonvmio_extend(struct buf *bp, int newbsize)
3795{
3796	caddr_t origbuf;
3797	int origbufsize;
3798
3799	/*
3800	 * We only use malloced memory on the first allocation.
3801	 * and revert to page-allocated memory when the buffer
3802	 * grows.
3803	 *
3804	 * There is a potential smp race here that could lead
3805	 * to bufmallocspace slightly passing the max.  It
3806	 * is probably extremely rare and not worth worrying
3807	 * over.
3808	 */
3809	if (bp->b_bufsize == 0 && newbsize <= PAGE_SIZE/2 &&
3810	    bufmallocspace < maxbufmallocspace) {
3811		bp->b_data = malloc(newbsize, M_BIOBUF, M_WAITOK);
3812		bp->b_flags |= B_MALLOC;
3813		bufmallocadjust(bp, newbsize);
3814		return;
3815	}
3816
3817	/*
3818	 * If the buffer is growing on its other-than-first
3819	 * allocation then we revert to the page-allocation
3820	 * scheme.
3821	 */
3822	origbuf = NULL;
3823	origbufsize = 0;
3824	if (bp->b_flags & B_MALLOC) {
3825		origbuf = bp->b_data;
3826		origbufsize = bp->b_bufsize;
3827		bp->b_data = bp->b_kvabase;
3828		bufmallocadjust(bp, 0);
3829		bp->b_flags &= ~B_MALLOC;
3830		newbsize = round_page(newbsize);
3831	}
3832	vm_hold_load_pages(bp, (vm_offset_t) bp->b_data + bp->b_bufsize,
3833	    (vm_offset_t) bp->b_data + newbsize);
3834	if (origbuf != NULL) {
3835		bcopy(origbuf, bp->b_data, origbufsize);
3836		free(origbuf, M_BIOBUF);
3837	}
3838	bufspace_adjust(bp, newbsize);
3839}
3840
3841/*
3842 * This code constitutes the buffer memory from either anonymous system
3843 * memory (in the case of non-VMIO operations) or from an associated
3844 * VM object (in the case of VMIO operations).  This code is able to
3845 * resize a buffer up or down.
3846 *
3847 * Note that this code is tricky, and has many complications to resolve
3848 * deadlock or inconsistent data situations.  Tread lightly!!!
3849 * There are B_CACHE and B_DELWRI interactions that must be dealt with by
3850 * the caller.  Calling this code willy nilly can result in the loss of data.
3851 *
3852 * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
3853 * B_CACHE for the non-VMIO case.
3854 */
3855int
3856allocbuf(struct buf *bp, int size)
3857{
3858	int newbsize;
3859
3860	BUF_ASSERT_HELD(bp);
3861
3862	if (bp->b_bcount == size)
3863		return (1);
3864
3865	if (bp->b_kvasize != 0 && bp->b_kvasize < size)
3866		panic("allocbuf: buffer too small");
3867
3868	newbsize = roundup2(size, DEV_BSIZE);
3869	if ((bp->b_flags & B_VMIO) == 0) {
3870		if ((bp->b_flags & B_MALLOC) == 0)
3871			newbsize = round_page(newbsize);
3872		/*
3873		 * Just get anonymous memory from the kernel.  Don't
3874		 * mess with B_CACHE.
3875		 */
3876		if (newbsize < bp->b_bufsize)
3877			vfs_nonvmio_truncate(bp, newbsize);
3878		else if (newbsize > bp->b_bufsize)
3879			vfs_nonvmio_extend(bp, newbsize);
3880	} else {
3881		int desiredpages;
3882
3883		desiredpages = (size == 0) ? 0 :
3884		    num_pages((bp->b_offset & PAGE_MASK) + newbsize);
3885
3886		if (bp->b_flags & B_MALLOC)
3887			panic("allocbuf: VMIO buffer can't be malloced");
3888		/*
3889		 * Set B_CACHE initially if buffer is 0 length or will become
3890		 * 0-length.
3891		 */
3892		if (size == 0 || bp->b_bufsize == 0)
3893			bp->b_flags |= B_CACHE;
3894
3895		if (newbsize < bp->b_bufsize)
3896			vfs_vmio_truncate(bp, desiredpages);
3897		/* XXX This looks as if it should be newbsize > b_bufsize */
3898		else if (size > bp->b_bcount)
3899			vfs_vmio_extend(bp, desiredpages, size);
3900		bufspace_adjust(bp, newbsize);
3901	}
3902	bp->b_bcount = size;		/* requested buffer size. */
3903	return (1);
3904}
3905
3906extern int inflight_transient_maps;
3907
3908void
3909biodone(struct bio *bp)
3910{
3911	struct mtx *mtxp;
3912	void (*done)(struct bio *);
3913	vm_offset_t start, end;
3914
3915	if ((bp->bio_flags & BIO_TRANSIENT_MAPPING) != 0) {
3916		bp->bio_flags &= ~BIO_TRANSIENT_MAPPING;
3917		bp->bio_flags |= BIO_UNMAPPED;
3918		start = trunc_page((vm_offset_t)bp->bio_data);
3919		end = round_page((vm_offset_t)bp->bio_data + bp->bio_length);
3920		bp->bio_data = unmapped_buf;
3921		pmap_qremove(start, atop(end - start));
3922		vmem_free(transient_arena, start, end - start);
3923		atomic_add_int(&inflight_transient_maps, -1);
3924	}
3925	done = bp->bio_done;
3926	if (done == NULL) {
3927		mtxp = mtx_pool_find(mtxpool_sleep, bp);
3928		mtx_lock(mtxp);
3929		bp->bio_flags |= BIO_DONE;
3930		wakeup(bp);
3931		mtx_unlock(mtxp);
3932	} else
3933		done(bp);
3934}
3935
3936/*
3937 * Wait for a BIO to finish.
3938 */
3939int
3940biowait(struct bio *bp, const char *wchan)
3941{
3942	struct mtx *mtxp;
3943
3944	mtxp = mtx_pool_find(mtxpool_sleep, bp);
3945	mtx_lock(mtxp);
3946	while ((bp->bio_flags & BIO_DONE) == 0)
3947		msleep(bp, mtxp, PRIBIO, wchan, 0);
3948	mtx_unlock(mtxp);
3949	if (bp->bio_error != 0)
3950		return (bp->bio_error);
3951	if (!(bp->bio_flags & BIO_ERROR))
3952		return (0);
3953	return (EIO);
3954}
3955
3956void
3957biofinish(struct bio *bp, struct devstat *stat, int error)
3958{
3959
3960	if (error) {
3961		bp->bio_error = error;
3962		bp->bio_flags |= BIO_ERROR;
3963	}
3964	if (stat != NULL)
3965		devstat_end_transaction_bio(stat, bp);
3966	biodone(bp);
3967}
3968
3969/*
3970 *	bufwait:
3971 *
3972 *	Wait for buffer I/O completion, returning error status.  The buffer
3973 *	is left locked and B_DONE on return.  B_EINTR is converted into an EINTR
3974 *	error and cleared.
3975 */
3976int
3977bufwait(struct buf *bp)
3978{
3979	if (bp->b_iocmd == BIO_READ)
3980		bwait(bp, PRIBIO, "biord");
3981	else
3982		bwait(bp, PRIBIO, "biowr");
3983	if (bp->b_flags & B_EINTR) {
3984		bp->b_flags &= ~B_EINTR;
3985		return (EINTR);
3986	}
3987	if (bp->b_ioflags & BIO_ERROR) {
3988		return (bp->b_error ? bp->b_error : EIO);
3989	} else {
3990		return (0);
3991	}
3992}
3993
3994/*
3995 *	bufdone:
3996 *
3997 *	Finish I/O on a buffer, optionally calling a completion function.
3998 *	This is usually called from an interrupt so process blocking is
3999 *	not allowed.
4000 *
4001 *	biodone is also responsible for setting B_CACHE in a B_VMIO bp.
4002 *	In a non-VMIO bp, B_CACHE will be set on the next getblk()
4003 *	assuming B_INVAL is clear.
4004 *
4005 *	For the VMIO case, we set B_CACHE if the op was a read and no
4006 *	read error occurred, or if the op was a write.  B_CACHE is never
4007 *	set if the buffer is invalid or otherwise uncacheable.
4008 *
4009 *	biodone does not mess with B_INVAL, allowing the I/O routine or the
4010 *	initiator to leave B_INVAL set to brelse the buffer out of existence
4011 *	in the biodone routine.
4012 */
4013void
4014bufdone(struct buf *bp)
4015{
4016	struct bufobj *dropobj;
4017	void    (*biodone)(struct buf *);
4018
4019	CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
4020	dropobj = NULL;
4021
4022	KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
4023	BUF_ASSERT_HELD(bp);
4024
4025	runningbufwakeup(bp);
4026	if (bp->b_iocmd == BIO_WRITE)
4027		dropobj = bp->b_bufobj;
4028	/* call optional completion function if requested */
4029	if (bp->b_iodone != NULL) {
4030		biodone = bp->b_iodone;
4031		bp->b_iodone = NULL;
4032		(*biodone) (bp);
4033		if (dropobj)
4034			bufobj_wdrop(dropobj);
4035		return;
4036	}
4037
4038	bufdone_finish(bp);
4039
4040	if (dropobj)
4041		bufobj_wdrop(dropobj);
4042}
4043
4044void
4045bufdone_finish(struct buf *bp)
4046{
4047	BUF_ASSERT_HELD(bp);
4048
4049	if (!LIST_EMPTY(&bp->b_dep))
4050		buf_complete(bp);
4051
4052	if (bp->b_flags & B_VMIO) {
4053		/*
4054		 * Set B_CACHE if the op was a normal read and no error
4055		 * occurred.  B_CACHE is set for writes in the b*write()
4056		 * routines.
4057		 */
4058		if (bp->b_iocmd == BIO_READ &&
4059		    !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
4060		    !(bp->b_ioflags & BIO_ERROR))
4061			bp->b_flags |= B_CACHE;
4062		vfs_vmio_iodone(bp);
4063	}
4064
4065	/*
4066	 * For asynchronous completions, release the buffer now. The brelse
4067	 * will do a wakeup there if necessary - so no need to do a wakeup
4068	 * here in the async case. The sync case always needs to do a wakeup.
4069	 */
4070	if (bp->b_flags & B_ASYNC) {
4071		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) ||
4072		    (bp->b_ioflags & BIO_ERROR))
4073			brelse(bp);
4074		else
4075			bqrelse(bp);
4076	} else
4077		bdone(bp);
4078}
4079
4080/*
4081 * This routine is called in lieu of iodone in the case of
4082 * incomplete I/O.  This keeps the busy status for pages
4083 * consistent.
4084 */
4085void
4086vfs_unbusy_pages(struct buf *bp)
4087{
4088	int i;
4089	vm_object_t obj;
4090	vm_page_t m;
4091
4092	runningbufwakeup(bp);
4093	if (!(bp->b_flags & B_VMIO))
4094		return;
4095
4096	obj = bp->b_bufobj->bo_object;
4097	VM_OBJECT_WLOCK(obj);
4098	for (i = 0; i < bp->b_npages; i++) {
4099		m = bp->b_pages[i];
4100		if (m == bogus_page) {
4101			m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
4102			if (!m)
4103				panic("vfs_unbusy_pages: page missing\n");
4104			bp->b_pages[i] = m;
4105			if (buf_mapped(bp)) {
4106				BUF_CHECK_MAPPED(bp);
4107				pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
4108				    bp->b_pages, bp->b_npages);
4109			} else
4110				BUF_CHECK_UNMAPPED(bp);
4111		}
4112		vm_page_sunbusy(m);
4113	}
4114	vm_object_pip_wakeupn(obj, bp->b_npages);
4115	VM_OBJECT_WUNLOCK(obj);
4116}
4117
4118/*
4119 * vfs_page_set_valid:
4120 *
4121 *	Set the valid bits in a page based on the supplied offset.   The
4122 *	range is restricted to the buffer's size.
4123 *
4124 *	This routine is typically called after a read completes.
4125 */
4126static void
4127vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m)
4128{
4129	vm_ooffset_t eoff;
4130
4131	/*
4132	 * Compute the end offset, eoff, such that [off, eoff) does not span a
4133	 * page boundary and eoff is not greater than the end of the buffer.
4134	 * The end of the buffer, in this case, is our file EOF, not the
4135	 * allocation size of the buffer.
4136	 */
4137	eoff = (off + PAGE_SIZE) & ~(vm_ooffset_t)PAGE_MASK;
4138	if (eoff > bp->b_offset + bp->b_bcount)
4139		eoff = bp->b_offset + bp->b_bcount;
4140
4141	/*
4142	 * Set valid range.  This is typically the entire buffer and thus the
4143	 * entire page.
4144	 */
4145	if (eoff > off)
4146		vm_page_set_valid_range(m, off & PAGE_MASK, eoff - off);
4147}
4148
4149/*
4150 * vfs_page_set_validclean:
4151 *
4152 *	Set the valid bits and clear the dirty bits in a page based on the
4153 *	supplied offset.   The range is restricted to the buffer's size.
4154 */
4155static void
4156vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, vm_page_t m)
4157{
4158	vm_ooffset_t soff, eoff;
4159
4160	/*
4161	 * Start and end offsets in buffer.  eoff - soff may not cross a
4162	 * page boundary or cross the end of the buffer.  The end of the
4163	 * buffer, in this case, is our file EOF, not the allocation size
4164	 * of the buffer.
4165	 */
4166	soff = off;
4167	eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
4168	if (eoff > bp->b_offset + bp->b_bcount)
4169		eoff = bp->b_offset + bp->b_bcount;
4170
4171	/*
4172	 * Set valid range.  This is typically the entire buffer and thus the
4173	 * entire page.
4174	 */
4175	if (eoff > soff) {
4176		vm_page_set_validclean(
4177		    m,
4178		   (vm_offset_t) (soff & PAGE_MASK),
4179		   (vm_offset_t) (eoff - soff)
4180		);
4181	}
4182}
4183
4184/*
4185 * Ensure that all buffer pages are not exclusive busied.  If any page is
4186 * exclusive busy, drain it.
4187 */
4188void
4189vfs_drain_busy_pages(struct buf *bp)
4190{
4191	vm_page_t m;
4192	int i, last_busied;
4193
4194	VM_OBJECT_ASSERT_WLOCKED(bp->b_bufobj->bo_object);
4195	last_busied = 0;
4196	for (i = 0; i < bp->b_npages; i++) {
4197		m = bp->b_pages[i];
4198		if (vm_page_xbusied(m)) {
4199			for (; last_busied < i; last_busied++)
4200				vm_page_sbusy(bp->b_pages[last_busied]);
4201			while (vm_page_xbusied(m)) {
4202				vm_page_lock(m);
4203				VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
4204				vm_page_busy_sleep(m, "vbpage", true);
4205				VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
4206			}
4207		}
4208	}
4209	for (i = 0; i < last_busied; i++)
4210		vm_page_sunbusy(bp->b_pages[i]);
4211}
4212
4213/*
4214 * This routine is called before a device strategy routine.
4215 * It is used to tell the VM system that paging I/O is in
4216 * progress, and treat the pages associated with the buffer
4217 * almost as being exclusive busy.  Also the object paging_in_progress
4218 * flag is handled to make sure that the object doesn't become
4219 * inconsistent.
4220 *
4221 * Since I/O has not been initiated yet, certain buffer flags
4222 * such as BIO_ERROR or B_INVAL may be in an inconsistent state
4223 * and should be ignored.
4224 */
4225void
4226vfs_busy_pages(struct buf *bp, int clear_modify)
4227{
4228	int i, bogus;
4229	vm_object_t obj;
4230	vm_ooffset_t foff;
4231	vm_page_t m;
4232
4233	if (!(bp->b_flags & B_VMIO))
4234		return;
4235
4236	obj = bp->b_bufobj->bo_object;
4237	foff = bp->b_offset;
4238	KASSERT(bp->b_offset != NOOFFSET,
4239	    ("vfs_busy_pages: no buffer offset"));
4240	VM_OBJECT_WLOCK(obj);
4241	vfs_drain_busy_pages(bp);
4242	if (bp->b_bufsize != 0)
4243		vfs_setdirty_locked_object(bp);
4244	bogus = 0;
4245	for (i = 0; i < bp->b_npages; i++) {
4246		m = bp->b_pages[i];
4247
4248		if ((bp->b_flags & B_CLUSTER) == 0) {
4249			vm_object_pip_add(obj, 1);
4250			vm_page_sbusy(m);
4251		}
4252		/*
4253		 * When readying a buffer for a read ( i.e
4254		 * clear_modify == 0 ), it is important to do
4255		 * bogus_page replacement for valid pages in
4256		 * partially instantiated buffers.  Partially
4257		 * instantiated buffers can, in turn, occur when
4258		 * reconstituting a buffer from its VM backing store
4259		 * base.  We only have to do this if B_CACHE is
4260		 * clear ( which causes the I/O to occur in the
4261		 * first place ).  The replacement prevents the read
4262		 * I/O from overwriting potentially dirty VM-backed
4263		 * pages.  XXX bogus page replacement is, uh, bogus.
4264		 * It may not work properly with small-block devices.
4265		 * We need to find a better way.
4266		 */
4267		if (clear_modify) {
4268			pmap_remove_write(m);
4269			vfs_page_set_validclean(bp, foff, m);
4270		} else if (m->valid == VM_PAGE_BITS_ALL &&
4271		    (bp->b_flags & B_CACHE) == 0) {
4272			bp->b_pages[i] = bogus_page;
4273			bogus++;
4274		}
4275		foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
4276	}
4277	VM_OBJECT_WUNLOCK(obj);
4278	if (bogus && buf_mapped(bp)) {
4279		BUF_CHECK_MAPPED(bp);
4280		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
4281		    bp->b_pages, bp->b_npages);
4282	}
4283}
4284
4285/*
4286 *	vfs_bio_set_valid:
4287 *
4288 *	Set the range within the buffer to valid.  The range is
4289 *	relative to the beginning of the buffer, b_offset.  Note that
4290 *	b_offset itself may be offset from the beginning of the first
4291 *	page.
4292 */
4293void
4294vfs_bio_set_valid(struct buf *bp, int base, int size)
4295{
4296	int i, n;
4297	vm_page_t m;
4298
4299	if (!(bp->b_flags & B_VMIO))
4300		return;
4301
4302	/*
4303	 * Fixup base to be relative to beginning of first page.
4304	 * Set initial n to be the maximum number of bytes in the
4305	 * first page that can be validated.
4306	 */
4307	base += (bp->b_offset & PAGE_MASK);
4308	n = PAGE_SIZE - (base & PAGE_MASK);
4309
4310	VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
4311	for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
4312		m = bp->b_pages[i];
4313		if (n > size)
4314			n = size;
4315		vm_page_set_valid_range(m, base & PAGE_MASK, n);
4316		base += n;
4317		size -= n;
4318		n = PAGE_SIZE;
4319	}
4320	VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
4321}
4322
4323/*
4324 *	vfs_bio_clrbuf:
4325 *
4326 *	If the specified buffer is a non-VMIO buffer, clear the entire
4327 *	buffer.  If the specified buffer is a VMIO buffer, clear and
4328 *	validate only the previously invalid portions of the buffer.
4329 *	This routine essentially fakes an I/O, so we need to clear
4330 *	BIO_ERROR and B_INVAL.
4331 *
4332 *	Note that while we only theoretically need to clear through b_bcount,
4333 *	we go ahead and clear through b_bufsize.
4334 */
4335void
4336vfs_bio_clrbuf(struct buf *bp)
4337{
4338	int i, j, mask, sa, ea, slide;
4339
4340	if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) {
4341		clrbuf(bp);
4342		return;
4343	}
4344	bp->b_flags &= ~B_INVAL;
4345	bp->b_ioflags &= ~BIO_ERROR;
4346	VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
4347	if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
4348	    (bp->b_offset & PAGE_MASK) == 0) {
4349		if (bp->b_pages[0] == bogus_page)
4350			goto unlock;
4351		mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
4352		VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[0]->object);
4353		if ((bp->b_pages[0]->valid & mask) == mask)
4354			goto unlock;
4355		if ((bp->b_pages[0]->valid & mask) == 0) {
4356			pmap_zero_page_area(bp->b_pages[0], 0, bp->b_bufsize);
4357			bp->b_pages[0]->valid |= mask;
4358			goto unlock;
4359		}
4360	}
4361	sa = bp->b_offset & PAGE_MASK;
4362	slide = 0;
4363	for (i = 0; i < bp->b_npages; i++, sa = 0) {
4364		slide = imin(slide + PAGE_SIZE, bp->b_offset + bp->b_bufsize);
4365		ea = slide & PAGE_MASK;
4366		if (ea == 0)
4367			ea = PAGE_SIZE;
4368		if (bp->b_pages[i] == bogus_page)
4369			continue;
4370		j = sa / DEV_BSIZE;
4371		mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
4372		VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[i]->object);
4373		if ((bp->b_pages[i]->valid & mask) == mask)
4374			continue;
4375		if ((bp->b_pages[i]->valid & mask) == 0)
4376			pmap_zero_page_area(bp->b_pages[i], sa, ea - sa);
4377		else {
4378			for (; sa < ea; sa += DEV_BSIZE, j++) {
4379				if ((bp->b_pages[i]->valid & (1 << j)) == 0) {
4380					pmap_zero_page_area(bp->b_pages[i],
4381					    sa, DEV_BSIZE);
4382				}
4383			}
4384		}
4385		bp->b_pages[i]->valid |= mask;
4386	}
4387unlock:
4388	VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
4389	bp->b_resid = 0;
4390}
4391
4392void
4393vfs_bio_bzero_buf(struct buf *bp, int base, int size)
4394{
4395	vm_page_t m;
4396	int i, n;
4397
4398	if (buf_mapped(bp)) {
4399		BUF_CHECK_MAPPED(bp);
4400		bzero(bp->b_data + base, size);
4401	} else {
4402		BUF_CHECK_UNMAPPED(bp);
4403		n = PAGE_SIZE - (base & PAGE_MASK);
4404		for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
4405			m = bp->b_pages[i];
4406			if (n > size)
4407				n = size;
4408			pmap_zero_page_area(m, base & PAGE_MASK, n);
4409			base += n;
4410			size -= n;
4411			n = PAGE_SIZE;
4412		}
4413	}
4414}
4415
4416/*
4417 * vm_hold_load_pages and vm_hold_free_pages get pages into
4418 * a buffers address space.  The pages are anonymous and are
4419 * not associated with a file object.
4420 */
4421static void
4422vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
4423{
4424	vm_offset_t pg;
4425	vm_page_t p;
4426	int index;
4427
4428	BUF_CHECK_MAPPED(bp);
4429
4430	to = round_page(to);
4431	from = round_page(from);
4432	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
4433
4434	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
4435tryagain:
4436		/*
4437		 * note: must allocate system pages since blocking here
4438		 * could interfere with paging I/O, no matter which
4439		 * process we are.
4440		 */
4441		p = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ |
4442		    VM_ALLOC_WIRED | VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT));
4443		if (p == NULL) {
4444			VM_WAIT;
4445			goto tryagain;
4446		}
4447		pmap_qenter(pg, &p, 1);
4448		bp->b_pages[index] = p;
4449	}
4450	bp->b_npages = index;
4451}
4452
4453/* Return pages associated with this buf to the vm system */
4454static void
4455vm_hold_free_pages(struct buf *bp, int newbsize)
4456{
4457	vm_offset_t from;
4458	vm_page_t p;
4459	int index, newnpages;
4460
4461	BUF_CHECK_MAPPED(bp);
4462
4463	from = round_page((vm_offset_t)bp->b_data + newbsize);
4464	newnpages = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
4465	if (bp->b_npages > newnpages)
4466		pmap_qremove(from, bp->b_npages - newnpages);
4467	for (index = newnpages; index < bp->b_npages; index++) {
4468		p = bp->b_pages[index];
4469		bp->b_pages[index] = NULL;
4470		if (vm_page_sbusied(p))
4471			printf("vm_hold_free_pages: blkno: %jd, lblkno: %jd\n",
4472			    (intmax_t)bp->b_blkno, (intmax_t)bp->b_lblkno);
4473		p->wire_count--;
4474		vm_page_free(p);
4475		atomic_subtract_int(&vm_cnt.v_wire_count, 1);
4476	}
4477	bp->b_npages = newnpages;
4478}
4479
4480/*
4481 * Map an IO request into kernel virtual address space.
4482 *
4483 * All requests are (re)mapped into kernel VA space.
4484 * Notice that we use b_bufsize for the size of the buffer
4485 * to be mapped.  b_bcount might be modified by the driver.
4486 *
4487 * Note that even if the caller determines that the address space should
4488 * be valid, a race or a smaller-file mapped into a larger space may
4489 * actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST
4490 * check the return value.
4491 *
4492 * This function only works with pager buffers.
4493 */
4494int
4495vmapbuf(struct buf *bp, int mapbuf)
4496{
4497	vm_prot_t prot;
4498	int pidx;
4499
4500	if (bp->b_bufsize < 0)
4501		return (-1);
4502	prot = VM_PROT_READ;
4503	if (bp->b_iocmd == BIO_READ)
4504		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
4505	if ((pidx = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
4506	    (vm_offset_t)bp->b_data, bp->b_bufsize, prot, bp->b_pages,
4507	    btoc(MAXPHYS))) < 0)
4508		return (-1);
4509	bp->b_npages = pidx;
4510	bp->b_offset = ((vm_offset_t)bp->b_data) & PAGE_MASK;
4511	if (mapbuf || !unmapped_buf_allowed) {
4512		pmap_qenter((vm_offset_t)bp->b_kvabase, bp->b_pages, pidx);
4513		bp->b_data = bp->b_kvabase + bp->b_offset;
4514	} else
4515		bp->b_data = unmapped_buf;
4516	return(0);
4517}
4518
4519/*
4520 * Free the io map PTEs associated with this IO operation.
4521 * We also invalidate the TLB entries and restore the original b_addr.
4522 *
4523 * This function only works with pager buffers.
4524 */
4525void
4526vunmapbuf(struct buf *bp)
4527{
4528	int npages;
4529
4530	npages = bp->b_npages;
4531	if (buf_mapped(bp))
4532		pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
4533	vm_page_unhold_pages(bp->b_pages, npages);
4534
4535	bp->b_data = unmapped_buf;
4536}
4537
4538void
4539bdone(struct buf *bp)
4540{
4541	struct mtx *mtxp;
4542
4543	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4544	mtx_lock(mtxp);
4545	bp->b_flags |= B_DONE;
4546	wakeup(bp);
4547	mtx_unlock(mtxp);
4548}
4549
4550void
4551bwait(struct buf *bp, u_char pri, const char *wchan)
4552{
4553	struct mtx *mtxp;
4554
4555	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4556	mtx_lock(mtxp);
4557	while ((bp->b_flags & B_DONE) == 0)
4558		msleep(bp, mtxp, pri, wchan, 0);
4559	mtx_unlock(mtxp);
4560}
4561
4562int
4563bufsync(struct bufobj *bo, int waitfor)
4564{
4565
4566	return (VOP_FSYNC(bo->__bo_vnode, waitfor, curthread));
4567}
4568
4569void
4570bufstrategy(struct bufobj *bo, struct buf *bp)
4571{
4572	int i = 0;
4573	struct vnode *vp;
4574
4575	vp = bp->b_vp;
4576	KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy"));
4577	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
4578	    ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp));
4579	i = VOP_STRATEGY(vp, bp);
4580	KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp));
4581}
4582
4583void
4584bufobj_wrefl(struct bufobj *bo)
4585{
4586
4587	KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
4588	ASSERT_BO_WLOCKED(bo);
4589	bo->bo_numoutput++;
4590}
4591
4592void
4593bufobj_wref(struct bufobj *bo)
4594{
4595
4596	KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
4597	BO_LOCK(bo);
4598	bo->bo_numoutput++;
4599	BO_UNLOCK(bo);
4600}
4601
4602void
4603bufobj_wdrop(struct bufobj *bo)
4604{
4605
4606	KASSERT(bo != NULL, ("NULL bo in bufobj_wdrop"));
4607	BO_LOCK(bo);
4608	KASSERT(bo->bo_numoutput > 0, ("bufobj_wdrop non-positive count"));
4609	if ((--bo->bo_numoutput == 0) && (bo->bo_flag & BO_WWAIT)) {
4610		bo->bo_flag &= ~BO_WWAIT;
4611		wakeup(&bo->bo_numoutput);
4612	}
4613	BO_UNLOCK(bo);
4614}
4615
4616int
4617bufobj_wwait(struct bufobj *bo, int slpflag, int timeo)
4618{
4619	int error;
4620
4621	KASSERT(bo != NULL, ("NULL bo in bufobj_wwait"));
4622	ASSERT_BO_WLOCKED(bo);
4623	error = 0;
4624	while (bo->bo_numoutput) {
4625		bo->bo_flag |= BO_WWAIT;
4626		error = msleep(&bo->bo_numoutput, BO_LOCKPTR(bo),
4627		    slpflag | (PRIBIO + 1), "bo_wwait", timeo);
4628		if (error)
4629			break;
4630	}
4631	return (error);
4632}
4633
4634void
4635bpin(struct buf *bp)
4636{
4637	struct mtx *mtxp;
4638
4639	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4640	mtx_lock(mtxp);
4641	bp->b_pin_count++;
4642	mtx_unlock(mtxp);
4643}
4644
4645void
4646bunpin(struct buf *bp)
4647{
4648	struct mtx *mtxp;
4649
4650	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4651	mtx_lock(mtxp);
4652	if (--bp->b_pin_count == 0)
4653		wakeup(bp);
4654	mtx_unlock(mtxp);
4655}
4656
4657void
4658bunpin_wait(struct buf *bp)
4659{
4660	struct mtx *mtxp;
4661
4662	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4663	mtx_lock(mtxp);
4664	while (bp->b_pin_count > 0)
4665		msleep(bp, mtxp, PRIBIO, "bwunpin", 0);
4666	mtx_unlock(mtxp);
4667}
4668
4669/*
4670 * Set bio_data or bio_ma for struct bio from the struct buf.
4671 */
4672void
4673bdata2bio(struct buf *bp, struct bio *bip)
4674{
4675
4676	if (!buf_mapped(bp)) {
4677		KASSERT(unmapped_buf_allowed, ("unmapped"));
4678		bip->bio_ma = bp->b_pages;
4679		bip->bio_ma_n = bp->b_npages;
4680		bip->bio_data = unmapped_buf;
4681		bip->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
4682		bip->bio_flags |= BIO_UNMAPPED;
4683		KASSERT(round_page(bip->bio_ma_offset + bip->bio_length) /
4684		    PAGE_SIZE == bp->b_npages,
4685		    ("Buffer %p too short: %d %lld %d", bp, bip->bio_ma_offset,
4686		    (long long)bip->bio_length, bip->bio_ma_n));
4687	} else {
4688		bip->bio_data = bp->b_data;
4689		bip->bio_ma = NULL;
4690	}
4691}
4692
4693static int buf_pager_relbuf;
4694SYSCTL_INT(_vfs, OID_AUTO, buf_pager_relbuf, CTLFLAG_RWTUN,
4695    &buf_pager_relbuf, 0,
4696    "Make buffer pager release buffers after reading");
4697
4698/*
4699 * The buffer pager.  It uses buffer reads to validate pages.
4700 *
4701 * In contrast to the generic local pager from vm/vnode_pager.c, this
4702 * pager correctly and easily handles volumes where the underlying
4703 * device block size is greater than the machine page size.  The
4704 * buffer cache transparently extends the requested page run to be
4705 * aligned at the block boundary, and does the necessary bogus page
4706 * replacements in the addends to avoid obliterating already valid
4707 * pages.
4708 *
4709 * The only non-trivial issue is that the exclusive busy state for
4710 * pages, which is assumed by the vm_pager_getpages() interface, is
4711 * incompatible with the VMIO buffer cache's desire to share-busy the
4712 * pages.  This function performs a trivial downgrade of the pages'
4713 * state before reading buffers, and a less trivial upgrade from the
4714 * shared-busy to excl-busy state after the read.
4715 */
4716int
4717vfs_bio_getpages(struct vnode *vp, vm_page_t *ma, int count,
4718    int *rbehind, int *rahead, vbg_get_lblkno_t get_lblkno,
4719    vbg_get_blksize_t get_blksize)
4720{
4721	vm_page_t m;
4722	vm_object_t object;
4723	struct buf *bp;
4724	struct mount *mp;
4725	daddr_t lbn, lbnp;
4726	vm_ooffset_t la, lb, poff, poffe;
4727	long bsize;
4728	int bo_bs, br_flags, error, i, pgsin, pgsin_a, pgsin_b;
4729	bool redo, lpart;
4730
4731	object = vp->v_object;
4732	mp = vp->v_mount;
4733	la = IDX_TO_OFF(ma[count - 1]->pindex);
4734	if (la >= object->un_pager.vnp.vnp_size)
4735		return (VM_PAGER_BAD);
4736	lpart = la + PAGE_SIZE > object->un_pager.vnp.vnp_size;
4737	bo_bs = get_blksize(vp, get_lblkno(vp, IDX_TO_OFF(ma[0]->pindex)));
4738
4739	/*
4740	 * Calculate read-ahead, behind and total pages.
4741	 */
4742	pgsin = count;
4743	lb = IDX_TO_OFF(ma[0]->pindex);
4744	pgsin_b = OFF_TO_IDX(lb - rounddown2(lb, bo_bs));
4745	pgsin += pgsin_b;
4746	if (rbehind != NULL)
4747		*rbehind = pgsin_b;
4748	pgsin_a = OFF_TO_IDX(roundup2(la, bo_bs) - la);
4749	if (la + IDX_TO_OFF(pgsin_a) >= object->un_pager.vnp.vnp_size)
4750		pgsin_a = OFF_TO_IDX(roundup2(object->un_pager.vnp.vnp_size,
4751		    PAGE_SIZE) - la);
4752	pgsin += pgsin_a;
4753	if (rahead != NULL)
4754		*rahead = pgsin_a;
4755	PCPU_INC(cnt.v_vnodein);
4756	PCPU_ADD(cnt.v_vnodepgsin, pgsin);
4757
4758	br_flags = (mp != NULL && (mp->mnt_kern_flag & MNTK_UNMAPPED_BUFS)
4759	    != 0) ? GB_UNMAPPED : 0;
4760	VM_OBJECT_WLOCK(object);
4761again:
4762	for (i = 0; i < count; i++)
4763		vm_page_busy_downgrade(ma[i]);
4764	VM_OBJECT_WUNLOCK(object);
4765
4766	lbnp = -1;
4767	for (i = 0; i < count; i++) {
4768		m = ma[i];
4769
4770		/*
4771		 * Pages are shared busy and the object lock is not
4772		 * owned, which together allow for the pages'
4773		 * invalidation.  The racy test for validity avoids
4774		 * useless creation of the buffer for the most typical
4775		 * case when invalidation is not used in redo or for
4776		 * parallel read.  The shared->excl upgrade loop at
4777		 * the end of the function catches the race in a
4778		 * reliable way (protected by the object lock).
4779		 */
4780		if (m->valid == VM_PAGE_BITS_ALL)
4781			continue;
4782
4783		poff = IDX_TO_OFF(m->pindex);
4784		poffe = MIN(poff + PAGE_SIZE, object->un_pager.vnp.vnp_size);
4785		for (; poff < poffe; poff += bsize) {
4786			lbn = get_lblkno(vp, poff);
4787			if (lbn == lbnp)
4788				goto next_page;
4789			lbnp = lbn;
4790
4791			bsize = get_blksize(vp, lbn);
4792			error = bread_gb(vp, lbn, bsize, curthread->td_ucred,
4793			    br_flags, &bp);
4794			if (error != 0)
4795				goto end_pages;
4796			if (LIST_EMPTY(&bp->b_dep)) {
4797				/*
4798				 * Invalidation clears m->valid, but
4799				 * may leave B_CACHE flag if the
4800				 * buffer existed at the invalidation
4801				 * time.  In this case, recycle the
4802				 * buffer to do real read on next
4803				 * bread() after redo.
4804				 *
4805				 * Otherwise B_RELBUF is not strictly
4806				 * necessary, enable to reduce buf
4807				 * cache pressure.
4808				 */
4809				if (buf_pager_relbuf ||
4810				    m->valid != VM_PAGE_BITS_ALL)
4811					bp->b_flags |= B_RELBUF;
4812
4813				bp->b_flags &= ~B_NOCACHE;
4814				brelse(bp);
4815			} else {
4816				bqrelse(bp);
4817			}
4818		}
4819		KASSERT(1 /* racy, enable for debugging */ ||
4820		    m->valid == VM_PAGE_BITS_ALL || i == count - 1,
4821		    ("buf %d %p invalid", i, m));
4822		if (i == count - 1 && lpart) {
4823			VM_OBJECT_WLOCK(object);
4824			if (m->valid != 0 &&
4825			    m->valid != VM_PAGE_BITS_ALL)
4826				vm_page_zero_invalid(m, TRUE);
4827			VM_OBJECT_WUNLOCK(object);
4828		}
4829next_page:;
4830	}
4831end_pages:
4832
4833	VM_OBJECT_WLOCK(object);
4834	redo = false;
4835	for (i = 0; i < count; i++) {
4836		vm_page_sunbusy(ma[i]);
4837		ma[i] = vm_page_grab(object, ma[i]->pindex, VM_ALLOC_NORMAL);
4838
4839		/*
4840		 * Since the pages were only sbusy while neither the
4841		 * buffer nor the object lock was held by us, or
4842		 * reallocated while vm_page_grab() slept for busy
4843		 * relinguish, they could have been invalidated.
4844		 * Recheck the valid bits and re-read as needed.
4845		 *
4846		 * Note that the last page is made fully valid in the
4847		 * read loop, and partial validity for the page at
4848		 * index count - 1 could mean that the page was
4849		 * invalidated or removed, so we must restart for
4850		 * safety as well.
4851		 */
4852		if (ma[i]->valid != VM_PAGE_BITS_ALL)
4853			redo = true;
4854	}
4855	if (redo && error == 0)
4856		goto again;
4857	VM_OBJECT_WUNLOCK(object);
4858	return (error != 0 ? VM_PAGER_ERROR : VM_PAGER_OK);
4859}
4860
4861#include "opt_ddb.h"
4862#ifdef DDB
4863#include <ddb/ddb.h>
4864
4865/* DDB command to show buffer data */
4866DB_SHOW_COMMAND(buffer, db_show_buffer)
4867{
4868	/* get args */
4869	struct buf *bp = (struct buf *)addr;
4870
4871	if (!have_addr) {
4872		db_printf("usage: show buffer <addr>\n");
4873		return;
4874	}
4875
4876	db_printf("buf at %p\n", bp);
4877	db_printf("b_flags = 0x%b, b_xflags=0x%b, b_vflags=0x%b\n",
4878	    (u_int)bp->b_flags, PRINT_BUF_FLAGS, (u_int)bp->b_xflags,
4879	    PRINT_BUF_XFLAGS, (u_int)bp->b_vflags, PRINT_BUF_VFLAGS);
4880	db_printf(
4881	    "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n"
4882	    "b_bufobj = (%p), b_data = %p, b_blkno = %jd, b_lblkno = %jd, "
4883	    "b_dep = %p\n",
4884	    bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
4885	    bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno,
4886	    (intmax_t)bp->b_lblkno, bp->b_dep.lh_first);
4887	db_printf("b_kvabase = %p, b_kvasize = %d\n",
4888	    bp->b_kvabase, bp->b_kvasize);
4889	if (bp->b_npages) {
4890		int i;
4891		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
4892		for (i = 0; i < bp->b_npages; i++) {
4893			vm_page_t m;
4894			m = bp->b_pages[i];
4895			db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
4896			    (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
4897			if ((i + 1) < bp->b_npages)
4898				db_printf(",");
4899		}
4900		db_printf("\n");
4901	}
4902	db_printf(" ");
4903	BUF_LOCKPRINTINFO(bp);
4904}
4905
4906DB_SHOW_COMMAND(lockedbufs, lockedbufs)
4907{
4908	struct buf *bp;
4909	int i;
4910
4911	for (i = 0; i < nbuf; i++) {
4912		bp = &buf[i];
4913		if (BUF_ISLOCKED(bp)) {
4914			db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4915			db_printf("\n");
4916		}
4917	}
4918}
4919
4920DB_SHOW_COMMAND(vnodebufs, db_show_vnodebufs)
4921{
4922	struct vnode *vp;
4923	struct buf *bp;
4924
4925	if (!have_addr) {
4926		db_printf("usage: show vnodebufs <addr>\n");
4927		return;
4928	}
4929	vp = (struct vnode *)addr;
4930	db_printf("Clean buffers:\n");
4931	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_clean.bv_hd, b_bobufs) {
4932		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4933		db_printf("\n");
4934	}
4935	db_printf("Dirty buffers:\n");
4936	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
4937		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4938		db_printf("\n");
4939	}
4940}
4941
4942DB_COMMAND(countfreebufs, db_coundfreebufs)
4943{
4944	struct buf *bp;
4945	int i, used = 0, nfree = 0;
4946
4947	if (have_addr) {
4948		db_printf("usage: countfreebufs\n");
4949		return;
4950	}
4951
4952	for (i = 0; i < nbuf; i++) {
4953		bp = &buf[i];
4954		if (bp->b_qindex == QUEUE_EMPTY)
4955			nfree++;
4956		else
4957			used++;
4958	}
4959
4960	db_printf("Counted %d free, %d used (%d tot)\n", nfree, used,
4961	    nfree + used);
4962	db_printf("numfreebuffers is %d\n", numfreebuffers);
4963}
4964#endif /* DDB */
4965