vfs_bio.c revision 46181
1/*
2 * Copyright (c) 1994,1997 John S. Dyson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice immediately at the beginning of the file, without modification,
10 *    this list of conditions, and the following disclaimer.
11 * 2. Absolutely no warranty of function or purpose is made by the author
12 *		John S. Dyson.
13 *
14 * $Id: vfs_bio.c,v 1.206 1999/04/14 18:51:52 dt Exp $
15 */
16
17/*
18 * this file contains a new buffer I/O scheme implementing a coherent
19 * VM object and buffer cache scheme.  Pains have been taken to make
20 * sure that the performance degradation associated with schemes such
21 * as this is not realized.
22 *
23 * Author:  John S. Dyson
24 * Significant help during the development and debugging phases
25 * had been provided by David Greenman, also of the FreeBSD core team.
26 *
27 * see man buf(9) for more info.
28 */
29
30#define VMIO
31#include <sys/param.h>
32#include <sys/systm.h>
33#include <sys/sysproto.h>
34#include <sys/kernel.h>
35#include <sys/sysctl.h>
36#include <sys/proc.h>
37#include <sys/vnode.h>
38#include <sys/vmmeter.h>
39#include <sys/lock.h>
40#include <miscfs/specfs/specdev.h>
41#include <vm/vm.h>
42#include <vm/vm_param.h>
43#include <vm/vm_prot.h>
44#include <vm/vm_kern.h>
45#include <vm/vm_pageout.h>
46#include <vm/vm_page.h>
47#include <vm/vm_object.h>
48#include <vm/vm_extern.h>
49#include <vm/vm_map.h>
50#include <sys/buf.h>
51#include <sys/mount.h>
52#include <sys/malloc.h>
53#include <sys/resourcevar.h>
54
55static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");
56
57struct	bio_ops bioops;		/* I/O operation notification */
58
59#if 0 	/* replaced by sched_sync */
60static void vfs_update __P((void));
61static struct	proc *updateproc;
62static struct kproc_desc up_kp = {
63	"update",
64	vfs_update,
65	&updateproc
66};
67SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
68#endif
69
70struct buf *buf;		/* buffer header pool */
71struct swqueue bswlist;
72
73static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
74		vm_offset_t to);
75static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
76		vm_offset_t to);
77static void vfs_buf_set_valid(struct buf *bp, vm_ooffset_t foff,
78			      vm_offset_t off, vm_offset_t size,
79			      vm_page_t m);
80static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
81			       int pageno, vm_page_t m);
82static void vfs_clean_pages(struct buf * bp);
83static void vfs_setdirty(struct buf *bp);
84static void vfs_vmio_release(struct buf *bp);
85static void flushdirtybuffers(int slpflag, int slptimeo);
86static int flushbufqueues(void);
87
88/*
89 * Internal update daemon, process 3
90 *	The variable vfs_update_wakeup allows for internal syncs.
91 */
92int vfs_update_wakeup;
93
94/*
95 * bogus page -- for I/O to/from partially complete buffers
96 * this is a temporary solution to the problem, but it is not
97 * really that bad.  it would be better to split the buffer
98 * for input in the case of buffers partially already in memory,
99 * but the code is intricate enough already.
100 */
101vm_page_t bogus_page;
102int runningbufspace;
103static vm_offset_t bogus_offset;
104
105static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
106	bufmallocspace, maxbufmallocspace, hibufspace;
107static int needsbuffer;
108static int numdirtybuffers, lodirtybuffers, hidirtybuffers;
109static int numfreebuffers, lofreebuffers, hifreebuffers;
110static int kvafreespace;
111
112SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD,
113	&numdirtybuffers, 0, "");
114SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW,
115	&lodirtybuffers, 0, "");
116SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW,
117	&hidirtybuffers, 0, "");
118SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD,
119	&numfreebuffers, 0, "");
120SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW,
121	&lofreebuffers, 0, "");
122SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW,
123	&hifreebuffers, 0, "");
124SYSCTL_INT(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD,
125	&runningbufspace, 0, "");
126SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RW,
127	&maxbufspace, 0, "");
128SYSCTL_INT(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD,
129	&hibufspace, 0, "");
130SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD,
131	&bufspace, 0, "");
132SYSCTL_INT(_vfs, OID_AUTO, maxvmiobufspace, CTLFLAG_RW,
133	&maxvmiobufspace, 0, "");
134SYSCTL_INT(_vfs, OID_AUTO, vmiospace, CTLFLAG_RD,
135	&vmiospace, 0, "");
136SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW,
137	&maxbufmallocspace, 0, "");
138SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD,
139	&bufmallocspace, 0, "");
140SYSCTL_INT(_vfs, OID_AUTO, kvafreespace, CTLFLAG_RD,
141	&kvafreespace, 0, "");
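
/*
 * All of the counters and limits above are exported under the "vfs"
 * sysctl tree (e.g. vfs.bufspace, vfs.numdirtybuffers).  A minimal
 * userland sketch for reading one of them -- illustrative only, not
 * part of this file:
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		int bufspace;
 *		size_t len = sizeof(bufspace);
 *
 *		if (sysctlbyname("vfs.bufspace", &bufspace, &len, NULL, 0) == 0)
 *			printf("vfs.bufspace = %d\n", bufspace);
 *		return (0);
 *	}
 *
 * The same values can be inspected from the shell with sysctl(8),
 * e.g. "sysctl vfs.numdirtybuffers".
 */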
142
143static LIST_HEAD(bufhashhdr, buf) bufhashtbl[BUFHSZ], invalhash;
144struct bqueues bufqueues[BUFFER_QUEUES] = { { 0 } };
145
146extern int vm_swap_size;
147
148#define BUF_MAXUSE		24
149
150#define VFS_BIO_NEED_ANY	0x01	/* any freeable buffer */
151#define VFS_BIO_NEED_RESERVED02	0x02	/* unused */
152#define VFS_BIO_NEED_FREE	0x04	/* wait for free bufs, hi hysteresis */
153#define VFS_BIO_NEED_BUFSPACE	0x08	/* wait for buf space, lo hysteresis */
154#define VFS_BIO_NEED_KVASPACE	0x10	/* wait for buffer_map space, emerg  */
155
156/*
157 *	kvaspacewakeup:
158 *
159 *	Called when kva space is potentially available for recovery or when
160 *	kva space is recovered in the buffer_map.  This function wakes up
161 *	anyone waiting for buffer_map kva space.  Even though the buffer_map
162 *	is larger than maxbufspace, this situation will typically occur
163 *	when the buffer_map gets fragmented.
164 */
165
166static __inline void
167kvaspacewakeup(void)
168{
169	/*
170	 * If someone is waiting for KVA space, wake them up.  Even
171	 * though we haven't freed the kva space yet, the waiting
172	 * process will be able to now.
173	 */
174	if (needsbuffer & VFS_BIO_NEED_KVASPACE) {
175		needsbuffer &= ~VFS_BIO_NEED_KVASPACE;
176		wakeup(&needsbuffer);
177	}
178}
179
180/*
181 *	bufspacewakeup:
182 *
183 *	Called when buffer space is potentially available for recovery or when
184 *	buffer space is recovered.  getnewbuf() will block on this flag when
185 *	it is unable to free sufficient buffer space.  Buffer space becomes
186 *	recoverable when bp's get placed back in the queues.
187 */
188
189static __inline void
190bufspacewakeup(void)
191{
192	/*
193	 * If someone is waiting for BUF space, wake them up.  Even
194	 * though we haven't freed the buffer space yet, the waiting
195	 * process will be able to now.
196	 */
197	if (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
198		needsbuffer &= ~VFS_BIO_NEED_BUFSPACE;
199		wakeup(&needsbuffer);
200	}
201}
202
203/*
204 *	bufcountwakeup:
205 *
206 *	Called when a buffer has been added to one of the free queues to
207 *	account for the buffer and to wakeup anyone waiting for free buffers.
208 *	This typically occurs when large amounts of metadata are being handled
209 *	by the buffer cache ( else buffer space runs out first, usually ).
210 */
211
212static __inline void
213bufcountwakeup(void)
214{
215	++numfreebuffers;
216	if (needsbuffer) {
217		needsbuffer &= ~VFS_BIO_NEED_ANY;
218		if (numfreebuffers >= hifreebuffers)
219			needsbuffer &= ~VFS_BIO_NEED_FREE;
220		wakeup(&needsbuffer);
221	}
222}
223
224/*
225 * Initialize buffer headers and related structures.
226 */
227void
228bufinit()
229{
230	struct buf *bp;
231	int i;
232
233	TAILQ_INIT(&bswlist);
234	LIST_INIT(&invalhash);
235
236	/* first, make a null hash table */
237	for (i = 0; i < BUFHSZ; i++)
238		LIST_INIT(&bufhashtbl[i]);
239
240	/* next, make a null set of free lists */
241	for (i = 0; i < BUFFER_QUEUES; i++)
242		TAILQ_INIT(&bufqueues[i]);
243
244	/* finally, initialize each buffer header and stick on empty q */
245	for (i = 0; i < nbuf; i++) {
246		bp = &buf[i];
247		bzero(bp, sizeof *bp);
248		bp->b_flags = B_INVAL;	/* we're just an empty header */
249		bp->b_dev = NODEV;
250		bp->b_rcred = NOCRED;
251		bp->b_wcred = NOCRED;
252		bp->b_qindex = QUEUE_EMPTY;
253		bp->b_xflags = 0;
254		LIST_INIT(&bp->b_dep);
255		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
256		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
257	}
258
259	/*
260	 * maxbufspace is currently calculated to support all filesystem
261	 * blocks to be 8K.  If you happen to use a 16K filesystem, the size
262	 * of the buffer cache is still the same as it would be for 8K
263	 * filesystems.  This keeps the size of the buffer cache "in check"
264	 * for big block filesystems.
265	 *
266	 * maxbufspace is calculated as around 50% of the KVA available in
267	 * the buffer_map ( DFLTBSIZE vs BKVASIZE ), presumably to reduce the
268	 * effect of fragmentation.
269	 */
270	maxbufspace = (nbuf + 8) * DFLTBSIZE;
271	if ((hibufspace = maxbufspace - MAXBSIZE * 5) <= MAXBSIZE)
272		hibufspace = 3 * maxbufspace / 4;
273/*
274 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
275 */
276	maxvmiobufspace = 2 * hibufspace / 3;
277/*
278 * Limit the amount of malloc memory since it is wired permanently into
279 * the kernel space.  Even though this is accounted for in the buffer
280 * allocation, we don't want the malloced region to grow uncontrolled.
281 * The malloc scheme improves memory utilization significantly on average
282 * (small) directories.
283 */
284	maxbufmallocspace = hibufspace / 20;
285
286/*
287 * Reduce the chance of a deadlock occurring by limiting the number
288 * of delayed-write dirty buffers we allow to stack up.
289 */
290	lodirtybuffers = nbuf / 16 + 10;
291	hidirtybuffers = nbuf / 8 + 20;
292	numdirtybuffers = 0;
293
294/*
295 * Try to keep the number of free buffers in the specified range,
296 * and give the syncer access to an emergency reserve.
297 */
298	lofreebuffers = nbuf / 18 + 5;
299	hifreebuffers = 2 * lofreebuffers;
300	numfreebuffers = nbuf;
301
302	kvafreespace = 0;
303
304	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
305	bogus_page = vm_page_alloc(kernel_object,
306			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
307			VM_ALLOC_NORMAL);
308
309}
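
/*
 * Worked example of the sizing policy above (a sketch only -- the
 * actual values of nbuf, DFLTBSIZE and MAXBSIZE depend on the kernel
 * configuration).  Assuming nbuf = 1024, DFLTBSIZE = 8K and
 * MAXBSIZE = 64K:
 *
 *	maxbufspace       = (1024 + 8) * 8192   = 8454144 (about 8.1MB)
 *	hibufspace        = 8454144 - 5 * 65536 = 8126464
 *	maxvmiobufspace   = 2 * 8126464 / 3     = 5417642
 *	maxbufmallocspace = 8126464 / 20        = 406323
 *	lodirtybuffers    = 1024 / 16 + 10      = 74
 *	hidirtybuffers    = 1024 / 8 + 20       = 148
 *	lofreebuffers     = 1024 / 18 + 5       = 61
 *	hifreebuffers     = 2 * 61              = 122
 */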
310
311/*
312 * Free the kva allocation for a buffer
313 * Must be called only at splbio or higher,
314 *  as this is the only locking for buffer_map.
315 */
316static void
317bfreekva(struct buf * bp)
318{
319	if (bp->b_kvasize) {
320		vm_map_delete(buffer_map,
321		    (vm_offset_t) bp->b_kvabase,
322		    (vm_offset_t) bp->b_kvabase + bp->b_kvasize
323		);
324		bp->b_kvasize = 0;
325		kvaspacewakeup();
326	}
327}
328
329/*
330 *	bremfree:
331 *
332 *	Remove the buffer from the appropriate free list.
333 */
334void
335bremfree(struct buf * bp)
336{
337	int s = splbio();
338	int old_qindex = bp->b_qindex;
339
340	if (bp->b_qindex != QUEUE_NONE) {
341		if (bp->b_qindex == QUEUE_EMPTY) {
342			kvafreespace -= bp->b_kvasize;
343		}
344		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
345		bp->b_qindex = QUEUE_NONE;
346		runningbufspace += bp->b_bufsize;
347	} else {
348#if !defined(MAX_PERF)
349		panic("bremfree: removing a buffer when not on a queue");
350#endif
351	}
352
353	/*
354	 * Fixup numfreebuffers count.  If the buffer is invalid or not
355	 * delayed-write, and it was on the EMPTY, LRU, or AGE queues,
356	 * the buffer was free and we must decrement numfreebuffers.
357	 */
358	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
359		switch(old_qindex) {
360		case QUEUE_EMPTY:
361		case QUEUE_LRU:
362		case QUEUE_AGE:
363			--numfreebuffers;
364			break;
365		default:
366			break;
367		}
368	}
369	splx(s);
370}
371
372
373/*
374 * Get a buffer with the specified data.  Look in the cache first.
375 */
376int
377bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
378    struct buf ** bpp)
379{
380	struct buf *bp;
381
382	bp = getblk(vp, blkno, size, 0, 0);
383	*bpp = bp;
384
385	/* if not found in cache, do some I/O */
386	if ((bp->b_flags & B_CACHE) == 0) {
387		if (curproc != NULL)
388			curproc->p_stats->p_ru.ru_inblock++;
389		KASSERT(!(bp->b_flags & B_ASYNC), ("bread: illegal async bp %p", bp));
390		bp->b_flags |= B_READ;
391		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
392		if (bp->b_rcred == NOCRED) {
393			if (cred != NOCRED)
394				crhold(cred);
395			bp->b_rcred = cred;
396		}
397		vfs_busy_pages(bp, 0);
398		VOP_STRATEGY(vp, bp);
399		return (biowait(bp));
400	}
401	return (0);
402}
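
/*
 * Typical use of bread() in a filesystem read path (an illustrative
 * sketch, not code from this file; "lbn" and "fs_bsize" are assumed
 * names):
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, fs_bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... copy data out of bp->b_data ...
 *	bqrelse(bp);		(or brelse(bp))
 */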
403
404/*
405 * Operates like bread, but also starts asynchronous I/O on
406 * read-ahead blocks.
407 */
408int
409breadn(struct vnode * vp, daddr_t blkno, int size,
410    daddr_t * rablkno, int *rabsize,
411    int cnt, struct ucred * cred, struct buf ** bpp)
412{
413	struct buf *bp, *rabp;
414	int i;
415	int rv = 0, readwait = 0;
416
417	*bpp = bp = getblk(vp, blkno, size, 0, 0);
418
419	/* if not found in cache, do some I/O */
420	if ((bp->b_flags & B_CACHE) == 0) {
421		if (curproc != NULL)
422			curproc->p_stats->p_ru.ru_inblock++;
423		bp->b_flags |= B_READ;
424		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
425		if (bp->b_rcred == NOCRED) {
426			if (cred != NOCRED)
427				crhold(cred);
428			bp->b_rcred = cred;
429		}
430		vfs_busy_pages(bp, 0);
431		VOP_STRATEGY(vp, bp);
432		++readwait;
433	}
434
435	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
436		if (inmem(vp, *rablkno))
437			continue;
438		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);
439
440		if ((rabp->b_flags & B_CACHE) == 0) {
441			if (curproc != NULL)
442				curproc->p_stats->p_ru.ru_inblock++;
443			rabp->b_flags |= B_READ | B_ASYNC;
444			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
445			if (rabp->b_rcred == NOCRED) {
446				if (cred != NOCRED)
447					crhold(cred);
448				rabp->b_rcred = cred;
449			}
450			vfs_busy_pages(rabp, 0);
451			VOP_STRATEGY(vp, rabp);
452		} else {
453			brelse(rabp);
454		}
455	}
456
457	if (readwait) {
458		rv = biowait(bp);
459	}
460	return (rv);
461}
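
/*
 * breadn() sketch (illustrative, assumed names): read logical block
 * "lbn" and start asynchronous read-ahead on the following block.
 *
 *	daddr_t ra_blkno = lbn + 1;
 *	int ra_size = fs_bsize;
 *
 *	error = breadn(vp, lbn, fs_bsize, &ra_blkno, &ra_size, 1,
 *	    NOCRED, &bp);
 */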
462
463/*
464 * Write, release buffer on completion.  (Done by iodone
465 * if async.)
466 */
467int
468bwrite(struct buf * bp)
469{
470	int oldflags, s;
471	struct vnode *vp;
472	struct mount *mp;
473
474	if (bp->b_flags & B_INVAL) {
475		brelse(bp);
476		return (0);
477	}
478
479	oldflags = bp->b_flags;
480
481#if !defined(MAX_PERF)
482	if ((bp->b_flags & B_BUSY) == 0)
483		panic("bwrite: buffer is not busy???");
484#endif
485	s = splbio();
486	bundirty(bp);
487
488	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR);
489	bp->b_flags |= B_WRITEINPROG;
490
491	bp->b_vp->v_numoutput++;
492	vfs_busy_pages(bp, 1);
493	if (curproc != NULL)
494		curproc->p_stats->p_ru.ru_oublock++;
495	splx(s);
496	VOP_STRATEGY(bp->b_vp, bp);
497
498	/*
499	 * Collect statistics on synchronous and asynchronous writes.
500	 * Writes to block devices are charged to their associated
501	 * filesystem (if any).
502	 */
503	if ((vp = bp->b_vp) != NULL) {
504		if (vp->v_type == VBLK)
505			mp = vp->v_specmountpoint;
506		else
507			mp = vp->v_mount;
508		if (mp != NULL)
509			if ((oldflags & B_ASYNC) == 0)
510				mp->mnt_stat.f_syncwrites++;
511			else
512				mp->mnt_stat.f_asyncwrites++;
513	}
514
515	if ((oldflags & B_ASYNC) == 0) {
516		int rtval = biowait(bp);
517		brelse(bp);
518		return (rtval);
519	}
520
521	return (0);
522}
523
524/*
525 * Delayed write. (Buffer is marked dirty).
526 */
527void
528bdwrite(struct buf * bp)
529{
530	struct vnode *vp;
531
532#if !defined(MAX_PERF)
533	if ((bp->b_flags & B_BUSY) == 0) {
534		panic("bdwrite: buffer is not busy");
535	}
536#endif
537
538	if (bp->b_flags & B_INVAL) {
539		brelse(bp);
540		return;
541	}
542	bdirty(bp);
543
544	/*
545	 * This bmap keeps the system from needing to do the bmap later,
546	 * perhaps when the system is attempting to do a sync.  Since it
547	 * is likely that the indirect block -- or whatever other data structure
548	 * that the filesystem needs is still in memory now, it is a good
549	 * thing to do this.  Note also, that if the pageout daemon is
550	 * requesting a sync -- there might not be enough memory to do
551	 * the bmap then...  So, this is important to do.
552	 */
553	if (bp->b_lblkno == bp->b_blkno) {
554		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
555	}
556
557	/*
558	 * Set the *dirty* buffer range based upon the VM system dirty pages.
559	 */
560	vfs_setdirty(bp);
561
562	/*
563	 * We need to do this here to satisfy the vnode_pager and the
564	 * pageout daemon, so that it thinks that the pages have been
565	 * "cleaned".  Note that since the pages are in a delayed write
566	 * buffer -- the VFS layer "will" see that the pages get written
567	 * out on the next sync, or perhaps the cluster will be completed.
568	 */
569	vfs_clean_pages(bp);
570	bqrelse(bp);
571
572	/*
573	 * XXX The soft dependency code is not prepared to
574	 * have I/O done when a bdwrite is requested. For
575	 * now we just let the write be delayed if it is
576	 * requested by the soft dependency code.
577	 */
578	if ((vp = bp->b_vp) &&
579	    ((vp->v_type == VBLK && vp->v_specmountpoint &&
580		  (vp->v_specmountpoint->mnt_flag & MNT_SOFTDEP)) ||
581		 (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))))
582		return;
583
584	if (numdirtybuffers >= hidirtybuffers)
585		flushdirtybuffers(0, 0);
586}
587
588/*
589 *	bdirty:
590 *
591 *	Turn buffer into delayed write request.  We must clear B_READ and
592 *	B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to
593 *	itself to properly update it in the dirty/clean lists.  We mark it
594 *	B_DONE to ensure that any asynchronization of the buffer properly
595 *	clears B_DONE ( else a panic will occur later ).  Note that B_INVAL
596 *	buffers are not considered dirty even if B_DELWRI is set.
597 *
598 *	Since the buffer is not on a queue, we do not update the numfreebuffers
599 *	count.
600 *
601 *	Must be called at splbio().
602 *	The buffer must be on QUEUE_NONE.
603 */
604void
605bdirty(bp)
606	struct buf *bp;
607{
608	KASSERT(bp->b_qindex == QUEUE_NONE, ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
609	bp->b_flags &= ~(B_READ|B_RELBUF);
610
611	if ((bp->b_flags & B_DELWRI) == 0) {
612		bp->b_flags |= B_DONE | B_DELWRI;
613		reassignbuf(bp, bp->b_vp);
614		++numdirtybuffers;
615	}
616}
617
618/*
619 *	bundirty:
620 *
621 *	Clear B_DELWRI for buffer.
622 *
623 *	Since the buffer is not on a queue, we do not update the numfreebuffers
624 *	count.
625 *
626 *	Must be called at splbio().
627 *	The buffer must be on QUEUE_NONE.
628 */
629
630void
631bundirty(bp)
632	struct buf *bp;
633{
634	KASSERT(bp->b_qindex == QUEUE_NONE, ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
635
636	if (bp->b_flags & B_DELWRI) {
637		bp->b_flags &= ~B_DELWRI;
638		reassignbuf(bp, bp->b_vp);
639		--numdirtybuffers;
640	}
641}
642
643/*
644 *	bawrite:
645 *
646 *	Asynchronous write.  Start output on a buffer, but do not wait for
647 *	it to complete.  The buffer is released when the output completes.
648 */
649void
650bawrite(struct buf * bp)
651{
652	bp->b_flags |= B_ASYNC;
653	(void) VOP_BWRITE(bp);
654}
655
656/*
657 *	bowrite:
658 *
659 *	Ordered write.  Start output on a buffer, and flag it so that the
660 *	device will write it in the order it was queued.  The buffer is
661 *	released when the output completes.
662 */
663int
664bowrite(struct buf * bp)
665{
666	bp->b_flags |= B_ORDERED | B_ASYNC;
667	return (VOP_BWRITE(bp));
668}
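
/*
 * Summary of the write entry points above (assuming a busy buffer
 * obtained via bread()/getblk()):
 *
 *	bwrite(bp)	synchronous; the caller gets the I/O error and
 *			the buffer is released by bwrite() itself.
 *	bdwrite(bp)	delayed write; the buffer is marked B_DELWRI and
 *			requeued, the actual I/O happens later.
 *	bawrite(bp)	asynchronous write; I/O is started now and the
 *			buffer is released when it completes.
 *	bowrite(bp)	like bawrite(), but B_ORDERED asks the driver to
 *			preserve queue ordering.
 */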
669
670/*
671 *	brelse:
672 *
673 *	Release a busy buffer and, if requested, free its resources.  The
674 *	buffer will be stashed in the appropriate bufqueue[] allowing it
675 *	to be accessed later as a cache entity or reused for other purposes.
676 */
677void
678brelse(struct buf * bp)
679{
680	int s;
681
682	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
683
684#if 0
685	if (bp->b_flags & B_CLUSTER) {
686		relpbuf(bp, NULL);
687		return;
688	}
689#endif
690
691	s = splbio();
692
693	if (bp->b_flags & B_LOCKED)
694		bp->b_flags &= ~B_ERROR;
695
696	if ((bp->b_flags & (B_READ | B_ERROR)) == B_ERROR) {
697		bp->b_flags &= ~B_ERROR;
698		bdirty(bp);
699	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_FREEBUF)) ||
700	    (bp->b_bufsize <= 0)) {
701		bp->b_flags |= B_INVAL;
702		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
703			(*bioops.io_deallocate)(bp);
704		if (bp->b_flags & B_DELWRI)
705			--numdirtybuffers;
706		bp->b_flags &= ~(B_DELWRI | B_CACHE | B_FREEBUF);
707		if ((bp->b_flags & B_VMIO) == 0) {
708			if (bp->b_bufsize)
709				allocbuf(bp, 0);
710			if (bp->b_vp)
711				brelvp(bp);
712		}
713	}
714
715	/*
716	 * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_release()
717	 * is called with B_DELWRI set, the underlying pages may wind up
718	 * getting freed causing a previous write (bdwrite()) to get 'lost'
719	 * because pages associated with a B_DELWRI bp are marked clean.
720	 *
721	 * We still allow the B_INVAL case to call vfs_vmio_release(), even
722	 * if B_DELWRI is set.
723	 */
724
725	if (bp->b_flags & B_DELWRI)
726		bp->b_flags &= ~B_RELBUF;
727
728	/*
729	 * VMIO buffer rundown.  It is not strictly necessary to keep a VMIO buffer
730	 * constituted, so the B_INVAL flag is used to *invalidate* the buffer,
731	 * but the VM object is kept around.  The B_NOCACHE flag is used to
732	 * invalidate the pages in the VM object.
733	 *
734	 * The b_{validoff,validend,dirtyoff,dirtyend} values are relative
735	 * to b_offset and currently have byte granularity, whereas the
736	 * valid flags in the vm_pages have only DEV_BSIZE resolution.
737	 * The byte resolution fields are used to avoid unnecessary re-reads
738	 * of the buffer but the code really needs to be genericized so
739	 * other filesystem modules can take advantage of these fields.
740	 *
741	 * XXX this seems to cause performance problems.
742	 */
743	if ((bp->b_flags & B_VMIO)
744	    && !(bp->b_vp->v_tag == VT_NFS &&
745		 bp->b_vp->v_type != VBLK &&
746		 (bp->b_flags & B_DELWRI) != 0)
747#ifdef notdef
748	    && (bp->b_vp->v_tag != VT_NFS
749		|| bp->b_vp->v_type == VBLK
750		|| (bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR))
751		|| bp->b_validend == 0
752		|| (bp->b_validoff == 0
753		    && bp->b_validend == bp->b_bufsize))
754#endif
755	    ) {
756
757		int i, j, resid;
758		vm_page_t m;
759		off_t foff;
760		vm_pindex_t poff;
761		vm_object_t obj;
762		struct vnode *vp;
763
764		vp = bp->b_vp;
765
766		/*
767		 * Get the base offset and length of the buffer.  Note that
768		 * for block sizes that are less than PAGE_SIZE, the b_data
769		 * base of the buffer does not represent exactly b_offset and
770		 * neither b_offset nor b_size are necessarily page aligned.
771		 * Instead, the starting position of b_offset is:
772		 *
773		 * 	b_data + (b_offset & PAGE_MASK)
774		 *
775		 * block sizes less than DEV_BSIZE (usually 512) are not
776		 * supported due to the page granularity bits (m->valid,
777		 * m->dirty, etc...).
778		 *
779		 * See man buf(9) for more information
780		 */
781
782		resid = bp->b_bufsize;
783		foff = bp->b_offset;
784
785		for (i = 0; i < bp->b_npages; i++) {
786			m = bp->b_pages[i];
787			vm_page_flag_clear(m, PG_ZERO);
788			if (m == bogus_page) {
789
790				obj = (vm_object_t) vp->v_object;
791				poff = OFF_TO_IDX(bp->b_offset);
792
793				for (j = i; j < bp->b_npages; j++) {
794					m = bp->b_pages[j];
795					if (m == bogus_page) {
796						m = vm_page_lookup(obj, poff + j);
797#if !defined(MAX_PERF)
798						if (!m) {
799							panic("brelse: page missing\n");
800						}
801#endif
802						bp->b_pages[j] = m;
803					}
804				}
805
806				if ((bp->b_flags & B_INVAL) == 0) {
807					pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
808				}
809			}
810			if (bp->b_flags & (B_NOCACHE|B_ERROR)) {
811				int poffset = foff & PAGE_MASK;
812				int presid = resid > (PAGE_SIZE - poffset) ?
813					(PAGE_SIZE - poffset) : resid;
814
815				KASSERT(presid >= 0, ("brelse: extra page"));
816				vm_page_set_invalid(m, poffset, presid);
817			}
818			resid -= PAGE_SIZE - (foff & PAGE_MASK);
819			foff = (foff + PAGE_SIZE) & ~PAGE_MASK;
820		}
821
822		if (bp->b_flags & (B_INVAL | B_RELBUF))
823			vfs_vmio_release(bp);
824
825	} else if (bp->b_flags & B_VMIO) {
826
827		if (bp->b_flags & (B_INVAL | B_RELBUF))
828			vfs_vmio_release(bp);
829
830	}
831
832#if !defined(MAX_PERF)
833	if (bp->b_qindex != QUEUE_NONE)
834		panic("brelse: free buffer onto another queue???");
835#endif
836	/* enqueue */
837
838	/* buffers with no memory */
839	if (bp->b_bufsize == 0) {
840		bp->b_flags |= B_INVAL;
841		bp->b_qindex = QUEUE_EMPTY;
842		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
843		LIST_REMOVE(bp, b_hash);
844		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
845		bp->b_dev = NODEV;
846		kvafreespace += bp->b_kvasize;
847		if (bp->b_kvasize)
848			kvaspacewakeup();
849	/* buffers with junk contents */
850	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
851		bp->b_flags |= B_INVAL;
852		bp->b_qindex = QUEUE_AGE;
853		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
854		LIST_REMOVE(bp, b_hash);
855		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
856		bp->b_dev = NODEV;
857
858	/* buffers that are locked */
859	} else if (bp->b_flags & B_LOCKED) {
860		bp->b_qindex = QUEUE_LOCKED;
861		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
862
863	/* buffers with stale but valid contents */
864	} else if (bp->b_flags & B_AGE) {
865		bp->b_qindex = QUEUE_AGE;
866		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
867
868	/* buffers with valid and quite potentially reusable contents */
869	} else {
870		bp->b_qindex = QUEUE_LRU;
871		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
872	}
873
874	/*
875	 * If B_INVAL, clear B_DELWRI.
876	 */
877	if ((bp->b_flags & (B_INVAL|B_DELWRI)) == (B_INVAL|B_DELWRI)) {
878		bp->b_flags &= ~B_DELWRI;
879		--numdirtybuffers;
880	}
881
882	runningbufspace -= bp->b_bufsize;
883
884	/*
885	 * Fixup numfreebuffers count.  The bp is on an appropriate queue
886	 * unless locked.  We then bump numfreebuffers if it is not B_DELWRI.
887	 * We've already handled the B_INVAL case ( B_DELWRI will be clear
888	 * if B_INVAL is set ).
889	 */
890
891	if ((bp->b_flags & B_LOCKED) == 0 && !(bp->b_flags & B_DELWRI))
892		bufcountwakeup();
893
894	/*
895	 * Something we can maybe free.
896	 */
897
898	if (bp->b_bufsize)
899		bufspacewakeup();
900
901	if (bp->b_flags & B_WANTED) {
902		bp->b_flags &= ~(B_WANTED | B_AGE);
903		wakeup(bp);
904	}
905
906	/* unlock */
907	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
908		B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
909	splx(s);
910}
911
912/*
913 * Release a buffer back to the appropriate queue but do not try to free
914 * it.
915 */
916void
917bqrelse(struct buf * bp)
918{
919	int s;
920
921	s = splbio();
922
923	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
924
925#if !defined(MAX_PERF)
926	if (bp->b_qindex != QUEUE_NONE)
927		panic("bqrelse: free buffer onto another queue???");
928#endif
929	if (bp->b_flags & B_LOCKED) {
930		bp->b_flags &= ~B_ERROR;
931		bp->b_qindex = QUEUE_LOCKED;
932		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
933		/* buffers with stale but valid contents */
934	} else {
935		bp->b_qindex = QUEUE_LRU;
936		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
937	}
938
939	runningbufspace -= bp->b_bufsize;
940
941	if ((bp->b_flags & B_LOCKED) == 0 &&
942	    ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI))
943	) {
944		bufcountwakeup();
945	}
946
947	/*
948	 * Something we can maybe wakeup
949	 */
950	if (bp->b_bufsize)
951		bufspacewakeup();
952
953	/* anyone need this block? */
954	if (bp->b_flags & B_WANTED) {
955		bp->b_flags &= ~(B_WANTED | B_AGE);
956		wakeup(bp);
957	}
958
959	/* unlock */
960	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
961		B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
962	splx(s);
963}
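
/*
 * brelse() vs bqrelse() (sketch): bqrelse() simply requeues a buffer
 * that is expected to be referenced again soon (e.g. from bdwrite()),
 * while brelse() also honors B_INVAL/B_NOCACHE/B_RELBUF and may tear
 * down the buffer's pages and KVA.  A common pattern:
 *
 *	if (likely to be reused soon)
 *		bqrelse(bp);
 *	else {
 *		bp->b_flags |= B_RELBUF;
 *		brelse(bp);
 *	}
 */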
964
965static void
966vfs_vmio_release(bp)
967	struct buf *bp;
968{
969	int i, s;
970	vm_page_t m;
971
972	s = splvm();
973	for (i = 0; i < bp->b_npages; i++) {
974		m = bp->b_pages[i];
975		bp->b_pages[i] = NULL;
976		/*
977		 * In order to keep page LRU ordering consistent, put
978		 * everything on the inactive queue.
979		 */
980		vm_page_unwire(m, 0);
981		/*
982		 * We don't mess with busy pages, it is
983		 * the responsibility of the process that
984		 * busied the pages to deal with them.
985		 */
986		if ((m->flags & PG_BUSY) || (m->busy != 0))
987			continue;
988
989		if (m->wire_count == 0) {
990			vm_page_flag_clear(m, PG_ZERO);
991			/*
992			 * Might as well free the page if we can and it has
993			 * no valid data.
994			 */
995			if ((bp->b_flags & B_ASYNC) == 0 && !m->valid && m->hold_count == 0) {
996				vm_page_busy(m);
997				vm_page_protect(m, VM_PROT_NONE);
998				vm_page_free(m);
999			}
1000		}
1001	}
1002	bufspace -= bp->b_bufsize;
1003	vmiospace -= bp->b_bufsize;
1004	runningbufspace -= bp->b_bufsize;
1005	splx(s);
1006	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
1007	if (bp->b_bufsize)
1008		bufspacewakeup();
1009	bp->b_npages = 0;
1010	bp->b_bufsize = 0;
1011	bp->b_flags &= ~B_VMIO;
1012	if (bp->b_vp)
1013		brelvp(bp);
1014}
1015
1016/*
1017 * Check to see if a block is currently memory resident.
1018 */
1019struct buf *
1020gbincore(struct vnode * vp, daddr_t blkno)
1021{
1022	struct buf *bp;
1023	struct bufhashhdr *bh;
1024
1025	bh = BUFHASH(vp, blkno);
1026	bp = bh->lh_first;
1027
1028	/* Search hash chain */
1029	while (bp != NULL) {
1030		/* hit */
1031		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
1032		    (bp->b_flags & B_INVAL) == 0) {
1033			break;
1034		}
1035		bp = bp->b_hash.le_next;
1036	}
1037	return (bp);
1038}
1039
1040/*
1041 * this routine implements clustered async writes for
1042 * clearing out B_DELWRI buffers...  This is much better
1043 * than the old way of writing only one buffer at a time.
1044 */
1045int
1046vfs_bio_awrite(struct buf * bp)
1047{
1048	int i;
1049	daddr_t lblkno = bp->b_lblkno;
1050	struct vnode *vp = bp->b_vp;
1051	int s;
1052	int ncl;
1053	struct buf *bpa;
1054	int nwritten;
1055	int size;
1056	int maxcl;
1057
1058	s = splbio();
1059	/*
1060	 * right now we support clustered writing only to regular files, and
1061	 * then only if our I/O system is not saturated.
1062	 */
1063	if ((vp->v_type == VREG) &&
1064	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
1065	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
1066
1067		size = vp->v_mount->mnt_stat.f_iosize;
1068		maxcl = MAXPHYS / size;
1069
1070		for (i = 1; i < maxcl; i++) {
1071			if ((bpa = gbincore(vp, lblkno + i)) &&
1072			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
1073			    (B_DELWRI | B_CLUSTEROK)) &&
1074			    (bpa->b_bufsize == size)) {
1075				if ((bpa->b_blkno == bpa->b_lblkno) ||
1076				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
1077					break;
1078			} else {
1079				break;
1080			}
1081		}
1082		ncl = i;
1083		/*
1084		 * this is a possible cluster write
1085		 */
1086		if (ncl != 1) {
1087			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
1088			splx(s);
1089			return nwritten;
1090		}
1091	}
1092
1093	bremfree(bp);
1094	bp->b_flags |= B_BUSY | B_ASYNC;
1095
1096	splx(s);
1097	/*
1098	 * default (old) behavior, writing out only one block
1099	 */
1100	nwritten = bp->b_bufsize;
1101	(void) VOP_BWRITE(bp);
1102
1103	return nwritten;
1104}
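
/*
 * vfs_bio_awrite() usage sketch: the dirty-buffer flushing paths call
 * it on a B_DELWRI buffer that is still sitting on a queue; it does
 * the bremfree() itself and either clusters neighbouring dirty blocks
 * or falls back to a single asynchronous VOP_BWRITE():
 *
 *	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI)
 *		(void) vfs_bio_awrite(bp);
 */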
1105
1106/*
1107 *	getnewbuf:
1108 *
1109 *	Find and initialize a new buffer header, freeing up existing buffers
1110 *	in the bufqueues as necessary.
1111 *
1112 *	We block if:
1113 *		We have insufficient buffer headers
1114 *		We have insufficient buffer space
1115 *		buffer_map is too fragmented ( space reservation fails )
1116 *
1117 *	We do *not* attempt to flush dirty buffers more than one level deep.
1118 *	I.e., if P_FLSINPROG is set we do not flush dirty buffers at all.
1119 *
1120 *	If P_FLSINPROG is set, we are allowed to dip into our emergency
1121 *	reserve.
1122 */
1123static struct buf *
1124getnewbuf(struct vnode *vp, daddr_t blkno,
1125	int slpflag, int slptimeo, int size, int maxsize)
1126{
1127	struct buf *bp;
1128	struct buf *nbp;
1129	struct buf *dbp;
1130	int outofspace;
1131	int nqindex;
1132	int defrag = 0;
1133
1134restart:
1135	/*
1136	 * Calculate whether we are out of buffer space.  This state is
1137	 * recalculated on every restart.  If we are out of space, we
1138	 * have to turn off defragmentation.  The outofspace code will
1139	 * defragment too, but the looping conditionals will be messed up
1140	 * if both outofspace and defrag are on.
1141	 */
1142
1143	dbp = NULL;
1144	outofspace = 0;
1145	if (bufspace >= hibufspace) {
1146		if ((curproc->p_flag & P_FLSINPROG) == 0 ||
1147		    bufspace >= maxbufspace
1148		) {
1149			outofspace = 1;
1150			defrag = 0;
1151		}
1152	}
1153
1154	/*
1155	 * defrag state is semi-persistent.  1 means we are flagged for
1156	 * defragging.  -1 means we actually defragged something.
1157	 */
1158	/* nop */
1159
1160	/*
1161	 * Setup for scan.  If we do not have enough free buffers,
1162	 * we set up a degenerate case that falls through the while.
1163	 *
1164	 * If we are in the middle of a flush, we can dip into the
1165	 * emergency reserve.
1166	 *
1167	 * If we are out of space, we skip trying to scan QUEUE_EMPTY
1168	 * because those buffers are, well, empty.
1169	 */
1170
1171	if ((curproc->p_flag & P_FLSINPROG) == 0 &&
1172	    numfreebuffers < lofreebuffers
1173	) {
1174		nqindex = QUEUE_LRU;
1175		nbp = NULL;
1176	} else {
1177		nqindex = QUEUE_EMPTY;
1178		if (outofspace ||
1179		    (nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY])) == NULL
1180		) {
1181			nqindex = QUEUE_AGE;
1182			nbp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
1183			if (nbp == NULL) {
1184				nqindex = QUEUE_LRU;
1185				nbp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);
1186			}
1187		}
1188	}
1189
1190	/*
1191	 * Run scan, possibly freeing data and/or kva mappings on the fly
1192	 * depending.
1193	 */
1194
1195	while ((bp = nbp) != NULL) {
1196		int qindex = nqindex;
1197		/*
1198		 * Calculate next bp ( we can only use it if we do not block
1199		 * or do other fancy things ).
1200		 */
1201		if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) {
1202			switch(qindex) {
1203			case QUEUE_EMPTY:
1204				nqindex = QUEUE_AGE;
1205				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_AGE])))
1206					break;
1207				/* fall through */
1208			case QUEUE_AGE:
1209				nqindex = QUEUE_LRU;
1210				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_LRU])))
1211					break;
1212				/* fall through */
1213			case QUEUE_LRU:
1214				/*
1215				 * nbp is NULL.
1216				 */
1217				break;
1218			}
1219		}
1220
1221		/*
1222		 * Sanity Checks
1223		 */
1224		KASSERT(!(bp->b_flags & B_BUSY), ("getnewbuf: busy buffer %p on free list", bp));
1225		KASSERT(bp->b_qindex == qindex, ("getnewbuf: inconsistent queue %d bp %p", qindex, bp));
1226
1227		/*
1228		 * Here we try to move NON VMIO buffers to the end of the
1229		 * LRU queue in order to make VMIO buffers more readily
1230		 * freeable.  We also try to move buffers with a positive
1231		 * usecount to the end.
1232		 *
1233		 * Note that by moving the bp to the end, we set up a following
1234		 * loop.  Since we continue to decrement b_usecount this
1235		 * is ok and, in fact, desirable.
1236		 *
1237		 * If we are at the end of the list, we move ourself to the
1238		 * same place and need to fixup nbp and nqindex to handle
1239		 * the following case.
1240		 */
1241
1242		if ((qindex == QUEUE_LRU) && bp->b_usecount > 0) {
1243			if ((bp->b_flags & B_VMIO) == 0 ||
1244			    (vmiospace < maxvmiobufspace)
1245			) {
1246				--bp->b_usecount;
1247				TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
1248				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
1249				if (nbp == NULL) {
1250					nqindex = qindex;
1251					nbp = bp;
1252				}
1253				continue;
1254			}
1255		}
1256
1257		/*
1258		 * If we come across a delayed write and numdirtybuffers should
1259		 * be flushed, try to write it out.  Only if P_FLSINPROG is
1260		 * not set.  We can't afford to recursively stack more than
1261		 * one deep due to the possibility of having deep VFS call
1262		 * stacks.
1263		 *
1264		 * Limit the number of dirty buffers we are willing to try
1265		 * to recover since it really isn't our job here.
1266		 */
1267		if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
1268			/*
1269			 * This is rather complex, but necessary.  If we come
1270			 * across a B_DELWRI buffer we have to flush it in
1271			 * order to use it.  We only do this if we absolutely
1272			 * need to.  We must also protect against too much
1273			 * recursion which might run us out of stack due to
1274			 * deep VFS call stacks.
1275			 *
1276			 * In heavy-writing situations, QUEUE_LRU can contain
1277			 * a large number of DELWRI buffers at its head.  These
1278			 * buffers must be moved to the tail if they cannot be
1279			 * written async in order to reduce the scanning time
1280			 * required to skip past these buffers in later
1281			 * getnewbuf() calls.
1282			 */
1283			if ((curproc->p_flag & P_FLSINPROG) ||
1284			    numdirtybuffers < hidirtybuffers
1285			) {
1286				if (qindex == QUEUE_LRU) {
1287					/*
1288					 * dbp prevents us from looping forever
1289					 * if all bps in QUEUE_LRU are dirty.
1290					 */
1291					if (bp == dbp) {
1292						bp = NULL;
1293						break;
1294					}
1295					if (dbp == NULL)
1296						dbp = TAILQ_LAST(&bufqueues[QUEUE_LRU], bqueues);
1297					TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
1298					TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
1299				}
1300				continue;
1301			}
1302			curproc->p_flag |= P_FLSINPROG;
1303			vfs_bio_awrite(bp);
1304			curproc->p_flag &= ~P_FLSINPROG;
1305			goto restart;
1306		}
1307
1308		if (defrag > 0 && bp->b_kvasize == 0)
1309			continue;
1310		if (outofspace > 0 && bp->b_bufsize == 0)
1311			continue;
1312
1313		/*
1314		 * Start freeing the bp.  This is somewhat involved.  nbp
1315		 * remains valid only for QUEUE_EMPTY bp's.
1316		 */
1317
1318		bremfree(bp);
1319		bp->b_flags |= B_BUSY;
1320
1321		if (qindex == QUEUE_LRU || qindex == QUEUE_AGE) {
1322			if (bp->b_flags & B_VMIO) {
1323				bp->b_flags &= ~B_ASYNC;
1324				vfs_vmio_release(bp);
1325			}
1326			if (bp->b_vp)
1327				brelvp(bp);
1328		}
1329
1330		if (bp->b_flags & B_WANTED) {
1331			bp->b_flags &= ~B_WANTED;
1332			wakeup(bp);
1333		}
1334
1335		/*
1336		 * NOTE:  nbp is now entirely invalid.  We can only restart
1337		 * the scan from this point on.
1338		 *
1339		 * Get the rest of the buffer freed up.  b_kva* is still
1340		 * valid after this operation.
1341		 */
1342
1343		if (bp->b_rcred != NOCRED) {
1344			crfree(bp->b_rcred);
1345			bp->b_rcred = NOCRED;
1346		}
1347		if (bp->b_wcred != NOCRED) {
1348			crfree(bp->b_wcred);
1349			bp->b_wcred = NOCRED;
1350		}
1351		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
1352			(*bioops.io_deallocate)(bp);
1353
1354		LIST_REMOVE(bp, b_hash);
1355		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
1356
1357		if (bp->b_bufsize)
1358			allocbuf(bp, 0);
1359
1360		bp->b_flags = B_BUSY;
1361		bp->b_dev = NODEV;
1362		bp->b_vp = NULL;
1363		bp->b_blkno = bp->b_lblkno = 0;
1364		bp->b_offset = NOOFFSET;
1365		bp->b_iodone = 0;
1366		bp->b_error = 0;
1367		bp->b_resid = 0;
1368		bp->b_bcount = 0;
1369		bp->b_npages = 0;
1370		bp->b_dirtyoff = bp->b_dirtyend = 0;
1371		bp->b_validoff = bp->b_validend = 0;
1372		bp->b_usecount = 5;
1373
1374		LIST_INIT(&bp->b_dep);
1375
1376		/*
1377		 * Ok, now that we have a free buffer, if we are defragging
1378		 * we have to recover the kvaspace.
1379		 */
1380
1381		if (defrag > 0) {
1382			defrag = -1;
1383			bp->b_flags |= B_INVAL;
1384			bfreekva(bp);
1385			brelse(bp);
1386			goto restart;
1387		}
1388
1389		if (outofspace > 0) {
1390			outofspace = -1;
1391			bp->b_flags |= B_INVAL;
1392			bfreekva(bp);
1393			brelse(bp);
1394			goto restart;
1395		}
1396
1397		/*
1398		 * We are done
1399		 */
1400		break;
1401	}
1402
1403	/*
1404	 * If we exhausted our list, sleep as appropriate.
1405	 */
1406
1407	if (bp == NULL) {
1408		int flags;
1409
1410dosleep:
1411		if (defrag > 0)
1412			flags = VFS_BIO_NEED_KVASPACE;
1413		else if (outofspace > 0)
1414			flags = VFS_BIO_NEED_BUFSPACE;
1415		else
1416			flags = VFS_BIO_NEED_ANY;
1417
1418		if (rushjob < syncdelay / 2)
1419			++rushjob;
1420		needsbuffer |= flags;
1421		while (needsbuffer & flags) {
1422			if (tsleep(&needsbuffer, (PRIBIO + 4) | slpflag,
1423			    "newbuf", slptimeo))
1424				return (NULL);
1425		}
1426	} else {
1427		/*
1428		 * We finally have a valid bp.  We aren't quite out of the
1429		 * woods, we still have to reserve kva space.
1430		 */
1431		vm_offset_t addr = 0;
1432
1433		maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK;
1434
1435		if (maxsize != bp->b_kvasize) {
1436			bfreekva(bp);
1437
1438			if (vm_map_findspace(buffer_map,
1439				vm_map_min(buffer_map), maxsize, &addr)
1440			) {
1441				/*
1442				 * Uh oh.  Buffer map is too fragmented.  Try
1443				 * to defragment.
1444				 */
1445				if (defrag <= 0) {
1446					defrag = 1;
1447					bp->b_flags |= B_INVAL;
1448					brelse(bp);
1449					goto restart;
1450				}
1451				/*
1452				 * Uh oh.  We couldn't seem to defragment
1453				 */
1454				bp = NULL;
1455				goto dosleep;
1456			}
1457		}
1458		if (addr) {
1459			vm_map_insert(buffer_map, NULL, 0,
1460				addr, addr + maxsize,
1461				VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
1462
1463			bp->b_kvabase = (caddr_t) addr;
1464			bp->b_kvasize = maxsize;
1465		}
1466		bp->b_data = bp->b_kvabase;
1467	}
1468
1469	return (bp);
1470}
1471
1472/*
1473 *	waitfreebuffers:
1474 *
1475 *	Wait for sufficient free buffers.  This routine is not called if
1476 *	curproc is the update process so we do not have to do anything
1477 *	fancy.
1478 */
1479
1480static void
1481waitfreebuffers(int slpflag, int slptimeo)
1482{
1483	while (numfreebuffers < hifreebuffers) {
1484		flushdirtybuffers(slpflag, slptimeo);
1485		if (numfreebuffers < hifreebuffers)
1486			break;
1487		needsbuffer |= VFS_BIO_NEED_FREE;
1488		if (tsleep(&needsbuffer, (PRIBIO + 4)|slpflag, "biofre", slptimeo))
1489			break;
1490	}
1491}
1492
1493/*
1494 *	flushdirtybuffers:
1495 *
1496 *	This routine is called when we get too many dirty buffers.
1497 *
1498 *	We have to protect ourselves from recursion, but we also do not want
1499 *	other processes' flushdirtybuffers() to interfere with the syncer if
1500 *	it decides to flushdirtybuffers().
1501 *
1502 *	In order to maximize operations, we allow any process to flush
1503 *	dirty buffers and use P_FLSINPROG to prevent recursion.
1504 */
1505
1506static void
1507flushdirtybuffers(int slpflag, int slptimeo)
1508{
1509	int s;
1510
1511	s = splbio();
1512
1513	if (curproc->p_flag & P_FLSINPROG) {
1514		splx(s);
1515		return;
1516	}
1517	curproc->p_flag |= P_FLSINPROG;
1518
1519	while (numdirtybuffers > lodirtybuffers) {
1520		if (flushbufqueues() == 0)
1521			break;
1522	}
1523
1524	curproc->p_flag &= ~P_FLSINPROG;
1525
1526	splx(s);
1527}
1528
1529static int
1530flushbufqueues(void)
1531{
1532	struct buf *bp;
1533	int qindex;
1534	int r = 0;
1535
1536	qindex = QUEUE_AGE;
1537	bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
1538
1539	for (;;) {
1540		if (bp == NULL) {
1541			if (qindex == QUEUE_LRU)
1542				break;
1543			qindex = QUEUE_LRU;
1544			if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU])) == NULL)
1545				break;
1546		}
1547
1548		/*
1549		 * XXX NFS does weird things with B_INVAL bps if we bwrite
1550		 * them ( vfs_bio_awrite/bawrite/bdwrite/etc )  Why?
1551		 *
1552		 */
1553		if ((bp->b_flags & B_DELWRI) != 0) {
1554			if (bp->b_flags & B_INVAL) {
1555				bremfree(bp);
1556				bp->b_flags |= B_BUSY;
1557				brelse(bp);
1558			} else {
1559				vfs_bio_awrite(bp);
1560			}
1561			++r;
1562			break;
1563		}
1564		bp = TAILQ_NEXT(bp, b_freelist);
1565	}
1566	return(r);
1567}
1568
1569/*
1570 * Check to see if a block is currently memory resident.
1571 */
1572struct buf *
1573incore(struct vnode * vp, daddr_t blkno)
1574{
1575	struct buf *bp;
1576
1577	int s = splbio();
1578	bp = gbincore(vp, blkno);
1579	splx(s);
1580	return (bp);
1581}
1582
1583/*
1584 * Returns true if no I/O is needed to access the
1585 * associated VM object.  This is like incore except
1586 * it also hunts around in the VM system for the data.
1587 */
1588
1589int
1590inmem(struct vnode * vp, daddr_t blkno)
1591{
1592	vm_object_t obj;
1593	vm_offset_t toff, tinc, size;
1594	vm_page_t m;
1595	vm_ooffset_t off;
1596
1597	if (incore(vp, blkno))
1598		return 1;
1599	if (vp->v_mount == NULL)
1600		return 0;
1601	if ((vp->v_object == NULL) || (vp->v_flag & VOBJBUF) == 0)
1602		return 0;
1603
1604	obj = vp->v_object;
1605	size = PAGE_SIZE;
1606	if (size > vp->v_mount->mnt_stat.f_iosize)
1607		size = vp->v_mount->mnt_stat.f_iosize;
1608	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
1609
1610	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
1611		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
1612		if (!m)
1613			return 0;
1614		tinc = size;
1615		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
1616			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
1617		if (vm_page_is_valid(m,
1618		    (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
1619			return 0;
1620	}
1621	return 1;
1622}
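
/*
 * incore()/inmem() usage sketch: these are only hints, since the
 * returned buffer is not busied, but they let read-ahead and cluster
 * code avoid queueing I/O for data that is already resident:
 *
 *	if (!inmem(vp, lbn))
 *		... schedule a read or read-ahead for lbn ...
 */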
1623
1624/*
1625 * now we set the dirty range for the buffer --
1626 * for NFS -- if the file is mapped and pages have
1627 * been written to, let it know.  We want the
1628 * entire range of the buffer to be marked dirty if
1629 * any of the pages have been written to for consistency
1630 * with the b_validoff, b_validend set in the nfs write
1631 * code, and used by the nfs read code.
1632 */
1633static void
1634vfs_setdirty(struct buf *bp)
1635{
1636	int i;
1637	vm_object_t object;
1638	vm_offset_t boffset;
1639
1640	/*
1641	 * We qualify the scan for modified pages on whether the
1642	 * object has been flushed yet.  The OBJ_WRITEABLE flag
1643	 * is not cleared simply by protecting pages off.
1644	 */
1645
1646	if ((bp->b_flags & B_VMIO) == 0)
1647		return;
1648
1649	object = bp->b_pages[0]->object;
1650
1651	if ((object->flags & OBJ_WRITEABLE) && !(object->flags & OBJ_MIGHTBEDIRTY))
1652		printf("Warning: object %p writeable but not mightbedirty\n", object);
1653	if (!(object->flags & OBJ_WRITEABLE) && (object->flags & OBJ_MIGHTBEDIRTY))
1654		printf("Warning: object %p mightbedirty but not writeable\n", object);
1655
1656	if (object->flags & (OBJ_MIGHTBEDIRTY|OBJ_CLEANING)) {
1657		/*
1658		 * test the pages to see if they have been modified directly
1659		 * by users through the VM system.
1660		 */
1661		for (i = 0; i < bp->b_npages; i++) {
1662			vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
1663			vm_page_test_dirty(bp->b_pages[i]);
1664		}
1665
1666		/*
1667		 * scan forwards for the first page modified
1668		 */
1669		for (i = 0; i < bp->b_npages; i++) {
1670			if (bp->b_pages[i]->dirty) {
1671				break;
1672			}
1673		}
1674
1675		boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
1676		if (boffset < bp->b_dirtyoff) {
1677			bp->b_dirtyoff = max(boffset, 0);
1678		}
1679
1680		/*
1681		 * scan backwards for the last page modified
1682		 */
1683		for (i = bp->b_npages - 1; i >= 0; --i) {
1684			if (bp->b_pages[i]->dirty) {
1685				break;
1686			}
1687		}
1688		boffset = (i + 1);
1689#if 0
1690		offset = boffset + bp->b_pages[0]->pindex;
1691		if (offset >= object->size)
1692			boffset = object->size - bp->b_pages[0]->pindex;
1693#endif
1694		boffset = (boffset << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
1695		if (bp->b_dirtyend < boffset)
1696			bp->b_dirtyend = min(boffset, bp->b_bufsize);
1697	}
1698}
1699
1700/*
1701 * Get a block given a specified block and offset into a file/device.
1702 */
1703struct buf *
1704getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
1705{
1706	struct buf *bp;
1707	int i, s;
1708	struct bufhashhdr *bh;
1709
1710#if !defined(MAX_PERF)
1711	if (size > MAXBSIZE)
1712		panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
1713#endif
1714
1715	s = splbio();
1716loop:
1717	/*
1718	 * Block if we are low on buffers.  The syncer is allowed more
1719	 * buffers in order to avoid a deadlock.
1720	 */
1721	if (curproc == updateproc && numfreebuffers == 0) {
1722		needsbuffer |= VFS_BIO_NEED_ANY;
1723		tsleep(&needsbuffer, (PRIBIO + 4) | slpflag, "newbuf",
1724		    slptimeo);
1725	} else if (curproc != updateproc && numfreebuffers < lofreebuffers) {
1726		waitfreebuffers(slpflag, slptimeo);
1727	}
1728
1729	if ((bp = gbincore(vp, blkno))) {
1730		if (bp->b_flags & B_BUSY) {
1731			bp->b_flags |= B_WANTED;
1732			if (bp->b_usecount < BUF_MAXUSE)
1733				++bp->b_usecount;
1734
1735			if (!tsleep(bp,
1736				(PRIBIO + 4) | slpflag, "getblk", slptimeo)) {
1737				goto loop;
1738			}
1739
1740			splx(s);
1741			return (struct buf *) NULL;
1742		}
1743		bp->b_flags |= B_BUSY | B_CACHE;
1744		bremfree(bp);
1745
1746		/*
1747		 * check for size inconsistencies for the non-VMIO case.
1748		 */
1749
1750		if (bp->b_bcount != size) {
1751			if ((bp->b_flags & B_VMIO) == 0 ||
1752			    (size > bp->b_kvasize)
1753			) {
1754				if (bp->b_flags & B_DELWRI) {
1755					bp->b_flags |= B_NOCACHE;
1756					VOP_BWRITE(bp);
1757				} else {
1758					if ((bp->b_flags & B_VMIO) &&
1759					   (LIST_FIRST(&bp->b_dep) == NULL)) {
1760						bp->b_flags |= B_RELBUF;
1761						brelse(bp);
1762					} else {
1763						bp->b_flags |= B_NOCACHE;
1764						VOP_BWRITE(bp);
1765					}
1766				}
1767				goto loop;
1768			}
1769		}
1770
1771		/*
1772		 * If the size is inconsistent in the VMIO case, we can resize
1773		 * the buffer.  This might lead to B_CACHE getting cleared.
1774		 */
1775
1776		if (bp->b_bcount != size)
1777			allocbuf(bp, size);
1778
1779		KASSERT(bp->b_offset != NOOFFSET,
1780		    ("getblk: no buffer offset"));
1781
1782		/*
1783		 * Check that the constituted buffer really deserves to have the
1784		 * B_CACHE bit set.  B_VMIO type buffers might not
1785		 * contain fully valid pages.  Normal (old-style) buffers
1786		 * should be fully valid.  This might also lead to B_CACHE
1787		 * getting cleared.
1788		 *
1789		 * If B_CACHE is already clear, don't bother checking to see
1790		 * if we have to clear it again.
1791		 *
1792		 * XXX this code should not be necessary unless the B_CACHE
1793		 * handling is broken elsewhere in the kernel.  We need to
1794		 * check the cases and then turn the clearing part of this
1795		 * code into a panic.
1796		 */
1797		if (
1798		    (bp->b_flags & (B_VMIO|B_CACHE)) == (B_VMIO|B_CACHE) &&
1799		    (bp->b_vp->v_tag != VT_NFS || bp->b_validend <= 0)
1800		) {
1801			int checksize = bp->b_bufsize;
1802			int poffset = bp->b_offset & PAGE_MASK;
1803			int resid;
1804			for (i = 0; i < bp->b_npages; i++) {
1805				resid = (checksize > (PAGE_SIZE - poffset)) ?
1806					(PAGE_SIZE - poffset) : checksize;
1807				if (!vm_page_is_valid(bp->b_pages[i], poffset, resid)) {
1808					bp->b_flags &= ~(B_CACHE | B_DONE);
1809					break;
1810				}
1811				checksize -= resid;
1812				poffset = 0;
1813			}
1814		}
1815
1816		/*
1817		 * If B_DELWRI is set and B_CACHE got cleared ( or was
1818		 * already clear ), we have to commit the write and
1819		 * retry.  The NFS code absolutely depends on this,
1820		 * and so might the FFS code.  In any case, it formalizes
1821		 * the B_CACHE rules.  See sys/buf.h.
1822		 */
1823
1824		if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
1825			VOP_BWRITE(bp);
1826			goto loop;
1827		}
1828
1829		if (bp->b_usecount < BUF_MAXUSE)
1830			++bp->b_usecount;
1831		splx(s);
1832		return (bp);
1833	} else {
1834		int bsize, maxsize, vmio;
1835		off_t offset;
1836
1837		if (vp->v_type == VBLK)
1838			bsize = DEV_BSIZE;
1839		else if (vp->v_mountedhere)
1840			bsize = vp->v_mountedhere->mnt_stat.f_iosize;
1841		else if (vp->v_mount)
1842			bsize = vp->v_mount->mnt_stat.f_iosize;
1843		else
1844			bsize = size;
1845
1846		offset = (off_t)blkno * bsize;
1847		vmio = (vp->v_object != 0) && (vp->v_flag & VOBJBUF);
1848		maxsize = vmio ? size + (offset & PAGE_MASK) : size;
1849		maxsize = imax(maxsize, bsize);
1850
1851		if ((bp = getnewbuf(vp, blkno,
1852			slpflag, slptimeo, size, maxsize)) == 0) {
1853			if (slpflag || slptimeo) {
1854				splx(s);
1855				return NULL;
1856			}
1857			goto loop;
1858		}
1859
1860		/*
1861		 * This code is used to make sure that a buffer is not
1862		 * created while the getnewbuf routine is blocked.
1863		 * This can be a problem whether the vnode is locked or not.
1864		 */
1865		if (gbincore(vp, blkno)) {
1866			bp->b_flags |= B_INVAL;
1867			brelse(bp);
1868			goto loop;
1869		}
1870
1871		/*
1872		 * Insert the buffer into the hash, so that it can
1873		 * be found by incore.
1874		 */
1875		bp->b_blkno = bp->b_lblkno = blkno;
1876		bp->b_offset = offset;
1877
1878		bgetvp(vp, bp);
1879		LIST_REMOVE(bp, b_hash);
1880		bh = BUFHASH(vp, blkno);
1881		LIST_INSERT_HEAD(bh, bp, b_hash);
1882
1883		if (vmio) {
1884			bp->b_flags |= (B_VMIO | B_CACHE);
1885#if defined(VFS_BIO_DEBUG)
1886			if (vp->v_type != VREG && vp->v_type != VBLK)
1887				printf("getblk: vmioing file type %d???\n", vp->v_type);
1888#endif
1889		} else {
1890			bp->b_flags &= ~B_VMIO;
1891		}
1892
1893		allocbuf(bp, size);
1894
1895		splx(s);
1896		return (bp);
1897	}
1898}
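
/*
 * A common metadata update pattern built on getblk() (a sketch with
 * assumed names, not code from this file):
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	if ((bp->b_flags & B_CACHE) == 0) {
 *		... read the block in, or bzero(bp->b_data, bsize)
 *		    for a newly allocated block ...
 *	}
 *	... modify bp->b_data ...
 *	bdwrite(bp);		(or bwrite(bp) for a synchronous update)
 */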
1899
1900/*
1901 * Get an empty, disassociated buffer of given size.
1902 */
1903struct buf *
1904geteblk(int size)
1905{
1906	struct buf *bp;
1907	int s;
1908
1909	s = splbio();
1910	while ((bp = getnewbuf(0, (daddr_t) 0, 0, 0, size, MAXBSIZE)) == 0);
1911	splx(s);
1912	allocbuf(bp, size);
1913	bp->b_flags |= B_INVAL; /* b_dep cleared by getnewbuf() */
1914	return (bp);
1915}
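
/*
 * geteblk() sketch: callers use it for a temporary buffer that is not
 * associated with any vnode (e.g. scratch space for raw transfers) and
 * release it with brelse(), which frees it immediately since B_INVAL
 * is set:
 *
 *	bp = geteblk(size);
 *	... use bp->b_data as scratch memory ...
 *	brelse(bp);
 */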
1916
1917
1918/*
1919 * This code constitutes the buffer memory from either anonymous system
1920 * memory (in the case of non-VMIO operations) or from an associated
1921 * VM object (in the case of VMIO operations).  This code is able to
1922 * resize a buffer up or down.
1923 *
1924 * Note that this code is tricky, and has many complications to resolve
1925 * deadlock or inconsistent data situations.  Tread lightly!!!
1926 * There are B_CACHE and B_DELWRI interactions that must be dealt with by
1927 * the caller.  Calling this code willy nilly can result in the loss of data.
1928 */
1929
1930int
1931allocbuf(struct buf *bp, int size)
1932{
1933	int newbsize, mbsize;
1934	int i;
1935
1936#if !defined(MAX_PERF)
1937	if (!(bp->b_flags & B_BUSY))
1938		panic("allocbuf: buffer not busy");
1939
1940	if (bp->b_kvasize < size)
1941		panic("allocbuf: buffer too small");
1942#endif
1943
1944	if ((bp->b_flags & B_VMIO) == 0) {
1945		caddr_t origbuf;
1946		int origbufsize;
1947		/*
1948		 * Just get anonymous memory from the kernel
1949		 */
1950		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
1951#if !defined(NO_B_MALLOC)
1952		if (bp->b_flags & B_MALLOC)
1953			newbsize = mbsize;
1954		else
1955#endif
1956			newbsize = round_page(size);
1957
1958		if (newbsize < bp->b_bufsize) {
1959#if !defined(NO_B_MALLOC)
1960			/*
1961			 * malloced buffers are not shrunk
1962			 */
1963			if (bp->b_flags & B_MALLOC) {
1964				if (newbsize) {
1965					bp->b_bcount = size;
1966				} else {
1967					free(bp->b_data, M_BIOBUF);
1968					bufspace -= bp->b_bufsize;
1969					bufmallocspace -= bp->b_bufsize;
1970					runningbufspace -= bp->b_bufsize;
1971					if (bp->b_bufsize)
1972						bufspacewakeup();
1973					bp->b_data = bp->b_kvabase;
1974					bp->b_bufsize = 0;
1975					bp->b_bcount = 0;
1976					bp->b_flags &= ~B_MALLOC;
1977				}
1978				return 1;
1979			}
1980#endif
1981			vm_hold_free_pages(
1982			    bp,
1983			    (vm_offset_t) bp->b_data + newbsize,
1984			    (vm_offset_t) bp->b_data + bp->b_bufsize);
1985		} else if (newbsize > bp->b_bufsize) {
1986#if !defined(NO_B_MALLOC)
1987			/*
1988			 * We only use malloced memory on the first allocation,
1989			 * and revert to page-allocated memory when the buffer grows.
1990			 */
1991			if ( (bufmallocspace < maxbufmallocspace) &&
1992				(bp->b_bufsize == 0) &&
1993				(mbsize <= PAGE_SIZE/2)) {
1994
1995				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
1996				bp->b_bufsize = mbsize;
1997				bp->b_bcount = size;
1998				bp->b_flags |= B_MALLOC;
1999				bufspace += mbsize;
2000				bufmallocspace += mbsize;
2001				runningbufspace += bp->b_bufsize;
2002				return 1;
2003			}
2004#endif
2005			origbuf = NULL;
2006			origbufsize = 0;
2007#if !defined(NO_B_MALLOC)
2008			/*
			 * If the buffer is growing on an allocation other than its
			 * first, revert to the page-allocation scheme.
2011			 */
2012			if (bp->b_flags & B_MALLOC) {
2013				origbuf = bp->b_data;
2014				origbufsize = bp->b_bufsize;
2015				bp->b_data = bp->b_kvabase;
2016				bufspace -= bp->b_bufsize;
2017				bufmallocspace -= bp->b_bufsize;
2018				runningbufspace -= bp->b_bufsize;
2019				if (bp->b_bufsize)
2020					bufspacewakeup();
2021				bp->b_bufsize = 0;
2022				bp->b_flags &= ~B_MALLOC;
2023				newbsize = round_page(newbsize);
2024			}
2025#endif
2026			vm_hold_load_pages(
2027			    bp,
2028			    (vm_offset_t) bp->b_data + bp->b_bufsize,
2029			    (vm_offset_t) bp->b_data + newbsize);
2030#if !defined(NO_B_MALLOC)
2031			if (origbuf) {
2032				bcopy(origbuf, bp->b_data, origbufsize);
2033				free(origbuf, M_BIOBUF);
2034			}
2035#endif
2036		}
2037	} else {
2038		vm_page_t m;
2039		int desiredpages;
2040
2041		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
2042		desiredpages = (size == 0) ? 0 :
2043			num_pages((bp->b_offset & PAGE_MASK) + newbsize);
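		/*
		 * The buffer need not start on a page boundary within its VM
		 * object, so the page count covers the leading fragment
		 * (b_offset & PAGE_MASK) as well as newbsize bytes.
		 */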
2044
2045#if !defined(NO_B_MALLOC)
2046		if (bp->b_flags & B_MALLOC)
2047			panic("allocbuf: VMIO buffer can't be malloced");
2048#endif
2049
2050		if (newbsize < bp->b_bufsize) {
2051			if (desiredpages < bp->b_npages) {
2052				for (i = desiredpages; i < bp->b_npages; i++) {
2053					/*
					 * The page is not freed here -- that is
					 * the responsibility of vnode_pager_setsize().
2056					 */
2057					m = bp->b_pages[i];
2058					KASSERT(m != bogus_page,
2059					    ("allocbuf: bogus page found"));
2060					while (vm_page_sleep_busy(m, TRUE, "biodep"))
2061						;
2062
2063					bp->b_pages[i] = NULL;
2064					vm_page_unwire(m, 0);
2065				}
2066				pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) +
2067				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
2068				bp->b_npages = desiredpages;
2069			}
2070		} else if (newbsize > bp->b_bufsize) {
2071			vm_object_t obj;
2072			vm_offset_t tinc, toff;
2073			vm_ooffset_t off;
2074			vm_pindex_t objoff;
2075			int pageindex, curbpnpages;
2076			struct vnode *vp;
2077			int bsize;
2078			int orig_validoff = bp->b_validoff;
2079			int orig_validend = bp->b_validend;
2080
2081			vp = bp->b_vp;
2082
2083			if (vp->v_type == VBLK)
2084				bsize = DEV_BSIZE;
2085			else
2086				bsize = vp->v_mount->mnt_stat.f_iosize;
2087
2088			if (bp->b_npages < desiredpages) {
2089				obj = vp->v_object;
2090				tinc = PAGE_SIZE;
2091
2092				off = bp->b_offset;
2093				KASSERT(bp->b_offset != NOOFFSET,
2094				    ("allocbuf: no buffer offset"));
2095				curbpnpages = bp->b_npages;
2096		doretry:
2097				bp->b_validoff = orig_validoff;
2098				bp->b_validend = orig_validend;
2099				bp->b_flags |= B_CACHE;
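				/*
				 * B_CACHE is set optimistically here; it is
				 * cleared again below if any page has to be
				 * freshly allocated, or if vfs_buf_set_valid()
				 * finds the cached range incomplete, so that
				 * the caller will read the data.
				 */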
2100				for (toff = 0; toff < newbsize; toff += tinc) {
2101					objoff = OFF_TO_IDX(off + toff);
2102					pageindex = objoff - OFF_TO_IDX(off);
2103					tinc = PAGE_SIZE - ((off + toff) & PAGE_MASK);
2104					if (pageindex < curbpnpages) {
2105
2106						m = bp->b_pages[pageindex];
2107#ifdef VFS_BIO_DIAG
2108						if (m->pindex != objoff)
2109							panic("allocbuf: page changed offset?!!!?");
2110#endif
2111						if (tinc > (newbsize - toff))
2112							tinc = newbsize - toff;
2113						if (bp->b_flags & B_CACHE)
2114							vfs_buf_set_valid(bp, off, toff, tinc, m);
2115						continue;
2116					}
2117					m = vm_page_lookup(obj, objoff);
2118					if (!m) {
2119						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
2120						if (!m) {
2121							VM_WAIT;
2122							vm_pageout_deficit += (desiredpages - curbpnpages);
2123							goto doretry;
2124						}
2125
2126						vm_page_wire(m);
2127						vm_page_wakeup(m);
2128						bp->b_flags &= ~B_CACHE;
2129
2130					} else if (vm_page_sleep_busy(m, FALSE, "pgtblk")) {
2131						/*
2132						 *  If we had to sleep, retry.
2133						 *
2134						 *  Also note that we only test
2135						 *  PG_BUSY here, not m->busy.
2136						 *
2137						 *  We cannot sleep on m->busy
2138						 *  here because a vm_fault ->
2139						 *  getpages -> cluster-read ->
2140						 *  ...-> allocbuf sequence
2141						 *  will convert PG_BUSY to
2142						 *  m->busy so we have to let
2143						 *  m->busy through if we do
2144						 *  not want to deadlock.
2145						 */
2146						goto doretry;
2147					} else {
2148						if ((curproc != pageproc) &&
2149							((m->queue - m->pc) == PQ_CACHE) &&
2150						    ((cnt.v_free_count + cnt.v_cache_count) <
2151								(cnt.v_free_min + cnt.v_cache_min))) {
2152							pagedaemon_wakeup();
2153						}
2154						if (tinc > (newbsize - toff))
2155							tinc = newbsize - toff;
2156						if (bp->b_flags & B_CACHE)
2157							vfs_buf_set_valid(bp, off, toff, tinc, m);
2158						vm_page_flag_clear(m, PG_ZERO);
2159						vm_page_wire(m);
2160					}
2161					bp->b_pages[pageindex] = m;
2162					curbpnpages = pageindex + 1;
2163				}
2164				if (vp->v_tag == VT_NFS &&
2165				    vp->v_type != VBLK) {
2166					if (bp->b_dirtyend > 0) {
2167						bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
2168						bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
2169					}
2170					if (bp->b_validend == 0)
2171						bp->b_flags &= ~B_CACHE;
2172				}
2173				bp->b_data = (caddr_t) trunc_page((vm_offset_t)bp->b_data);
2174				bp->b_npages = curbpnpages;
2175				pmap_qenter((vm_offset_t) bp->b_data,
2176					bp->b_pages, bp->b_npages);
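				/*
				 * Re-bias b_data so that it points at the
				 * buffer's starting offset within the first
				 * mapped page (the cast-as-lvalue below is a
				 * gcc extension).
				 */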
2177				((vm_offset_t) bp->b_data) |= off & PAGE_MASK;
2178			}
2179		}
2180	}
2181	if (bp->b_flags & B_VMIO)
2182		vmiospace += (newbsize - bp->b_bufsize);
2183	bufspace += (newbsize - bp->b_bufsize);
2184	runningbufspace += (newbsize - bp->b_bufsize);
2185	if (newbsize < bp->b_bufsize)
2186		bufspacewakeup();
2187	bp->b_bufsize = newbsize;
2188	bp->b_bcount = size;
2189	return 1;
2190}
2191
2192/*
2193 * Wait for buffer I/O completion, returning error status.
2194 */
2195int
2196biowait(register struct buf * bp)
2197{
2198	int s;
2199
2200	s = splbio();
2201	while ((bp->b_flags & B_DONE) == 0)
2202#if defined(NO_SCHEDULE_MODS)
2203		tsleep(bp, PRIBIO, "biowait", 0);
2204#else
2205		if (bp->b_flags & B_READ)
2206			tsleep(bp, PRIBIO, "biord", 0);
2207		else
2208			tsleep(bp, PRIBIO, "biowr", 0);
2209#endif
2210	splx(s);
2211	if (bp->b_flags & B_EINTR) {
2212		bp->b_flags &= ~B_EINTR;
2213		return (EINTR);
2214	}
2215	if (bp->b_flags & B_ERROR) {
2216		return (bp->b_error ? bp->b_error : EIO);
2217	} else {
2218		return (0);
2219	}
2220}
2221
2222/*
2223 * Finish I/O on a buffer, calling an optional function.
2224 * This is usually called from interrupt level, so process blocking
2225 * is not *a good idea*.
2226 */
2227void
2228biodone(register struct buf * bp)
2229{
2230	int s;
2231
2232	s = splbio();
2233
2234	KASSERT((bp->b_flags & B_BUSY), ("biodone: bp %p not busy", bp));
2235	KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
2236
2237	bp->b_flags |= B_DONE;
2238
2239	if (bp->b_flags & B_FREEBUF) {
2240		brelse(bp);
2241		splx(s);
2242		return;
2243	}
2244
2245	if ((bp->b_flags & B_READ) == 0) {
2246		vwakeup(bp);
2247	}
2248
2249	/* call optional completion function if requested */
2250	if (bp->b_flags & B_CALL) {
2251		bp->b_flags &= ~B_CALL;
2252		(*bp->b_iodone) (bp);
2253		splx(s);
2254		return;
2255	}
2256	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
2257		(*bioops.io_complete)(bp);
2258
2259	if (bp->b_flags & B_VMIO) {
2260		int i, resid;
2261		vm_ooffset_t foff;
2262		vm_page_t m;
2263		vm_object_t obj;
2264		int iosize;
2265		struct vnode *vp = bp->b_vp;
2266
2267		obj = vp->v_object;
2268
2269#if defined(VFS_BIO_DEBUG)
2270		if (vp->v_usecount == 0) {
2271			panic("biodone: zero vnode ref count");
2272		}
2273
2274		if (vp->v_object == NULL) {
2275			panic("biodone: missing VM object");
2276		}
2277
2278		if ((vp->v_flag & VOBJBUF) == 0) {
2279			panic("biodone: vnode is not setup for merged cache");
2280		}
2281#endif
2282
2283		foff = bp->b_offset;
2284		KASSERT(bp->b_offset != NOOFFSET,
2285		    ("biodone: no buffer offset"));
2286
2287#if !defined(MAX_PERF)
2288		if (!obj) {
2289			panic("biodone: no object");
2290		}
2291#endif
2292#if defined(VFS_BIO_DEBUG)
2293		if (obj->paging_in_progress < bp->b_npages) {
2294			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
2295			    obj->paging_in_progress, bp->b_npages);
2296		}
2297#endif
2298		iosize = bp->b_bufsize;
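		/*
		 * Walk the buffer's pages, crediting each page with the
		 * portion of the transfer (resid) that fell within it and
		 * finishing the per-page busy and paging-in-progress
		 * accounting started before the I/O.
		 */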
2299		for (i = 0; i < bp->b_npages; i++) {
2300			int bogusflag = 0;
2301			m = bp->b_pages[i];
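			/*
			 * vfs_busy_pages() may have substituted bogus_page for
			 * a page that was already fully valid; look up the
			 * real page and restore the mapping.
			 */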
2302			if (m == bogus_page) {
2303				bogusflag = 1;
2304				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
2305				if (!m) {
2306#if defined(VFS_BIO_DEBUG)
2307					printf("biodone: page disappeared\n");
2308#endif
2309					vm_object_pip_subtract(obj, 1);
2310					continue;
2311				}
2312				bp->b_pages[i] = m;
2313				pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
2314			}
2315#if defined(VFS_BIO_DEBUG)
2316			if (OFF_TO_IDX(foff) != m->pindex) {
2317				printf(
2318"biodone: foff(%lu)/m->pindex(%d) mismatch\n",
2319				    (unsigned long)foff, m->pindex);
2320			}
2321#endif
2322			resid = IDX_TO_OFF(m->pindex + 1) - foff;
2323			if (resid > iosize)
2324				resid = iosize;
2325
2326			/*
2327			 * In the write case, the valid and clean bits are
2328			 * already changed correctly, so we only need to do this
2329			 * here in the read case.
2330			 */
2331			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
2332				vfs_page_set_valid(bp, foff, i, m);
2333			}
2334			vm_page_flag_clear(m, PG_ZERO);
2335
2336			/*
			 * When debugging new filesystems or buffer I/O methods, this
			 * is the most common error that pops up.  If you see it, you
			 * have not set the page busy flag correctly!
2340			 */
2341			if (m->busy == 0) {
2342#if !defined(MAX_PERF)
				printf("biodone: page is not busy, "
2344				    "pindex: %d, foff: 0x(%x,%x), "
2345				    "resid: %d, index: %d\n",
2346				    (int) m->pindex, (int)(foff >> 32),
2347						(int) foff & 0xffffffff, resid, i);
2348#endif
2349				if (vp->v_type != VBLK)
2350#if !defined(MAX_PERF)
2351					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
2352					    bp->b_vp->v_mount->mnt_stat.f_iosize,
2353					    (int) bp->b_lblkno,
2354					    bp->b_flags, bp->b_npages);
2355				else
2356					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
2357					    (int) bp->b_lblkno,
2358					    bp->b_flags, bp->b_npages);
2359				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
2360				    m->valid, m->dirty, m->wire_count);
2361#endif
				panic("biodone: page not busy");
2363			}
2364			vm_page_io_finish(m);
2365			vm_object_pip_subtract(obj, 1);
2366			foff += resid;
2367			iosize -= resid;
2368		}
2369		if (obj)
2370			vm_object_pip_wakeupn(obj, 0);
2371	}
2372	/*
	 * For asynchronous completions, release the buffer now.  brelse()
	 * checks for B_WANTED and will do the wakeup there if necessary,
	 * so there is no need to do a wakeup here in the async case.
2376	 */
2377
2378	if (bp->b_flags & B_ASYNC) {
2379		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
2380			brelse(bp);
2381		else
2382			bqrelse(bp);
2383	} else {
2384		bp->b_flags &= ~B_WANTED;
2385		wakeup(bp);
2386	}
2387	splx(s);
2388}
2389
2390#if 0	/* not with kirks code */
2391static int vfs_update_interval = 30;
2392
2393static void
2394vfs_update()
2395{
2396	while (1) {
2397		tsleep(&vfs_update_wakeup, PUSER, "update",
2398		    hz * vfs_update_interval);
2399		vfs_update_wakeup = 0;
2400		sync(curproc, NULL);
2401	}
2402}
2403
2404static int
2405sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
2406{
2407	int error = sysctl_handle_int(oidp,
2408		oidp->oid_arg1, oidp->oid_arg2, req);
2409	if (!error)
2410		wakeup(&vfs_update_wakeup);
2411	return error;
2412}
2413
2414SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
2415	&vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");
2416
2417#endif
2418
2419
2420/*
 * This routine is called in lieu of biodone() in the case of
 * incomplete I/O.  It keeps the busy status of the pages
 * consistent.
2424 */
2425void
2426vfs_unbusy_pages(struct buf * bp)
2427{
2428	int i;
2429
2430	if (bp->b_flags & B_VMIO) {
2431		struct vnode *vp = bp->b_vp;
2432		vm_object_t obj = vp->v_object;
2433
2434		for (i = 0; i < bp->b_npages; i++) {
2435			vm_page_t m = bp->b_pages[i];
2436
2437			if (m == bogus_page) {
2438				m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
2439#if !defined(MAX_PERF)
2440				if (!m) {
2441					panic("vfs_unbusy_pages: page missing\n");
2442				}
2443#endif
2444				bp->b_pages[i] = m;
2445				pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
2446			}
2447			vm_object_pip_subtract(obj, 1);
2448			vm_page_flag_clear(m, PG_ZERO);
2449			vm_page_io_finish(m);
2450		}
2451		vm_object_pip_wakeupn(obj, 0);
2452	}
2453}
2454
2455/*
2456 * Set NFS' b_validoff and b_validend fields from the valid bits
2457 * of a page.  If the consumer is not NFS, and the page is not
2458 * valid for the entire range, clear the B_CACHE flag to force
2459 * the consumer to re-read the page.
2460 *
2461 * B_CACHE interaction is especially tricky.
2462 */
2463static void
2464vfs_buf_set_valid(struct buf *bp,
2465		  vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
2466		  vm_page_t m)
2467{
2468	if (bp->b_vp->v_tag == VT_NFS && bp->b_vp->v_type != VBLK) {
2469		vm_offset_t svalid, evalid;
2470		int validbits = m->valid >> (((foff+off)&PAGE_MASK)/DEV_BSIZE);
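		/*
		 * Each bit of m->valid covers one DEV_BSIZE chunk of the page;
		 * the shift above makes bit 0 correspond to the chunk at
		 * offset 'off' within the buffer.
		 */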
2471
2472		/*
2473		 * This only bothers with the first valid range in the
2474		 * page.
2475		 */
2476		svalid = off;
2477		while (validbits && !(validbits & 1)) {
2478			svalid += DEV_BSIZE;
2479			validbits >>= 1;
2480		}
2481		evalid = svalid;
2482		while (validbits & 1) {
2483			evalid += DEV_BSIZE;
2484			validbits >>= 1;
2485		}
2486		evalid = min(evalid, off + size);
2487		/*
2488		 * We can only set b_validoff/end if this range is contiguous
2489		 * with the range built up already.  If we cannot set
2490		 * b_validoff/end, we must clear B_CACHE to force an update
2491		 * to clean the bp up.
2492		 */
2493		if (svalid == bp->b_validend) {
2494			bp->b_validoff = min(bp->b_validoff, svalid);
2495			bp->b_validend = max(bp->b_validend, evalid);
2496		} else {
2497			bp->b_flags &= ~B_CACHE;
2498		}
2499	} else if (!vm_page_is_valid(m,
2500				     (vm_offset_t) ((foff + off) & PAGE_MASK),
2501				     size)) {
2502		bp->b_flags &= ~B_CACHE;
2503	}
2504}
2505
2506/*
2507 * Set the valid bits in a page, taking care of the b_validoff,
2508 * b_validend fields which NFS uses to optimise small reads.  Off is
2509 * the offset within the file and pageno is the page index within the buf.
2510 *
2511 * XXX we have to set the valid & clean bits for all page fragments
2512 * touched by b_validoff/validend, even if the page fragment goes somewhat
2513 * beyond b_validoff/validend due to alignment.
2514 */
2515static void
2516vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
2517{
2518	struct vnode *vp = bp->b_vp;
2519	vm_ooffset_t soff, eoff;
2520
2521	/*
2522	 * Start and end offsets in buffer.  eoff - soff may not cross a
	 * page boundary or cross the end of the buffer.
2524	 */
2525	soff = off;
2526	eoff = (off + PAGE_SIZE) & ~PAGE_MASK;
2527	if (eoff > bp->b_offset + bp->b_bufsize)
2528		eoff = bp->b_offset + bp->b_bufsize;
2529
2530	if (vp->v_tag == VT_NFS && vp->v_type != VBLK) {
2531		vm_ooffset_t sv, ev;
2532		vm_page_set_invalid(m,
2533		    (vm_offset_t) (soff & PAGE_MASK),
2534		    (vm_offset_t) (eoff - soff));
2535		/*
2536		 * bp->b_validoff and bp->b_validend restrict the valid range
2537		 * that we can set.  Note that these offsets are not DEV_BSIZE
2538		 * aligned.  vm_page_set_validclean() must know what
2539		 * sub-DEV_BSIZE ranges to clear.
2540		 */
2541#if 0
2542		sv = (bp->b_offset + bp->b_validoff + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
2543		ev = (bp->b_offset + bp->b_validend + (DEV_BSIZE - 1)) &
2544		    ~(DEV_BSIZE - 1);
2545#endif
2546		sv = bp->b_offset + bp->b_validoff;
2547		ev = bp->b_offset + bp->b_validend;
2548		soff = qmax(sv, soff);
2549		eoff = qmin(ev, eoff);
2550	}
2551
2552	if (eoff > soff)
2553		vm_page_set_validclean(m,
2554	       (vm_offset_t) (soff & PAGE_MASK),
2555	       (vm_offset_t) (eoff - soff));
2556}
2557
2558/*
2559 * This routine is called before a device strategy routine.
2560 * It is used to tell the VM system that paging I/O is in
 * progress, and to treat the pages associated with the buffer
 * almost as if they were PG_BUSY.  The object's paging_in_progress
 * count is also maintained so that the object does not become
 * inconsistent.
2565 */
2566void
2567vfs_busy_pages(struct buf * bp, int clear_modify)
2568{
2569	int i, bogus;
2570
2571	if (bp->b_flags & B_VMIO) {
2572		struct vnode *vp = bp->b_vp;
2573		vm_object_t obj = vp->v_object;
2574		vm_ooffset_t foff;
2575
2576		foff = bp->b_offset;
2577		KASSERT(bp->b_offset != NOOFFSET,
2578		    ("vfs_busy_pages: no buffer offset"));
2579		vfs_setdirty(bp);
2580
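		/*
		 * First pass: do not start I/O while any of the buffer's
		 * pages is busy; if we had to sleep, recheck them all.
		 */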
2581retry:
2582		for (i = 0; i < bp->b_npages; i++) {
2583			vm_page_t m = bp->b_pages[i];
2584			if (vm_page_sleep_busy(m, FALSE, "vbpage"))
2585				goto retry;
2586		}
2587
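		/*
		 * Second pass: mark each page as having I/O in progress.
		 * When clear_modify is not set (the read case), pages that
		 * are already completely valid are replaced by bogus_page so
		 * that the transfer cannot overwrite their contents;
		 * biodone() swaps the real pages back in.
		 */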
2588		bogus = 0;
2589		for (i = 0; i < bp->b_npages; i++) {
2590			vm_page_t m = bp->b_pages[i];
2591
2592			vm_page_flag_clear(m, PG_ZERO);
2593			if ((bp->b_flags & B_CLUSTER) == 0) {
2594				vm_object_pip_add(obj, 1);
2595				vm_page_io_start(m);
2596			}
2597
2598			vm_page_protect(m, VM_PROT_NONE);
2599			if (clear_modify)
2600				vfs_page_set_valid(bp, foff, i, m);
2601			else if (m->valid == VM_PAGE_BITS_ALL &&
2602				(bp->b_flags & B_CACHE) == 0) {
2603				bp->b_pages[i] = bogus_page;
2604				bogus++;
2605			}
2606			foff = (foff + PAGE_SIZE) & ~PAGE_MASK;
2607		}
2608		if (bogus)
2609			pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
2610	}
2611}
2612
2613/*
2614 * Tell the VM system that the pages associated with this buffer
2615 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
2617 */
2618void
2619vfs_clean_pages(struct buf * bp)
2620{
2621	int i;
2622
2623	if (bp->b_flags & B_VMIO) {
2624		vm_ooffset_t foff;
2625		foff = bp->b_offset;
2626		KASSERT(bp->b_offset != NOOFFSET,
2627		    ("vfs_clean_pages: no buffer offset"));
2628		for (i = 0; i < bp->b_npages; i++) {
2629			vm_page_t m = bp->b_pages[i];
2630			vfs_page_set_valid(bp, foff, i, m);
2631			foff = (foff + PAGE_SIZE) & ~PAGE_MASK;
2632		}
2633	}
2634}
2635
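/*
 * Zero the portions of a VMIO buffer's pages that are not already valid
 * and mark them valid, skipping pages the VM system has pre-zeroed
 * (PG_ZERO) and leaving valid cached data untouched.  Non-VMIO and
 * malloced buffers are simply cleared with clrbuf().
 */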
2636void
2637vfs_bio_clrbuf(struct buf *bp) {
2638	int i, mask = 0;
2639	caddr_t sa, ea;
2640	if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) {
2641		if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
2642		    (bp->b_offset & PAGE_MASK) == 0) {
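			/*
			 * One valid bit per DEV_BSIZE chunk that the buffer
			 * occupies; e.g. with DEV_BSIZE == 512, a 2048-byte
			 * buffer yields a mask of 0x0f.
			 */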
2643			mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
2644			if (((bp->b_pages[0]->flags & PG_ZERO) == 0) &&
2645			    ((bp->b_pages[0]->valid & mask) != mask)) {
2646				bzero(bp->b_data, bp->b_bufsize);
2647			}
2648			bp->b_pages[0]->valid |= mask;
2649			bp->b_resid = 0;
2650			return;
2651		}
2652		ea = sa = bp->b_data;
2653		for(i=0;i<bp->b_npages;i++,sa=ea) {
2654			int j = ((u_long)sa & PAGE_MASK) / DEV_BSIZE;
2655			ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE);
2656			ea = (caddr_t)ulmin((u_long)ea,
2657				(u_long)bp->b_data + bp->b_bufsize);
2658			mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
2659			if ((bp->b_pages[i]->valid & mask) == mask)
2660				continue;
2661			if ((bp->b_pages[i]->valid & mask) == 0) {
2662				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
2663					bzero(sa, ea - sa);
2664				}
2665			} else {
2666				for (; sa < ea; sa += DEV_BSIZE, j++) {
2667					if (((bp->b_pages[i]->flags & PG_ZERO) == 0) &&
2668						(bp->b_pages[i]->valid & (1<<j)) == 0)
2669						bzero(sa, DEV_BSIZE);
2670				}
2671			}
2672			bp->b_pages[i]->valid |= mask;
2673			vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
2674		}
2675		bp->b_resid = 0;
2676	} else {
2677		clrbuf(bp);
2678	}
2679}
2680
2681/*
 * vm_hold_load_pages() and vm_hold_free_pages() get pages into and out of
 * a buffer's address space.  The pages are anonymous and are
 * not associated with a file object.
2685 */
2686void
2687vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
2688{
2689	vm_offset_t pg;
2690	vm_page_t p;
2691	int index;
2692
2693	to = round_page(to);
2694	from = round_page(from);
2695	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
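	/*
	 * index is the b_pages[] slot corresponding to the first page of the
	 * [from, to) range within the buffer's KVA.
	 */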
2696
2697	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
2698
2699tryagain:
2700
2701		p = vm_page_alloc(kernel_object,
2702			((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
2703		    VM_ALLOC_NORMAL);
2704		if (!p) {
2705			vm_pageout_deficit += (to - from) >> PAGE_SHIFT;
2706			VM_WAIT;
2707			goto tryagain;
2708		}
2709		vm_page_wire(p);
2710		p->valid = VM_PAGE_BITS_ALL;
2711		vm_page_flag_clear(p, PG_ZERO);
2712		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
2713		bp->b_pages[index] = p;
2714		vm_page_wakeup(p);
2715	}
2716	bp->b_npages = index;
2717}
2718
2719void
2720vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
2721{
2722	vm_offset_t pg;
2723	vm_page_t p;
2724	int index, newnpages;
2725
2726	from = round_page(from);
2727	to = round_page(to);
2728	newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
2729
2730	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
2731		p = bp->b_pages[index];
2732		if (p && (index < bp->b_npages)) {
2733#if !defined(MAX_PERF)
2734			if (p->busy) {
2735				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
2736					bp->b_blkno, bp->b_lblkno);
2737			}
2738#endif
2739			bp->b_pages[index] = NULL;
2740			pmap_kremove(pg);
2741			vm_page_busy(p);
2742			vm_page_unwire(p, 0);
2743			vm_page_free(p);
2744		}
2745	}
2746	bp->b_npages = newnpages;
2747}
2748
2749
2750#include "opt_ddb.h"
2751#ifdef DDB
2752#include <ddb/ddb.h>
2753
2754DB_SHOW_COMMAND(buffer, db_show_buffer)
2755{
2756	/* get args */
2757	struct buf *bp = (struct buf *)addr;
2758
2759	if (!have_addr) {
2760		db_printf("usage: show buffer <addr>\n");
2761		return;
2762	}
2763
2764	db_printf("b_proc = %p,\nb_flags = 0x%b\n", (void *)bp->b_proc,
2765		  (u_int)bp->b_flags, PRINT_BUF_FLAGS);
2766	db_printf("b_error = %d, b_bufsize = %ld, b_bcount = %ld, "
2767		  "b_resid = %ld\nb_dev = 0x%x, b_data = %p, "
2768		  "b_blkno = %d, b_pblkno = %d\n",
2769		  bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
2770		  bp->b_dev, bp->b_data, bp->b_blkno, bp->b_pblkno);
2771	if (bp->b_npages) {
2772		int i;
2773		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
2774		for (i = 0; i < bp->b_npages; i++) {
2775			vm_page_t m;
2776			m = bp->b_pages[i];
2777			db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
2778			    (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
2779			if ((i + 1) < bp->b_npages)
2780				db_printf(",");
2781		}
2782		db_printf("\n");
2783	}
2784}
2785#endif /* DDB */
2786