vfs_bio.c revision 9708
1/*
2 * Copyright (c) 1994 John S. Dyson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice immediately at the beginning of the file, without modification,
10 *    this list of conditions, and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 * 3. Absolutely no warranty of function or purpose is made by the author
15 *    John S. Dyson.
16 * 4. This work was done expressly for inclusion into FreeBSD.  Other use
17 *    is allowed if this notation is included.
18 * 5. Modifications may be freely made to this file if the above conditions
19 *    are met.
20 *
21 * $Id: vfs_bio.c,v 1.54 1995/07/25 05:03:06 davidg Exp $
22 */
23
24/*
25 * This file contains a new buffer I/O scheme implementing a coherent
26 * VM object and buffer cache.  Pains have been taken to make sure
27 * that the performance degradation associated with such schemes
28 * is not realized.
29 *
30 * Author:  John S. Dyson
31 * Significant help during the development and debugging phases
32 * was provided by David Greenman, also of the FreeBSD core team.
33 */
34
35#define VMIO
36#include <sys/param.h>
37#include <sys/systm.h>
38#include <sys/kernel.h>
39#include <sys/proc.h>
40#include <sys/vnode.h>
41#include <vm/vm.h>
42#include <vm/vm_kern.h>
43#include <vm/vm_pageout.h>
44#include <vm/vm_page.h>
45#include <vm/vm_object.h>
46#include <sys/buf.h>
47#include <sys/mount.h>
48#include <sys/malloc.h>
49#include <sys/resourcevar.h>
50#include <sys/proc.h>
51
52#include <miscfs/specfs/specdev.h>
53
54struct buf *buf;		/* buffer header pool */
55int nbuf;			/* number of buffer headers calculated
56				 * elsewhere */
57struct swqueue bswlist;
58
59void vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to);
60void vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to);
61void vfs_clean_pages(struct buf * bp);
62static void vfs_setdirty(struct buf *bp);
63
64int needsbuffer;
65
66/*
67 * Internal update daemon, process 3
68 *	The variable vfs_update_wakeup allows for internal syncs.
69 */
70int vfs_update_wakeup;
71
72
73/*
74 * buffers base kva
75 */
76caddr_t buffers_kva;
77
78/*
79 * bogus page -- for I/O to/from partially complete buffers.
80 * This is a temporary solution to the problem, but it is not
81 * really that bad.  It would be better to split the buffer
82 * for input in the case of buffers already partially in memory,
83 * but the code is intricate enough already.
84 */
85vm_page_t bogus_page;
86vm_offset_t bogus_offset;
87
88int bufspace, maxbufspace;
89
90/*
91 * advisory minimum for size of LRU queue or VMIO queue
92 */
93int minbuf;
94
95/*
96 * Initialize buffer headers and related structures.
97 */
98void
99bufinit()
100{
101	struct buf *bp;
102	int i;
103
104	TAILQ_INIT(&bswlist);
105	LIST_INIT(&invalhash);
106
107	/* first, make a null hash table */
108	for (i = 0; i < BUFHSZ; i++)
109		LIST_INIT(&bufhashtbl[i]);
110
111	/* next, make a null set of free lists */
112	for (i = 0; i < BUFFER_QUEUES; i++)
113		TAILQ_INIT(&bufqueues[i]);
114
115	buffers_kva = (caddr_t) kmem_alloc_pageable(buffer_map, MAXBSIZE * nbuf);
116	/* finally, initialize each buffer header and stick on empty q */
117	for (i = 0; i < nbuf; i++) {
118		bp = &buf[i];
119		bzero(bp, sizeof *bp);
120		bp->b_flags = B_INVAL;	/* we're just an empty header */
121		bp->b_dev = NODEV;
122		bp->b_rcred = NOCRED;
123		bp->b_wcred = NOCRED;
124		bp->b_qindex = QUEUE_EMPTY;
125		bp->b_vnbufs.le_next = NOLIST;
126		bp->b_data = buffers_kva + i * MAXBSIZE;
127		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
128		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
129	}
130/*
131 * maxbufspace is currently calculated on the assumption that all filesystem
132 * blocks are 8K.  If you happen to use a 16K filesystem, the size of the
133 * buffer cache is still the same as it would be for 8K filesystems.  This
134 * keeps the size of the buffer cache "in check" for big-block filesystems.
135 */
136	minbuf = nbuf / 3;
137	maxbufspace = 2 * (nbuf + 8) * PAGE_SIZE;
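
/*
 * Illustrative arithmetic (assuming, for the sake of example, nbuf = 1024
 * and a 4K PAGE_SIZE):
 *
 *	maxbufspace = 2 * (1024 + 8) * 4096 = 8454144 bytes (about 8MB),
 *
 * i.e. a little over 8K of backing space per buffer header, matching the
 * 8K filesystem-block assumption described in the comment above.
 */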
138
139	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
140	bogus_page = vm_page_alloc(kernel_object,
141			bogus_offset - VM_MIN_KERNEL_ADDRESS, VM_ALLOC_NORMAL);
142
143}
144
145/*
146 * remove the buffer from the appropriate free list
147 */
148void
149bremfree(struct buf * bp)
150{
151	int s = splbio();
152
153	if (bp->b_qindex != QUEUE_NONE) {
154		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
155		bp->b_qindex = QUEUE_NONE;
156	} else {
157		panic("bremfree: removing a buffer when not on a queue");
158	}
159	splx(s);
160}
161
162/*
163 * Get a buffer with the specified data.  Look in the cache first.
164 */
165int
166bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
167    struct buf ** bpp)
168{
169	struct buf *bp;
170
171	bp = getblk(vp, blkno, size, 0, 0);
172	*bpp = bp;
173
174	/* if not found in cache, do some I/O */
175	if ((bp->b_flags & B_CACHE) == 0) {
176		if (curproc != NULL)
177			curproc->p_stats->p_ru.ru_inblock++;
178		bp->b_flags |= B_READ;
179		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
180		if (bp->b_rcred == NOCRED) {
181			if (cred != NOCRED)
182				crhold(cred);
183			bp->b_rcred = cred;
184		}
185		vfs_busy_pages(bp, 0);
186		VOP_STRATEGY(bp);
187		return (biowait(bp));
188	}
189	return (0);
190}
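
/*
 * Typical usage sketch (illustrative only; vp, lbn and bsize stand for a
 * caller's vnode, logical block number and filesystem block size):
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... examine bp->b_data ...
 *	brelse(bp);
 *
 * On a cache hit (B_CACHE set) no I/O is issued; otherwise the read is
 * done synchronously and biowait() supplies the error status.
 */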
191
192/*
193 * Operates like bread, but also starts asynchronous I/O on
194 * read-ahead blocks.
195 */
196int
197breadn(struct vnode * vp, daddr_t blkno, int size,
198    daddr_t * rablkno, int *rabsize,
199    int cnt, struct ucred * cred, struct buf ** bpp)
200{
201	struct buf *bp, *rabp;
202	int i;
203	int rv = 0, readwait = 0;
204
205	*bpp = bp = getblk(vp, blkno, size, 0, 0);
206
207	/* if not found in cache, do some I/O */
208	if ((bp->b_flags & B_CACHE) == 0) {
209		if (curproc != NULL)
210			curproc->p_stats->p_ru.ru_inblock++;
211		bp->b_flags |= B_READ;
212		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
213		if (bp->b_rcred == NOCRED) {
214			if (cred != NOCRED)
215				crhold(cred);
216			bp->b_rcred = cred;
217		}
218		vfs_busy_pages(bp, 0);
219		VOP_STRATEGY(bp);
220		++readwait;
221	}
222	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
223		if (inmem(vp, *rablkno))
224			continue;
225		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);
226
227		if ((rabp->b_flags & B_CACHE) == 0) {
228			if (curproc != NULL)
229				curproc->p_stats->p_ru.ru_inblock++;
230			rabp->b_flags |= B_READ | B_ASYNC;
231			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
232			if (rabp->b_rcred == NOCRED) {
233				if (cred != NOCRED)
234					crhold(cred);
235				rabp->b_rcred = cred;
236			}
237			vfs_busy_pages(rabp, 0);
238			VOP_STRATEGY(rabp);
239		} else {
240			brelse(rabp);
241		}
242	}
243
244	if (readwait) {
245		rv = biowait(bp);
246	}
247	return (rv);
248}
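
/*
 * Read-ahead usage sketch (illustrative only; the caller-side names are
 * assumed): a filesystem that knows the next logical block can have it
 * read asynchronously while waiting only for the block it needs now.
 *
 *	daddr_t rablk = lbn + 1;
 *	int rasize = bsize;
 *
 *	error = breadn(vp, lbn, bsize, &rablk, &rasize, 1, NOCRED, &bp);
 *
 * The read-ahead buffer is started with B_ASYNC and released by biodone(),
 * so the caller never touches it directly.
 */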
249
250/*
251 * Write, release buffer on completion.  (Done by iodone
252 * if async.)
253 */
254int
255bwrite(struct buf * bp)
256{
257	int oldflags = bp->b_flags;
258
259	if (bp->b_flags & B_INVAL) {
260		brelse(bp);
261		return (0);
262	}
263	if (!(bp->b_flags & B_BUSY))
264		panic("bwrite: buffer is not busy???");
265
266	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
267	bp->b_flags |= B_WRITEINPROG;
268
269	if ((oldflags & (B_ASYNC|B_DELWRI)) == (B_ASYNC|B_DELWRI)) {
270		reassignbuf(bp, bp->b_vp);
271	}
272
273	bp->b_vp->v_numoutput++;
274	vfs_busy_pages(bp, 1);
275	if (curproc != NULL)
276		curproc->p_stats->p_ru.ru_oublock++;
277	VOP_STRATEGY(bp);
278
279	if ((oldflags & B_ASYNC) == 0) {
280		int rtval = biowait(bp);
281
282		if (oldflags & B_DELWRI) {
283			reassignbuf(bp, bp->b_vp);
284		}
285		brelse(bp);
286		return (rtval);
287	}
288	return (0);
289}
290
291int
292vn_bwrite(ap)
293	struct vop_bwrite_args *ap;
294{
295	return (bwrite(ap->a_bp));
296}
297
298/*
299 * Delayed write. (Buffer is marked dirty).
300 */
301void
302bdwrite(struct buf * bp)
303{
304
305	if ((bp->b_flags & B_BUSY) == 0) {
306		panic("bdwrite: buffer is not busy");
307	}
308	if (bp->b_flags & B_INVAL) {
309		brelse(bp);
310		return;
311	}
312	if (bp->b_flags & B_TAPE) {
313		bawrite(bp);
314		return;
315	}
316	bp->b_flags &= ~(B_READ|B_RELBUF);
317	if ((bp->b_flags & B_DELWRI) == 0) {
318		bp->b_flags |= B_DONE | B_DELWRI;
319		reassignbuf(bp, bp->b_vp);
320	}
321
322	/*
323	 * This bmap keeps the system from needing to do the bmap later,
324	 * perhaps when the system is attempting to do a sync.  Since it
325	 * is likely that the indirect block -- or whatever other data
326	 * structure the filesystem needs -- is still in memory now, it
327	 * is a good thing to do this.  Note also that if the pageout
328	 * daemon is requesting a sync, there might not be enough memory
329	 * to do the bmap then...  So, this is important to do now.
330	 */
331	if( bp->b_lblkno == bp->b_blkno) {
332		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL);
333	}
334
335	/*
336	 * Set the *dirty* buffer range based upon the VM system dirty pages.
337	 */
338	vfs_setdirty(bp);
339
340	/*
341	 * We need to do this here to satisfy the vnode_pager and the
342	 * pageout daemon, so that they think the pages have been
343	 * "cleaned".  Note that since the pages are in a delayed-write
344	 * buffer, the VFS layer "will" see that the pages get written
345	 * out on the next sync, or perhaps the cluster will be completed.
346	 */
347	vfs_clean_pages(bp);
348	brelse(bp);
349	return;
350}
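
/*
 * Delayed-write usage sketch (illustrative only; vp, lbn and bsize are
 * assumed caller context).  A typical metadata update modifies a cached
 * block and lets the update daemon or a later sync push it to disk:
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... modify bp->b_data ...
 *	bdwrite(bp);
 *
 * bdwrite() marks the buffer B_DELWRI and releases it; use bwrite() when
 * the caller must know the write has completed, or bawrite() to start the
 * write immediately without waiting for it.
 */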
351
352/*
353 * Asynchronous write.
354 * Start output on a buffer, but do not wait for it to complete.
355 * The buffer is released when the output completes.
356 */
357void
358bawrite(struct buf * bp)
359{
360	bp->b_flags |= B_ASYNC;
361	(void) VOP_BWRITE(bp);
362}
363
364/*
365 * Release a buffer.
366 */
367void
368brelse(struct buf * bp)
369{
370	int s;
371
372	if (bp->b_flags & B_CLUSTER) {
373		relpbuf(bp);
374		return;
375	}
376	/* anyone need a "free" block? */
377	s = splbio();
378
379	if (needsbuffer) {
380		needsbuffer = 0;
381		wakeup(&needsbuffer);
382	}
383
384	/* anyone need this block? */
385	if (bp->b_flags & B_WANTED) {
386		bp->b_flags &= ~(B_WANTED | B_AGE);
387		wakeup(bp);
388	} else if (bp->b_flags & B_VMIO) {
389		bp->b_flags &= ~B_WANTED;
390		wakeup(bp);
391	}
392	if (bp->b_flags & B_LOCKED)
393		bp->b_flags &= ~B_ERROR;
394
395	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
396	    (bp->b_bufsize <= 0)) {
397		bp->b_flags |= B_INVAL;
398		bp->b_flags &= ~(B_DELWRI | B_CACHE);
399		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp)
400			brelvp(bp);
401	}
402
403	/*
404	 * VMIO buffer rundown.  It is not strictly necessary to keep a VMIO buffer
405	 * constituted, so the B_INVAL flag is used to *invalidate* the buffer,
406	 * but the VM object is kept around.  The B_NOCACHE flag is used to
407	 * invalidate the pages in the VM object.
408	 */
409	if (bp->b_flags & B_VMIO) {
410		vm_offset_t foff;
411		vm_object_t obj;
412		int i, resid;
413		vm_page_t m;
414		int iototal = bp->b_bufsize;
415
416		foff = 0;
417		obj = 0;
418		if (bp->b_npages) {
419			if (bp->b_vp && bp->b_vp->v_mount) {
420				foff = bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
421			} else {
422				/*
423				 * vnode pointer has been ripped away --
424				 * probably file gone...
425				 */
426				foff = bp->b_pages[0]->offset;
427			}
428		}
429		for (i = 0; i < bp->b_npages; i++) {
430			m = bp->b_pages[i];
431			if (m == bogus_page) {
432				m = vm_page_lookup(obj, foff);
433				if (!m) {
434					panic("brelse: page missing\n");
435				}
436				bp->b_pages[i] = m;
437				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
438			}
439			resid = (m->offset + PAGE_SIZE) - foff;
440			if (resid > iototal)
441				resid = iototal;
442			if (resid > 0) {
443				/*
444				 * Don't invalidate the page if the local machine has already
445				 * modified it.  This is the lesser of two evils, and should
446				 * be fixed.
447				 */
448				if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
449					vm_page_test_dirty(m);
450					if (m->dirty == 0) {
451						vm_page_set_invalid(m, foff, resid);
452						if (m->valid == 0)
453							vm_page_protect(m, VM_PROT_NONE);
454					}
455				}
456			}
457			foff += resid;
458			iototal -= resid;
459		}
460
461		if (bp->b_flags & (B_INVAL | B_RELBUF)) {
462			for(i=0;i<bp->b_npages;i++) {
463				m = bp->b_pages[i];
464				--m->bmapped;
465				if (m->bmapped == 0) {
466					if (m->flags & PG_WANTED) {
467						wakeup(m);
468						m->flags &= ~PG_WANTED;
469					}
470					vm_page_test_dirty(m);
471					if ((m->dirty & m->valid) == 0 &&
472						(m->flags & PG_REFERENCED) == 0 &&
473							!pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
474						vm_page_cache(m);
475					} else if ((m->flags & PG_ACTIVE) == 0) {
476						vm_page_activate(m);
477						m->act_count = 0;
478					}
479				}
480			}
481			bufspace -= bp->b_bufsize;
482			pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
483			bp->b_npages = 0;
484			bp->b_bufsize = 0;
485			bp->b_flags &= ~B_VMIO;
486			if (bp->b_vp)
487				brelvp(bp);
488		}
489	}
490	if (bp->b_qindex != QUEUE_NONE)
491		panic("brelse: free buffer onto another queue???");
492
493	/* enqueue */
494	/* buffers with no memory */
495	if (bp->b_bufsize == 0) {
496		bp->b_qindex = QUEUE_EMPTY;
497		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
498		LIST_REMOVE(bp, b_hash);
499		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
500		bp->b_dev = NODEV;
501		/* buffers with junk contents */
502	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
503		bp->b_qindex = QUEUE_AGE;
504		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
505		LIST_REMOVE(bp, b_hash);
506		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
507		bp->b_dev = NODEV;
508		/* buffers that are locked */
509	} else if (bp->b_flags & B_LOCKED) {
510		bp->b_qindex = QUEUE_LOCKED;
511		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
512		/* buffers with stale but valid contents */
513	} else if (bp->b_flags & B_AGE) {
514		bp->b_qindex = QUEUE_AGE;
515		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
516		/* buffers with valid and potentially reusable contents */
517	} else {
518		bp->b_qindex = QUEUE_LRU;
519		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
520	}
521
522	/* unlock */
523	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
524	splx(s);
525}
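
/*
 * Placement summary for the enqueue logic above:
 *
 *	b_bufsize == 0                            -> QUEUE_EMPTY
 *	B_ERROR, B_INVAL, B_NOCACHE or B_RELBUF   -> QUEUE_AGE (head)
 *	B_LOCKED                                  -> QUEUE_LOCKED
 *	B_AGE                                     -> QUEUE_AGE (tail)
 *	otherwise                                 -> QUEUE_LRU
 */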
526
527/*
528 * this routine implements clustered async writes for
529 * clearing out B_DELWRI buffers...  This is much better
530 * than the old way of writing only one buffer at a time.
531 */
532void
533vfs_bio_awrite(struct buf * bp)
534{
535	int i;
536	daddr_t lblkno = bp->b_lblkno;
537	struct vnode *vp = bp->b_vp;
538	int s;
539	int ncl;
540	struct buf *bpa;
541
542	s = splbio();
543	if( vp->v_mount && (vp->v_flag & VVMIO) &&
544	    	(bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
545		int size = vp->v_mount->mnt_stat.f_iosize;
546
547		for (i = 1; i < MAXPHYS / size; i++) {
548			if ((bpa = incore(vp, lblkno + i)) &&
549			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
550			    (B_DELWRI | B_CLUSTEROK)) &&
551			    (bpa->b_bufsize == size)) {
552				if ((bpa->b_blkno == bpa->b_lblkno) ||
553				    (bpa->b_blkno != bp->b_blkno + (i * size) / DEV_BSIZE))
554					break;
555			} else {
556				break;
557			}
558		}
559		ncl = i;
560		/*
561		 * this is a possible cluster write
562		 */
563		if (ncl != 1) {
564			bremfree(bp);
565			cluster_wbuild(vp, bp, size, lblkno, ncl, -1);
566			splx(s);
567			return;
568		}
569	}
570	/*
571	 * default (old) behavior, writing out only one block
572	 */
573	bremfree(bp);
574	bp->b_flags |= B_BUSY | B_ASYNC;
575	(void) VOP_BWRITE(bp);
576	splx(s);
577}
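
/*
 * Worked example of the contiguity test above (assuming an 8K filesystem
 * block, i.e. size = 8192, and DEV_BSIZE = 512): logical block lblkno + i
 * can join the cluster only if
 *
 *	bpa->b_blkno == bp->b_blkno + (i * 8192) / 512
 *		     == bp->b_blkno + i * 16,
 *
 * i.e. each successive filesystem block must sit exactly 16 device blocks
 * further along on disk.
 */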
578
579
580/*
581 * Find a buffer header which is available for use.
582 */
583static struct buf *
584getnewbuf(int slpflag, int slptimeo, int doingvmio)
585{
586	struct buf *bp;
587	int s;
588	int firstbp = 1;
589
590	s = splbio();
591start:
592	if (bufspace >= maxbufspace)
593		goto trytofreespace;
594
595	/* can we constitute a new buffer? */
596	if ((bp = bufqueues[QUEUE_EMPTY].tqh_first)) {
597		if (bp->b_qindex != QUEUE_EMPTY)
598			panic("getnewbuf: inconsistent EMPTY queue");
599		bremfree(bp);
600		goto fillbuf;
601	}
602trytofreespace:
603	/*
604	 * We keep the file I/O from hogging metadata I/O.
605	 * This is desirable because file data is cached in the
606	 * VM/Buffer cache even if a buffer is freed.
607	 */
608	if ((bp = bufqueues[QUEUE_AGE].tqh_first)) {
609		if (bp->b_qindex != QUEUE_AGE)
610			panic("getnewbuf: inconsistent AGE queue");
611	} else if ((bp = bufqueues[QUEUE_LRU].tqh_first)) {
612		if (bp->b_qindex != QUEUE_LRU)
613			panic("getnewbuf: inconsistent LRU queue");
614	}
615	if (!bp) {
616		/* wait for a free buffer of any kind */
617		needsbuffer = 1;
618		tsleep(&needsbuffer, PRIBIO | slpflag, "newbuf", slptimeo);
619		splx(s);
620		return (0);
621	}
622
623	/* if we are a delayed write, convert to an async write */
624	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
625		vfs_bio_awrite(bp);
626		if (!slpflag && !slptimeo) {
627			splx(s);
628			return (0);
629		}
630		goto start;
631	}
632
633	if (bp->b_flags & B_WANTED) {
634		bp->b_flags &= ~B_WANTED;
635		wakeup(bp);
636	}
637	bremfree(bp);
638
639	if (bp->b_flags & B_VMIO) {
640		bp->b_flags |= B_RELBUF | B_BUSY | B_DONE;
641		brelse(bp);
642		bremfree(bp);
643	}
644
645	if (bp->b_vp)
646		brelvp(bp);
647
648	/* we are not free, nor do we contain interesting data */
649	if (bp->b_rcred != NOCRED)
650		crfree(bp->b_rcred);
651	if (bp->b_wcred != NOCRED)
652		crfree(bp->b_wcred);
653fillbuf:
654	bp->b_flags |= B_BUSY;
655	LIST_REMOVE(bp, b_hash);
656	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
657	splx(s);
658	if (bp->b_bufsize) {
659		allocbuf(bp, 0);
660	}
661	bp->b_flags = B_BUSY;
662	bp->b_dev = NODEV;
663	bp->b_vp = NULL;
664	bp->b_blkno = bp->b_lblkno = 0;
665	bp->b_iodone = 0;
666	bp->b_error = 0;
667	bp->b_resid = 0;
668	bp->b_bcount = 0;
669	bp->b_npages = 0;
670	bp->b_wcred = bp->b_rcred = NOCRED;
671	bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
672	bp->b_dirtyoff = bp->b_dirtyend = 0;
673	bp->b_validoff = bp->b_validend = 0;
674	if (bufspace >= maxbufspace) {
675		s = splbio();
676		bp->b_flags |= B_INVAL;
677		brelse(bp);
678		goto trytofreespace;
679	}
680	return (bp);
681}
682
683/*
684 * Check to see if a block is currently memory resident.
685 */
686struct buf *
687incore(struct vnode * vp, daddr_t blkno)
688{
689	struct buf *bp;
690	struct bufhashhdr *bh;
691
692	int s = splbio();
693
694	bh = BUFHASH(vp, blkno);
695	bp = bh->lh_first;
696
697	/* Search hash chain */
698	while (bp) {
699		/* hit */
700		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
701		    (bp->b_flags & B_INVAL) == 0) {
702			splx(s);
703			return (bp);
704		}
705		bp = bp->b_hash.le_next;
706	}
707	splx(s);
708
709	return (0);
710}
711
712/*
713 * Returns true if no I/O is needed to access the
714 * associated VM object.  This is like incore except
715 * it also hunts around in the VM system for the data.
716 */
717
718int
719inmem(struct vnode * vp, daddr_t blkno)
720{
721	vm_object_t obj;
722	vm_offset_t off, toff, tinc;
723	vm_page_t m;
724
725	if (incore(vp, blkno))
726		return 1;
727	if (vp->v_mount == 0)
728		return 0;
729	if ((vp->v_object == 0) || (vp->v_flag & VVMIO) == 0)
730		return 0;
731
732	obj = vp->v_object;
733	tinc = PAGE_SIZE;
734	if (tinc > vp->v_mount->mnt_stat.f_iosize)
735		tinc = vp->v_mount->mnt_stat.f_iosize;
736	off = blkno * vp->v_mount->mnt_stat.f_iosize;
737
738	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
739		int mask;
740
741		m = vm_page_lookup(obj, trunc_page(toff + off));
742		if (!m)
743			return 0;
744		if (vm_page_is_valid(m, toff + off, tinc) == 0)
745			return 0;
746	}
747	return 1;
748}
749
750/*
751 * now we set the dirty range for the buffer --
752 * for NFS -- if the file is mapped and pages have
753 * been written to, let it know.  We want the
754 * entire range of the buffer to be marked dirty if
755 * any of the pages have been written to, for consistency
756 * with the b_validoff, b_validend set in the nfs write
757 * code, and used by the nfs read code.
758 */
759static void
760vfs_setdirty(struct buf *bp) {
761	int i;
762	vm_object_t object;
763	vm_offset_t boffset, offset;
764	/*
765	 * We qualify the scan for modified pages on whether the
766	 * object has been flushed yet.  The OBJ_WRITEABLE flag
767	 * is not cleared simply by protecting pages off.
768	 */
769	if ((bp->b_flags & B_VMIO) &&
770		((object = bp->b_pages[0]->object)->flags & OBJ_WRITEABLE)) {
771		/*
772		 * test the pages to see if they have been modified directly
773		 * by users through the VM system.
774		 */
775		for (i = 0; i < bp->b_npages; i++)
776			vm_page_test_dirty(bp->b_pages[i]);
777
778		/*
779		 * scan forwards for the first page modified
780		 */
781		for (i = 0; i < bp->b_npages; i++) {
782			if (bp->b_pages[i]->dirty) {
783				break;
784			}
785		}
786		boffset = i * PAGE_SIZE;
787		if (boffset < bp->b_dirtyoff) {
788			bp->b_dirtyoff = boffset;
789		}
790
791		/*
792		 * scan backwards for the last page modified
793		 */
794		for (i = bp->b_npages - 1; i >= 0; --i) {
795			if (bp->b_pages[i]->dirty) {
796				break;
797			}
798		}
799		boffset = (i + 1) * PAGE_SIZE;
800		offset = boffset + bp->b_pages[0]->offset;
801		if (offset >= object->size) {
802			boffset = object->size - bp->b_pages[0]->offset;
803		}
804		if (bp->b_dirtyend < boffset) {
805			bp->b_dirtyend = boffset;
806		}
807	}
808}
809
810/*
811 * Get a block given a specified block and offset into a file/device.
812 */
813struct buf *
814getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
815{
816	struct buf *bp;
817	int s;
818	struct bufhashhdr *bh;
819	vm_offset_t off;
820	int nleft;
821
822	s = splbio();
823loop:
824	if (bp = incore(vp, blkno)) {
825		if (bp->b_flags & B_BUSY) {
826			bp->b_flags |= B_WANTED;
827			if (!tsleep(bp, PRIBIO | slpflag, "getblk", slptimeo))
828				goto loop;
829
830			splx(s);
831			return (struct buf *) NULL;
832		}
833		bp->b_flags |= B_BUSY | B_CACHE;
834		bremfree(bp);
835		/*
836		 * check for size inconsistencies
837		 */
838		if (bp->b_bcount != size) {
839			if (bp->b_flags & B_VMIO) {
840				allocbuf(bp, size);
841			} else {
842				bp->b_flags |= B_NOCACHE;
843				VOP_BWRITE(bp);
844				goto loop;
845			}
846		}
847		splx(s);
848		return (bp);
849	} else {
850		vm_object_t obj;
851		int doingvmio;
852
853		if ((obj = vp->v_object) && (vp->v_flag & VVMIO)) {
854			doingvmio = 1;
855		} else {
856			doingvmio = 0;
857		}
858		if ((bp = getnewbuf(slpflag, slptimeo, doingvmio)) == 0) {
859			if (slpflag || slptimeo)
860				return NULL;
861			goto loop;
862		}
863
864		/*
865		 * This code is used to make sure that a buffer is not
866		 * created while the getnewbuf routine is blocked.
867		 * Normally the vnode is locked so this isn't a problem.
868		 * VBLK type I/O requests, however, don't lock the vnode.
869		 */
870		if (!VOP_ISLOCKED(vp) && incore(vp, blkno)) {
871			bp->b_flags |= B_INVAL;
872			brelse(bp);
873			goto loop;
874		}
875
876		/*
877		 * Insert the buffer into the hash, so that it can
878		 * be found by incore.
879		 */
880		bp->b_blkno = bp->b_lblkno = blkno;
881		bgetvp(vp, bp);
882		LIST_REMOVE(bp, b_hash);
883		bh = BUFHASH(vp, blkno);
884		LIST_INSERT_HEAD(bh, bp, b_hash);
885
886		if (doingvmio) {
887			bp->b_flags |= (B_VMIO | B_CACHE);
888#if defined(VFS_BIO_DEBUG)
889			if (vp->v_type != VREG)
890				printf("getblk: vmioing file type %d???\n", vp->v_type);
891#endif
892		} else {
893			bp->b_flags &= ~B_VMIO;
894		}
895		splx(s);
896
897		allocbuf(bp, size);
898		return (bp);
899	}
900}
901
902/*
903 * Get an empty, disassociated buffer of given size.
904 */
905struct buf *
906geteblk(int size)
907{
908	struct buf *bp;
909
910	while ((bp = getnewbuf(0, 0, 0)) == 0);
911	allocbuf(bp, size);
912	bp->b_flags |= B_INVAL;
913	return (bp);
914}
915
916/*
917 * This code constitutes the buffer memory from either anonymous system
918 * memory (in the case of non-VMIO operations) or from an associated
919 * VM object (in the case of VMIO operations).
920 *
921 * Note that this code is tricky, and has many complications to resolve
922 * deadlock or inconsistent data situations.  Tread lightly!!!
923 *
924 * Modify the length of a buffer's underlying buffer storage without
925 * destroying information (unless, of course, the buffer is shrinking).
926 */
927int
928allocbuf(struct buf * bp, int size)
929{
930
931	int s;
932	int newbsize, mbsize;
933	int i;
934
935	if (!(bp->b_flags & B_BUSY))
936		panic("allocbuf: buffer not busy");
937
938	if ((bp->b_flags & B_VMIO) == 0) {
939		/*
940		 * Just get anonymous memory from the kernel
941		 */
942		mbsize = ((size + DEV_BSIZE - 1) / DEV_BSIZE) * DEV_BSIZE;
943		newbsize = round_page(size);
944
945		if (newbsize < bp->b_bufsize) {
946			vm_hold_free_pages(
947			    bp,
948			    (vm_offset_t) bp->b_data + newbsize,
949			    (vm_offset_t) bp->b_data + bp->b_bufsize);
950		} else if (newbsize > bp->b_bufsize) {
951			vm_hold_load_pages(
952			    bp,
953			    (vm_offset_t) bp->b_data + bp->b_bufsize,
954			    (vm_offset_t) bp->b_data + newbsize);
955		}
956	} else {
957		vm_page_t m;
958		int desiredpages;
959
960		newbsize = ((size + DEV_BSIZE - 1) / DEV_BSIZE) * DEV_BSIZE;
961		desiredpages = round_page(newbsize) / PAGE_SIZE;
962
963		if (newbsize < bp->b_bufsize) {
964			if (desiredpages < bp->b_npages) {
965				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
966				    desiredpages * PAGE_SIZE, (bp->b_npages - desiredpages));
967				for (i = desiredpages; i < bp->b_npages; i++) {
968					m = bp->b_pages[i];
969					s = splhigh();
970					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
971						m->flags |= PG_WANTED;
972						tsleep(m, PVM, "biodep", 0);
973					}
974					splx(s);
975
976					if (m->bmapped == 0) {
977						printf("allocbuf: bmapped is zero for page %d\n", i);
978						panic("allocbuf: error");
979					}
980					--m->bmapped;
981					if (m->bmapped == 0) {
982						vm_page_protect(m, VM_PROT_NONE);
983						vm_page_free(m);
984					}
985					bp->b_pages[i] = NULL;
986				}
987				bp->b_npages = desiredpages;
988			}
989		} else if (newbsize > bp->b_bufsize) {
990			vm_object_t obj;
991			vm_offset_t tinc, off, toff, objoff;
992			int pageindex, curbpnpages;
993			struct vnode *vp;
994			int bsize;
995
996			vp = bp->b_vp;
997			bsize = vp->v_mount->mnt_stat.f_iosize;
998
999			if (bp->b_npages < desiredpages) {
1000				obj = vp->v_object;
1001				tinc = PAGE_SIZE;
1002				if (tinc > bsize)
1003					tinc = bsize;
1004				off = bp->b_lblkno * bsize;
1005		doretry:
1006				curbpnpages = bp->b_npages;
1007				bp->b_flags |= B_CACHE;
1008				for (toff = 0; toff < newbsize; toff += tinc) {
1009					int mask;
1010					int bytesinpage;
1011
1012					pageindex = toff / PAGE_SIZE;
1013					objoff = trunc_page(toff + off);
1014					if (pageindex < curbpnpages) {
1015						int pb;
1016
1017						m = bp->b_pages[pageindex];
1018						if (m->offset != objoff)
1019							panic("allocbuf: page changed offset??!!!?");
1020						bytesinpage = tinc;
1021						if (tinc > (newbsize - toff))
1022							bytesinpage = newbsize - toff;
1023						if (!vm_page_is_valid(m, toff + off, bytesinpage)) {
1024							bp->b_flags &= ~B_CACHE;
1025						}
1026						if ((m->flags & PG_ACTIVE) == 0) {
1027							vm_page_activate(m);
1028							m->act_count = 0;
1029						}
1030						continue;
1031					}
1032					m = vm_page_lookup(obj, objoff);
1033					if (!m) {
1034						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
1035						if (!m) {
1036							int j;
1037
1038							for (j = bp->b_npages; j < pageindex; j++) {
1039								PAGE_WAKEUP(bp->b_pages[j]);
1040							}
1041							VM_WAIT;
1042							goto doretry;
1043						}
1044						vm_page_activate(m);
1045						m->act_count = 0;
1046						m->valid = 0;
1047						bp->b_flags &= ~B_CACHE;
1048					} else if (m->flags & PG_BUSY) {
1049						int j;
1050
1051						for (j = bp->b_npages; j < pageindex; j++) {
1052							PAGE_WAKEUP(bp->b_pages[j]);
1053						}
1054
1055						s = splbio();
1056						m->flags |= PG_WANTED;
1057						tsleep(m, PRIBIO, "pgtblk", 0);
1058						splx(s);
1059
1060						goto doretry;
1061					} else {
1062						int pb;
1063						if ((curproc != pageproc) &&
1064							(m->flags & PG_CACHE) &&
1065						    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
1066							pagedaemon_wakeup();
1067						}
1068						bytesinpage = tinc;
1069						if (tinc > (newbsize - toff))
1070							bytesinpage = newbsize - toff;
1071						if (!vm_page_is_valid(m, toff + off, bytesinpage)) {
1072							bp->b_flags &= ~B_CACHE;
1073						}
1074						if ((m->flags & PG_ACTIVE) == 0) {
1075							vm_page_activate(m);
1076							m->act_count = 0;
1077						}
1078						m->flags |= PG_BUSY;
1079					}
1080					bp->b_pages[pageindex] = m;
1081					curbpnpages = pageindex + 1;
1082				}
1083				for (i = bp->b_npages; i < curbpnpages; i++) {
1084					m = bp->b_pages[i];
1085					m->bmapped++;
1086					PAGE_WAKEUP(m);
1087				}
1088				bp->b_npages = curbpnpages;
1089				bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
1090				pmap_qenter((vm_offset_t) bp->b_data, bp->b_pages, bp->b_npages);
1091				bp->b_data += off % PAGE_SIZE;
1092			}
1093		}
1094	}
1095	bufspace += (newbsize - bp->b_bufsize);
1096	bp->b_bufsize = newbsize;
1097	bp->b_bcount = size;
1098	return 1;
1099}
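
/*
 * Sizing example for the VMIO case above (assuming size = 8192 and a 4K
 * PAGE_SIZE):
 *
 *	newbsize     = roundup(8192, DEV_BSIZE) = 8192
 *	desiredpages = round_page(8192) / 4096  = 2
 *
 * so two object pages are looked up (or allocated), mapped into the
 * buffer's KVA with pmap_qenter(), and b_data is then advanced by
 * "off % PAGE_SIZE" so that it points at the start of the block within
 * the first mapped page.
 */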
1100
1101/*
1102 * Wait for buffer I/O completion, returning error status.
1103 */
1104int
1105biowait(register struct buf * bp)
1106{
1107	int s;
1108
1109	s = splbio();
1110	while ((bp->b_flags & B_DONE) == 0)
1111		tsleep(bp, PRIBIO, "biowait", 0);
1112	splx(s);
1113	if (bp->b_flags & B_EINTR) {
1114		bp->b_flags &= ~B_EINTR;
1115		return (EINTR);
1116	}
1117	if (bp->b_flags & B_ERROR) {
1118		return (bp->b_error ? bp->b_error : EIO);
1119	} else {
1120		return (0);
1121	}
1122}
1123
1124/*
1125 * Finish I/O on a buffer, calling an optional function.
1126 * This is usually called from interrupt level, so process blocking
1127 * is not *a good idea*.
1128 */
1129void
1130biodone(register struct buf * bp)
1131{
1132	int s;
1133
1134	s = splbio();
1135	if (!(bp->b_flags & B_BUSY))
1136		panic("biodone: buffer not busy");
1137
1138	if (bp->b_flags & B_DONE) {
1139		splx(s);
1140		printf("biodone: buffer already done\n");
1141		return;
1142	}
1143	bp->b_flags |= B_DONE;
1144
1145	if ((bp->b_flags & B_READ) == 0) {
1146		struct vnode *vp = bp->b_vp;
1147		vwakeup(bp);
1148	}
1149#ifdef BOUNCE_BUFFERS
1150	if (bp->b_flags & B_BOUNCE)
1151		vm_bounce_free(bp);
1152#endif
1153
1154	/* call optional completion function if requested */
1155	if (bp->b_flags & B_CALL) {
1156		bp->b_flags &= ~B_CALL;
1157		(*bp->b_iodone) (bp);
1158		splx(s);
1159		return;
1160	}
1161	if (bp->b_flags & B_VMIO) {
1162		int i, resid;
1163		vm_offset_t foff;
1164		vm_page_t m;
1165		vm_object_t obj;
1166		int iosize;
1167		struct vnode *vp = bp->b_vp;
1168
1169		foff = vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
1170		obj = vp->v_object;
1171		if (!obj) {
1172			panic("biodone: no object");
1173		}
1174#if defined(VFS_BIO_DEBUG)
1175		if (obj->paging_in_progress < bp->b_npages) {
1176			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
1177			    obj->paging_in_progress, bp->b_npages);
1178		}
1179#endif
1180		iosize = bp->b_bufsize;
1181		for (i = 0; i < bp->b_npages; i++) {
1182			int bogusflag = 0;
1183			m = bp->b_pages[i];
1184			if (m == bogus_page) {
1185				bogusflag = 1;
1186				m = vm_page_lookup(obj, foff);
1187				if (!m) {
1188#if defined(VFS_BIO_DEBUG)
1189					printf("biodone: page disappeared\n");
1190#endif
1191					--obj->paging_in_progress;
1192					continue;
1193				}
1194				bp->b_pages[i] = m;
1195				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
1196			}
1197#if defined(VFS_BIO_DEBUG)
1198			if (trunc_page(foff) != m->offset) {
1199				printf("biodone: foff(%d)/m->offset(%d) mismatch\n", foff, m->offset);
1200			}
1201#endif
1202			resid = (m->offset + PAGE_SIZE) - foff;
1203			if (resid > iosize)
1204				resid = iosize;
1205			/*
1206			 * In the write case, the valid and clean bits are
1207			 * already changed correctly, so we only need to do this
1208			 * here in the read case.
1209			 */
1210			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
1211				vm_page_set_valid(m, foff & (PAGE_SIZE-1), resid);
1212				vm_page_set_clean(m, foff & (PAGE_SIZE-1), resid);
1213			}
1214
1215			/*
1216			 * When debugging new filesystems or buffer I/O methods, this
1217			 * is the most common error that pops up.  If you see this, you
1218			 * have not set the page busy flag correctly!!!
1219			 */
1220			if (m->busy == 0) {
1221				printf("biodone: page busy < 0, "
1222				    "off: %ld, foff: %ld, "
1223				    "resid: %d, index: %d\n",
1224				    m->offset, foff, resid, i);
1225				printf(" iosize: %ld, lblkno: %ld, flags: 0x%x, npages: %d\n",
1226				    bp->b_vp->v_mount->mnt_stat.f_iosize,
1227				    bp->b_lblkno, bp->b_flags, bp->b_npages);
1228				printf(" valid: 0x%x, dirty: 0x%x, mapped: %d\n",
1229				    m->valid, m->dirty, m->bmapped);
1230				panic("biodone: page busy < 0\n");
1231			}
1232			--m->busy;
1233			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
1234				m->flags &= ~PG_WANTED;
1235				wakeup(m);
1236			}
1237			--obj->paging_in_progress;
1238			foff += resid;
1239			iosize -= resid;
1240		}
1241		if (obj && obj->paging_in_progress == 0 &&
1242		    (obj->flags & OBJ_PIPWNT)) {
1243			obj->flags &= ~OBJ_PIPWNT;
1244			wakeup(obj);
1245		}
1246	}
1247	/*
1248	 * For asynchronous completions, release the buffer now. The brelse
1249	 * checks for B_WANTED and will do the wakeup there if necessary - so
1250	 * no need to do a wakeup here in the async case.
1251	 */
1252
1253	if (bp->b_flags & B_ASYNC) {
1254		brelse(bp);
1255	} else {
1256		bp->b_flags &= ~B_WANTED;
1257		wakeup(bp);
1258	}
1259	splx(s);
1260}
1261
1262int
1263count_lock_queue()
1264{
1265	int count;
1266	struct buf *bp;
1267
1268	count = 0;
1269	for (bp = bufqueues[QUEUE_LOCKED].tqh_first;
1270	    bp != NULL;
1271	    bp = bp->b_freelist.tqe_next)
1272		count++;
1273	return (count);
1274}
1275
1276int vfs_update_interval = 30;
1277
1278void
1279vfs_update()
1280{
1281	(void) spl0();
1282	while (1) {
1283		tsleep(&vfs_update_wakeup, PRIBIO, "update",
1284		    hz * vfs_update_interval);
1285		vfs_update_wakeup = 0;
1286		sync(curproc, NULL, NULL);
1287	}
1288}
1289
1290/*
1291 * This routine is called in lieu of iodone in the case of
1292 * incomplete I/O.  This keeps the busy status for pages
1293 * consistent.
1294 */
1295void
1296vfs_unbusy_pages(struct buf * bp)
1297{
1298	int i;
1299
1300	if (bp->b_flags & B_VMIO) {
1301		struct vnode *vp = bp->b_vp;
1302		vm_object_t obj = vp->v_object;
1303		vm_offset_t foff;
1304
1305		foff = trunc_page(vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno);
1306
1307		for (i = 0; i < bp->b_npages; i++) {
1308			vm_page_t m = bp->b_pages[i];
1309
1310			if (m == bogus_page) {
1311				m = vm_page_lookup(obj, foff + i * PAGE_SIZE);
1312				if (!m) {
1313					panic("vfs_unbusy_pages: page missing\n");
1314				}
1315				bp->b_pages[i] = m;
1316				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
1317			}
1318			--obj->paging_in_progress;
1319			--m->busy;
1320			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
1321				m->flags &= ~PG_WANTED;
1322				wakeup(m);
1323			}
1324		}
1325		if (obj->paging_in_progress == 0 &&
1326		    (obj->flags & OBJ_PIPWNT)) {
1327			obj->flags &= ~OBJ_PIPWNT;
1328			wakeup(obj);
1329		}
1330	}
1331}
1332
1333/*
1334 * This routine is called before a device strategy routine.
1335 * It is used to tell the VM system that paging I/O is in
1336 * progress, and to treat the pages associated with the buffer
1337 * almost as being PG_BUSY.  Also, the object's paging_in_progress
1338 * count is maintained to make sure that the object doesn't become
1339 * inconsistent.
1340 */
1341void
1342vfs_busy_pages(struct buf * bp, int clear_modify)
1343{
1344	int i;
1345
1346	if (bp->b_flags & B_VMIO) {
1347		vm_object_t obj = bp->b_vp->v_object;
1348		vm_offset_t foff = bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
1349		int iocount = bp->b_bufsize;
1350
1351		vfs_setdirty(bp);
1352		for (i = 0; i < bp->b_npages; i++) {
1353			vm_page_t m = bp->b_pages[i];
1354			int resid = (m->offset + PAGE_SIZE) - foff;
1355
1356			if (resid > iocount)
1357				resid = iocount;
1358			obj->paging_in_progress++;
1359			m->busy++;
1360			if (clear_modify) {
1361				vm_page_protect(m, VM_PROT_READ);
1362				vm_page_set_valid(m,
1363					foff & (PAGE_SIZE-1), resid);
1364				vm_page_set_clean(m,
1365					foff & (PAGE_SIZE-1), resid);
1366			} else if (bp->b_bcount >= PAGE_SIZE) {
1367				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
1368					bp->b_pages[i] = bogus_page;
1369					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
1370				}
1371			}
1372			foff += resid;
1373			iocount -= resid;
1374		}
1375	}
1376}
1377
1378/*
1379 * Tell the VM system that the pages associated with this buffer
1380 * are clean.  This is used for delayed writes where the data is
1381 * going to go to disk eventually without additional VM intervention.
1382 */
1383void
1384vfs_clean_pages(struct buf * bp)
1385{
1386	int i;
1387
1388	if (bp->b_flags & B_VMIO) {
1389		vm_offset_t foff =
1390			bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
1391		int iocount = bp->b_bufsize;
1392
1393		for (i = 0; i < bp->b_npages; i++) {
1394			vm_page_t m = bp->b_pages[i];
1395			int resid = (m->offset + PAGE_SIZE) - foff;
1396
1397			if (resid > iocount)
1398				resid = iocount;
1399			if (resid > 0) {
1400				vm_page_set_valid(m,
1401					foff & (PAGE_SIZE-1), resid);
1402				vm_page_set_clean(m,
1403					foff & (PAGE_SIZE-1), resid);
1404			}
1405			foff += resid;
1406			iocount -= resid;
1407		}
1408	}
1409}
1410
1411void
1412vfs_bio_clrbuf(struct buf *bp) {
1413	int i;
1414	if( bp->b_flags & B_VMIO) {
1415		if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
1416			int j;
1417			if( bp->b_pages[0]->valid != VM_PAGE_BITS_ALL) {
1418				for(j=0; j < bp->b_bufsize / DEV_BSIZE;j++) {
1419					bzero(bp->b_data + j * DEV_BSIZE, DEV_BSIZE);
1420				}
1421			}
1422			bp->b_resid = 0;
1423			return;
1424		}
1425		for(i=0;i<bp->b_npages;i++) {
1426			if( bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
1427				continue;
1428			if( bp->b_pages[i]->valid == 0) {
1429				bzero(bp->b_data + i * PAGE_SIZE, PAGE_SIZE);
1430			} else {
1431				int j;
1432				for(j=0;j<PAGE_SIZE/DEV_BSIZE;j++) {
1433					if( (bp->b_pages[i]->valid & (1<<j)) == 0)
1434						bzero(bp->b_data + i * PAGE_SIZE + j * DEV_BSIZE, DEV_BSIZE);
1435				}
1436			}
1437			bp->b_pages[i]->valid = VM_PAGE_BITS_ALL;
1438		}
1439		bp->b_resid = 0;
1440	} else {
1441		clrbuf(bp);
1442	}
1443}
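
/*
 * Valid-bit example for the zeroing loop above (assuming a 4K PAGE_SIZE
 * and DEV_BSIZE = 512): each page carries eight valid bits, bit j covering
 * bytes j*512 through j*512+511 of that page.  A page with valid == 0x0f
 * therefore has its first 2K intact, so only the upper 2K (bits 4-7) is
 * bzero'd before the page is marked fully valid.
 */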
1444
1445/*
1446 * vm_hold_load_pages and vm_hold_free_pages move pages into and
1447 * out of a buffer's address space.  The pages are anonymous and
1448 * are not associated with a file object.
1449 */
1450void
1451vm_hold_load_pages(struct buf * bp, vm_offset_t froma, vm_offset_t toa)
1452{
1453	vm_offset_t pg;
1454	vm_page_t p;
1455	vm_offset_t from = round_page(froma);
1456	vm_offset_t to = round_page(toa);
1457
1458	for (pg = from; pg < to; pg += PAGE_SIZE) {
1459
1460tryagain:
1461
1462		p = vm_page_alloc(kernel_object, pg - VM_MIN_KERNEL_ADDRESS,
1463		    VM_ALLOC_NORMAL);
1464		if (!p) {
1465			VM_WAIT;
1466			goto tryagain;
1467		}
1468		vm_page_wire(p);
1469		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
1470		bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE] = p;
1471		PAGE_WAKEUP(p);
1472		bp->b_npages++;
1473	}
1474}
1475
1476void
1477vm_hold_free_pages(struct buf * bp, vm_offset_t froma, vm_offset_t toa)
1478{
1479	vm_offset_t pg;
1480	vm_page_t p;
1481	vm_offset_t from = round_page(froma);
1482	vm_offset_t to = round_page(toa);
1483
1484	for (pg = from; pg < to; pg += PAGE_SIZE) {
1485		p = bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE];
1486		bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE] = 0;
1487		pmap_kremove(pg);
1488		vm_page_free(p);
1489		--bp->b_npages;
1490	}
1491}
1492