1,8c1,3
< /*-
< * Copyright (c) 1982, 1986, 1989, 1993
< * The Regents of the University of California. All rights reserved.
< * (c) UNIX System Laboratories, Inc.
< * All or some portions of this file are derived from material licensed
< * to the University of California by American Telephone and Telegraph
< * Co. or Unix System Laboratories, Inc. and are reproduced herein with
< * the permission of UNIX System Laboratories, Inc.
---
> /*
> * Copyright (c) 1994 John S. Dyson
> * All rights reserved.
14c9,10
< * notice, this list of conditions and the following disclaimer.
---
> * notice immediately at the beginning of the file, without modification,
> * this list of conditions, and the following disclaimer.
18,38c14,17
< * 3. All advertising materials mentioning features or use of this software
< * must display the following acknowledgement:
< * This product includes software developed by the University of
< * California, Berkeley and its contributors.
< * 4. Neither the name of the University nor the names of its contributors
< * may be used to endorse or promote products derived from this software
< * without specific prior written permission.
< *
< * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
< * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
< * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
< * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
< * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
< * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
< * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
< * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
< * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
< * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
< * SUCH DAMAGE.
< *
< * from: @(#)vfs_bio.c 8.6 (Berkeley) 1/11/94
---
> * 3. Absolutely no warranty of function or purpose is made by the author
> * John S. Dyson.
> * 4. Modifications may be freely made to this file if the above conditions
> * are met.
42a22
> #include <sys/kernel.h>
44d23
< #include <sys/buf.h>
45a25
> #include <sys/buf.h>
47d26
< #include <sys/trace.h>
49a29,30
> #include <vm/vm.h>
> #include <vm/vm_pageout.h>
50a32,45
> #include <miscfs/specfs/specdev.h>
>
> struct buf *buf; /* the buffer pool itself */
> int nbuf; /* number of buffer headers */
> int bufpages; /* number of memory pages in the buffer pool */
> struct buf *swbuf; /* swap I/O headers */
> int nswbuf;
> #define BUFHSZ 512
> int bufhash = BUFHSZ - 1;
>
> struct buf *getnewbuf(int,int);
> extern vm_map_t buffer_map, io_map;
> void vm_hold_free_pages(vm_offset_t from, vm_offset_t to);
> void vm_hold_load_pages(vm_offset_t from, vm_offset_t to);
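[Editor's note: with BUFHSZ a power of two, `bufhash = BUFHSZ - 1` acts as a mask, so a bucket lookup is an add and an AND rather than a modulo. The BUFHASH macro itself lives in a header this diff does not touch; the sketch below is a minimal, runnable userspace illustration of the same masking scheme. The exact mixing of vnode pointer and block number is an assumption, not the kernel's formula.]

#include <stdint.h>
#include <stdio.h>

#define BUFHSZ 512                     /* must be a power of two */
static const int bufhash = BUFHSZ - 1; /* the mask, as in the diff */

/* hypothetical fold of a vnode pointer and logical block number into a bucket */
static int bucket(const void *vp, long blkno)
{
        return (int)(((uintptr_t)vp / sizeof(void *) + (uintptr_t)blkno) & bufhash);
}

int main(void)
{
        int v; /* dummy object standing in for a vnode */
        for (long bn = 0; bn < 4; bn++)
                printf("blk %ld -> bucket %d\n", bn, bucket(&v, bn));
        return 0;
}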
56,57d50
< LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
< u_long bufhash;
60,65d52
< * Insq/Remq for the buffer hash lists.
< */
< #define binshash(bp, dp) LIST_INSERT_HEAD(dp, bp, b_hash)
< #define bremhash(bp) LIST_REMOVE(bp, b_hash)
<
< /*
68c55
< #define BQUEUES 4 /* number of free buffer queues */
---
> #define BQUEUES 5 /* number of free buffer queues */
70,74c57
< #define BQ_LOCKED 0 /* super-blocks &c */
< #define BQ_LRU 1 /* lru, useful buffers */
< #define BQ_AGE 2 /* rubbish */
< #define BQ_EMPTY 3 /* buffer headers with no memory */
<
---
> LIST_HEAD(bufhashhdr, buf) bufhashtbl[BUFHSZ], invalhash;
76d58
< int needbuffer;
77a60,67
> #define BQ_NONE 0 /* on no queue */
> #define BQ_LOCKED 1 /* locked buffers */
> #define BQ_LRU 2 /* useful buffers */
> #define BQ_AGE 3 /* less useful buffers */
> #define BQ_EMPTY 4 /* empty buffer headers */
>
> int needsbuffer;
>
79c69,70
< * Insq/Remq for the buffer free lists.
---
> * Internal update daemon, process 3
> * The variable vfs_update_wakeup allows for internal syncs.
81,82c72
< #define binsheadfree(bp, dp) TAILQ_INSERT_HEAD(dp, bp, b_freelist)
< #define binstailfree(bp, dp) TAILQ_INSERT_TAIL(dp, bp, b_freelist)
---
> int vfs_update_wakeup;
84,106d73
< void
< bremfree(bp)
< struct buf *bp;
< {
< struct bqueues *dp = NULL;
<
< /*
< * We only calculate the head of the freelist when removing
< * the last element of the list as that is the only time that
< * it is needed (e.g. to reset the tail pointer).
< *
< * NB: This makes an assumption about how tailq's are implemented.
< */
< if (bp->b_freelist.tqe_next == NULL) {
< for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
< if (dp->tqh_last == &bp->b_freelist.tqe_next)
< break;
< if (dp == &bufqueues[BQUEUES])
< panic("bremfree: lost tail");
< }
< TAILQ_REMOVE(dp, bp, b_freelist);
< }
<
108c75
< * Initialize buffers and hash links for buffers.
---
> * Initialize buffer headers and related structures.
110,111c77
< void
< bufinit()
---
> void bufinit()
113,116c79,80
< register struct buf *bp;
< struct bqueues *dp;
< register int i;
< int base, residual;
---
> struct buf *bp;
> int i;
118,123c82,94
< for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
< TAILQ_INIT(dp);
< bufhashtbl = hashinit(nbuf, M_CACHE, &bufhash);
< base = bufpages / nbuf;
< residual = bufpages % nbuf;
< for (i = 0; i < nbuf; i++) {
---
> TAILQ_INIT(&bswlist);
> LIST_INIT(&invalhash);
>
> /* first, make a null hash table */
> for(i=0;i<BUFHSZ;i++)
> LIST_INIT(&bufhashtbl[i]);
>
> /* next, make a null set of free lists */
> for(i=0;i<BQUEUES;i++)
> TAILQ_INIT(&bufqueues[i]);
>
> /* finally, initialize each buffer header and stick on empty q */
> for(i=0;i<nbuf;i++) {
125c96,97
< bzero((char *)bp, sizeof *bp);
---
> bzero(bp, sizeof *bp);
> bp->b_flags = B_INVAL; /* we're just an empty header */
126a99
> bp->b_vp = NULL;
128a102
> bp->b_qindex = BQ_EMPTY;
130,138c104,106
< bp->b_data = buffers + i * MAXBSIZE;
< if (i < residual)
< bp->b_bufsize = (base + 1) * CLBYTES;
< else
< bp->b_bufsize = base * CLBYTES;
< bp->b_flags = B_INVAL;
< dp = bp->b_bufsize ? &bufqueues[BQ_AGE] : &bufqueues[BQ_EMPTY];
< binsheadfree(bp, dp);
< binshash(bp, &invalhash);
---
> bp->b_data = (caddr_t)kmem_alloc_pageable(buffer_map, MAXBSIZE);
> TAILQ_INSERT_TAIL(&bufqueues[BQ_EMPTY], bp, b_freelist);
> LIST_INSERT_HEAD(&invalhash, bp, b_hash);
142,147c110,114
< bread(a1, a2, a3, a4, a5)
< struct vnode *a1;
< daddr_t a2;
< int a3;
< struct ucred *a4;
< struct buf **a5;
---
> /*
> * remove the buffer from the appropriate free list
> */
> void
> bremfree(struct buf *bp)
148a116,124
> int s = splbio();
> if( bp->b_qindex != BQ_NONE) {
> TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
> bp->b_qindex = BQ_NONE;
> } else {
> panic("bremfree: removing a buffer when not on a queue");
> }
> splx(s);
> }
150,153c126,153
< /*
< * Body deleted.
< */
< return (EIO);
---
> /*
> * Get a buffer with the specified data. Look in the cache first.
> */
> int
> bread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
> struct buf **bpp)
> {
> struct buf *bp;
>
> bp = getblk (vp, blkno, size, 0, 0);
> *bpp = bp;
>
> /* if not found in cache, do some I/O */
> if ((bp->b_flags & B_CACHE) == 0) {
> if (curproc && curproc->p_stats) /* count block I/O */
> curproc->p_stats->p_ru.ru_inblock++;
> bp->b_flags |= B_READ;
> bp->b_flags &= ~(B_DONE|B_ERROR|B_INVAL);
> if( bp->b_rcred == NOCRED) {
> if (cred != NOCRED)
> crhold(cred);
> bp->b_rcred = cred;
> }
> VOP_STRATEGY(bp);
> return( biowait (bp));
> }
>
> return (0);
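[Editor's note: bread is the cache fast path: getblk either finds the block resident (B_CACHE set, no I/O) or hands back a fresh buffer that the strategy routine must fill. A standalone userspace sketch of that shape, using a hypothetical one-slot cache over pread; purely illustrative, not the kernel API.]

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define BSIZE 512

static struct { long blkno; int valid; char data[BSIZE]; } cache;

/* read block 'blkno' of fd, consulting the cache first, as bread does */
static int bread_demo(int fd, long blkno, char *out)
{
        ssize_t n;

        if (cache.valid && cache.blkno == blkno) {   /* the B_CACHE hit */
                memcpy(out, cache.data, BSIZE);
                return 0;
        }
        n = pread(fd, cache.data, BSIZE, blkno * BSIZE);
        if (n <= 0)
                return -1;                           /* the biowait error path */
        cache.blkno = blkno;
        cache.valid = 1;
        memcpy(out, cache.data, BSIZE);
        return 0;
}

int main(void)
{
        char buf[BSIZE];
        int fd = open("/etc/hosts", O_RDONLY);

        if (fd < 0)
                return 1;
        if (bread_demo(fd, 0, buf) == 0 &&  /* miss: does the pread */
            bread_demo(fd, 0, buf) == 0)    /* hit: served from the cache */
                printf("second call served from cache\n");
        close(fd);
        return 0;
}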
156,162c156,163
< breadn(a1, a2, a3, a4, a5, a6, a7, a8)
< struct vnode *a1;
< daddr_t a2; int a3;
< daddr_t a4[]; int a5[];
< int a6;
< struct ucred *a7;
< struct buf **a8;
---
> /*
> * Operates like bread, but also starts asynchronous I/O on
> * read-ahead blocks.
> */
> int
> breadn(struct vnode *vp, daddr_t blkno, int size,
> daddr_t *rablkno, int *rabsize,
> int cnt, struct ucred *cred, struct buf **bpp)
163a165,167
> struct buf *bp, *rabp;
> int i;
> int rv = 0, readwait = 0;
165,168c169,212
< /*
< * Body deleted.
< */
< return (EIO);
---
> *bpp = bp = getblk (vp, blkno, size, 0, 0);
>
> /* if not found in cache, do some I/O */
> if ((bp->b_flags & B_CACHE) == 0) {
> if (curproc && curproc->p_stats) /* count block I/O */
> curproc->p_stats->p_ru.ru_inblock++;
> bp->b_flags |= B_READ;
> bp->b_flags &= ~(B_DONE|B_ERROR|B_INVAL);
> if( bp->b_rcred == NOCRED) {
> if (cred != NOCRED)
> crhold(cred);
> bp->b_rcred = cred;
> }
> VOP_STRATEGY(bp);
> ++readwait;
> }
>
> for(i=0;i<cnt;i++, rablkno++, rabsize++) {
> if( incore(vp, *rablkno)) {
> continue;
> }
> rabp = getblk (vp, *rablkno, *rabsize, 0, 0);
>
> if ((rabp->b_flags & B_CACHE) == 0) {
> if (curproc && curproc->p_stats)
> curproc->p_stats->p_ru.ru_inblock++;
> rabp->b_flags |= B_READ | B_ASYNC;
> rabp->b_flags &= ~(B_DONE|B_ERROR|B_INVAL);
> if( rabp->b_rcred == NOCRED) {
> if (cred != NOCRED)
> crhold(cred);
> rabp->b_rcred = cred;
> }
> VOP_STRATEGY(rabp);
> } else {
> brelse(rabp);
> }
> }
>
> if( readwait) {
> rv = biowait (bp);
> }
>
> return (rv);
171,172c215,220
< bwrite(a1)
< struct buf *a1;
---
> /*
> * Write, release buffer on completion. (Done by iodone
> * if async.)
> */
> int
> bwrite(struct buf *bp)
173a222
> int oldflags = bp->b_flags;
175,178c224,257
< /*
< * Body deleted.
< */
< return (EIO);
---
> if(bp->b_flags & B_INVAL) {
> brelse(bp);
> return (0);
> }
>
> if(!(bp->b_flags & B_BUSY))
> panic("bwrite: buffer is not busy???");
>
> bp->b_flags &= ~(B_READ|B_DONE|B_ERROR|B_DELWRI);
> bp->b_flags |= B_WRITEINPROG;
>
> if (oldflags & B_ASYNC) {
> if (oldflags & B_DELWRI) {
> reassignbuf(bp, bp->b_vp);
> } else if( curproc) {
> ++curproc->p_stats->p_ru.ru_oublock;
> }
> }
>
> bp->b_vp->v_numoutput++;
> VOP_STRATEGY(bp);
>
> if( (oldflags & B_ASYNC) == 0) {
> int rtval = biowait(bp);
> if (oldflags & B_DELWRI) {
> reassignbuf(bp, bp->b_vp);
> } else if( curproc) {
> ++curproc->p_stats->p_ru.ru_oublock;
> }
> brelse(bp);
> return (rtval);
> }
>
> return(0);
188,189c267,271
< bdwrite(a1)
< struct buf *a1;
---
> /*
> * Delayed write. (Buffer is marked dirty).
> */
> void
> bdwrite(struct buf *bp)
192,194c274,295
< /*
< * Body deleted.
< */
---
> if((bp->b_flags & B_BUSY) == 0) {
> panic("bdwrite: buffer is not busy");
> }
>
> if(bp->b_flags & B_INVAL) {
> brelse(bp);
> return;
> }
>
> if(bp->b_flags & B_TAPE) {
> bawrite(bp);
> return;
> }
>
> bp->b_flags &= ~B_READ;
> if( (bp->b_flags & B_DELWRI) == 0) {
> if( curproc)
> ++curproc->p_stats->p_ru.ru_oublock;
> bp->b_flags |= B_DONE|B_DELWRI;
> reassignbuf(bp, bp->b_vp);
> }
> brelse(bp);
198,199c299,305
< bawrite(a1)
< struct buf *a1;
---
> /*
> * Asynchronous write.
> * Start output on a buffer, but do not wait for it to complete.
> * The buffer is released when the output completes.
> */
> void
> bawrite(struct buf *bp)
201,205c307,308
<
< /*
< * Body deleted.
< */
< return;
---
> bp->b_flags |= B_ASYNC;
> (void) bwrite(bp);
208,209c311,315
< brelse(a1)
< struct buf *a1;
---
> /*
> * Release a buffer.
> */
> void
> brelse(struct buf *bp)
210a317
> int x;
212,215c319,375
< /*
< * Body deleted.
< */
< return;
---
> /* anyone need a "free" block? */
> x=splbio();
> if (needsbuffer) {
> needsbuffer = 0;
> wakeup((caddr_t)&needsbuffer);
> }
> /* anyone need this very block? */
> if (bp->b_flags & B_WANTED) {
> bp->b_flags &= ~(B_WANTED|B_AGE);
> wakeup((caddr_t)bp);
> }
>
> if (bp->b_flags & B_LOCKED)
> bp->b_flags &= ~B_ERROR;
>
> if ((bp->b_flags & (B_NOCACHE|B_INVAL|B_ERROR)) ||
> (bp->b_bufsize <= 0)) {
> bp->b_flags |= B_INVAL;
> bp->b_flags &= ~(B_DELWRI|B_CACHE);
> if(bp->b_vp)
> brelvp(bp);
> }
>
> if( bp->b_qindex != BQ_NONE)
> panic("brelse: free buffer onto another queue???");
>
> /* enqueue */
> /* buffers with junk contents */
> if(bp->b_bufsize == 0) {
> bp->b_qindex = BQ_EMPTY;
> TAILQ_INSERT_HEAD(&bufqueues[BQ_EMPTY], bp, b_freelist);
> LIST_REMOVE(bp, b_hash);
> LIST_INSERT_HEAD(&invalhash, bp, b_hash);
> bp->b_dev = NODEV;
> } else if(bp->b_flags & (B_ERROR|B_INVAL|B_NOCACHE)) {
> bp->b_qindex = BQ_AGE;
> TAILQ_INSERT_HEAD(&bufqueues[BQ_AGE], bp, b_freelist);
> LIST_REMOVE(bp, b_hash);
> LIST_INSERT_HEAD(&invalhash, bp, b_hash);
> bp->b_dev = NODEV;
> /* buffers that are locked */
> } else if(bp->b_flags & B_LOCKED) {
> bp->b_qindex = BQ_LOCKED;
> TAILQ_INSERT_TAIL(&bufqueues[BQ_LOCKED], bp, b_freelist);
> /* buffers with stale but valid contents */
> } else if(bp->b_flags & B_AGE) {
> bp->b_qindex = BQ_AGE;
> TAILQ_INSERT_TAIL(&bufqueues[BQ_AGE], bp, b_freelist);
> /* buffers with valid and quite potentially reusable contents */
> } else {
> bp->b_qindex = BQ_LRU;
> TAILQ_INSERT_TAIL(&bufqueues[BQ_LRU], bp, b_freelist);
> }
>
> /* unlock */
> bp->b_flags &= ~(B_WANTED|B_BUSY|B_ASYNC|B_NOCACHE|B_AGE);
> splx(x);
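[Editor's note: brelse sorts a released buffer onto one of the five queues by how reusable its contents are: empty and invalidated buffers go to the head of their queues (reclaimed first), while locked, aged, and LRU buffers go to the tail. Below is a minimal, runnable userspace sketch of the same TAILQ discipline, including the qindex bookkeeping that bremfree depends on. Names are illustrative and the queue set is trimmed for brevity.]

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

#define BQ_NONE 0
#define BQ_AGE  1
#define BQ_LRU  2
#define BQUEUES 3          /* trimmed to two real queues for brevity */

struct xbuf {
        int qindex;
        TAILQ_ENTRY(xbuf) freelist;
};
TAILQ_HEAD(bqueues, xbuf) queues[BQUEUES];

/* mirror of bremfree: a buffer must know which queue it is on */
static void xbremfree(struct xbuf *bp)
{
        if (bp->qindex == BQ_NONE)
                abort();   /* "removing a buffer when not on a queue" */
        TAILQ_REMOVE(&queues[bp->qindex], bp, freelist);
        bp->qindex = BQ_NONE;
}

/* mirror of the brelse enqueue step: stale data to AGE, useful data to LRU */
static void xbrelse(struct xbuf *bp, int stale)
{
        bp->qindex = stale ? BQ_AGE : BQ_LRU;
        TAILQ_INSERT_TAIL(&queues[bp->qindex], bp, freelist);
}

int main(void)
{
        struct xbuf a = { BQ_NONE }, b = { BQ_NONE };
        int i;

        for (i = 0; i < BQUEUES; i++)
                TAILQ_INIT(&queues[i]);
        xbrelse(&a, 1);    /* stale contents: reclaimed before... */
        xbrelse(&b, 0);    /* ...recently useful contents */
        xbremfree(&a);
        printf("a off its queue, b on LRU: %d\n", b.qindex == BQ_LRU);
        return 0;
}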
217a378,383
> int freebufspace;
> int allocbufspace;
>
> /*
> * Find a buffer header which is available for use.
> */
219,221c385
< incore(a1, a2)
< struct vnode *a1;
< daddr_t a2;
---
> getnewbuf(int slpflag, int slptimeo)
222a387,397
> struct buf *bp;
> int x;
> x = splbio();
> start:
> /* can we constitute a new buffer? */
> if (bp = bufqueues[BQ_EMPTY].tqh_first) {
> if( bp->b_qindex != BQ_EMPTY)
> panic("getnewbuf: inconsistent EMPTY queue");
> bremfree(bp);
> goto fillbuf;
> }
224,227c399,447
< /*
< * Body deleted.
< */
< return (0);
---
> tryfree:
> if (bp = bufqueues[BQ_AGE].tqh_first) {
> if( bp->b_qindex != BQ_AGE)
> panic("getnewbuf: inconsistent AGE queue");
> bremfree(bp);
> } else if (bp = bufqueues[BQ_LRU].tqh_first) {
> if( bp->b_qindex != BQ_LRU)
> panic("getnewbuf: inconsistent LRU queue");
> bremfree(bp);
> } else {
> /* wait for a free buffer of any kind */
> needsbuffer = 1;
> tsleep((caddr_t)&needsbuffer, PRIBIO, "newbuf", 0);
> splx(x);
> return (0);
> }
>
>
> /* if we are a delayed write, convert to an async write */
> if (bp->b_flags & B_DELWRI) {
> bp->b_flags |= B_BUSY;
> bawrite (bp);
> goto start;
> }
>
> if(bp->b_vp)
> brelvp(bp);
>
> /* we are not free, nor do we contain interesting data */
> if (bp->b_rcred != NOCRED)
> crfree(bp->b_rcred);
> if (bp->b_wcred != NOCRED)
> crfree(bp->b_wcred);
> fillbuf:
> bp->b_flags = B_BUSY;
> LIST_REMOVE(bp, b_hash);
> LIST_INSERT_HEAD(&invalhash, bp, b_hash);
> splx(x);
> bp->b_dev = NODEV;
> bp->b_vp = NULL;
> bp->b_blkno = bp->b_lblkno = 0;
> bp->b_iodone = 0;
> bp->b_error = 0;
> bp->b_resid = 0;
> bp->b_bcount = 0;
> bp->b_wcred = bp->b_rcred = NOCRED;
> bp->b_dirtyoff = bp->b_dirtyend = 0;
> bp->b_validoff = bp->b_validend = 0;
> return (bp);
229a450,452
> /*
> * Check to see if a block is currently memory resident.
> */
231,234c454
< getblk(a1, a2, a3, a4, a5)
< struct vnode *a1;
< daddr_t a2;
< int a3, a4, a5;
---
> incore(struct vnode *vp, daddr_t blkno)
235a456,457
> struct buf *bp;
> struct bufhashhdr *bh;
237,240c459,479
< /*
< * Body deleted.
< */
< return ((struct buf *)0);
---
> int s = splbio();
>
> bh = BUFHASH(vp, blkno);
> bp = bh->lh_first;
>
> /* Search hash chain */
> while (bp) {
> if( (bp < buf) || (bp >= buf + nbuf)) {
> printf("incore: buf out of range: %lx, hash: %d\n",
> bp, bh - bufhashtbl);
> panic("incore: buf fault");
> }
> /* hit */
> if (bp->b_lblkno == blkno && bp->b_vp == vp
> && (bp->b_flags & B_INVAL) == 0)
> return (bp);
> bp = bp->b_hash.le_next;
> }
> splx(s);
>
> return(0);
242a482,484
> /*
> * Get a block given a specified block and offset into a file/device.
> */
244,245c486
< geteblk(a1)
< int a1;
---
> getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo)
246a488,490
> struct buf *bp;
> int x;
> struct bufhashhdr *bh;
248,251c492,533
< /*
< * Body deleted.
< */
< return ((struct buf *)0);
---
> x = splbio();
> loop:
> if (bp = incore(vp, blkno)) {
> if (bp->b_flags & B_BUSY) {
> bp->b_flags |= B_WANTED;
> tsleep ((caddr_t)bp, PRIBIO, "getblk", 0);
> goto loop;
> }
> bp->b_flags |= B_BUSY | B_CACHE;
> bremfree(bp);
> /*
> * check for size inconsistencies
> */
> if (bp->b_bcount != size) {
> printf("getblk: invalid buffer size: %d\n", bp->b_bcount);
> bp->b_flags |= B_INVAL;
> bwrite(bp);
> goto loop;
> }
> } else {
>
> if ((bp = getnewbuf(0, 0)) == 0)
> goto loop;
> allocbuf(bp, size);
> /*
> * have to check again, because of a possible
> * race condition.
> */
> if (incore( vp, blkno)) {
> allocbuf(bp, 0);
> bp->b_flags |= B_INVAL;
> brelse(bp);
> goto loop;
> }
> bp->b_blkno = bp->b_lblkno = blkno;
> bgetvp(vp, bp);
> LIST_REMOVE(bp, b_hash);
> bh = BUFHASH(vp, blkno);
> LIST_INSERT_HEAD(bh, bp, b_hash);
> }
> splx(x);
> return (bp);
254,256c536,540
< allocbuf(a1, a2)
< struct buf *a1;
< int a2;
---
> /*
> * Get an empty, disassociated buffer of given size.
> */
> struct buf *
> geteblk(int size)
258,262c542,547
<
< /*
< * Body deleted.
< */
< return (0);
---
> struct buf *bp;
> while ((bp = getnewbuf(0, 0)) == 0)
> ;
> allocbuf(bp, size);
> bp->b_flags |= B_INVAL;
> return (bp);
265,267c550,555
< struct buf *
< getnewbuf(a1, a2)
< int a1, a2;
---
> /*
> * Modify the length of a buffer's underlying buffer storage without
> * destroying information (unless, of course the buffer is shrinking).
> */
> void
> allocbuf(struct buf *bp, int size)
270,273c558,578
< /*
< * Body deleted.
< */
< return ((struct buf *)0);
---
> int newbsize = round_page(size);
>
> if( newbsize == bp->b_bufsize) {
> bp->b_bcount = size;
> return;
> } else if( newbsize < bp->b_bufsize) {
> vm_hold_free_pages(
> (vm_offset_t) bp->b_data + newbsize,
> (vm_offset_t) bp->b_data + bp->b_bufsize);
> } else if( newbsize > bp->b_bufsize) {
> vm_hold_load_pages(
> (vm_offset_t) bp->b_data + bp->b_bufsize,
> (vm_offset_t) bp->b_data + newbsize);
> }
>
> /* adjust buffer cache's idea of memory allocated to buffer contents */
> freebufspace -= newbsize - bp->b_bufsize;
> allocbufspace += newbsize - bp->b_bufsize;
>
> bp->b_bufsize = newbsize;
> bp->b_bcount = size;
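[Editor's note: allocbuf keeps two sizes: b_bcount is what the caller asked for, while b_bufsize is the page-rounded storage actually mapped behind b_data, so growth and shrinkage happen only in whole pages. A small standalone sketch of that arithmetic, with PAGE_SIZE fixed at 4096 for illustration.]

#include <stdio.h>

#define PAGE_SIZE 4096
/* round up to a page boundary, as the kernel's round_page() does */
#define round_page(x) (((x) + PAGE_SIZE - 1) & ~(unsigned long)(PAGE_SIZE - 1))

int main(void)
{
        unsigned long bufsize = round_page(3000);  /* current storage: 1 page */
        unsigned long newbsize = round_page(5000); /* requested: 2 pages */

        if (newbsize == bufsize)
                printf("no page change, just set b_bcount\n");
        else if (newbsize < bufsize)
                printf("free %lu bytes of pages\n", bufsize - newbsize);
        else
                printf("map %lu more bytes of pages\n", newbsize - bufsize);
        return 0;
}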
276,277c581,585
< biowait(a1)
< struct buf *a1;
---
> /*
> * Wait for buffer I/O completion, returning error status.
> */
> int
> biowait(register struct buf *bp)
278a587
> int x;
280,283c589,608
< /*
< * Body deleted.
< */
< return (EIO);
---
> x = splbio();
> while ((bp->b_flags & B_DONE) == 0)
> tsleep((caddr_t)bp, PRIBIO, "biowait", 0);
> if((bp->b_flags & B_ERROR) || bp->b_error) {
> if ((bp->b_flags & B_INVAL) == 0) {
> bp->b_flags |= B_INVAL;
> bp->b_dev = NODEV;
> LIST_REMOVE(bp, b_hash);
> LIST_INSERT_HEAD(&invalhash, bp, b_hash);
> }
> if (!bp->b_error)
> bp->b_error = EIO;
> else
> bp->b_flags |= B_ERROR;
> splx(x);
> return (bp->b_error);
> } else {
> splx(x);
> return (0);
> }
285a611,615
> /*
> * Finish I/O on a buffer, calling an optional function.
> * This is usually called from interrupt level, so process blocking
> * is not *a good idea*.
> */
287,288c617
< biodone(a1)
< struct buf *a1;
---
> biodone(register struct buf *bp)
289a619,621
> int s;
> s = splbio();
> bp->b_flags |= B_DONE;
291,294c623,647
< /*
< * Body deleted.
< */
< return;
---
> if ((bp->b_flags & B_READ) == 0) {
> vwakeup(bp);
> }
>
> /* call optional completion function if requested */
> if (bp->b_flags & B_CALL) {
> bp->b_flags &= ~B_CALL;
> (*bp->b_iodone)(bp);
> splx(s);
> return;
> }
>
> /*
> * For asynchronous completions, release the buffer now. The brelse
> * checks for B_WANTED and will do the wakeup there if necessary -
> * so no need to do a wakeup here in the async case.
> */
>
> if (bp->b_flags & B_ASYNC) {
> brelse(bp);
> } else {
> bp->b_flags &= ~B_WANTED;
> wakeup((caddr_t) bp);
> }
> splx(s);
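[Editor's note: biowait sleeps until biodone sets B_DONE and issues the wakeup; splbio() closes the window in which the interrupt could complete the I/O between the flag test and the sleep. In userspace the same handshake is a mutex plus a condition variable; a minimal pthreads analogue follows. This is an analogy, not the kernel API.]

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int done;                      /* stands in for B_DONE */

static void *io_side(void *arg)       /* plays the role of biodone */
{
        pthread_mutex_lock(&lk);
        done = 1;
        pthread_cond_broadcast(&cv);  /* the wakeup(bp) */
        pthread_mutex_unlock(&lk);
        return NULL;
}

int main(void)                        /* plays the role of biowait */
{
        pthread_t t;

        pthread_create(&t, NULL, io_side, NULL);
        pthread_mutex_lock(&lk);      /* the mutex stands in for splbio() */
        while (!done)                 /* same loop shape as biowait */
                pthread_cond_wait(&cv, &lk);
        pthread_mutex_unlock(&lk);
        pthread_join(t, NULL);
        printf("I/O complete\n");
        return 0;
}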
299a653,662
> int count;
> struct buf *bp;
>
> count = 0;
> for(bp = bufqueues[BQ_LOCKED].tqh_first;
> bp != NULL;
> bp = bp->b_freelist.tqe_next)
> count++;
> return(count);
> }
301,304c664,678
< /*
< * Body deleted.
< */
< return (0);
---
> #ifndef UPDATE_INTERVAL
> int vfs_update_interval = 30;
> #else
> int vfs_update_interval = UPDATE_INTERVAL;
> #endif
>
> void
> vfs_update() {
> (void) spl0();
> while(1) {
> tsleep((caddr_t)&vfs_update_wakeup, PRIBIO, "update",
> hz * vfs_update_interval);
> vfs_update_wakeup = 0;
> sync(curproc, NULL, NULL);
> }
307d680
< #ifdef DIAGNOSTIC
309,311c682,683
< * Print out statistics on the current allocation of the buffer pool.
< * Can be enabled to print out on every ``sync'' by setting "syncprt"
< * in vfs_syscalls.c using sysctl.
---
> * these routines are not in the correct place (yet)
> * also they work *ONLY* for kernel_pmap!!!
314,320c686,690
< vfs_bufstats()
< {
< int s, i, j, count;
< register struct buf *bp;
< register struct bqueues *dp;
< int counts[MAXBSIZE/CLBYTES+1];
< static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };
---
> vm_hold_load_pages(vm_offset_t froma, vm_offset_t toa) {
> vm_offset_t pg;
> vm_page_t p;
> vm_offset_t from = round_page(froma);
> vm_offset_t to = round_page(toa);
322,329c692,699
< for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
< count = 0;
< for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
< counts[j] = 0;
< s = splbio();
< for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
< counts[bp->b_bufsize/CLBYTES]++;
< count++;
---
> for(pg = from ; pg < to ; pg += PAGE_SIZE) {
> vm_offset_t pa;
>
> tryagain:
> p = vm_page_alloc(kernel_object, pg - VM_MIN_KERNEL_ADDRESS);
> if( !p) {
> VM_WAIT;
> goto tryagain;
331,336c701,704
< splx(s);
< printf("%s: total-%d", bname[i], count);
< for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
< if (counts[j] != 0)
< printf(", %d-%d", j * CLBYTES, counts[j]);
< printf("\n");
---
>
> vm_page_wire(p);
> pmap_enter(kernel_pmap, pg, VM_PAGE_TO_PHYS(p),
> VM_PROT_READ|VM_PROT_WRITE, 1);
339c707,732
< #endif /* DIAGNOSTIC */
---
>
> void
> vm_hold_free_pages(vm_offset_t froma, vm_offset_t toa) {
> vm_offset_t pg;
> vm_page_t p;
> vm_offset_t from = round_page(froma);
> vm_offset_t to = round_page(toa);
>
> for(pg = from ; pg < to ; pg += PAGE_SIZE) {
> vm_offset_t pa;
> pa = pmap_kextract(pg);
> if( !pa) {
> printf("No pa for va: %x\n", pg);
> } else {
> p = PHYS_TO_VM_PAGE( pa);
> pmap_remove(kernel_pmap, pg, pg + PAGE_SIZE);
> vm_page_free(p);
> }
> }
> }
>
> void
> bufstats()
> {
> }
>
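[Editor's note: bufinit maps each header's b_data into pageable KVA with kmem_alloc_pageable, and allocbuf then populates or trims that range a page at a time via vm_hold_load_pages and vm_hold_free_pages. A rough userspace analogue reserves address space first and then commits or releases pages within it. mmap/munmap semantics differ from wiring kernel pages, so this is only a sketch.]

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define MAXBSIZE (64 * 1024)

int main(void)
{
        /* reserve address space, no backing yet: the kmem_alloc_pageable step */
        char *data = mmap(NULL, MAXBSIZE, PROT_NONE,
            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (data == MAP_FAILED)
                return 1;

        /* commit the first 8K: the vm_hold_load_pages step */
        mmap(data, 8192, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
        memset(data, 0, 8192);

        /* give the second page back: the vm_hold_free_pages step */
        munmap(data + 4096, 4096);

        printf("first page still usable: %d\n", data[0] == 0);
        munmap(data, MAXBSIZE);       /* tear down the whole reservation */
        return 0;
}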