vfs_cluster.c revision 122537
/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/vfs_cluster.c 122537 2003-11-12 08:01:40Z mckusick $");

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>

#if defined(CLUSTERDEBUG)
static int rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0,
    "Debug VFS clustering code");
#endif

static MALLOC_DEFINE(M_SEGMENT, "cluster_save buffer", "cluster_save buffer");

static struct cluster_save *
	cluster_collectbufs(struct vnode *vp, struct buf *last_bp);
static struct buf *
	cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
	    daddr_t blkno, long size, int run, struct buf *fbp);

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
    "Cluster write-behind; 0: disable, 1: enable, 2: backed off");

static int read_max = 8;
SYSCTL_INT(_vfs, OID_AUTO, read_max, CTLFLAG_RW, &read_max, 0,
    "Cluster read-ahead max block count");

/* Page expended to mark partially backed buffers */
extern vm_page_t bogus_page;

/*
 * Number of physical bufs (pbufs) this subsystem is allowed.
 * Manipulated by vm_pager.c
 */
extern int cluster_pbuf_freecnt;
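
/*
 * Explanatory note (added commentary, not in the original source): the
 * read-ahead window that cluster_read() computes below is
 * min(seqcount, read_max, nbuf/8) blocks, clamped so that it never
 * extends past end of file.  As an illustration only: with the default
 * vfs.read_max of 8 and 16KB filesystem blocks, at most 8 * 16KB = 128KB
 * of read-ahead is scheduled on behalf of a single request.
 */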

/*
 * Read data to a buf, including read-ahead if we find this to be beneficial.
 * cluster_read replaces bread.
 */
int
cluster_read(vp, filesize, lblkno, size, cred, totread, seqcount, bpp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lblkno;
	long size;
	struct ucred *cred;
	long totread;
	int seqcount;
	struct buf **bpp;
{
	struct buf *bp, *rbp, *reqbp;
	daddr_t blkno, origblkno;
	int maxra, racluster;
	int error, ncontig;
	int i;

	error = 0;

	/*
	 * Try to limit the amount of read-ahead by a few
	 * ad-hoc parameters.  This needs work!!!
	 */
	racluster = vp->v_mount->mnt_iosize_max / size;
	maxra = seqcount;
	maxra = min(read_max, maxra);
	maxra = min(nbuf/8, maxra);
	if (((u_quad_t)(lblkno + maxra + 1) * size) > filesize)
		maxra = (filesize / size) - lblkno;

	/*
	 * get the requested block
	 */
	*bpp = reqbp = bp = getblk(vp, lblkno, size, 0, 0, 0);
	origblkno = lblkno;

	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		if (!seqcount) {
			return 0;
		} else if ((bp->b_flags & B_RAM) == 0) {
			return 0;
		} else {
			int s;
			bp->b_flags &= ~B_RAM;
			/*
			 * We do the spl here so that there is no window
			 * between the incore and the b_usecount increment
			 * below.  We opt to keep the spl out of the loop
			 * for efficiency.
			 */
			s = splbio();
			VI_LOCK(vp);
			for (i = 1; i < maxra; i++) {
				/*
				 * Stop if the buffer does not exist or it
				 * is invalid (about to go away?)
				 */
				rbp = gbincore(vp, lblkno+i);
				if (rbp == NULL || (rbp->b_flags & B_INVAL))
					break;

				/*
				 * Set another read-ahead mark so we know
				 * to check again.
				 */
				if (((i % racluster) == (racluster - 1)) ||
				    (i == (maxra - 1)))
					rbp->b_flags |= B_RAM;
			}
			VI_UNLOCK(vp);
			splx(s);
			if (i >= maxra) {
				return 0;
			}
			lblkno += i;
		}
		reqbp = bp = NULL;
	/*
	 * If it isn't in the cache, then get a chunk from
	 * disk if sequential, otherwise just get the block.
	 */
	} else {
		off_t firstread = bp->b_offset;
		int nblks;

		KASSERT(bp->b_offset != NOOFFSET,
		    ("cluster_read: no buffer offset"));

		ncontig = 0;

		/*
		 * Compute the total number of blocks that we should read
		 * synchronously.
		 */
		if (firstread + totread > filesize)
			totread = filesize - firstread;
		nblks = howmany(totread, size);
		if (nblks > racluster)
			nblks = racluster;

		/*
		 * Now compute the number of contiguous blocks.
		 */
		if (nblks > 1) {
			error = VOP_BMAP(vp, lblkno, NULL,
			    &blkno, &ncontig, NULL);
			/*
			 * If this failed to map just do the original block.
			 */
			if (error || blkno == -1)
				ncontig = 0;
		}

		/*
		 * If we have contiguous data available do a cluster
		 * otherwise just read the requested block.
		 */
		if (ncontig) {
			/* Account for our first block. */
			ncontig = min(ncontig + 1, nblks);
			if (ncontig < nblks)
				nblks = ncontig;
			bp = cluster_rbuild(vp, filesize, lblkno,
			    blkno, size, nblks, bp);
			lblkno += (bp->b_bufsize / size);
		} else {
			bp->b_flags |= B_RAM;
			bp->b_iocmd = BIO_READ;
			lblkno += 1;
		}
	}

	/*
	 * handle the synchronous read so that it is available ASAP.
	 */
	if (bp) {
		if ((bp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(bp, 0);
		}
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if ((bp->b_flags & B_ASYNC) || bp->b_iodone != NULL)
			BUF_KERNPROC(bp);
		bp->b_iooffset = dbtob(bp->b_blkno);
		error = VOP_STRATEGY(vp, bp);
		curproc->p_stats->p_ru.ru_inblock++;
		if (error)
			return (error);
	}

	/*
	 * If we have been doing sequential I/O, then do some read-ahead.
	 */
	while (lblkno < (origblkno + maxra)) {
		error = VOP_BMAP(vp, lblkno, NULL, &blkno, &ncontig, NULL);
		if (error)
			break;

		if (blkno == -1)
			break;

		/*
		 * We could throttle ncontig here by maxra but we might as
		 * well read the data if it is contiguous.  We're throttled
		 * by racluster anyway.
		 */
		if (ncontig) {
			ncontig = min(ncontig + 1, racluster);
			rbp = cluster_rbuild(vp, filesize, lblkno, blkno,
			    size, ncontig, NULL);
			lblkno += (rbp->b_bufsize / size);
			if (rbp->b_flags & B_DELWRI) {
				bqrelse(rbp);
				continue;
			}
		} else {
			rbp = getblk(vp, lblkno, size, 0, 0, 0);
			lblkno += 1;
			if (rbp->b_flags & B_DELWRI) {
				bqrelse(rbp);
				continue;
			}
			rbp->b_flags |= B_ASYNC | B_RAM;
			rbp->b_iocmd = BIO_READ;
			rbp->b_blkno = blkno;
		}
		if (rbp->b_flags & B_CACHE) {
			rbp->b_flags &= ~B_ASYNC;
			bqrelse(rbp);
			continue;
		}
		if ((rbp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(rbp, 0);
		}
		rbp->b_flags &= ~B_INVAL;
		rbp->b_ioflags &= ~BIO_ERROR;
		if ((rbp->b_flags & B_ASYNC) || rbp->b_iodone != NULL)
			BUF_KERNPROC(rbp);
		rbp->b_iooffset = dbtob(rbp->b_blkno);
		(void) VOP_STRATEGY(vp, rbp);
		curproc->p_stats->p_ru.ru_inblock++;
	}

	if (reqbp)
		return (bufwait(reqbp));
	else
		return (error);
}
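
/*
 * Illustrative summary (added commentary, not in the original source):
 * cluster_read() works in two phases.  The missing block is first read
 * synchronously, as one cluster built by cluster_rbuild() whenever
 * VOP_BMAP() reports contiguous disk blocks.  Then, while lblkno is
 * still inside the read-ahead window (origblkno + maxra), further
 * clusters are issued asynchronously with B_RAM marks, so that a later
 * cache hit on a marked buffer triggers the next round of read-ahead.
 * For example, with racluster = 8 and maxra = 16, a miss can issue one
 * synchronous 8-block cluster followed by one asynchronous 8-block
 * cluster.
 */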

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
static struct buf *
cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lbn;
	daddr_t blkno;
	long size;
	int run;
	struct buf *fbp;
{
	struct buf *bp, *tbp;
	daddr_t bn;
	int i, inc, j;

	GIANT_REQUIRED;

	KASSERT(size == vp->v_mount->mnt_stat.f_iosize,
	    ("cluster_rbuild: size %ld != filesize %jd\n",
	    size, (intmax_t)vp->v_mount->mnt_stat.f_iosize));

	/*
	 * avoid a division
	 */
	while ((u_quad_t) size * (lbn + run) > filesize) {
		--run;
	}

	if (fbp) {
		tbp = fbp;
		tbp->b_iocmd = BIO_READ;
	} else {
		tbp = getblk(vp, lbn, size, 0, 0, 0);
		if (tbp->b_flags & B_CACHE)
			return tbp;
		tbp->b_flags |= B_ASYNC | B_RAM;
		tbp->b_iocmd = BIO_READ;
	}

	tbp->b_blkno = blkno;
	if ((tbp->b_flags & B_MALLOC) ||
	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1))
		return tbp;

	bp = trypbuf(&cluster_pbuf_freecnt);
	if (bp == 0)
		return tbp;

	/*
	 * We are synthesizing a buffer out of vm_page_t's, but
	 * if the block size is not page aligned then the starting
	 * address may not be either.  Inherit the b_data offset
	 * from the original buffer.
	 */
	bp->b_data = (char *)((vm_offset_t)bp->b_data |
	    ((vm_offset_t)tbp->b_data & PAGE_MASK));
	bp->b_flags = B_ASYNC | B_CLUSTER | B_VMIO;
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = cluster_callback;
	bp->b_blkno = blkno;
	bp->b_lblkno = lbn;
	bp->b_offset = tbp->b_offset;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_rbuild: no buffer offset"));
	pbgetvp(vp, bp);

	TAILQ_INIT(&bp->b_cluster.cluster_head);

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	inc = btodb(size);
	for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
		if (i != 0) {
			if ((bp->b_npages * PAGE_SIZE) +
			    round_page(size) > vp->v_mount->mnt_iosize_max) {
				break;
			}

			tbp = getblk(vp, lbn + i, size, 0, 0, GB_LOCK_NOWAIT);

			/* Don't wait around for locked bufs. */
			if (tbp == NULL)
				break;

			/*
			 * Stop scanning if the buffer is fully valid
			 * (marked B_CACHE), or locked (may be doing a
			 * background write), or if the buffer is not
			 * VMIO backed.  The clustering code can only deal
			 * with VMIO-backed buffers.
			 */
			VI_LOCK(bp->b_vp);
			if ((tbp->b_vflags & BV_BKGRDINPROG) ||
			    (tbp->b_flags & B_CACHE) ||
			    (tbp->b_flags & B_VMIO) == 0) {
				VI_UNLOCK(bp->b_vp);
				bqrelse(tbp);
				break;
			}
			VI_UNLOCK(bp->b_vp);

			/*
			 * The buffer must be completely invalid in order to
			 * take part in the cluster.  If it is partially valid
			 * then we stop.
			 */
			VM_OBJECT_LOCK(tbp->b_object);
			for (j = 0; j < tbp->b_npages; j++) {
				VM_OBJECT_LOCK_ASSERT(tbp->b_pages[j]->object,
				    MA_OWNED);
				if (tbp->b_pages[j]->valid)
					break;
			}
			VM_OBJECT_UNLOCK(tbp->b_object);
			if (j != tbp->b_npages) {
				bqrelse(tbp);
				break;
			}

			/*
			 * Set a read-ahead mark as appropriate
			 */
			if ((fbp && (i == 1)) || (i == (run - 1)))
				tbp->b_flags |= B_RAM;

			/*
			 * Set the buffer up for an async read (XXX should
			 * we do this only if we do not wind up brelse()ing?).
			 * Set the block number if it isn't set, otherwise
			 * if it is make sure it matches the block number we
			 * expect.
			 */
			tbp->b_flags |= B_ASYNC;
			tbp->b_iocmd = BIO_READ;
			if (tbp->b_blkno == tbp->b_lblkno) {
				tbp->b_blkno = bn;
			} else if (tbp->b_blkno != bn) {
				brelse(tbp);
				break;
			}
		}
		/*
		 * XXX fbp from caller may not be B_ASYNC, but we are going
		 * to biodone() it in cluster_callback() anyway
		 */
		BUF_KERNPROC(tbp);
		TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
		    tbp, b_cluster.cluster_entry);
		VM_OBJECT_LOCK(tbp->b_object);
		vm_page_lock_queues();
		for (j = 0; j < tbp->b_npages; j += 1) {
			vm_page_t m;
			m = tbp->b_pages[j];
			vm_page_io_start(m);
			vm_object_pip_add(m->object, 1);
			if ((bp->b_npages == 0) ||
			    (bp->b_pages[bp->b_npages-1] != m)) {
				bp->b_pages[bp->b_npages] = m;
				bp->b_npages++;
			}
			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
				tbp->b_pages[j] = bogus_page;
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(tbp->b_object);
		/*
		 * XXX shouldn't this be += size for both, like in
		 * cluster_wbuild()?
		 *
		 * Don't inherit tbp->b_bufsize as it may be larger due to
		 * a non-page-aligned size.  Instead just aggregate using
		 * 'size'.
		 */
		if (tbp->b_bcount != size)
			printf("warning: tbp->b_bcount wrong %ld vs %ld\n",
			    tbp->b_bcount, size);
		if (tbp->b_bufsize != size)
			printf("warning: tbp->b_bufsize wrong %ld vs %ld\n",
			    tbp->b_bufsize, size);
		bp->b_bcount += size;
		bp->b_bufsize += size;
	}

	/*
	 * Fully valid pages in the cluster are already good and do not need
	 * to be re-read from disk.  Replace the page with bogus_page
	 */
	VM_OBJECT_LOCK(bp->b_object);
	for (j = 0; j < bp->b_npages; j++) {
		VM_OBJECT_LOCK_ASSERT(bp->b_pages[j]->object, MA_OWNED);
		if ((bp->b_pages[j]->valid & VM_PAGE_BITS_ALL) ==
		    VM_PAGE_BITS_ALL) {
			bp->b_pages[j] = bogus_page;
		}
	}
	VM_OBJECT_UNLOCK(bp->b_object);
	if (bp->b_bufsize > bp->b_kvasize)
		panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
		    bp->b_bufsize, bp->b_kvasize);
	bp->b_kvasize = bp->b_bufsize;

	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
	    (vm_page_t *)bp->b_pages, bp->b_npages);
	return (bp);
}
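
/*
 * Note on the bogus_page substitution above (added commentary, not in
 * the original source): a page that is already fully valid must not be
 * clobbered by the device read, so the cluster maps the throwaway
 * bogus_page in its slot instead.  The component buffer keeps the real,
 * valid page, and cluster_callback() simply unmaps the cluster's KVA
 * when the I/O completes.
 */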

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 */
void
cluster_callback(bp)
	struct buf *bp;
{
	struct buf *nbp, *tbp;
	int error = 0;

	GIANT_REQUIRED;

	/*
	 * Must propagate errors to all the components.
	 */
	if (bp->b_ioflags & BIO_ERROR)
		error = bp->b_error;

	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.
	 */
	for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head);
	    tbp; tbp = nbp) {
		nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry);
		if (error) {
			tbp->b_ioflags |= BIO_ERROR;
			tbp->b_error = error;
		} else {
			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
			tbp->b_flags &= ~B_INVAL;
			tbp->b_ioflags &= ~BIO_ERROR;
			/*
			 * XXX the bdwrite()/bqrelse() issued during
			 * cluster building clears B_RELBUF (see bqrelse()
			 * comment).  If direct I/O was specified, we have
			 * to restore it here to allow the buffer and VM
			 * to be freed.
			 */
			if (tbp->b_flags & B_DIRECT)
				tbp->b_flags |= B_RELBUF;
		}
		bufdone(tbp);
	}
	relpbuf(bp, &cluster_pbuf_freecnt);
}
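
/*
 * Explanatory note (added commentary, not in the original source): the
 * cluster is one physical I/O, so cluster_callback() completes every
 * component buffer at once; the bufdone() calls above are also what wake
 * a thread sleeping in bufwait() on the originally requested buffer in
 * cluster_read().
 */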

/*
 *	cluster_wbuild_wb:
 *
 *	Implement modified write build for cluster.
 *
 *		write_behind = 0	write behind disabled
 *		write_behind = 1	write behind normal (default)
 *		write_behind = 2	write behind backed-off
 */

static __inline int
cluster_wbuild_wb(struct vnode *vp, long size, daddr_t start_lbn, int len)
{
	int r = 0;

	switch(write_behind) {
	case 2:
		if (start_lbn < len)
			break;
		start_lbn -= len;
		/* FALLTHROUGH */
	case 1:
		r = cluster_wbuild(vp, size, start_lbn, len);
		/* FALLTHROUGH */
	default:
		/* FALLTHROUGH */
		break;
	}
	return(r);
}
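
/*
 * Worked example for the backed-off mode (added commentary, not in the
 * original source): with write_behind = 2, a request for blocks
 * [32, 32 + 8) is shifted back to start_lbn = 24, so the window
 * preceding the requested one is flushed while the most recent blocks
 * stay dirty in the hope of further clustering; a request with
 * start_lbn < len is skipped entirely.
 */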

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1.	Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
void
cluster_write(bp, filesize, seqcount)
	struct buf *bp;
	u_quad_t filesize;
	int seqcount;
{
	struct vnode *vp;
	daddr_t lbn;
	int maxclen, cursize;
	int lblocksize;
	int async;

	vp = bp->b_vp;
	if (vp->v_type == VREG) {
		async = vp->v_mount->mnt_flag & MNT_ASYNC;
		lblocksize = vp->v_mount->mnt_stat.f_iosize;
	} else {
		async = 0;
		lblocksize = bp->b_bufsize;
	}
	lbn = bp->b_lblkno;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_write: no buffer offset"));

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
		maxclen = vp->v_mount->mnt_iosize_max / lblocksize - 1;
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * sought to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster.  Otherwise try
			 * reallocating to make it sequential.
			 *
			 * Change to algorithm: only push previous cluster if
			 * it was sequential from the point of view of the
			 * seqcount heuristic, otherwise leave the buffer
			 * intact so we can potentially optimize the I/O
			 * later on in the buf_daemon or update daemon
			 * flush.
			 */
			cursize = vp->v_lastw - vp->v_cstart + 1;
			if (((u_quad_t) bp->b_offset + lblocksize) != filesize ||
			    lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
				if (!async && seqcount > 0) {
					cluster_wbuild_wb(vp, lblocksize,
					    vp->v_cstart, cursize);
				}
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster
					 * if *really* writing sequentially
					 * in the logical file (seqcount > 1),
					 * otherwise delay it in the hopes that
					 * the low level disk driver can
					 * optimize the write ordering.
					 */
					for (bpp = buflist->bs_children;
					     bpp < endbp; bpp++)
						brelse(*bpp);
					free(buflist, M_SEGMENT);
					if (seqcount > 1) {
						cluster_wbuild_wb(vp,
						    lblocksize, vp->v_cstart,
						    cursize);
					}
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp <= endbp; bpp++)
						bdwrite(*bpp);
					free(buflist, M_SEGMENT);
					vp->v_lastw = lbn;
					vp->v_lasta = bp->b_blkno;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster.  If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((vp->v_type == VREG) &&
		    ((u_quad_t) bp->b_offset + lblocksize) != filesize &&
		    (bp->b_blkno == bp->b_lblkno) &&
		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
		    bp->b_blkno == -1)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_blkno;
			vp->v_cstart = lbn + 1;
			vp->v_lastw = lbn;
			return;
		}
		vp->v_clen = maxclen;
		if (!async && maxclen == 0) {	/* I/O not contiguous */
			vp->v_cstart = lbn + 1;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out if seqcount tells us we
		 * are operating sequentially, otherwise let the buf or
		 * update daemon handle it.
		 */
		bdwrite(bp);
		if (seqcount > 1)
			cluster_wbuild_wb(vp, lblocksize, vp->v_cstart,
			    vp->v_clen + 1);
		vp->v_clen = 0;
		vp->v_cstart = lbn + 1;
	} else if (vm_page_count_severe()) {
		/*
		 * We are low on memory, get it going NOW
		 */
		bawrite(bp);
	} else {
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	}
	vp->v_lastw = lbn;
	vp->v_lasta = bp->b_blkno;
}
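
/*
 * Illustrative walk-through (added commentary, not in the original
 * source): for a process appending sequentially with 16KB blocks and a
 * 128KB mnt_iosize_max, maxclen is 7, so cluster_write() takes the
 * "begin cluster" path at lbn 0 (bdwrite), the "middle of a cluster"
 * path for lbn 1 through 6 (bdwrite), and at lbn 7 == v_cstart + v_clen
 * it pushes the whole 8-block cluster through cluster_wbuild_wb(),
 * provided seqcount indicates sequential writing.
 */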

/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of
 * the current block (if last_bp == NULL).
 */
int
cluster_wbuild(vp, size, start_lbn, len)
	struct vnode *vp;
	long size;
	daddr_t start_lbn;
	int len;
{
	struct buf *bp, *tbp;
	int i, j, s;
	int totalwritten = 0;
	int dbsize = btodb(size);

	GIANT_REQUIRED;

	while (len > 0) {
		s = splbio();
		/*
		 * If the buffer is not delayed-write (i.e. dirty), or it
		 * is delayed-write but either locked or inval, it cannot
		 * partake in the clustered write.
		 */
		VI_LOCK(vp);
		if ((tbp = gbincore(vp, start_lbn)) == NULL ||
		    (tbp->b_vflags & BV_BKGRDINPROG)) {
			VI_UNLOCK(vp);
			++start_lbn;
			--len;
			splx(s);
			continue;
		}
		if (BUF_LOCK(tbp,
		    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, VI_MTX(vp))) {
			++start_lbn;
			--len;
			splx(s);
			continue;
		}
		if ((tbp->b_flags & (B_INVAL | B_DELWRI)) != B_DELWRI) {
			BUF_UNLOCK(tbp);
			++start_lbn;
			--len;
			splx(s);
			continue;
		}
		bremfree(tbp);
		tbp->b_flags &= ~B_DONE;
		splx(s);

		/*
		 * Extra memory in the buffer, punt on this buffer.
		 * XXX we could handle this in most cases, but we would
		 * have to push the extra memory down to after our max
		 * possible cluster size and then potentially pull it back
		 * up if the cluster was terminated prematurely--too much
		 * hassle.
		 */
		if (((tbp->b_flags & (B_CLUSTEROK | B_MALLOC | B_VMIO)) !=
		    (B_CLUSTEROK | B_VMIO)) ||
		    (tbp->b_bcount != tbp->b_bufsize) ||
		    (tbp->b_bcount != size) ||
		    (len == 1) ||
		    ((bp = getpbuf(&cluster_pbuf_freecnt)) == NULL)) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			++start_lbn;
			--len;
			continue;
		}

		/*
		 * We got a pbuf to make the cluster in.
		 * so initialise it.
		 */
		TAILQ_INIT(&bp->b_cluster.cluster_head);
		bp->b_bcount = 0;
		bp->b_magic = tbp->b_magic;
		bp->b_op = tbp->b_op;
		bp->b_bufsize = 0;
		bp->b_npages = 0;
		if (tbp->b_wcred != NOCRED)
			bp->b_wcred = crhold(tbp->b_wcred);

		bp->b_blkno = tbp->b_blkno;
		bp->b_lblkno = tbp->b_lblkno;
		bp->b_offset = tbp->b_offset;

		/*
		 * We are synthesizing a buffer out of vm_page_t's, but
		 * if the block size is not page aligned then the starting
		 * address may not be either.  Inherit the b_data offset
		 * from the original buffer.
		 */
		bp->b_data = (char *)((vm_offset_t)bp->b_data |
		    ((vm_offset_t)tbp->b_data & PAGE_MASK));
		bp->b_flags |= B_CLUSTER |
		    (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
		bp->b_iodone = cluster_callback;
		pbgetvp(vp, bp);
		/*
		 * From this location in the file, scan forward to see
		 * if there are buffers with adjacent data that need to
		 * be written as well.
		 */
		for (i = 0; i < len; ++i, ++start_lbn) {
			if (i != 0) { /* If not the first buffer */
				s = splbio();
				/*
				 * If the adjacent data is not even in core it
				 * can't need to be written.
				 */
				VI_LOCK(vp);
				if ((tbp = gbincore(vp, start_lbn)) == NULL ||
				    (tbp->b_vflags & BV_BKGRDINPROG)) {
					VI_UNLOCK(vp);
					splx(s);
					break;
				}

				/*
				 * If it IS in core, but has different
				 * characteristics, or is locked (which
				 * means it could be undergoing a background
				 * I/O or be in a weird state), then don't
				 * cluster with it.
				 */
				if (BUF_LOCK(tbp,
				    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
				    VI_MTX(vp))) {
					splx(s);
					break;
				}

				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
				    B_INVAL | B_DELWRI | B_NEEDCOMMIT))
				    != (B_DELWRI | B_CLUSTEROK |
				    (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
				    tbp->b_wcred != bp->b_wcred) {
					BUF_UNLOCK(tbp);
					splx(s);
					break;
				}

				/*
				 * Check that the combined cluster
				 * would make sense with regard to pages
				 * and would not be too large
				 */
				if ((tbp->b_bcount != size) ||
				    ((bp->b_blkno + (dbsize * i)) !=
				    tbp->b_blkno) ||
				    ((tbp->b_npages + bp->b_npages) >
				    (vp->v_mount->mnt_iosize_max / PAGE_SIZE))) {
					BUF_UNLOCK(tbp);
					splx(s);
					break;
				}
				/*
				 * Ok, it's passed all the tests,
				 * so remove it from the free list
				 * and mark it busy.  We will use it.
				 */
				bremfree(tbp);
				tbp->b_flags &= ~B_DONE;
				splx(s);
			} /* end of code for non-first buffers only */
			/* check for latent dependencies to be handled */
			if ((LIST_FIRST(&tbp->b_dep)) != NULL) {
				tbp->b_iocmd = BIO_WRITE;
				buf_start(tbp);
			}
			/*
			 * If the IO is via the VM then we do some
			 * special VM hackery (yuck).  Since the buffer's
			 * block size may not be page-aligned it is possible
			 * for a page to be shared between two buffers.  We
			 * have to get rid of the duplication when building
			 * the cluster.
			 */
			if (tbp->b_flags & B_VMIO) {
				vm_page_t m;

				if (i != 0) { /* if not first buffer */
					for (j = 0; j < tbp->b_npages; j += 1) {
						m = tbp->b_pages[j];
						if (m->flags & PG_BUSY) {
							bqrelse(tbp);
							goto finishcluster;
						}
					}
				}
				if (tbp->b_object != NULL)
					VM_OBJECT_LOCK(tbp->b_object);
				vm_page_lock_queues();
				for (j = 0; j < tbp->b_npages; j += 1) {
					m = tbp->b_pages[j];
					vm_page_io_start(m);
					vm_object_pip_add(m->object, 1);
					if ((bp->b_npages == 0) ||
					    (bp->b_pages[bp->b_npages - 1] != m)) {
						bp->b_pages[bp->b_npages] = m;
						bp->b_npages++;
					}
				}
				vm_page_unlock_queues();
				if (tbp->b_object != NULL)
					VM_OBJECT_UNLOCK(tbp->b_object);
			}
			bp->b_bcount += size;
			bp->b_bufsize += size;

			s = splbio();
			bundirty(tbp);
			tbp->b_flags &= ~B_DONE;
			tbp->b_ioflags &= ~BIO_ERROR;
			tbp->b_flags |= B_ASYNC;
			tbp->b_iocmd = BIO_WRITE;
			reassignbuf(tbp, tbp->b_vp);	/* put on clean list */
			VI_LOCK(tbp->b_vp);
			++tbp->b_vp->v_numoutput;
			VI_UNLOCK(tbp->b_vp);
			splx(s);
			BUF_KERNPROC(tbp);
			TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
			    tbp, b_cluster.cluster_entry);
		}
	finishcluster:
		pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		    (vm_page_t *) bp->b_pages, bp->b_npages);
		if (bp->b_bufsize > bp->b_kvasize)
			panic(
			    "cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
			    bp->b_bufsize, bp->b_kvasize);
		bp->b_kvasize = bp->b_bufsize;
		totalwritten += bp->b_bufsize;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bufsize;
		bawrite(bp);

		len -= i;
	}
	return totalwritten;
}
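
/*
 * Note (added commentary, not in the original source): cluster_wbuild()
 * returns the number of bytes queued for write, whether clustered or
 * pushed singly via bawrite(), and cluster_wbuild_wb() propagates that
 * count.  A buffer failing any compatibility test (size, credentials,
 * disk contiguity, page count) ends the inner scan, and the outer loop
 * starts a fresh cluster at the block that stopped it.
 */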

/*
 * Collect together all the buffers in a cluster.
 * Plus add one additional buffer.
 */
static struct cluster_save *
cluster_collectbufs(vp, last_bp)
	struct vnode *vp;
	struct buf *last_bp;
{
	struct cluster_save *buflist;
	struct buf *bp;
	daddr_t lbn;
	int i, len;

	len = vp->v_lastw - vp->v_cstart + 1;
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++) {
		(void) bread(vp, lbn, last_bp->b_bcount, NOCRED, &bp);
		buflist->bs_children[i] = bp;
		if (bp->b_blkno == bp->b_lblkno)
			VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
			    NULL, NULL);
	}
	buflist->bs_children[i] = bp = last_bp;
	if (bp->b_blkno == bp->b_lblkno)
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
		    NULL, NULL);
	buflist->bs_nchildren = i + 1;
	return (buflist);
}
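
/*
 * Usage sketch (hypothetical, for illustration only, not part of this
 * file): a filesystem read path would substitute cluster_read() for
 * bread() when access looks sequential, roughly:
 *
 *	if (seqcount > 1)
 *		error = cluster_read(vp, ip->i_size, lbn, size,
 *		    NOCRED, uio->uio_resid, seqcount, &bp);
 *	else
 *		error = bread(vp, lbn, size, NOCRED, &bp);
 *
 * which mirrors the way ffs_read() chooses between the two.
 */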