vfs_bio.c revision 42014
/*
 * Copyright (c) 1994,1997 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $Id: vfs_bio.c,v 1.188 1998/12/22 14:43:58 luoqi Exp $
 */

/*
 * This file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author: John S. Dyson
 * Significant help during the development and debugging phases
 * had been provided by David Greenman, also of the FreeBSD core team.
 *
 * See man buf(9) for more info.
 */

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <miscfs/specfs/specdev.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");

struct bio_ops bioops;      /* I/O operation notification */

#if 0 /* replaced by sched_sync */
static void vfs_update __P((void));
static struct proc *updateproc;
static struct kproc_desc up_kp = {
    "update",
    vfs_update,
    &updateproc
};
SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
#endif

struct buf *buf;            /* buffer header pool */
struct swqueue bswlist;

static void vm_hold_free_pages(struct buf *bp, vm_offset_t from,
    vm_offset_t to);
static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
    vm_offset_t to);
static void vfs_buf_set_valid(struct buf *bp, vm_ooffset_t foff,
    vm_offset_t off, vm_offset_t size, vm_page_t m);
static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
    int pageno, vm_page_t m);
static void vfs_clean_pages(struct buf *bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);
static void flushdirtybuffers(int slpflag, int slptimeo);

int needsbuffer;

/*
 * Internal update daemon, process 3
 * The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;

/*
 * buffers base kva
 */

/*
 * bogus page -- for I/O to/from partially complete buffers.
 * This is a temporary solution to the problem, but it is not
 * really that bad.  It would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
static vm_offset_t bogus_offset;

static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
    bufmallocspace, maxbufmallocspace;
int numdirtybuffers;
static int lodirtybuffers, hidirtybuffers;
static int numfreebuffers, lofreebuffers, hifreebuffers;
static int kvafreespace;

SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD,
    &numdirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW,
    &lodirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW,
    &hidirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD,
    &numfreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW,
    &lofreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW,
    &hifreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RW,
    &maxbufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD,
    &bufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxvmiobufspace, CTLFLAG_RW,
    &maxvmiobufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, vmiospace, CTLFLAG_RD,
    &vmiospace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW,
    &maxbufmallocspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD,
    &bufmallocspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, kvafreespace, CTLFLAG_RD,
    &kvafreespace, 0, "");

static LIST_HEAD(bufhashhdr, buf) bufhashtbl[BUFHSZ], invalhash;
struct bqueues bufqueues[BUFFER_QUEUES] = {0};

extern int vm_swap_size;

#define BUF_MAXUSE 24

#define VFS_BIO_NEED_ANY 1
#define VFS_BIO_NEED_LOWLIMIT 2
#define VFS_BIO_NEED_FREE 4

/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
    struct buf *bp;
    int i;

    TAILQ_INIT(&bswlist);
    LIST_INIT(&invalhash);

    /* first, make a null hash table */
    for (i = 0; i < BUFHSZ; i++)
        LIST_INIT(&bufhashtbl[i]);

    /* next, make a null set of free lists */
    for (i = 0; i < BUFFER_QUEUES; i++)
        TAILQ_INIT(&bufqueues[i]);

    /* finally, initialize each buffer header and stick on empty q */
    for (i = 0; i < nbuf; i++) {
        bp = &buf[i];
        bzero(bp, sizeof *bp);
        bp->b_flags = B_INVAL;  /* we're just an empty header */
        bp->b_dev = NODEV;
        bp->b_rcred = NOCRED;
        bp->b_wcred = NOCRED;
        bp->b_qindex = QUEUE_EMPTY;
        bp->b_xflags = 0;
        LIST_INIT(&bp->b_dep);
        TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
        LIST_INSERT_HEAD(&invalhash, bp, b_hash);
    }
    /*
     * maxbufspace is currently calculated assuming all filesystem
     * blocks are 8K.  If you happen to use a 16K filesystem, the size
     * of the buffer cache is still the same as it would be for 8K
     * filesystems.  This keeps the size of the buffer cache "in check"
     * for big block filesystems.
     */
    maxbufspace = (nbuf + 8) * DFLTBSIZE;
    /*
     * reserve 1/3 of the buffers for metadata (VDIR) which might not
     * be VMIO'ed
     */
    maxvmiobufspace = 2 * maxbufspace / 3;
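    /*
     * A worked example of the sizing above (hypothetical numbers,
     * assuming DFLTBSIZE is 4K): with nbuf = 1024 headers,
     * maxbufspace = (1024 + 8) * 4096, roughly 4MB, of which about
     * two thirds may be consumed by VMIO buffers and the remaining
     * third is held back for (typically non-VMIO) metadata.
     */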
    /*
     * Limit the amount of malloc memory since it is wired permanently
     * into the kernel space.  Even though this is accounted for in the
     * buffer allocation, we don't want the malloced region to grow
     * uncontrolled.  The malloc scheme improves memory utilization
     * significantly on average (small) directories.
     */
    maxbufmallocspace = maxbufspace / 20;

    /*
     * Reduce the chance of deadlock conditions by limiting the
     * number of dirty buffers.
     */
    hidirtybuffers = nbuf / 8 + 20;
    lodirtybuffers = nbuf / 16 + 10;
    numdirtybuffers = 0;
    lofreebuffers = nbuf / 18 + 5;
    hifreebuffers = 2 * lofreebuffers;
    numfreebuffers = nbuf;
    kvafreespace = 0;

    bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
    bogus_page = vm_page_alloc(kernel_object,
        ((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
        VM_ALLOC_NORMAL);
}

/*
 * Free the kva allocation for a buffer.
 * Must be called only at splbio or higher,
 * as this is the only locking for buffer_map.
 */
static void
bfreekva(struct buf *bp)
{
    if (bp->b_kvasize == 0)
        return;

    vm_map_delete(buffer_map,
        (vm_offset_t) bp->b_kvabase,
        (vm_offset_t) bp->b_kvabase + bp->b_kvasize);

    bp->b_kvasize = 0;
}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf *bp)
{
    int s = splbio();

    if (bp->b_qindex != QUEUE_NONE) {
        if (bp->b_qindex == QUEUE_EMPTY) {
            kvafreespace -= bp->b_kvasize;
        }
        TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
        bp->b_qindex = QUEUE_NONE;
    } else {
#if !defined(MAX_PERF)
        panic("bremfree: removing a buffer when not on a queue");
#endif
    }
    if ((bp->b_flags & B_INVAL) ||
        (bp->b_flags & (B_DELWRI|B_LOCKED)) == 0)
        --numfreebuffers;
    splx(s);
}

/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
    struct buf **bpp)
{
    struct buf *bp;

    bp = getblk(vp, blkno, size, 0, 0);
    *bpp = bp;

    /* if not found in cache, do some I/O */
    if ((bp->b_flags & B_CACHE) == 0) {
        if (curproc != NULL)
            curproc->p_stats->p_ru.ru_inblock++;
        bp->b_flags |= B_READ;
        bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
        if (bp->b_rcred == NOCRED) {
            if (cred != NOCRED)
                crhold(cred);
            bp->b_rcred = cred;
        }
        vfs_busy_pages(bp, 0);
        VOP_STRATEGY(vp, bp);
        return (biowait(bp));
    }
    return (0);
}
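/*
 * A typical caller of bread() (minimal sketch; the surrounding
 * filesystem code is assumed):
 *
 *      struct buf *bp;
 *      int error;
 *
 *      if ((error = bread(vp, lbn, bsize, NOCRED, &bp)) != 0) {
 *          brelse(bp);
 *          return (error);
 *      }
 *      ...examine bp->b_data...
 *      bqrelse(bp);
 *
 * On error the buffer must still be released; on success the caller
 * gives it back with bqrelse() (contents still valid) or brelse().
 */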
/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode *vp, daddr_t blkno, int size,
    daddr_t *rablkno, int *rabsize,
    int cnt, struct ucred *cred, struct buf **bpp)
{
    struct buf *bp, *rabp;
    int i;
    int rv = 0, readwait = 0;

    *bpp = bp = getblk(vp, blkno, size, 0, 0);

    /* if not found in cache, do some I/O */
    if ((bp->b_flags & B_CACHE) == 0) {
        if (curproc != NULL)
            curproc->p_stats->p_ru.ru_inblock++;
        bp->b_flags |= B_READ;
        bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
        if (bp->b_rcred == NOCRED) {
            if (cred != NOCRED)
                crhold(cred);
            bp->b_rcred = cred;
        }
        vfs_busy_pages(bp, 0);
        VOP_STRATEGY(vp, bp);
        ++readwait;
    }
    for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
        if (inmem(vp, *rablkno))
            continue;
        rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

        if ((rabp->b_flags & B_CACHE) == 0) {
            if (curproc != NULL)
                curproc->p_stats->p_ru.ru_inblock++;
            rabp->b_flags |= B_READ | B_ASYNC;
            rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
            if (rabp->b_rcred == NOCRED) {
                if (cred != NOCRED)
                    crhold(cred);
                rabp->b_rcred = cred;
            }
            vfs_busy_pages(rabp, 0);
            VOP_STRATEGY(vp, rabp);
        } else {
            brelse(rabp);
        }
    }

    if (readwait) {
        rv = biowait(bp);
    }
    return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf *bp)
{
    int oldflags, s;
    struct vnode *vp;
    struct mount *mp;

    if (bp->b_flags & B_INVAL) {
        brelse(bp);
        return (0);
    }

    oldflags = bp->b_flags;

#if !defined(MAX_PERF)
    if ((bp->b_flags & B_BUSY) == 0)
        panic("bwrite: buffer is not busy???");
#endif

    bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
    bp->b_flags |= B_WRITEINPROG;

    s = splbio();
    if ((oldflags & B_DELWRI) == B_DELWRI) {
        --numdirtybuffers;
        reassignbuf(bp, bp->b_vp);
    }

    bp->b_vp->v_numoutput++;
    vfs_busy_pages(bp, 1);
    if (curproc != NULL)
        curproc->p_stats->p_ru.ru_oublock++;
    splx(s);
    VOP_STRATEGY(bp->b_vp, bp);

    /*
     * Collect statistics on synchronous and asynchronous writes.
     * Writes to block devices are charged to their associated
     * filesystem (if any).
     */
    if ((vp = bp->b_vp) != NULL) {
        if (vp->v_type == VBLK)
            mp = vp->v_specmountpoint;
        else
            mp = vp->v_mount;
        if (mp != NULL)
            if ((oldflags & B_ASYNC) == 0)
                mp->mnt_stat.f_syncwrites++;
            else
                mp->mnt_stat.f_asyncwrites++;
    }

    if ((oldflags & B_ASYNC) == 0) {
        int rtval = biowait(bp);
        brelse(bp);
        return (rtval);
    }
    return (0);
}

void
vfs_bio_need_satisfy(void)
{
    ++numfreebuffers;
    if (!needsbuffer)
        return;
    if (numdirtybuffers < lodirtybuffers) {
        needsbuffer &= ~(VFS_BIO_NEED_ANY | VFS_BIO_NEED_LOWLIMIT);
    } else {
        needsbuffer &= ~VFS_BIO_NEED_ANY;
    }
    if (numfreebuffers >= hifreebuffers) {
        needsbuffer &= ~VFS_BIO_NEED_FREE;
    }
    wakeup(&needsbuffer);
}
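/*
 * Sketch of the sleep/wakeup protocol around needsbuffer (as used by
 * getnewbuf() and waitfreebuffers() below): a would-be allocator sets
 * one of the VFS_BIO_NEED_* bits and sleeps on &needsbuffer; whenever
 * a buffer is freed, vfs_bio_need_satisfy() clears whichever bits are
 * now satisfiable and issues a wakeup, so sleepers re-test their
 * condition in a loop.
 */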
/*
 * Delayed write.  (Buffer is marked dirty.)
 */
void
bdwrite(struct buf *bp)
{
    struct vnode *vp;

#if !defined(MAX_PERF)
    if ((bp->b_flags & B_BUSY) == 0) {
        panic("bdwrite: buffer is not busy");
    }
#endif

    if (bp->b_flags & B_INVAL) {
        brelse(bp);
        return;
    }
    bp->b_flags &= ~(B_READ|B_RELBUF);
    if ((bp->b_flags & B_DELWRI) == 0) {
        bp->b_flags |= B_DONE | B_DELWRI;
        reassignbuf(bp, bp->b_vp);
        ++numdirtybuffers;
    }

    /*
     * This bmap keeps the system from needing to do the bmap later,
     * perhaps when the system is attempting to do a sync.  Since it
     * is likely that the indirect block -- or whatever other data
     * structure the filesystem needs -- is still in memory now, it
     * is a good thing to do this.  Note also that if the pageout
     * daemon is requesting a sync, there might not be enough memory
     * to do the bmap then...  So, this is important to do.
     */
    if (bp->b_lblkno == bp->b_blkno) {
        VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
    }

    /*
     * Set the *dirty* buffer range based upon the VM system dirty pages.
     */
    vfs_setdirty(bp);

    /*
     * We need to do this here to satisfy the vnode_pager and the
     * pageout daemon, so that it thinks that the pages have been
     * "cleaned".  Note that since the pages are in a delayed write
     * buffer -- the VFS layer "will" see that the pages get written
     * out on the next sync, or perhaps the cluster will be completed.
     */
    vfs_clean_pages(bp);
    bqrelse(bp);

    /*
     * XXX The soft dependency code is not prepared to
     * have I/O done when a bdwrite is requested.  For
     * now we just let the write be delayed if it is
     * requested by the soft dependency code.
     */
    if ((vp = bp->b_vp) &&
        ((vp->v_type == VBLK && vp->v_specmountpoint &&
          (vp->v_specmountpoint->mnt_flag & MNT_SOFTDEP)) ||
         (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))))
        return;

    if (numdirtybuffers >= hidirtybuffers)
        flushdirtybuffers(0, 0);

    return;
}

/*
 * Same as first half of bdwrite, mark buffer dirty, but do not release it.
 * Check how this compares with vfs_setdirty(); XXX [JRE]
 */
void
bdirty(bp)
    struct buf *bp;
{

    bp->b_flags &= ~(B_READ|B_RELBUF); /* XXX ??? check this */
    if ((bp->b_flags & B_DELWRI) == 0) {
        bp->b_flags |= B_DONE | B_DELWRI; /* why done? XXX JRE */
        reassignbuf(bp, bp->b_vp);
        ++numdirtybuffers;
    }
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf *bp)
{
    bp->b_flags |= B_ASYNC;
    (void) VOP_BWRITE(bp);
}

/*
 * Ordered write.
 * Start output on a buffer, and flag it so that the device will write
 * it in the order it was queued.  The buffer is released when the
 * output completes.
 */
int
bowrite(struct buf *bp)
{
    bp->b_flags |= B_ORDERED|B_ASYNC;
    return (VOP_BWRITE(bp));
}
/*
 * Release a buffer.
 */
void
brelse(struct buf *bp)
{
    int s;

    if (bp->b_flags & B_CLUSTER) {
        relpbuf(bp);
        return;
    }

    s = splbio();

    /* anyone need this block? */
    if (bp->b_flags & B_WANTED) {
        bp->b_flags &= ~(B_WANTED | B_AGE);
        wakeup(bp);
    }

    if (bp->b_flags & B_LOCKED)
        bp->b_flags &= ~B_ERROR;

    if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_FREEBUF)) ||
        (bp->b_bufsize <= 0)) {
        bp->b_flags |= B_INVAL;
        if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
            (*bioops.io_deallocate)(bp);
        if (bp->b_flags & B_DELWRI)
            --numdirtybuffers;
        bp->b_flags &= ~(B_DELWRI | B_CACHE | B_FREEBUF);
        if ((bp->b_flags & B_VMIO) == 0) {
            if (bp->b_bufsize)
                allocbuf(bp, 0);
            if (bp->b_vp)
                brelvp(bp);
        }
    }

    /*
     * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_release()
     * is called with B_DELWRI set, the underlying pages may wind up
     * getting freed causing a previous write (bdwrite()) to get 'lost'
     * because pages associated with a B_DELWRI bp are marked clean.
     *
     * We still allow the B_INVAL case to call vfs_vmio_release(), even
     * if B_DELWRI is set.
     */

    if (bp->b_flags & B_DELWRI)
        bp->b_flags &= ~B_RELBUF;

    /*
     * VMIO buffer rundown.  It is not strictly necessary to keep a
     * VMIO buffer constituted, so the B_INVAL flag is used to
     * *invalidate* the buffer, but the VM object is kept around.
     * The B_NOCACHE flag is used to invalidate the pages in the
     * VM object.
     *
     * The b_{validoff,validend,dirtyoff,dirtyend} values are relative
     * to b_offset and currently have byte granularity, whereas the
     * valid flags in the vm_pages have only DEV_BSIZE resolution.
     * The byte resolution fields are used to avoid unnecessary re-reads
     * of the buffer, but the code really needs to be genericized so
     * other filesystem modules can take advantage of these fields.
     *
     * XXX this seems to cause performance problems.
     */
    if ((bp->b_flags & B_VMIO)
        && !(bp->b_vp->v_tag == VT_NFS &&
             bp->b_vp->v_type != VBLK &&
             (bp->b_flags & B_DELWRI) != 0)
#ifdef notdef
        && (bp->b_vp->v_tag != VT_NFS
            || bp->b_vp->v_type == VBLK
            || (bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR))
            || bp->b_validend == 0
            || (bp->b_validoff == 0
                && bp->b_validend == bp->b_bufsize))
#endif
        ) {

        int i, j, resid;
        vm_page_t m;
        off_t foff;
        vm_pindex_t poff;
        vm_object_t obj;
        struct vnode *vp;

        vp = bp->b_vp;

        /*
         * Get the base offset and length of the buffer.  Note that
         * for block sizes that are less than PAGE_SIZE, the b_data
         * base of the buffer does not represent exactly b_offset and
         * neither b_offset nor b_size are necessarily page aligned.
         * Instead, the starting position of b_offset is:
         *
         *      b_data + (b_offset & PAGE_MASK)
         *
         * block sizes less than DEV_BSIZE (usually 512) are not
         * supported due to the page granularity bits (m->valid,
         * m->dirty, etc...).
         *
         * See man buf(9) for more information
         */

        resid = bp->b_bufsize;
        foff = bp->b_offset;

        for (i = 0; i < bp->b_npages; i++) {
            m = bp->b_pages[i];
            vm_page_flag_clear(m, PG_ZERO);
            if (m == bogus_page) {

                obj = (vm_object_t) vp->v_object;
                poff = OFF_TO_IDX(bp->b_offset);

                for (j = i; j < bp->b_npages; j++) {
                    m = bp->b_pages[j];
                    if (m == bogus_page) {
                        m = vm_page_lookup(obj, poff + j);
#if !defined(MAX_PERF)
                        if (!m) {
                            panic("brelse: page missing\n");
                        }
#endif
                        bp->b_pages[j] = m;
                    }
                }

                if ((bp->b_flags & B_INVAL) == 0) {
                    pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
                }
            }
            if (bp->b_flags & (B_NOCACHE|B_ERROR)) {
                int poffset = foff & PAGE_MASK;
                int presid = resid > (PAGE_SIZE - poffset) ?
                    (PAGE_SIZE - poffset) : resid;
#ifdef DIAGNOSTIC
                if (presid < 0)
                    panic("brelse: extra page");
#endif
                vm_page_set_invalid(m, poffset, presid);
            }
            resid -= PAGE_SIZE - (foff & PAGE_MASK);
            foff = (foff + PAGE_SIZE) & ~PAGE_MASK;
        }

        if (bp->b_flags & (B_INVAL | B_RELBUF))
            vfs_vmio_release(bp);

    } else if (bp->b_flags & B_VMIO) {

        if (bp->b_flags & (B_INVAL | B_RELBUF))
            vfs_vmio_release(bp);

    }

#if !defined(MAX_PERF)
    if (bp->b_qindex != QUEUE_NONE)
        panic("brelse: free buffer onto another queue???");
#endif

    /* enqueue */
    /* buffers with no memory */
    if (bp->b_bufsize == 0) {
        bp->b_flags |= B_INVAL;
        bp->b_qindex = QUEUE_EMPTY;
        TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
        LIST_REMOVE(bp, b_hash);
        LIST_INSERT_HEAD(&invalhash, bp, b_hash);
        bp->b_dev = NODEV;
        kvafreespace += bp->b_kvasize;

    /* buffers with junk contents */
    } else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
        bp->b_flags |= B_INVAL;
        bp->b_qindex = QUEUE_AGE;
        TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
        LIST_REMOVE(bp, b_hash);
        LIST_INSERT_HEAD(&invalhash, bp, b_hash);
        bp->b_dev = NODEV;

    /* buffers that are locked */
    } else if (bp->b_flags & B_LOCKED) {
        bp->b_qindex = QUEUE_LOCKED;
        TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);

    /* buffers with stale but valid contents */
    } else if (bp->b_flags & B_AGE) {
        bp->b_qindex = QUEUE_AGE;
        TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);

    /* buffers with valid and potentially reusable contents */
    } else {
        bp->b_qindex = QUEUE_LRU;
        TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
    }

    if ((bp->b_flags & B_INVAL) ||
        (bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) {
        if (bp->b_flags & B_DELWRI) {
            --numdirtybuffers;
            bp->b_flags &= ~B_DELWRI;
        }
        vfs_bio_need_satisfy();
    }

    /* unlock */
    bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
        B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
    splx(s);
}
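/*
 * bqrelse() below is the cheap variant of brelse(): it is meant for
 * buffers whose contents remain completely valid, so it skips the
 * invalidation and VMIO rundown paths above and simply requeues the
 * buffer (on QUEUE_LOCKED or QUEUE_LRU).  Callers that may have
 * damaged or invalidated the contents must use brelse().
 */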
/*
 * Release a buffer without invalidating its contents.
 */
void
bqrelse(struct buf *bp)
{
    int s;

    s = splbio();

    /* anyone need this block? */
    if (bp->b_flags & B_WANTED) {
        bp->b_flags &= ~(B_WANTED | B_AGE);
        wakeup(bp);
    }

#if !defined(MAX_PERF)
    if (bp->b_qindex != QUEUE_NONE)
        panic("bqrelse: free buffer onto another queue???");
#endif

    if (bp->b_flags & B_LOCKED) {
        bp->b_flags &= ~B_ERROR;
        bp->b_qindex = QUEUE_LOCKED;
        TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
    /* buffers with stale but valid contents */
    } else {
        bp->b_qindex = QUEUE_LRU;
        TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
    }

    if ((bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) {
        vfs_bio_need_satisfy();
    }

    /* unlock */
    bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
        B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
    splx(s);
}

static void
vfs_vmio_release(bp)
    struct buf *bp;
{
    int i, s;
    vm_page_t m;

    s = splvm();
    for (i = 0; i < bp->b_npages; i++) {
        m = bp->b_pages[i];
        bp->b_pages[i] = NULL;
        /*
         * In order to keep page LRU ordering consistent, put
         * everything on the inactive queue.
         */
        vm_page_unwire(m, 0);
        /*
         * We don't mess with busy pages, it is
         * the responsibility of the process that
         * busied the pages to deal with them.
         */
        if ((m->flags & PG_BUSY) || (m->busy != 0))
            continue;

        if (m->wire_count == 0) {
            vm_page_flag_clear(m, PG_ZERO);
            /*
             * Might as well free the page if we can and it has
             * no valid data.
             */
            if ((bp->b_flags & B_ASYNC) == 0 && !m->valid && m->hold_count == 0) {
                vm_page_busy(m);
                vm_page_protect(m, VM_PROT_NONE);
                vm_page_free(m);
            }
        }
    }
    splx(s);
    bufspace -= bp->b_bufsize;
    vmiospace -= bp->b_bufsize;
    pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
    bp->b_npages = 0;
    bp->b_bufsize = 0;
    bp->b_flags &= ~B_VMIO;
    if (bp->b_vp)
        brelvp(bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
gbincore(struct vnode *vp, daddr_t blkno)
{
    struct buf *bp;
    struct bufhashhdr *bh;

    bh = BUFHASH(vp, blkno);
    bp = bh->lh_first;

    /* Search hash chain */
    while (bp != NULL) {
        /* hit */
        if (bp->b_vp == vp && bp->b_lblkno == blkno &&
            (bp->b_flags & B_INVAL) == 0) {
            break;
        }
        bp = bp->b_hash.le_next;
    }
    return (bp);
}
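/*
 * The hash lookup above is the core of every cache hit: BUFHASH()
 * (from sys/buf.h) hashes the (vnode, logical block) pair to one of
 * the BUFHSZ chains, and gbincore() walks that chain comparing b_vp
 * and b_lblkno while skipping B_INVAL headers.  Buffers that lose
 * their identity are rehashed onto invalhash, so a stale header can
 * never satisfy a lookup here.
 */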
/*
 * This routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
int
vfs_bio_awrite(struct buf *bp)
{
    int i;
    daddr_t lblkno = bp->b_lblkno;
    struct vnode *vp = bp->b_vp;
    int s;
    int ncl;
    struct buf *bpa;
    int nwritten;
    int size;
    int maxcl;

    s = splbio();
    /*
     * right now we support clustered writing only to regular files
     */
    if ((vp->v_type == VREG) &&
        (vp->v_mount != 0) && /* Only on nodes that have the size info */
        (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {

        size = vp->v_mount->mnt_stat.f_iosize;
        maxcl = MAXPHYS / size;

        for (i = 1; i < maxcl; i++) {
            if ((bpa = gbincore(vp, lblkno + i)) &&
                ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
                 (B_DELWRI | B_CLUSTEROK)) &&
                (bpa->b_bufsize == size)) {
                if ((bpa->b_blkno == bpa->b_lblkno) ||
                    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
                    break;
            } else {
                break;
            }
        }
        ncl = i;
        /*
         * this is a possible cluster write
         */
        if (ncl != 1) {
            nwritten = cluster_wbuild(vp, size, lblkno, ncl);
            splx(s);
            return nwritten;
        }
    }

    bremfree(bp);
    bp->b_flags |= B_BUSY | B_ASYNC;

    splx(s);
    /*
     * default (old) behavior, writing out only one block
     */
    nwritten = bp->b_bufsize;
    (void) VOP_BWRITE(bp);
    return nwritten;
}

/*
 * Find a buffer header which is available for use.
 */
static struct buf *
getnewbuf(struct vnode *vp, daddr_t blkno,
    int slpflag, int slptimeo, int size, int maxsize)
{
    struct buf *bp, *bp1;
    int nbyteswritten = 0;
    vm_offset_t addr;
    static int writerecursion = 0;

start:
    if (bufspace >= maxbufspace)
        goto trytofreespace;

    /* can we constitute a new buffer? */
    if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]))) {
#if !defined(MAX_PERF)
        if (bp->b_qindex != QUEUE_EMPTY)
            panic("getnewbuf: inconsistent EMPTY queue, qindex=%d",
                bp->b_qindex);
#endif
        bp->b_flags |= B_BUSY;
        bremfree(bp);
        goto fillbuf;
    }
trytofreespace:
    /*
     * We keep the file I/O from hogging metadata I/O.
     * This is desirable because file data is cached in the
     * VM/Buffer cache even if a buffer is freed.
     */
    if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) {
#if !defined(MAX_PERF)
        if (bp->b_qindex != QUEUE_AGE)
            panic("getnewbuf: inconsistent AGE queue, qindex=%d",
                bp->b_qindex);
#endif
    } else if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) {
#if !defined(MAX_PERF)
        if (bp->b_qindex != QUEUE_LRU)
            panic("getnewbuf: inconsistent LRU queue, qindex=%d",
                bp->b_qindex);
#endif
    }
    if (!bp) {
        /* wait for a free buffer of any kind */
        needsbuffer |= VFS_BIO_NEED_ANY;
        do
            tsleep(&needsbuffer, (PRIBIO + 4) | slpflag, "newbuf",
                slptimeo);
        while (needsbuffer & VFS_BIO_NEED_ANY);
        return (0);
    }

#if defined(DIAGNOSTIC)
    if (bp->b_flags & B_BUSY) {
        panic("getnewbuf: busy buffer on free list\n");
    }
#endif
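    /*
     * At this point bp is a recycling candidate taken from the head
     * of QUEUE_AGE or QUEUE_LRU.  The code below may still rotate a
     * recently used buffer back to the LRU tail, or flush a
     * delayed-write buffer and retry, before actually stealing the
     * header at fillbuf.
     */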
    /*
     * We are fairly aggressive about freeing VMIO buffers, but since
     * the buffering is intact without buffer headers, there is not
     * much loss.  We gain by maintaining non-VMIOed metadata in buffers.
     */
    if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) {
        if ((bp->b_flags & B_VMIO) == 0 ||
            (vmiospace < maxvmiobufspace)) {
            --bp->b_usecount;
            TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
            if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
                TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
                goto start;
            }
            TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
        }
    }

    /* if we are a delayed write, convert to an async write */
    if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {

        /*
         * If our delayed write is likely to be used soon, then
         * recycle back onto the LRU queue.
         */
        if (vp && (bp->b_vp == vp) && (bp->b_qindex == QUEUE_LRU) &&
            (bp->b_lblkno >= blkno) && (maxsize > 0)) {

            if (bp->b_usecount > 0) {
                if (bp->b_lblkno < blkno + (MAXPHYS / maxsize)) {

                    TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);

                    if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
                        TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
                        bp->b_usecount--;
                        goto start;
                    }
                    TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
                }
            }
        }

        /*
         * Certain layered filesystems can recursively re-enter the
         * vfs_bio code due to delayed writes.  This helps keep the
         * system from deadlocking.
         */
        if (writerecursion > 0) {
            if (writerecursion > 5) {
                bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
                while (bp) {
                    if ((bp->b_flags & B_DELWRI) == 0)
                        break;
                    bp = TAILQ_NEXT(bp, b_freelist);
                }
                if (bp == NULL) {
                    bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);
                    while (bp) {
                        if ((bp->b_flags & B_DELWRI) == 0)
                            break;
                        bp = TAILQ_NEXT(bp, b_freelist);
                    }
                }
                if (bp == NULL)
                    panic("getnewbuf: cannot get buffer, infinite recursion failure");
            } else {
                bremfree(bp);
                bp->b_flags |= B_BUSY | B_AGE | B_ASYNC;
                nbyteswritten += bp->b_bufsize;
                ++writerecursion;
                VOP_BWRITE(bp);
                --writerecursion;
                if (!slpflag && !slptimeo) {
                    return (0);
                }
                goto start;
            }
        } else {
            ++writerecursion;
            nbyteswritten += vfs_bio_awrite(bp);
            --writerecursion;
            if (!slpflag && !slptimeo) {
                return (0);
            }
            goto start;
        }
    }

    if (bp->b_flags & B_WANTED) {
        bp->b_flags &= ~B_WANTED;
        wakeup(bp);
    }
    bremfree(bp);
    bp->b_flags |= B_BUSY;

    if (bp->b_flags & B_VMIO) {
        bp->b_flags &= ~B_ASYNC;
        vfs_vmio_release(bp);
    }

    if (bp->b_vp)
        brelvp(bp);

fillbuf:

    /* we are not free, nor do we contain interesting data */
    if (bp->b_rcred != NOCRED) {
        crfree(bp->b_rcred);
        bp->b_rcred = NOCRED;
    }
    if (bp->b_wcred != NOCRED) {
        crfree(bp->b_wcred);
        bp->b_wcred = NOCRED;
    }
    if (LIST_FIRST(&bp->b_dep) != NULL &&
        bioops.io_deallocate)
        (*bioops.io_deallocate)(bp);

    LIST_REMOVE(bp, b_hash);
    LIST_INSERT_HEAD(&invalhash, bp, b_hash);
    if (bp->b_bufsize) {
        allocbuf(bp, 0);
    }
    bp->b_flags = B_BUSY;
    bp->b_dev = NODEV;
    bp->b_vp = NULL;
    bp->b_blkno = bp->b_lblkno = 0;
    bp->b_offset = NOOFFSET;
    bp->b_iodone = 0;
    bp->b_error = 0;
    bp->b_resid = 0;
    bp->b_bcount = 0;
    bp->b_npages = 0;
    bp->b_dirtyoff = bp->b_dirtyend = 0;
    bp->b_validoff = bp->b_validend = 0;
    bp->b_usecount = 5;
    /* Here, not kern_physio.c, is where this should be done */
    LIST_INIT(&bp->b_dep);

    maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK;

    /*
     * we assume that buffer_map is not at address 0
     */
    addr = 0;
    if (maxsize != bp->b_kvasize) {
        bfreekva(bp);

findkvaspace:
        /*
         * See if we have buffer kva space
         */
        if (vm_map_findspace(buffer_map,
            vm_map_min(buffer_map), maxsize, &addr)) {
            if (kvafreespace > 0) {
                int totfree = 0, freed;
                do {
                    freed = 0;
                    for (bp1 = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
                        bp1 != NULL; bp1 = TAILQ_NEXT(bp1, b_freelist)) {
                        if (bp1->b_kvasize != 0) {
                            totfree += bp1->b_kvasize;
                            freed = bp1->b_kvasize;
                            bremfree(bp1);
                            bfreekva(bp1);
                            brelse(bp1);
                            break;
                        }
                    }
                } while (freed);
                /*
                 * if we found free space, then retry with the same buffer.
                 */
                if (totfree)
                    goto findkvaspace;
            }
            bp->b_flags |= B_INVAL;
            brelse(bp);
            goto trytofreespace;
        }
    }

    /*
     * See if we have exceeded our space allocation.
     */
    if (bufspace >= (maxbufspace + nbyteswritten)) {
        bp->b_flags |= B_INVAL;
        brelse(bp);
        goto trytofreespace;
    }

    /*
     * create a map entry for the buffer -- in essence
     * reserving the kva space.
     */
    if (addr) {
        vm_map_insert(buffer_map, NULL, 0,
            addr, addr + maxsize,
            VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);

        bp->b_kvabase = (caddr_t) addr;
        bp->b_kvasize = maxsize;
    }
    bp->b_data = bp->b_kvabase;

    return (bp);
}

static void
waitfreebuffers(int slpflag, int slptimeo)
{
    while (numfreebuffers < hifreebuffers) {
        flushdirtybuffers(slpflag, slptimeo);
        if (numfreebuffers < hifreebuffers)
            break;
        needsbuffer |= VFS_BIO_NEED_FREE;
        if (tsleep(&needsbuffer, (PRIBIO + 4)|slpflag, "biofre", slptimeo))
            break;
    }
}

static void
flushdirtybuffers(int slpflag, int slptimeo)
{
    int s;
    static pid_t flushing = 0;

    s = splbio();

    if (flushing) {
        if (flushing == curproc->p_pid) {
            splx(s);
            return;
        }
        while (flushing) {
            if (tsleep(&flushing, (PRIBIO + 4)|slpflag, "biofls", slptimeo)) {
                splx(s);
                return;
            }
        }
    }
    flushing = curproc->p_pid;

    while (numdirtybuffers > lodirtybuffers) {
        struct buf *bp;
        needsbuffer |= VFS_BIO_NEED_LOWLIMIT;
        bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
        if (bp == NULL)
            bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);

        while (bp && ((bp->b_flags & B_DELWRI) == 0)) {
            bp = TAILQ_NEXT(bp, b_freelist);
        }

        if (bp) {
            vfs_bio_awrite(bp);
            continue;
        }
        break;
    }

    flushing = 0;
    wakeup(&flushing);
    splx(s);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode *vp, daddr_t blkno)
{
    struct buf *bp;

    int s = splbio();
    bp = gbincore(vp, blkno);
    splx(s);
    return (bp);
}
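/*
 * incore() only answers "is there a buffer header for this block?";
 * inmem() below is the stronger test used by the read-ahead logic in
 * breadn(): even when no buffer exists, the data may still be resident
 * in the vnode's VM object, in which case scheduling a read-ahead
 * would be wasted work.
 */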
/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */

int
inmem(struct vnode *vp, daddr_t blkno)
{
    vm_object_t obj;
    vm_offset_t toff, tinc, size;
    vm_page_t m;
    vm_ooffset_t off;

    if (incore(vp, blkno))
        return 1;
    if (vp->v_mount == NULL)
        return 0;
    if ((vp->v_object == NULL) || (vp->v_flag & VOBJBUF) == 0)
        return 0;

    obj = vp->v_object;
    size = PAGE_SIZE;
    if (size > vp->v_mount->mnt_stat.f_iosize)
        size = vp->v_mount->mnt_stat.f_iosize;
    off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;

    for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
        m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
        if (!m)
            return 0;
        tinc = size;
        if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
            tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
        if (vm_page_is_valid(m,
            (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
            return 0;
    }
    return 1;
}

/*
 * Now we set the dirty range for the buffer --
 * for NFS -- if the file is mapped and pages have
 * been written to, let it know.  We want the
 * entire range of the buffer to be marked dirty if
 * any of the pages have been written to for consistency
 * with the b_validoff, b_validend set in the nfs write
 * code, and used by the nfs read code.
 */
static void
vfs_setdirty(struct buf *bp)
{
    int i;
    vm_object_t object;
    vm_offset_t boffset, offset;

    /*
     * We qualify the scan for modified pages on whether the
     * object has been flushed yet.  The OBJ_WRITEABLE flag
     * is not cleared simply by protecting pages off.
     */
    if ((bp->b_flags & B_VMIO) &&
        ((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
        /*
         * test the pages to see if they have been modified directly
         * by users through the VM system.
         */
        for (i = 0; i < bp->b_npages; i++) {
            vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
            vm_page_test_dirty(bp->b_pages[i]);
        }

        /*
         * scan forwards for the first page modified
         */
        for (i = 0; i < bp->b_npages; i++) {
            if (bp->b_pages[i]->dirty) {
                break;
            }
        }
        boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
        if (boffset < bp->b_dirtyoff) {
            bp->b_dirtyoff = max(boffset, 0);
        }

        /*
         * scan backwards for the last page modified
         */
        for (i = bp->b_npages - 1; i >= 0; --i) {
            if (bp->b_pages[i]->dirty) {
                break;
            }
        }
        boffset = (i + 1);
#if 0
        offset = boffset + bp->b_pages[0]->pindex;
        if (offset >= object->size)
            boffset = object->size - bp->b_pages[0]->pindex;
#endif
        boffset = (boffset << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
        if (bp->b_dirtyend < boffset)
            bp->b_dirtyend = min(boffset, bp->b_bufsize);
    }
}
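/*
 * Worked example for the scan above (hypothetical numbers): with 4K
 * pages, a page-aligned 8K buffer whose second page (i = 1) is the
 * only one dirtied through a mapping yields a candidate start of
 * 1 << PAGE_SHIFT = 4096 and a candidate end of 2 << PAGE_SHIFT =
 * 8192 (clipped to b_bufsize).  b_dirtyoff only ever moves down and
 * b_dirtyend only ever moves up, so the recorded range can widen but
 * never shrink.
 */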
/*
 * Get a block given a specified block and offset into a file/device.
 */
struct buf *
getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
    struct buf *bp;
    int i, s;
    struct bufhashhdr *bh;
    int maxsize;

#if !defined(MAX_PERF)
    if (size > MAXBSIZE)
        panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
#endif

    s = splbio();
loop:
    if (numfreebuffers < lofreebuffers) {
        waitfreebuffers(slpflag, slptimeo);
    }

    if ((bp = gbincore(vp, blkno))) {
        if (bp->b_flags & B_BUSY) {

            bp->b_flags |= B_WANTED;
            if (bp->b_usecount < BUF_MAXUSE)
                ++bp->b_usecount;

            if (!tsleep(bp,
                (PRIBIO + 4) | slpflag, "getblk", slptimeo)) {
                goto loop;
            }

            splx(s);
            return (struct buf *) NULL;
        }
        bp->b_flags |= B_BUSY | B_CACHE;
        bremfree(bp);

        /*
         * check for size inconsistencies (note that they shouldn't
         * happen but do when filesystems don't handle the size changes
         * correctly.)  We are conservative on metadata and don't just
         * extend the buffer but write (if needed) and re-constitute it.
         */

        if (bp->b_bcount != size) {
            if ((bp->b_flags & B_VMIO) && (size <= bp->b_kvasize)) {
                allocbuf(bp, size);
            } else {
                if (bp->b_flags & B_DELWRI) {
                    bp->b_flags |= B_NOCACHE;
                    VOP_BWRITE(bp);
                } else {
                    if ((bp->b_flags & B_VMIO) &&
                        (LIST_FIRST(&bp->b_dep) == NULL)) {
                        bp->b_flags |= B_RELBUF;
                        brelse(bp);
                    } else {
                        bp->b_flags |= B_NOCACHE;
                        VOP_BWRITE(bp);
                    }
                }
                goto loop;
            }
        }

#ifdef DIAGNOSTIC
        if (bp->b_offset == NOOFFSET)
            panic("getblk: no buffer offset");
#endif

        /*
         * Check that the constituted buffer really deserves to have
         * the B_CACHE bit set.  B_VMIO type buffers might not contain
         * fully valid pages.  Normal (old-style) buffers should be
         * fully valid.
         */
        if (bp->b_flags & B_VMIO) {
            int checksize = bp->b_bufsize;
            int poffset = bp->b_offset & PAGE_MASK;
            int resid;
            for (i = 0; i < bp->b_npages; i++) {
                resid = (checksize > (PAGE_SIZE - poffset)) ?
                    (PAGE_SIZE - poffset) : checksize;
                if (!vm_page_is_valid(bp->b_pages[i], poffset, resid)) {
                    bp->b_flags &= ~(B_CACHE | B_DONE);
                    break;
                }
                checksize -= resid;
                poffset = 0;
            }
        }

        if (bp->b_usecount < BUF_MAXUSE)
            ++bp->b_usecount;
        splx(s);
        return (bp);
    } else {
        int bsize, maxsize, vmio;
        off_t offset;

        if (vp->v_type == VBLK)
            bsize = DEV_BSIZE;
        else if (vp->v_mountedhere)
            bsize = vp->v_mountedhere->mnt_stat.f_iosize;
        else if (vp->v_mount)
            bsize = vp->v_mount->mnt_stat.f_iosize;
        else
            bsize = size;

        offset = (off_t)blkno * bsize;
        vmio = (vp->v_object != 0) && (vp->v_flag & VOBJBUF);
        maxsize = vmio ? size + (offset & PAGE_MASK) : size;
        maxsize = imax(maxsize, bsize);

        if ((bp = getnewbuf(vp, blkno,
            slpflag, slptimeo, size, maxsize)) == 0) {
            if (slpflag || slptimeo) {
                splx(s);
                return NULL;
            }
            goto loop;
        }

        /*
         * This code is used to make sure that a buffer is not
         * created while the getnewbuf routine is blocked.
         * Normally the vnode is locked so this isn't a problem.
         * VBLK type I/O requests, however, don't lock the vnode.
         */
        if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE && gbincore(vp, blkno)) {
            bp->b_flags |= B_INVAL;
            brelse(bp);
            goto loop;
        }

        /*
         * Insert the buffer into the hash, so that it can
         * be found by incore.
         */
        bp->b_blkno = bp->b_lblkno = blkno;
        bp->b_offset = offset;

        bgetvp(vp, bp);
        LIST_REMOVE(bp, b_hash);
        bh = BUFHASH(vp, blkno);
        LIST_INSERT_HEAD(bh, bp, b_hash);

        if (vmio) {
            bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
            if (vp->v_type != VREG && vp->v_type != VBLK)
                printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
        } else {
            bp->b_flags &= ~B_VMIO;
        }

        allocbuf(bp, size);

        splx(s);
        return (bp);
    }
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
    struct buf *bp;
    int s;

    s = splbio();
    while ((bp = getnewbuf(0, (daddr_t) 0, 0, 0, size, MAXBSIZE)) == 0);
    splx(s);
    allocbuf(bp, size);
    bp->b_flags |= B_INVAL; /* b_dep cleared by getnewbuf() */
    return (bp);
}
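/*
 * geteblk() buffers carry no vnode, block number or cached identity
 * (hence B_INVAL); a caller wanting scratch kernel memory with
 * buffer-header semantics just pairs it with brelse() -- a minimal
 * sketch:
 *
 *      bp = geteblk(size);
 *      ...use bp->b_data as scratch space...
 *      brelse(bp);
 */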
/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 *
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course, the buffer is shrinking).
 */
int
allocbuf(struct buf *bp, int size)
{
    int s;
    int newbsize, mbsize;
    int i;

#if !defined(MAX_PERF)
    if (!(bp->b_flags & B_BUSY))
        panic("allocbuf: buffer not busy");

    if (bp->b_kvasize < size)
        panic("allocbuf: buffer too small");
#endif

    if ((bp->b_flags & B_VMIO) == 0) {
        caddr_t origbuf;
        int origbufsize;
        /*
         * Just get anonymous memory from the kernel
         */
        mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
#if !defined(NO_B_MALLOC)
        if (bp->b_flags & B_MALLOC)
            newbsize = mbsize;
        else
#endif
            newbsize = round_page(size);

        if (newbsize < bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
            /*
             * malloced buffers are not shrunk
             */
            if (bp->b_flags & B_MALLOC) {
                if (newbsize) {
                    bp->b_bcount = size;
                } else {
                    free(bp->b_data, M_BIOBUF);
                    bufspace -= bp->b_bufsize;
                    bufmallocspace -= bp->b_bufsize;
                    bp->b_data = bp->b_kvabase;
                    bp->b_bufsize = 0;
                    bp->b_bcount = 0;
                    bp->b_flags &= ~B_MALLOC;
                }
                return 1;
            }
#endif
            vm_hold_free_pages(
                bp,
                (vm_offset_t) bp->b_data + newbsize,
                (vm_offset_t) bp->b_data + bp->b_bufsize);
        } else if (newbsize > bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
            /*
             * We only use malloced memory on the first allocation,
             * and revert to page-allocated memory when the buffer
             * grows.
             */
            if ((bufmallocspace < maxbufmallocspace) &&
                (bp->b_bufsize == 0) &&
                (mbsize <= PAGE_SIZE/2)) {

                bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
                bp->b_bufsize = mbsize;
                bp->b_bcount = size;
                bp->b_flags |= B_MALLOC;
                bufspace += mbsize;
                bufmallocspace += mbsize;
                return 1;
            }
#endif
            origbuf = NULL;
            origbufsize = 0;
#if !defined(NO_B_MALLOC)
            /*
             * If the buffer is growing on its other-than-first
             * allocation, then we revert to the page-allocation
             * scheme.
             */
            if (bp->b_flags & B_MALLOC) {
                origbuf = bp->b_data;
                origbufsize = bp->b_bufsize;
                bp->b_data = bp->b_kvabase;
                bufspace -= bp->b_bufsize;
                bufmallocspace -= bp->b_bufsize;
                bp->b_bufsize = 0;
                bp->b_flags &= ~B_MALLOC;
                newbsize = round_page(newbsize);
            }
#endif
            vm_hold_load_pages(
                bp,
                (vm_offset_t) bp->b_data + bp->b_bufsize,
                (vm_offset_t) bp->b_data + newbsize);
#if !defined(NO_B_MALLOC)
            if (origbuf) {
                bcopy(origbuf, bp->b_data, origbufsize);
                free(origbuf, M_BIOBUF);
            }
#endif
        }
    } else {
        vm_page_t m;
        int desiredpages;

        newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
        desiredpages = (size == 0) ? 0 :
            num_pages((bp->b_offset & PAGE_MASK) + newbsize);

#if !defined(NO_B_MALLOC)
        if (bp->b_flags & B_MALLOC)
            panic("allocbuf: VMIO buffer can't be malloced");
#endif

        if (newbsize < bp->b_bufsize) {
            if (desiredpages < bp->b_npages) {
                for (i = desiredpages; i < bp->b_npages; i++) {
                    /*
                     * the page is not freed here -- it
                     * is the responsibility of vnode_pager_setsize
                     */
                    m = bp->b_pages[i];
#if defined(DIAGNOSTIC)
                    if (m == bogus_page)
                        panic("allocbuf: bogus page found");
#endif
                    vm_page_sleep(m, "biodep", &m->busy);

                    bp->b_pages[i] = NULL;
                    vm_page_unwire(m, 0);
                }
                pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) +
                    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
                bp->b_npages = desiredpages;
            }
        } else if (newbsize > bp->b_bufsize) {
            vm_object_t obj;
            vm_offset_t tinc, toff;
            vm_ooffset_t off;
            vm_pindex_t objoff;
            int pageindex, curbpnpages;
            struct vnode *vp;
            int bsize;
            int orig_validoff = bp->b_validoff;
            int orig_validend = bp->b_validend;

            vp = bp->b_vp;

            if (vp->v_type == VBLK)
                bsize = DEV_BSIZE;
            else
                bsize = vp->v_mount->mnt_stat.f_iosize;

            if (bp->b_npages < desiredpages) {
                obj = vp->v_object;
                tinc = PAGE_SIZE;

                off = bp->b_offset;
#ifdef DIAGNOSTIC
                if (bp->b_offset == NOOFFSET)
                    panic("allocbuf: no buffer offset");
#endif

                curbpnpages = bp->b_npages;
doretry:
                bp->b_validoff = orig_validoff;
                bp->b_validend = orig_validend;
                bp->b_flags |= B_CACHE;
                for (toff = 0; toff < newbsize; toff += tinc) {
                    objoff = OFF_TO_IDX(off + toff);
                    pageindex = objoff - OFF_TO_IDX(off);
                    tinc = PAGE_SIZE - ((off + toff) & PAGE_MASK);
                    if (pageindex < curbpnpages) {

                        m = bp->b_pages[pageindex];
#ifdef VFS_BIO_DIAG
                        if (m->pindex != objoff)
                            panic("allocbuf: page changed offset?!!!?");
#endif
                        if (tinc > (newbsize - toff))
                            tinc = newbsize - toff;
                        if (bp->b_flags & B_CACHE)
                            vfs_buf_set_valid(bp, off, toff, tinc, m);
                        continue;
                    }
                    m = vm_page_lookup(obj, objoff);
                    if (!m) {
                        m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
                        if (!m) {
                            VM_WAIT;
                            vm_pageout_deficit += (desiredpages - curbpnpages);
                            goto doretry;
                        }

                        vm_page_wire(m);
                        vm_page_flag_clear(m, PG_BUSY);
                        bp->b_flags &= ~B_CACHE;

                    } else if (m->flags & PG_BUSY) {
                        s = splvm();
                        if (m->flags & PG_BUSY) {
                            vm_page_flag_set(m, PG_WANTED);
                            tsleep(m, PVM, "pgtblk", 0);
                        }
                        splx(s);
                        goto doretry;
                    } else {
                        if ((curproc != pageproc) &&
                            ((m->queue - m->pc) == PQ_CACHE) &&
                            ((cnt.v_free_count + cnt.v_cache_count) <
                             (cnt.v_free_min + cnt.v_cache_min))) {
                            pagedaemon_wakeup();
                        }
                        if (tinc > (newbsize - toff))
                            tinc = newbsize - toff;
                        if (bp->b_flags & B_CACHE)
                            vfs_buf_set_valid(bp, off, toff, tinc, m);
                        vm_page_flag_clear(m, PG_ZERO);
                        vm_page_wire(m);
                    }
                    bp->b_pages[pageindex] = m;
                    curbpnpages = pageindex + 1;
                }
                if (vp->v_tag == VT_NFS &&
                    vp->v_type != VBLK) {
                    if (bp->b_dirtyend > 0) {
                        bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
                        bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
                    }
                    if (bp->b_validend == 0)
                        bp->b_flags &= ~B_CACHE;
                }
                bp->b_data = (caddr_t) trunc_page((vm_offset_t)bp->b_data);
                bp->b_npages = curbpnpages;
                pmap_qenter((vm_offset_t) bp->b_data,
                    bp->b_pages, bp->b_npages);
                ((vm_offset_t) bp->b_data) |= off & PAGE_MASK;
            }
        }
    }
    if (bp->b_flags & B_VMIO)
        vmiospace += (newbsize - bp->b_bufsize);
    bufspace += (newbsize - bp->b_bufsize);
    bp->b_bufsize = newbsize;
    bp->b_bcount = size;
    return 1;
}

/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf *bp)
{
    int s;

    s = splbio();
    while ((bp->b_flags & B_DONE) == 0)
#if defined(NO_SCHEDULE_MODS)
        tsleep(bp, PRIBIO, "biowait", 0);
#else
        if (bp->b_flags & B_READ)
            tsleep(bp, PRIBIO, "biord", 0);
        else
            tsleep(bp, PRIBIO, "biowr", 0);
#endif
    splx(s);
    if (bp->b_flags & B_EINTR) {
        bp->b_flags &= ~B_EINTR;
        return (EINTR);
    }
    if (bp->b_flags & B_ERROR) {
        return (bp->b_error ? bp->b_error : EIO);
    } else {
        return (0);
    }
}
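/*
 * Callers that cannot sleep in biowait() instead request a callback
 * from biodone() -- a minimal sketch (my_done is a hypothetical
 * completion handler):
 *
 *      bp->b_flags |= B_CALL | B_ASYNC;
 *      bp->b_iodone = my_done;
 *      VOP_STRATEGY(vp, bp);
 *
 * biodone() clears B_CALL and invokes (*b_iodone)(bp) at splbio, so
 * the handler runs in interrupt context and must not block.
 */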
/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
 */
void
biodone(register struct buf *bp)
{
    int s;

    s = splbio();

#if !defined(MAX_PERF)
    if (!(bp->b_flags & B_BUSY))
        panic("biodone: buffer not busy");
#endif

    if (bp->b_flags & B_DONE) {
        splx(s);
#if !defined(MAX_PERF)
        printf("biodone: buffer already done\n");
#endif
        return;
    }
    bp->b_flags |= B_DONE;

    if (bp->b_flags & B_FREEBUF) {
        brelse(bp);
        splx(s);
        return;
    }

    if ((bp->b_flags & B_READ) == 0) {
        vwakeup(bp);
    }

    /* call optional completion function if requested */
    if (bp->b_flags & B_CALL) {
        bp->b_flags &= ~B_CALL;
        (*bp->b_iodone) (bp);
        splx(s);
        return;
    }
    if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
        (*bioops.io_complete)(bp);

    if (bp->b_flags & B_VMIO) {
        int i, resid;
        vm_ooffset_t foff;
        vm_page_t m;
        vm_object_t obj;
        int iosize;
        struct vnode *vp = bp->b_vp;

        obj = vp->v_object;

#if defined(VFS_BIO_DEBUG)
        if (vp->v_usecount == 0) {
            panic("biodone: zero vnode ref count");
        }

        if (vp->v_object == NULL) {
            panic("biodone: missing VM object");
        }

        if ((vp->v_flag & VOBJBUF) == 0) {
            panic("biodone: vnode is not setup for merged cache");
        }
#endif

        foff = bp->b_offset;
#ifdef DIAGNOSTIC
        if (bp->b_offset == NOOFFSET)
            panic("biodone: no buffer offset");
#endif

#if !defined(MAX_PERF)
        if (!obj) {
            panic("biodone: no object");
        }
#endif
#if defined(VFS_BIO_DEBUG)
        if (obj->paging_in_progress < bp->b_npages) {
            printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
                obj->paging_in_progress, bp->b_npages);
        }
#endif
        iosize = bp->b_bufsize;
        for (i = 0; i < bp->b_npages; i++) {
            int bogusflag = 0;
            m = bp->b_pages[i];
            if (m == bogus_page) {
                bogusflag = 1;
                m = vm_page_lookup(obj, OFF_TO_IDX(foff));
                if (!m) {
#if defined(VFS_BIO_DEBUG)
                    printf("biodone: page disappeared\n");
#endif
                    vm_object_pip_subtract(obj, 1);
                    continue;
                }
                bp->b_pages[i] = m;
                pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
            }
#if defined(VFS_BIO_DEBUG)
            if (OFF_TO_IDX(foff) != m->pindex) {
                printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex);
            }
#endif
            resid = IDX_TO_OFF(m->pindex + 1) - foff;
            if (resid > iosize)
                resid = iosize;

            /*
             * In the write case, the valid and clean bits are
             * already changed correctly, so we only need to do this
             * here in the read case.
             */
            if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
                vfs_page_set_valid(bp, foff, i, m);
            }
            vm_page_flag_clear(m, PG_ZERO);

            /*
             * When debugging new filesystems or buffer I/O methods,
             * this is the most common error that pops up.  If you see
             * this, you have not set the page busy flag correctly!!!
             */
            if (m->busy == 0) {
#if !defined(MAX_PERF)
                printf("biodone: page busy < 0, "
                    "pindex: %d, foff: 0x(%x,%x), "
                    "resid: %d, index: %d\n",
                    (int) m->pindex, (int)(foff >> 32),
                    (int) foff & 0xffffffff, resid, i);
#endif
                if (vp->v_type != VBLK)
#if !defined(MAX_PERF)
                    printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
                        bp->b_vp->v_mount->mnt_stat.f_iosize,
                        (int) bp->b_lblkno,
                        bp->b_flags, bp->b_npages);
                else
                    printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
                        (int) bp->b_lblkno,
                        bp->b_flags, bp->b_npages);
                printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
                    m->valid, m->dirty, m->wire_count);
#endif
                panic("biodone: page busy < 0\n");
            }
            vm_page_io_finish(m);
            vm_object_pip_subtract(obj, 1);
            foff += resid;
            iosize -= resid;
        }
        if (obj &&
            (obj->paging_in_progress == 0) &&
            (obj->flags & OBJ_PIPWNT)) {
            vm_object_clear_flag(obj, OBJ_PIPWNT);
            wakeup(obj);
        }
    }
    /*
     * For asynchronous completions, release the buffer now.  The brelse
     * code checks for B_WANTED and will do the wakeup there if necessary,
     * so there is no need to do a wakeup here in the async case.
     */

    if (bp->b_flags & B_ASYNC) {
        if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
            brelse(bp);
        else
            bqrelse(bp);
    } else {
        bp->b_flags &= ~B_WANTED;
        wakeup(bp);
    }
    splx(s);
}

#if 0 /* not with Kirk's code */
static int vfs_update_interval = 30;

static void
vfs_update()
{
    while (1) {
        tsleep(&vfs_update_wakeup, PUSER, "update",
            hz * vfs_update_interval);
        vfs_update_wakeup = 0;
        sync(curproc, NULL);
    }
}

static int
sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
{
    int error = sysctl_handle_int(oidp,
        oidp->oid_arg1, oidp->oid_arg2, req);
    if (!error)
        wakeup(&vfs_update_wakeup);
    return error;
}

SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
    &vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");

#endif

/*
 * This routine is called in lieu of biodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
 */
void
vfs_unbusy_pages(struct buf *bp)
{
    int i;

    if (bp->b_flags & B_VMIO) {
        struct vnode *vp = bp->b_vp;
        vm_object_t obj = vp->v_object;

        for (i = 0; i < bp->b_npages; i++) {
            vm_page_t m = bp->b_pages[i];

            if (m == bogus_page) {
                m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
#if !defined(MAX_PERF)
                if (!m) {
                    panic("vfs_unbusy_pages: page missing\n");
                }
#endif
                bp->b_pages[i] = m;
                pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
            }
            vm_object_pip_subtract(obj, 1);
            vm_page_flag_clear(m, PG_ZERO);
            vm_page_io_finish(m);
        }
        if (obj->paging_in_progress == 0 &&
            (obj->flags & OBJ_PIPWNT)) {
            vm_object_clear_flag(obj, OBJ_PIPWNT);
            wakeup(obj);
        }
    }
}
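/*
 * The two functions below reconcile two granularities of "valid":
 * a page's m->valid bitmap has one bit per DEV_BSIZE (usually 512)
 * byte chunk, while b_validoff/b_validend are byte offsets within
 * the buffer.  For NFS, vfs_buf_set_valid() only extends the byte
 * range when a page's first valid run begins exactly at the current
 * b_validend, so the recorded range always stays contiguous.
 */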
/*
 * Set NFS' b_validoff and b_validend fields from the valid bits
 * of a page.  If the consumer is not NFS, and the page is not
 * valid for the entire range, clear the B_CACHE flag to force
 * the consumer to re-read the page.
 */
static void
vfs_buf_set_valid(struct buf *bp,
    vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
    vm_page_t m)
{
    if (bp->b_vp->v_tag == VT_NFS && bp->b_vp->v_type != VBLK) {
        vm_offset_t svalid, evalid;
        int validbits = m->valid >> (((foff+off)&PAGE_MASK)/DEV_BSIZE);

        /*
         * This only bothers with the first valid range in the
         * page.
         */
        svalid = off;
        while (validbits && !(validbits & 1)) {
            svalid += DEV_BSIZE;
            validbits >>= 1;
        }
        evalid = svalid;
        while (validbits & 1) {
            evalid += DEV_BSIZE;
            validbits >>= 1;
        }
        evalid = min(evalid, off + size);
        /*
         * Make sure this range is contiguous with the range
         * built up from previous pages.  If not, then we will
         * just use the range from the previous pages.
         */
        if (svalid == bp->b_validend) {
            bp->b_validoff = min(bp->b_validoff, svalid);
            bp->b_validend = max(bp->b_validend, evalid);
        }
    } else if (!vm_page_is_valid(m,
        (vm_offset_t) ((foff + off) & PAGE_MASK),
        size)) {
        bp->b_flags &= ~B_CACHE;
    }
}

/*
 * Set the valid bits in a page, taking care of the b_validoff,
 * b_validend fields which NFS uses to optimise small reads.  Off is
 * the offset within the file and pageno is the page index within the buf.
 */
static void
vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
{
    struct vnode *vp = bp->b_vp;
    vm_ooffset_t soff, eoff;

    soff = off;
    eoff = (off + PAGE_SIZE) & ~PAGE_MASK;
    if (eoff > bp->b_offset + bp->b_bufsize)
        eoff = bp->b_offset + bp->b_bufsize;
    if (vp->v_tag == VT_NFS && vp->v_type != VBLK) {
        vm_ooffset_t sv, ev;
        vm_page_set_invalid(m,
            (vm_offset_t) (soff & PAGE_MASK),
            (vm_offset_t) (eoff - soff));
        sv = (bp->b_offset + bp->b_validoff + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
        ev = (bp->b_offset + bp->b_validend) & ~(DEV_BSIZE - 1);
        soff = qmax(sv, soff);
        eoff = qmin(ev, eoff);
    }
    if (eoff > soff)
        vm_page_set_validclean(m,
            (vm_offset_t) (soff & PAGE_MASK),
            (vm_offset_t) (eoff - soff));
}
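/*
 * vfs_busy_pages() below and biodone()/vfs_unbusy_pages() above
 * bracket every VMIO transfer.  The canonical sequence, as used by
 * bread() and bwrite():
 *
 *      vfs_busy_pages(bp, 0);          (1 for a write)
 *      VOP_STRATEGY(vp, bp);
 *      biowait(bp);                    (or a biodone() callback)
 *
 * A driver that cannot complete the transfer undoes the busy state
 * with vfs_unbusy_pages() instead of calling biodone().
 */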
/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
 * almost as being PG_BUSY.  Also the object paging_in_progress
 * flag is handled to make sure that the object doesn't become
 * inconsistent.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i, bogus;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		foff = bp->b_offset;
#ifdef DIAGNOSTIC
		if (bp->b_offset == NOOFFSET)
			panic("vfs_busy_pages: no buffer offset");
#endif

		vfs_setdirty(bp);

retry:
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			if (vm_page_sleep(m, "vbpage", NULL))
				goto retry;
		}

		bogus = 0;
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			vm_page_flag_clear(m, PG_ZERO);
			if ((bp->b_flags & B_CLUSTER) == 0) {
				vm_object_pip_add(obj, 1);
				vm_page_io_start(m);
			}

			vm_page_protect(m, VM_PROT_NONE);
			if (clear_modify)
				vfs_page_set_valid(bp, foff, i, m);
			else if (m->valid == VM_PAGE_BITS_ALL &&
			    (bp->b_flags & B_CACHE) == 0) {
				bp->b_pages[i] = bogus_page;
				bogus++;
			}
			foff = (foff + PAGE_SIZE) & ~PAGE_MASK;
		}
		if (bogus)
			pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
	}
}

/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 */
void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_ooffset_t foff;
		foff = bp->b_offset;

#ifdef DIAGNOSTIC
		if (bp->b_offset == NOOFFSET)
			panic("vfs_clean_pages: no buffer offset");
#endif

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			vfs_page_set_valid(bp, foff, i, m);
			foff = (foff + PAGE_SIZE) & ~PAGE_MASK;
		}
	}
}

/*
 * Clear a buffer's contents, zeroing only the DEV_BSIZE chunks that
 * are not already valid in the underlying pages (or were not
 * pre-zeroed), then mark those chunks valid.  Non-VMIO (malloced)
 * buffers are simply cleared with clrbuf().
 */
void
vfs_bio_clrbuf(struct buf *bp)
{
	int i, mask = 0;
	caddr_t sa, ea;

	if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) {
		if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
		    (bp->b_offset & PAGE_MASK) == 0) {
			mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
			if (((bp->b_pages[0]->flags & PG_ZERO) == 0) &&
			    ((bp->b_pages[0]->valid & mask) != mask)) {
				bzero(bp->b_data, bp->b_bufsize);
			}
			bp->b_pages[0]->valid |= mask;
			bp->b_resid = 0;
			return;
		}
		ea = sa = bp->b_data;
		for (i = 0; i < bp->b_npages; i++, sa = ea) {
			int j = ((u_long)sa & PAGE_MASK) / DEV_BSIZE;
			ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE);
			ea = (caddr_t)ulmin((u_long)ea,
			    (u_long)bp->b_data + bp->b_bufsize);
			mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
			if ((bp->b_pages[i]->valid & mask) == mask)
				continue;
			if ((bp->b_pages[i]->valid & mask) == 0) {
				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
					bzero(sa, ea - sa);
				}
			} else {
				for (; sa < ea; sa += DEV_BSIZE, j++) {
					if (((bp->b_pages[i]->flags & PG_ZERO) == 0) &&
					    (bp->b_pages[i]->valid & (1 << j)) == 0)
						bzero(sa, DEV_BSIZE);
				}
			}
			bp->b_pages[i]->valid |= mask;
			vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
}
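/*
 * Worked example for the mask computation above (illustrative,
 * assuming PAGE_SIZE = 4096 and DEV_BSIZE = 512): a single-page
 * buffer with b_bufsize = 1536 covers three 512-byte chunks, so
 *
 *	mask = (1 << (1536 / 512)) - 1 = (1 << 3) - 1 = 0x07
 *
 * If the page was not pre-zeroed (PG_ZERO clear) and any of those
 * three valid bits is clear, the buffer data is bzero'd and bits
 * 0-2 of m->valid are then set.
 */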
/*
 * vm_hold_load_pages and vm_hold_free_pages get pages into and out
 * of a buffer's address space.  The pages are anonymous and are
 * not associated with a file object.
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	to = round_page(to);
	from = round_page(from);
	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {

tryagain:

		p = vm_page_alloc(kernel_object,
		    ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
		    VM_ALLOC_NORMAL);
		if (!p) {
			vm_pageout_deficit += (to - from) >> PAGE_SHIFT;
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		p->valid = VM_PAGE_BITS_ALL;
		vm_page_flag_clear(p, PG_ZERO);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[index] = p;
		vm_page_wakeup(p);
	}
	bp->b_npages = index;
}

void
vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index, newnpages;

	from = round_page(from);
	to = round_page(to);
	newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
		p = bp->b_pages[index];
		if (p && (index < bp->b_npages)) {
#if !defined(MAX_PERF)
			if (p->busy) {
				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
				    bp->b_blkno, bp->b_lblkno);
			}
#endif
			bp->b_pages[index] = NULL;
			pmap_kremove(pg);
			vm_page_busy(p);
			vm_page_unwire(p, 0);
			vm_page_free(p);
		}
	}
	bp->b_npages = newnpages;
}


#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(buffer, db_show_buffer)
{
	/* get args */
	struct buf *bp = (struct buf *)addr;

	if (!have_addr) {
		db_printf("usage: show buffer <addr>\n");
		return;
	}

	db_printf("b_proc = %p,\nb_flags = 0x%b\n", (void *)bp->b_proc,
	    (u_int)bp->b_flags, PRINT_BUF_FLAGS);
	db_printf("b_error = %d, b_bufsize = %ld, b_bcount = %ld, "
	    "b_resid = %ld\nb_dev = 0x%x, b_data = %p, "
	    "b_blkno = %d, b_pblkno = %d\n",
	    bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
	    bp->b_dev, bp->b_data, bp->b_blkno, bp->b_pblkno);
	if (bp->b_npages) {
		int i;
		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m;
			m = bp->b_pages[i];
			db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
			    (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
			if ((i + 1) < bp->b_npages)
				db_printf(",");
		}
		db_printf("\n");
	}
}
#endif /* DDB */
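/*
 * Example DDB session for the command above (illustrative only; the
 * address and all printed field values are fabricated):
 *
 *	db> show buffer 0xc02e5f00
 *	b_proc = 0,
 *	b_flags = 0x20000020<vmio,async>
 *	b_error = 0, b_bufsize = 8192, b_bcount = 8192, b_resid = 0
 *	b_dev = 0xffffffff, b_data = 0xc3a00000, b_blkno = 64, b_pblkno = 64
 *	b_npages = 2, pages(OBJ, IDX, PA): (0xc0a1b2c0, 0x0, 0x1a2000),(0xc0a1b2c0, 0x1, 0x1a3000)
 */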