vnode_pager.c revision 207410
/*-
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1993, 1994 John S. Dyson
 * Copyright (c) 1995, David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vnode_pager.c	7.5 (Berkeley) 4/20/91
 */

/*
 * Page to/from files (vnodes).
 */

/*
 * TODO:
 *	Implement VOP_GETPAGES/PUTPAGES interface for filesystems.  Will
 *	greatly re-simplify the vnode_pager.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vnode_pager.c 207410 2010-04-30 00:46:43Z kmacy $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vmmeter.h>
#include <sys/limits.h>
#include <sys/conf.h>
#include <sys/sf_buf.h>

#include <machine/atomic.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>

static int vnode_pager_addr(struct vnode *vp, vm_ooffset_t address,
    daddr_t *rtaddress, int *run);
static int vnode_pager_input_smlfs(vm_object_t object, vm_page_t m);
static int vnode_pager_input_old(vm_object_t object, vm_page_t m);
static void vnode_pager_dealloc(vm_object_t);
static int vnode_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void vnode_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t, int *);
static boolean_t vnode_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static vm_object_t vnode_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *cred);

struct pagerops vnodepagerops = {
	.pgo_alloc =	vnode_pager_alloc,
	.pgo_dealloc =	vnode_pager_dealloc,
	.pgo_getpages =	vnode_pager_getpages,
	.pgo_putpages =	vnode_pager_putpages,
	.pgo_haspage =	vnode_pager_haspage,
};

int vnode_pbuf_freecnt;

/* Create the VM system backing object for this vnode */
int
vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
{
	vm_object_t object;
	vm_ooffset_t size = isize;
	struct vattr va;

	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

	while ((object = vp->v_object) != NULL) {
		VM_OBJECT_LOCK(object);
		if (!(object->flags & OBJ_DEAD)) {
			VM_OBJECT_UNLOCK(object);
			return (0);
		}
		VOP_UNLOCK(vp, 0);
		vm_object_set_flag(object, OBJ_DISCONNECTWNT);
		msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "vodead", 0);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	if (size == 0) {
		if (vn_isdisk(vp, NULL)) {
			size = IDX_TO_OFF(INT_MAX);
		} else {
			if (VOP_GETATTR(vp, &va, td->td_ucred))
				return (0);
			size = va.va_size;
		}
	}

	object = vnode_pager_alloc(vp, size, 0, 0, td->td_ucred);
	/*
	 * Dereference the reference we just created.  This assumes
	 * that the object is associated with the vp.
	 */
	VM_OBJECT_LOCK(object);
	object->ref_count--;
	VM_OBJECT_UNLOCK(object);
	vrele(vp);

	KASSERT(vp->v_object != NULL, ("vnode_create_vobject: NULL object"));

	return (0);
}
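
#if 0
/*
 * Hedged sketch (illustrative only, not part of this file): a
 * filesystem's open path typically calls vnode_create_vobject() with
 * the vnode exclusively locked; passing a size of 0 makes the routine
 * fetch the real size with VOP_GETATTR() itself.  "myfs_open" is a
 * hypothetical name.
 */
static int
myfs_open(struct vop_open_args *ap)
{

	ASSERT_VOP_ELOCKED(ap->a_vp, "myfs_open");
	return (vnode_create_vobject(ap->a_vp, 0, ap->a_td));
}
#endif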

void
vnode_destroy_vobject(struct vnode *vp)
{
	struct vm_object *obj;

	obj = vp->v_object;
	if (obj == NULL)
		return;
	ASSERT_VOP_ELOCKED(vp, "vnode_destroy_vobject");
	VM_OBJECT_LOCK(obj);
	if (obj->ref_count == 0) {
		/*
		 * vclean() may be called twice.  The first time
		 * removes the primary reference to the object,
		 * the second time goes one further and is a
		 * special-case to terminate the object.
		 *
		 * don't double-terminate the object
		 */
		if ((obj->flags & OBJ_DEAD) == 0)
			vm_object_terminate(obj);
		else
			VM_OBJECT_UNLOCK(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
		VM_OBJECT_UNLOCK(obj);
	}
	vp->v_object = NULL;
}


/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 *
 * MPSAFE
 */
vm_object_t
vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset, struct ucred *cred)
{
	vm_object_t object;
	struct vnode *vp;

	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return (NULL);

	vp = (struct vnode *) handle;

	/*
	 * If the object is being terminated, wait for it to
	 * go away.
	 */
retry:
	while ((object = vp->v_object) != NULL) {
		VM_OBJECT_LOCK(object);
		if ((object->flags & OBJ_DEAD) == 0)
			break;
		vm_object_set_flag(object, OBJ_DISCONNECTWNT);
		msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "vadead", 0);
	}

	if (vp->v_usecount == 0)
		panic("vnode_pager_alloc: no vnode reference");

	if (object == NULL) {
		/*
		 * Add an object of the appropriate size
		 */
		object = vm_object_allocate(OBJT_VNODE, OFF_TO_IDX(round_page(size)));

		object->un_pager.vnp.vnp_size = size;

		object->handle = handle;
		VI_LOCK(vp);
		if (vp->v_object != NULL) {
			/*
			 * Object has been created while we were sleeping
			 */
			VI_UNLOCK(vp);
			vm_object_destroy(object);
			goto retry;
		}
		vp->v_object = object;
		VI_UNLOCK(vp);
	} else {
		object->ref_count++;
		VM_OBJECT_UNLOCK(object);
	}
	vref(vp);
	return (object);
}

/*
 * The object must be locked.
 */
static void
vnode_pager_dealloc(object)
	vm_object_t object;
{
	struct vnode *vp;
	int refs;

	vp = object->handle;
	if (vp == NULL)
		panic("vnode_pager_dealloc: pager already dealloced");

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	vm_object_pip_wait(object, "vnpdea");
	refs = object->ref_count;

	object->handle = NULL;
	object->type = OBJT_DEAD;
	if (object->flags & OBJ_DISCONNECTWNT) {
		vm_object_clear_flag(object, OBJ_DISCONNECTWNT);
		wakeup(object);
	}
	ASSERT_VOP_ELOCKED(vp, "vnode_pager_dealloc");
	vp->v_object = NULL;
	vp->v_vflag &= ~VV_TEXT;
	while (refs-- > 0)
		vunref(vp);
}
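
#if 0
/*
 * Hedged usage sketch (illustrative, not part of this file): each
 * successful vnode_pager_alloc() call takes one object reference and
 * vref()s the vnode, so a caller that only wants the vp->v_object
 * association established must drop both again, exactly as
 * vnode_create_vobject() does above.  "example_assoc" and its
 * arguments are hypothetical.
 */
static void
example_assoc(struct vnode *vp, vm_ooffset_t size, struct ucred *cred)
{
	vm_object_t obj;

	obj = vnode_pager_alloc(vp, size, 0, 0, cred); /* +1 ref, vref(vp) */
	VM_OBJECT_LOCK(obj);
	obj->ref_count--;		/* keep only the vp->v_object link */
	VM_OBJECT_UNLOCK(obj);
	vrele(vp);			/* undo the vref() taken above */
}
#endif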

static boolean_t
vnode_pager_haspage(object, pindex, before, after)
	vm_object_t object;
	vm_pindex_t pindex;
	int *before;
	int *after;
{
	struct vnode *vp = object->handle;
	daddr_t bn;
	int err;
	daddr_t reqblock;
	int poff;
	int bsize;
	int pagesperblock, blocksperpage;
	int vfslocked;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	/*
	 * If no vp or vp is doomed or marked transparent to VM, we do not
	 * have the page.
	 */
	if (vp == NULL || vp->v_iflag & VI_DOOMED)
		return FALSE;
	/*
	 * If the offset is beyond end of file we do
	 * not have the page.
	 */
	if (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size)
		return FALSE;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	pagesperblock = bsize / PAGE_SIZE;
	blocksperpage = 0;
	if (pagesperblock > 0) {
		reqblock = pindex / pagesperblock;
	} else {
		blocksperpage = (PAGE_SIZE / bsize);
		reqblock = pindex * blocksperpage;
	}
	VM_OBJECT_UNLOCK(object);
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	err = VOP_BMAP(vp, reqblock, NULL, &bn, after, before);
	VFS_UNLOCK_GIANT(vfslocked);
	VM_OBJECT_LOCK(object);
	if (err)
		return TRUE;
	if (bn == -1)
		return FALSE;
	if (pagesperblock > 0) {
		poff = pindex - (reqblock * pagesperblock);
		if (before) {
			*before *= pagesperblock;
			*before += poff;
		}
		if (after) {
			int numafter;

			*after *= pagesperblock;
			numafter = pagesperblock - (poff + 1);
			if (IDX_TO_OFF(pindex + numafter) >
			    object->un_pager.vnp.vnp_size) {
				numafter =
				    OFF_TO_IDX(object->un_pager.vnp.vnp_size) -
				    pindex;
			}
			*after += numafter;
		}
	} else {
		if (before) {
			*before /= blocksperpage;
		}

		if (after) {
			*after /= blocksperpage;
		}
	}
	return TRUE;
}
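
/*
 * Worked example of the conversion above (illustrative numbers only):
 * with f_iosize = 32768 and PAGE_SIZE = 4096, pagesperblock = 8, so
 * page index 21 maps to filesystem block 2 with poff = 5.  If
 * VOP_BMAP() reports one contiguous block before and one after, the
 * block counts become page counts: *before = 1 * 8 + 5 = 13 and
 * *after = 1 * 8 + (8 - 6) = 10, the latter clipped at EOF.
 */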

/*
 * Lets the VM system know about a change in size for a file.
 * We adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
void
vnode_pager_setsize(vp, nsize)
	struct vnode *vp;
	vm_ooffset_t nsize;
{
	vm_object_t object;
	vm_page_t m;
	vm_pindex_t nobjsize;

	if ((object = vp->v_object) == NULL)
		return;
/*	ASSERT_VOP_ELOCKED(vp, "vnode_pager_setsize and not locked vnode"); */
	VM_OBJECT_LOCK(object);
	if (nsize == object->un_pager.vnp.vnp_size) {
		/*
		 * Hasn't changed size
		 */
		VM_OBJECT_UNLOCK(object);
		return;
	}
	nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);
	if (nsize < object->un_pager.vnp.vnp_size) {
		/*
		 * File has shrunk.  Toss any cached pages beyond the new EOF.
		 */
		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    FALSE);
		/*
		 * this gets rid of garbage at the end of a page that is now
		 * only partially backed by the vnode.
		 *
		 * XXX for some reason (I don't know yet), if we take a
		 * completely invalid page and mark it partially valid
		 * it can screw up NFS reads, so we don't allow the case.
		 */
		if ((nsize & PAGE_MASK) &&
		    (m = vm_page_lookup(object, OFF_TO_IDX(nsize))) != NULL &&
		    m->valid != 0) {
			int base = (int)nsize & PAGE_MASK;
			int size = PAGE_SIZE - base;

			/*
			 * Clear out partial-page garbage in case
			 * the page has been mapped.
			 */
			pmap_zero_page_area(m, base, size);

			/*
			 * Update the valid bits to reflect the blocks that
			 * have been zeroed.  Some of these valid bits may
			 * have already been set.
			 */
			vm_page_set_valid(m, base, size);

			/*
			 * Round "base" to the next block boundary so that the
			 * dirty bit for a partially zeroed block is not
			 * cleared.
			 */
			base = roundup2(base, DEV_BSIZE);

			/*
			 * Clear out partial-page dirty bits.
			 *
			 * note that we do not clear out the valid
			 * bits.  This would prevent bogus_page
			 * replacement from working properly.
			 */
			vm_page_lock(m);
			vm_page_lock_queues();
			vm_page_clear_dirty(m, base, PAGE_SIZE - base);
			vm_page_unlock_queues();
			vm_page_unlock(m);
		} else if ((nsize & PAGE_MASK) &&
		    __predict_false(object->cache != NULL)) {
			vm_page_cache_free(object, OFF_TO_IDX(nsize),
			    nobjsize);
		}
	}
	object->un_pager.vnp.vnp_size = nsize;
	object->size = nobjsize;
	VM_OBJECT_UNLOCK(object);
}

/*
 * calculate the linear (byte) disk address of specified virtual
 * file address
 */
static int
vnode_pager_addr(struct vnode *vp, vm_ooffset_t address, daddr_t *rtaddress,
    int *run)
{
	int bsize;
	int err;
	daddr_t vblock;
	daddr_t voffset;

	if (address < 0)
		return -1;

	if (vp->v_iflag & VI_DOOMED)
		return -1;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	vblock = address / bsize;
	voffset = address % bsize;

	err = VOP_BMAP(vp, vblock, NULL, rtaddress, run, NULL);
	if (err == 0) {
		if (*rtaddress != -1)
			*rtaddress += voffset / DEV_BSIZE;
		if (run) {
			*run += 1;
			*run *= bsize/PAGE_SIZE;
			*run -= voffset/PAGE_SIZE;
		}
	}

	return (err);
}

/*
 * small block filesystem vnode pager input
 */
static int
vnode_pager_input_smlfs(object, m)
	vm_object_t object;
	vm_page_t m;
{
	int bits, i;
	struct vnode *vp;
	struct bufobj *bo;
	struct buf *bp;
	struct sf_buf *sf;
	daddr_t fileaddr;
	vm_offset_t bsize;
	int error = 0;

	vp = object->handle;
	if (vp->v_iflag & VI_DOOMED)
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	VOP_BMAP(vp, 0, &bo, 0, NULL, NULL);

	sf = sf_buf_alloc(m, 0);

	for (i = 0; i < PAGE_SIZE / bsize; i++) {
		vm_ooffset_t address;

		bits = vm_page_bits(i * bsize, bsize);
		if (m->valid & bits)
			continue;

		address = IDX_TO_OFF(m->pindex) + i * bsize;
		if (address >= object->un_pager.vnp.vnp_size) {
			fileaddr = -1;
		} else {
			error = vnode_pager_addr(vp, address, &fileaddr, NULL);
			if (error)
				break;
		}
		if (fileaddr != -1) {
			bp = getpbuf(&vnode_pbuf_freecnt);

			/* build a minimal buffer header */
			bp->b_iocmd = BIO_READ;
			bp->b_iodone = bdone;
			KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
			KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
			bp->b_rcred = crhold(curthread->td_ucred);
			bp->b_wcred = crhold(curthread->td_ucred);
			bp->b_data = (caddr_t)sf_buf_kva(sf) + i * bsize;
			bp->b_blkno = fileaddr;
			pbgetbo(bo, bp);
			bp->b_bcount = bsize;
			bp->b_bufsize = bsize;
			bp->b_runningbufspace = bp->b_bufsize;
			atomic_add_long(&runningbufspace, bp->b_runningbufspace);

			/* do the input */
			bp->b_iooffset = dbtob(bp->b_blkno);
			bstrategy(bp);

			bwait(bp, PVM, "vnsrd");

			if ((bp->b_ioflags & BIO_ERROR) != 0)
				error = EIO;

			/*
			 * free the buffer header back to the swap buffer pool
			 */
			pbrelbo(bp);
			relpbuf(bp, &vnode_pbuf_freecnt);
			if (error)
				break;
		} else
			bzero((caddr_t)sf_buf_kva(sf) + i * bsize, bsize);
		KASSERT((m->dirty & bits) == 0,
		    ("vnode_pager_input_smlfs: page %p is dirty", m));
		VM_OBJECT_LOCK(object);
		m->valid |= bits;
		VM_OBJECT_UNLOCK(object);
	}
	sf_buf_free(sf);
	if (error) {
		return VM_PAGER_ERROR;
	}
	return VM_PAGER_OK;
}
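
/*
 * Worked example of the per-block valid masking above (illustrative
 * numbers): with bsize = 1024 and PAGE_SIZE = 4096 the loop runs four
 * times; vm_page_bits(2 * 1024, 1024) yields the mask covering the two
 * DEV_BSIZE chunks for bytes 2048-3071 (0x30 with 512-byte chunks), so
 * a 1K block already valid from an earlier fault is skipped and only
 * the missing blocks are read from disk or zeroed past EOF.
 */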

/*
 * old style vnode pager input routine
 */
static int
vnode_pager_input_old(object, m)
	vm_object_t object;
	vm_page_t m;
{
	struct uio auio;
	struct iovec aiov;
	int error;
	int size;
	struct sf_buf *sf;
	struct vnode *vp;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	error = 0;

	/*
	 * Return failure if beyond current EOF
	 */
	if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
		return VM_PAGER_BAD;
	} else {
		size = PAGE_SIZE;
		if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
			size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
		vp = object->handle;
		VM_OBJECT_UNLOCK(object);

		/*
		 * Allocate a kernel virtual address and initialize so that
		 * we can use VOP_READ/WRITE routines.
		 */
		sf = sf_buf_alloc(m, 0);

		aiov.iov_base = (caddr_t)sf_buf_kva(sf);
		aiov.iov_len = size;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = IDX_TO_OFF(m->pindex);
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_READ;
		auio.uio_resid = size;
		auio.uio_td = curthread;

		error = VOP_READ(vp, &auio, 0, curthread->td_ucred);
		if (!error) {
			int count = size - auio.uio_resid;

			if (count == 0)
				error = EINVAL;
			else if (count != PAGE_SIZE)
				bzero((caddr_t)sf_buf_kva(sf) + count,
				    PAGE_SIZE - count);
		}
		sf_buf_free(sf);

		VM_OBJECT_LOCK(object);
	}
	KASSERT(m->dirty == 0, ("vnode_pager_input_old: page %p is dirty", m));
	if (!error)
		m->valid = VM_PAGE_BITS_ALL;
	return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}

/*
 * generic vnode pager input routine
 */

/*
 * Local media VFS's that do not implement their own VOP_GETPAGES
 * should have their VOP_GETPAGES call vnode_pager_generic_getpages()
 * to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_GETPAGES.
 */
static int
vnode_pager_getpages(object, m, count, reqpage)
	vm_object_t object;
	vm_page_t *m;
	int count;
	int reqpage;
{
	int rtval;
	struct vnode *vp;
	int bytes = count * PAGE_SIZE;
	int vfslocked;

	vp = object->handle;
	VM_OBJECT_UNLOCK(object);
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	rtval = VOP_GETPAGES(vp, m, bytes, reqpage, 0);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: FS getpages not implemented\n"));
	VFS_UNLOCK_GIANT(vfslocked);
	VM_OBJECT_LOCK(object);
	return rtval;
}
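
#if 0
/*
 * Hedged sketch of the convention described above: a local media
 * filesystem that does not roll its own getpages simply forwards its
 * VOP_GETPAGES entry to the generic routine.  "myfs_getpages" is a
 * hypothetical name; several in-tree filesystems follow this pattern.
 */
static int
myfs_getpages(struct vop_getpages_args *ap)
{

	return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage));
}
#endif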

/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_GETPAGES.
 */
int
vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
	struct vnode *vp;
	vm_page_t *m;
	int bytecount;
	int reqpage;
{
	vm_object_t object;
	vm_offset_t kva;
	off_t foff, tfoff, nextoff;
	int i, j, size, bsize, first;
	daddr_t firstaddr, reqblock;
	struct bufobj *bo;
	int runpg;
	int runend;
	struct buf *bp;
	int count;
	int error;

	object = vp->v_object;
	count = bytecount / PAGE_SIZE;

	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
	    ("vnode_pager_generic_getpages does not support devices"));
	if (vp->v_iflag & VI_DOOMED)
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	/* get the UNDERLYING device for the file with VOP_BMAP() */

	/*
	 * originally, we did not check for an error return value -- assuming
	 * an fs always has a bmap entry point -- that assumption is wrong!!!
	 */
	foff = IDX_TO_OFF(m[reqpage]->pindex);

	/*
	 * if we can't bmap, use old VOP code
	 */
	error = VOP_BMAP(vp, foff / bsize, &bo, &reqblock, NULL, NULL);
	if (error == EOPNOTSUPP) {
		VM_OBJECT_LOCK(object);

		for (i = 0; i < count; i++)
			if (i != reqpage) {
				vm_page_lock(m[i]);
				vm_page_lock_queues();
				vm_page_free(m[i]);
				vm_page_unlock_queues();
				vm_page_unlock(m[i]);
			}
		PCPU_INC(cnt.v_vnodein);
		PCPU_INC(cnt.v_vnodepgsin);
		error = vnode_pager_input_old(object, m[reqpage]);
		VM_OBJECT_UNLOCK(object);
		return (error);
	} else if (error != 0) {
		VM_OBJECT_LOCK(object);
		for (i = 0; i < count; i++)
			if (i != reqpage) {
				vm_page_lock(m[i]);
				vm_page_lock_queues();
				vm_page_free(m[i]);
				vm_page_unlock_queues();
				vm_page_unlock(m[i]);
			}
		VM_OBJECT_UNLOCK(object);
		return (VM_PAGER_ERROR);

		/*
		 * if the blocksize is smaller than a page size, then use
		 * special small filesystem code.  NFS sometimes has a small
		 * blocksize, but it can handle large reads itself.
		 */
	} else if ((PAGE_SIZE / bsize) > 1 &&
	    (vp->v_mount->mnt_stat.f_type != nfs_mount_type)) {
		VM_OBJECT_LOCK(object);
		for (i = 0; i < count; i++)
			if (i != reqpage) {
				vm_page_lock(m[i]);
				vm_page_lock_queues();
				vm_page_free(m[i]);
				vm_page_unlock_queues();
				vm_page_unlock(m[i]);
			}
		VM_OBJECT_UNLOCK(object);
		PCPU_INC(cnt.v_vnodein);
		PCPU_INC(cnt.v_vnodepgsin);
		return vnode_pager_input_smlfs(object, m[reqpage]);
	}

	/*
	 * If we have a completely valid page available to us, we can
	 * clean up and return.  Otherwise we have to re-read the
	 * media.
	 */
	VM_OBJECT_LOCK(object);
	if (m[reqpage]->valid == VM_PAGE_BITS_ALL) {
		for (i = 0; i < count; i++)
			if (i != reqpage) {
				vm_page_lock(m[i]);
				vm_page_lock_queues();
				vm_page_free(m[i]);
				vm_page_unlock_queues();
				vm_page_unlock(m[i]);
			}
		VM_OBJECT_UNLOCK(object);
		return VM_PAGER_OK;
	} else if (reqblock == -1) {
		pmap_zero_page(m[reqpage]);
		KASSERT(m[reqpage]->dirty == 0,
		    ("vnode_pager_generic_getpages: page %p is dirty",
		    m[reqpage]));
		m[reqpage]->valid = VM_PAGE_BITS_ALL;
		for (i = 0; i < count; i++)
			if (i != reqpage) {
				vm_page_lock(m[i]);
				vm_page_lock_queues();
				vm_page_free(m[i]);
				vm_page_unlock_queues();
				vm_page_unlock(m[i]);
			}
		VM_OBJECT_UNLOCK(object);
		return (VM_PAGER_OK);
	}
	m[reqpage]->valid = 0;
	VM_OBJECT_UNLOCK(object);
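
	/*
	 * Worked example of the run logic below (illustrative numbers):
	 * with count = 8, reqpage = 3, and vnode_pager_addr() reporting
	 * a contiguous on-disk run of runpg = 5 pages starting at m[0],
	 * the first pass gives runend = 5 > reqpage, so the pages
	 * m[5..7] beyond the run are freed, count is trimmed to 5, and
	 * a single clustered read covers the requested page and its
	 * four neighbors.
	 */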

	/*
	 * here on direct device I/O
	 */
	firstaddr = -1;

	/*
	 * calculate the run that includes the required page
	 */
	for (first = 0, i = 0; i < count; i = runend) {
		if (vnode_pager_addr(vp, IDX_TO_OFF(m[i]->pindex), &firstaddr,
		    &runpg) != 0) {
			VM_OBJECT_LOCK(object);
			for (; i < count; i++)
				if (i != reqpage) {
					vm_page_lock(m[i]);
					vm_page_lock_queues();
					vm_page_free(m[i]);
					vm_page_unlock_queues();
					vm_page_unlock(m[i]);
				}
			VM_OBJECT_UNLOCK(object);
			return (VM_PAGER_ERROR);
		}
		if (firstaddr == -1) {
			VM_OBJECT_LOCK(object);
			if (i == reqpage && foff < object->un_pager.vnp.vnp_size) {
				panic("vnode_pager_getpages: unexpected missing page: firstaddr: %jd, foff: 0x%jx%08jx, vnp_size: 0x%jx%08jx",
				    (intmax_t)firstaddr, (uintmax_t)(foff >> 32),
				    (uintmax_t)foff,
				    (uintmax_t)
				    (object->un_pager.vnp.vnp_size >> 32),
				    (uintmax_t)object->un_pager.vnp.vnp_size);
			}
			vm_page_lock(m[i]);
			vm_page_lock_queues();
			vm_page_free(m[i]);
			vm_page_unlock_queues();
			vm_page_unlock(m[i]);
			VM_OBJECT_UNLOCK(object);
			runend = i + 1;
			first = runend;
			continue;
		}
		runend = i + runpg;
		if (runend <= reqpage) {
			VM_OBJECT_LOCK(object);
			for (j = i; j < runend; j++) {
				vm_page_lock(m[j]);
				vm_page_lock_queues();
				vm_page_free(m[j]);
				vm_page_unlock_queues();
				vm_page_unlock(m[j]);
			}
			VM_OBJECT_UNLOCK(object);
		} else {
			if (runpg < (count - first)) {
				VM_OBJECT_LOCK(object);
				for (i = first + runpg; i < count; i++) {
					vm_page_lock(m[i]);
					vm_page_lock_queues();
					vm_page_free(m[i]);
					vm_page_unlock_queues();
					vm_page_unlock(m[i]);
				}
				VM_OBJECT_UNLOCK(object);
				count = first + runpg;
			}
			break;
		}
		first = runend;
	}

	/*
	 * the first and last page have been calculated now, move input pages
	 * to be zero based...
	 */
	if (first != 0) {
		m += first;
		count -= first;
		reqpage -= first;
	}

	/*
	 * calculate the file virtual address for the transfer
	 */
	foff = IDX_TO_OFF(m[0]->pindex);

	/*
	 * calculate the size of the transfer
	 */
	size = count * PAGE_SIZE;
	KASSERT(count > 0, ("zero count"));
	if ((foff + size) > object->un_pager.vnp.vnp_size)
		size = object->un_pager.vnp.vnp_size - foff;
	KASSERT(size > 0, ("zero size"));

	/*
	 * round up physical size for real devices.
	 */
	if (1) {
		int secmask = bo->bo_bsize - 1;

		KASSERT(secmask < PAGE_SIZE && secmask > 0,
		    ("vnode_pager_generic_getpages: sector size %d too large",
		    secmask + 1));
		size = (size + secmask) & ~secmask;
	}

	bp = getpbuf(&vnode_pbuf_freecnt);
	kva = (vm_offset_t) bp->b_data;

	/*
	 * and map the pages to be read into the kva
	 */
	pmap_qenter(kva, m, count);

	/* build a minimal buffer header */
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = bdone;
	KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
	KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
	bp->b_rcred = crhold(curthread->td_ucred);
	bp->b_wcred = crhold(curthread->td_ucred);
	bp->b_blkno = firstaddr;
	pbgetbo(bo, bp);
	bp->b_bcount = size;
	bp->b_bufsize = size;
	bp->b_runningbufspace = bp->b_bufsize;
	atomic_add_long(&runningbufspace, bp->b_runningbufspace);

	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, count);

	/* do the input */
	bp->b_iooffset = dbtob(bp->b_blkno);
	bstrategy(bp);

	bwait(bp, PVM, "vnread");

	if ((bp->b_ioflags & BIO_ERROR) != 0)
		error = EIO;

	if (!error) {
		if (size != count * PAGE_SIZE)
			bzero((caddr_t) kva + size, PAGE_SIZE * count - size);
	}
	pmap_qremove(kva, count);

	/*
	 * free the buffer header back to the swap buffer pool
	 */
	pbrelbo(bp);
	relpbuf(bp, &vnode_pbuf_freecnt);
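
	/*
	 * Worked example for the completion loop below (illustrative
	 * numbers): with vnp_size = 10000 and one 4K page at file offset
	 * 8192, "size" was clipped to 10000 - 8192 = 1808 bytes above
	 * (then rounded up to the device sector size for the read), the
	 * tail of the buffer was bzero()ed, and since nextoff = 12288
	 * exceeds vnp_size the loop marks only the first 1808 bytes of
	 * the page valid instead of setting VM_PAGE_BITS_ALL.
	 */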
	VM_OBJECT_LOCK(object);
	for (i = 0, tfoff = foff; i < count; i++, tfoff = nextoff) {
		vm_page_t mt;

		nextoff = tfoff + PAGE_SIZE;
		mt = m[i];

		vm_page_lock(mt);
		vm_page_lock_queues();
		if (nextoff <= object->un_pager.vnp.vnp_size) {
			/*
			 * Read filled up entire page.
			 */
			mt->valid = VM_PAGE_BITS_ALL;
			KASSERT(mt->dirty == 0,
			    ("vnode_pager_generic_getpages: page %p is dirty",
			    mt));
			KASSERT(!pmap_page_is_mapped(mt),
			    ("vnode_pager_generic_getpages: page %p is mapped",
			    mt));
		} else {
			/*
			 * Read did not fill up entire page.
			 *
			 * Currently we do not set the entire page valid,
			 * we just try to clear the piece that we couldn't
			 * read.
			 */
			vm_page_set_valid(mt, 0,
			    object->un_pager.vnp.vnp_size - tfoff);
			KASSERT((mt->dirty & vm_page_bits(0,
			    object->un_pager.vnp.vnp_size - tfoff)) == 0,
			    ("vnode_pager_generic_getpages: page %p is dirty",
			    mt));
		}

		if (i != reqpage) {

			/*
			 * whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Result:
			 * It appears that empirical results show that
			 * deactivating pages is best.
			 */

			/*
			 * just in case someone was asking for this page we
			 * now tell them that it is ok to use
			 */
			if (!error) {
				if (mt->oflags & VPO_WANTED)
					vm_page_activate(mt);
				else
					vm_page_deactivate(mt);
				vm_page_wakeup(mt);
			} else {
				vm_page_free(mt);
			}
		}
		vm_page_unlock_queues();
		vm_page_unlock(mt);
	}
	VM_OBJECT_UNLOCK(object);
	if (error) {
		printf("vnode_pager_getpages: I/O read error\n");
	}
	return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}

/*
 * EOPNOTSUPP is no longer legal.  For local media VFS's that do not
 * implement their own VOP_PUTPAGES, their VOP_PUTPAGES should call
 * vnode_pager_generic_putpages() to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_PUTPAGES.
 */
static void
vnode_pager_putpages(object, m, count, sync, rtvals)
	vm_object_t object;
	vm_page_t *m;
	int count;
	boolean_t sync;
	int *rtvals;
{
	int rtval;
	struct vnode *vp;
	int bytes = count * PAGE_SIZE;

	/*
	 * Force synchronous operation if we are extremely low on memory
	 * to prevent a low-memory deadlock.  VOP operations often need to
	 * allocate more memory to initiate the I/O (i.e., do a BMAP
	 * operation).  The swapper handles the case by limiting the amount
	 * of asynchronous I/O, but that sort of solution doesn't scale well
	 * for the vnode pager without a lot of work.
	 *
	 * Also, the backing vnode's iodone routine may not wake the pageout
	 * daemon up.  This should probably be addressed.  XXX
	 */
	if ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min)
		sync |= OBJPC_SYNC;

	/*
	 * Call device-specific putpages function
	 */
	vp = object->handle;
	VM_OBJECT_UNLOCK(object);
	rtval = VOP_PUTPAGES(vp, m, bytes, sync, rtvals, 0);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: stale FS putpages\n"));
	VM_OBJECT_LOCK(object);
}
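
#if 0
/*
 * Hedged sketch, mirroring the getpages case above: a local media
 * filesystem without its own putpages implementation forwards its
 * VOP_PUTPAGES entry to the generic routine.  "myfs_putpages" is a
 * hypothetical name.
 */
static int
myfs_putpages(struct vop_putpages_args *ap)
{

	return (vnode_pager_generic_putpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_sync, ap->a_rtvals));
}
#endif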

/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_PUTPAGES.
 *
 * This is typically called indirectly via the pageout daemon and
 * clustering has already typically occurred, so in general we ask the
 * underlying filesystem to write the data out asynchronously rather
 * than delayed.
 */
int
vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
	struct vnode *vp;
	vm_page_t *m;
	int bytecount;
	int flags;
	int *rtvals;
{
	int i;
	vm_object_t object;
	int count;

	int maxsize, ncount;
	vm_ooffset_t poffset;
	struct uio auio;
	struct iovec aiov;
	int error;
	int ioflags;
	int ppscheck = 0;
	static struct timeval lastfail;
	static int curfail;

	object = vp->v_object;
	count = bytecount / PAGE_SIZE;

	for (i = 0; i < count; i++)
		rtvals[i] = VM_PAGER_AGAIN;

	if ((int64_t)m[0]->pindex < 0) {
		printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%lx(%lx)\n",
		    (long)m[0]->pindex, (u_long)m[0]->dirty);
		rtvals[0] = VM_PAGER_BAD;
		return VM_PAGER_BAD;
	}

	maxsize = count * PAGE_SIZE;
	ncount = count;

	poffset = IDX_TO_OFF(m[0]->pindex);

	/*
	 * If the page-aligned write is larger than the actual file we
	 * have to invalidate pages occurring beyond the file EOF.  However,
	 * there is an edge case where a file may not be page-aligned where
	 * the last page is partially invalid.  In this case the filesystem
	 * may not properly clear the dirty bits for the entire page (which
	 * could be VM_PAGE_BITS_ALL due to the page having been mmap()d).
	 * With the page locked we are free to fix up the dirty bits here.
	 *
	 * We do not under any circumstances truncate the valid bits, as
	 * this will screw up bogus page replacement.
	 */
	if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
		if (object->un_pager.vnp.vnp_size > poffset) {
			int pgoff;

			maxsize = object->un_pager.vnp.vnp_size - poffset;
			ncount = btoc(maxsize);
			if ((pgoff = (int)maxsize & PAGE_MASK) != 0) {
				vm_page_lock(m[ncount - 1]);
				vm_page_lock_queues();
				vm_page_clear_dirty(m[ncount - 1], pgoff,
				    PAGE_SIZE - pgoff);
				vm_page_unlock_queues();
				vm_page_unlock(m[ncount - 1]);
			}
		} else {
			maxsize = 0;
			ncount = 0;
		}
		if (ncount < count) {
			for (i = ncount; i < count; i++) {
				rtvals[i] = VM_PAGER_BAD;
			}
		}
	}
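
	/*
	 * Worked example of the EOF clipping above (illustrative
	 * numbers): with poffset = 16384, count = 4 (maxsize = 16384),
	 * and vnp_size = 21000, maxsize becomes 21000 - 16384 = 4616,
	 * ncount = btoc(4616) = 2, and pgoff = 4616 & PAGE_MASK = 520,
	 * so the tail of m[1] beyond byte 520 has its dirty bits
	 * cleared and rtvals[2..3] are set to VM_PAGER_BAD.
	 */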

	/*
	 * pageouts are already clustered, use IO_ASYNC to force a bawrite()
	 * rather than a bdwrite() to prevent paging I/O from saturating
	 * the buffer cache.  Dummy-up the sequential heuristic to cause
	 * large ranges to cluster.  If neither IO_SYNC nor IO_ASYNC is set,
	 * the system decides how to cluster.
	 */
	ioflags = IO_VMIO;
	if (flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL))
		ioflags |= IO_SYNC;
	else if ((flags & VM_PAGER_CLUSTER_OK) == 0)
		ioflags |= IO_ASYNC;
	ioflags |= (flags & VM_PAGER_PUT_INVAL) ? IO_INVAL : 0;
	ioflags |= IO_SEQMAX << IO_SEQSHIFT;

	aiov.iov_base = (caddr_t) 0;
	aiov.iov_len = maxsize;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = poffset;
	auio.uio_segflg = UIO_NOCOPY;
	auio.uio_rw = UIO_WRITE;
	auio.uio_resid = maxsize;
	auio.uio_td = (struct thread *) 0;
	error = VOP_WRITE(vp, &auio, ioflags, curthread->td_ucred);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, ncount);

	if (error) {
		if ((ppscheck = ppsratecheck(&lastfail, &curfail, 1)))
			printf("vnode_pager_putpages: I/O error %d\n", error);
	}
	if (auio.uio_resid) {
		if (ppscheck || ppsratecheck(&lastfail, &curfail, 1))
			printf("vnode_pager_putpages: residual I/O %zd at %lu\n",
			    auio.uio_resid, (u_long)m[0]->pindex);
	}
	for (i = 0; i < ncount; i++) {
		rtvals[i] = VM_PAGER_OK;
	}
	return rtvals[0];
}