vnode_pager.c revision 191478
/*-
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1993, 1994 John S. Dyson
 * Copyright (c) 1995, David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vnode_pager.c	7.5 (Berkeley) 4/20/91
 */

/*
 * Page to/from files (vnodes).
 */

/*
 * TODO:
 *	Implement VOP_GETPAGES/PUTPAGES interface for filesystems.  Will
 *	greatly re-simplify the vnode_pager.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vnode_pager.c 191478 2009-04-25 02:59:06Z alc $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vmmeter.h>
#include <sys/limits.h>
#include <sys/conf.h>
#include <sys/sf_buf.h>

#include <machine/atomic.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>

static int vnode_pager_addr(struct vnode *vp, vm_ooffset_t address,
    daddr_t *rtaddress, int *run);
static int vnode_pager_input_smlfs(vm_object_t object, vm_page_t m);
static int vnode_pager_input_old(vm_object_t object, vm_page_t m);
static void vnode_pager_dealloc(vm_object_t);
static int vnode_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void vnode_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t, int *);
static boolean_t vnode_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static vm_object_t vnode_pager_alloc(void *, vm_ooffset_t, vm_prot_t, vm_ooffset_t);

struct pagerops vnodepagerops = {
        .pgo_alloc =    vnode_pager_alloc,
        .pgo_dealloc =  vnode_pager_dealloc,
        .pgo_getpages = vnode_pager_getpages,
        .pgo_putpages = vnode_pager_putpages,
        .pgo_haspage =  vnode_pager_haspage,
};

int vnode_pbuf_freecnt;

/* Create the VM system backing object for this vnode */
int
vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
{
        vm_object_t object;
        vm_ooffset_t size = isize;
        struct vattr va;

        if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
                return (0);

        while ((object = vp->v_object) != NULL) {
                VM_OBJECT_LOCK(object);
                if (!(object->flags & OBJ_DEAD)) {
                        VM_OBJECT_UNLOCK(object);
                        return (0);
                }
                VOP_UNLOCK(vp, 0);
                vm_object_set_flag(object, OBJ_DISCONNECTWNT);
                msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "vodead", 0);
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        }

        if (size == 0) {
                if (vn_isdisk(vp, NULL)) {
                        size = IDX_TO_OFF(INT_MAX);
                } else {
                        if (VOP_GETATTR(vp, &va, td->td_ucred))
                                return (0);
                        size = va.va_size;
                }
        }

        object = vnode_pager_alloc(vp, size, 0, 0);
        /*
         * Dereference the reference we just created.  This assumes
         * that the object is associated with the vp.
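         * vnode_pager_alloc() returned with both an object reference
         * and a vnode reference (via vref()) held; vp->v_object now
         * anchors the object, so that extra pair is dropped here.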
         */
        VM_OBJECT_LOCK(object);
        object->ref_count--;
        VM_OBJECT_UNLOCK(object);
        vrele(vp);

        KASSERT(vp->v_object != NULL, ("vnode_create_vobject: NULL object"));

        return (0);
}

void
vnode_destroy_vobject(struct vnode *vp)
{
        struct vm_object *obj;

        obj = vp->v_object;
        if (obj == NULL)
                return;
        ASSERT_VOP_ELOCKED(vp, "vnode_destroy_vobject");
        VM_OBJECT_LOCK(obj);
        if (obj->ref_count == 0) {
                /*
                 * vclean() may be called twice.  The first time
                 * removes the primary reference to the object,
                 * the second time goes one further and is a
                 * special-case to terminate the object.
                 *
                 * don't double-terminate the object
                 */
                if ((obj->flags & OBJ_DEAD) == 0)
                        vm_object_terminate(obj);
                else
                        VM_OBJECT_UNLOCK(obj);
        } else {
                /*
                 * Woe to the process that tries to page now :-).
                 */
                vm_pager_deallocate(obj);
                VM_OBJECT_UNLOCK(obj);
        }
        vp->v_object = NULL;
}


/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 *
 * MPSAFE
 */
vm_object_t
vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset)
{
        vm_object_t object;
        struct vnode *vp;

        /*
         * Pageout to vnode, no can do yet.
         */
        if (handle == NULL)
                return (NULL);

        vp = (struct vnode *) handle;

        /*
         * If the object is being terminated, wait for it to
         * go away.
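         * A dying object carries OBJ_DEAD; we record our interest with
         * OBJ_DISCONNECTWNT and sleep until vnode_pager_dealloc() wakes
         * us, then re-check vp->v_object from the top.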
         */
retry:
        while ((object = vp->v_object) != NULL) {
                VM_OBJECT_LOCK(object);
                if ((object->flags & OBJ_DEAD) == 0)
                        break;
                vm_object_set_flag(object, OBJ_DISCONNECTWNT);
                msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "vadead", 0);
        }

        if (vp->v_usecount == 0)
                panic("vnode_pager_alloc: no vnode reference");

        if (object == NULL) {
                /*
                 * Add an object of the appropriate size
                 */
                object = vm_object_allocate(OBJT_VNODE, OFF_TO_IDX(round_page(size)));

                object->un_pager.vnp.vnp_size = size;

                object->handle = handle;
                VI_LOCK(vp);
                if (vp->v_object != NULL) {
                        /*
                         * Object has been created while we were sleeping
                         */
                        VI_UNLOCK(vp);
                        vm_object_destroy(object);
                        goto retry;
                }
                vp->v_object = object;
                VI_UNLOCK(vp);
        } else {
                object->ref_count++;
                VM_OBJECT_UNLOCK(object);
        }
        vref(vp);
        return (object);
}

/*
 * The object must be locked.
 */
static void
vnode_pager_dealloc(object)
        vm_object_t object;
{
        struct vnode *vp = object->handle;

        if (vp == NULL)
                panic("vnode_pager_dealloc: pager already dealloced");

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        vm_object_pip_wait(object, "vnpdea");

        object->handle = NULL;
        object->type = OBJT_DEAD;
        if (object->flags & OBJ_DISCONNECTWNT) {
                vm_object_clear_flag(object, OBJ_DISCONNECTWNT);
                wakeup(object);
        }
        ASSERT_VOP_ELOCKED(vp, "vnode_pager_dealloc");
        vp->v_object = NULL;
        vp->v_vflag &= ~VV_TEXT;
}

static boolean_t
vnode_pager_haspage(object, pindex, before, after)
        vm_object_t object;
        vm_pindex_t pindex;
        int *before;
        int *after;
{
        struct vnode *vp = object->handle;
        daddr_t bn;
        int err;
        daddr_t reqblock;
        int poff;
        int bsize;
        int pagesperblock, blocksperpage;
        int vfslocked;

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        /*
         * If no vp or vp is doomed or marked transparent to VM, we do not
         * have the page.
         */
        if (vp == NULL || vp->v_iflag & VI_DOOMED)
                return FALSE;
        /*
         * If the offset is beyond end of file we do
         * not have the page.
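         * (vnp_size is the pager's cached copy of the file size,
         * maintained by vnode_pager_setsize().)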
         */
        if (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size)
                return FALSE;

        bsize = vp->v_mount->mnt_stat.f_iosize;
        pagesperblock = bsize / PAGE_SIZE;
        blocksperpage = 0;
        if (pagesperblock > 0) {
                reqblock = pindex / pagesperblock;
        } else {
                blocksperpage = (PAGE_SIZE / bsize);
                reqblock = pindex * blocksperpage;
        }
        VM_OBJECT_UNLOCK(object);
        vfslocked = VFS_LOCK_GIANT(vp->v_mount);
        err = VOP_BMAP(vp, reqblock, NULL, &bn, after, before);
        VFS_UNLOCK_GIANT(vfslocked);
        VM_OBJECT_LOCK(object);
        if (err)
                return TRUE;
        if (bn == -1)
                return FALSE;
        if (pagesperblock > 0) {
                poff = pindex - (reqblock * pagesperblock);
                if (before) {
                        *before *= pagesperblock;
                        *before += poff;
                }
                if (after) {
                        int numafter;
                        *after *= pagesperblock;
                        numafter = pagesperblock - (poff + 1);
                        if (IDX_TO_OFF(pindex + numafter) >
                            object->un_pager.vnp.vnp_size) {
                                numafter =
                                    OFF_TO_IDX(object->un_pager.vnp.vnp_size) -
                                    pindex;
                        }
                        *after += numafter;
                }
        } else {
                if (before) {
                        *before /= blocksperpage;
                }

                if (after) {
                        *after /= blocksperpage;
                }
        }
        return TRUE;
}

/*
 * Lets the VM system know about a change in size for a file.
 * We adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
void
vnode_pager_setsize(vp, nsize)
        struct vnode *vp;
        vm_ooffset_t nsize;
{
        vm_object_t object;
        vm_page_t m;
        vm_pindex_t nobjsize;

        if ((object = vp->v_object) == NULL)
                return;
/*      ASSERT_VOP_ELOCKED(vp, "vnode_pager_setsize and not locked vnode"); */
        VM_OBJECT_LOCK(object);
        if (nsize == object->un_pager.vnp.vnp_size) {
                /*
                 * Hasn't changed size
                 */
                VM_OBJECT_UNLOCK(object);
                return;
        }
        nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);
        if (nsize < object->un_pager.vnp.vnp_size) {
                /*
                 * File has shrunk.  Toss any cached pages beyond the new EOF.
                 */
                if (nobjsize < object->size)
                        vm_object_page_remove(object, nobjsize, object->size,
                            FALSE);
                /*
                 * this gets rid of garbage at the end of a page that is now
                 * only partially backed by the vnode.
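                 * This happens, for example, when a truncate(2) shrinks
                 * the file to a length that is not page-aligned.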
                 *
                 * XXX for some reason (I don't know yet), if we take a
                 * completely invalid page and mark it partially valid
                 * it can screw up NFS reads, so we don't allow the case.
                 */
                if ((nsize & PAGE_MASK) &&
                    (m = vm_page_lookup(object, OFF_TO_IDX(nsize))) != NULL &&
                    m->valid != 0) {
                        int base = (int)nsize & PAGE_MASK;
                        int size = PAGE_SIZE - base;

                        /*
                         * Clear out partial-page garbage in case
                         * the page has been mapped.
                         */
                        pmap_zero_page_area(m, base, size);

                        /*
                         * Clear out partial-page dirty bits.  This
                         * has the side effect of setting the valid
                         * bits, but that is ok.  There are a bunch
                         * of places in the VM system where we expected
                         * m->dirty == VM_PAGE_BITS_ALL.  The file EOF
                         * case is one of them.  If the page is still
                         * partially dirty, make it fully dirty.
                         *
                         * note that we do not clear out the valid
                         * bits.  This would prevent bogus_page
                         * replacement from working properly.
                         */
                        vm_page_lock_queues();
                        vm_page_set_validclean(m, base, size);
                        if (m->dirty != 0)
                                m->dirty = VM_PAGE_BITS_ALL;
                        vm_page_unlock_queues();
                } else if ((nsize & PAGE_MASK) &&
                    __predict_false(object->cache != NULL)) {
                        vm_page_cache_free(object, OFF_TO_IDX(nsize),
                            nobjsize);
                }
        }
        object->un_pager.vnp.vnp_size = nsize;
        object->size = nobjsize;
        VM_OBJECT_UNLOCK(object);
}

/*
 * calculate the linear (byte) disk address of specified virtual
 * file address
 */
static int
vnode_pager_addr(struct vnode *vp, vm_ooffset_t address, daddr_t *rtaddress,
    int *run)
{
        int bsize;
        int err;
        daddr_t vblock;
        daddr_t voffset;

        if (address < 0)
                return -1;

        if (vp->v_iflag & VI_DOOMED)
                return -1;

        bsize = vp->v_mount->mnt_stat.f_iosize;
        vblock = address / bsize;
        voffset = address % bsize;

        err = VOP_BMAP(vp, vblock, NULL, rtaddress, run, NULL);
        if (err == 0) {
                if (*rtaddress != -1)
                        *rtaddress += voffset / DEV_BSIZE;
                if (run) {
                        *run += 1;
                        *run *= bsize/PAGE_SIZE;
                        *run -= voffset/PAGE_SIZE;
                }
        }

        return (err);
}
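/*
 * Example with illustrative numbers: for a filesystem block size of
 * 8192 and DEV_BSIZE of 512, byte address 12288 gives vblock 1 and
 * voffset 4096, so the result is the block address returned by
 * VOP_BMAP() plus 4096 / 512 = 8 DEV_BSIZE sectors.
 */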
/*
 * small block filesystem vnode pager input
 */
static int
vnode_pager_input_smlfs(object, m)
        vm_object_t object;
        vm_page_t m;
{
        int i;
        struct vnode *vp;
        struct bufobj *bo;
        struct buf *bp;
        struct sf_buf *sf;
        daddr_t fileaddr;
        vm_offset_t bsize;
        int error = 0;

        vp = object->handle;
        if (vp->v_iflag & VI_DOOMED)
                return VM_PAGER_BAD;

        bsize = vp->v_mount->mnt_stat.f_iosize;

        VOP_BMAP(vp, 0, &bo, 0, NULL, NULL);

        sf = sf_buf_alloc(m, 0);

        for (i = 0; i < PAGE_SIZE / bsize; i++) {
                vm_ooffset_t address;

                if (vm_page_bits(i * bsize, bsize) & m->valid)
                        continue;

                address = IDX_TO_OFF(m->pindex) + i * bsize;
                if (address >= object->un_pager.vnp.vnp_size) {
                        fileaddr = -1;
                } else {
                        error = vnode_pager_addr(vp, address, &fileaddr, NULL);
                        if (error)
                                break;
                }
                if (fileaddr != -1) {
                        bp = getpbuf(&vnode_pbuf_freecnt);

                        /* build a minimal buffer header */
                        bp->b_iocmd = BIO_READ;
                        bp->b_iodone = bdone;
                        KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
                        KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
                        bp->b_rcred = crhold(curthread->td_ucred);
                        bp->b_wcred = crhold(curthread->td_ucred);
                        bp->b_data = (caddr_t)sf_buf_kva(sf) + i * bsize;
                        bp->b_blkno = fileaddr;
                        pbgetbo(bo, bp);
                        bp->b_bcount = bsize;
                        bp->b_bufsize = bsize;
                        bp->b_runningbufspace = bp->b_bufsize;
                        atomic_add_long(&runningbufspace, bp->b_runningbufspace);

                        /* do the input */
                        bp->b_iooffset = dbtob(bp->b_blkno);
                        bstrategy(bp);

                        bwait(bp, PVM, "vnsrd");

                        if ((bp->b_ioflags & BIO_ERROR) != 0)
                                error = EIO;

                        /*
                         * free the buffer header back to the swap buffer pool
                         */
                        pbrelbo(bp);
                        relpbuf(bp, &vnode_pbuf_freecnt);
                        if (error)
                                break;

                        VM_OBJECT_LOCK(object);
                        vm_page_lock_queues();
                        vm_page_set_validclean(m, (i * bsize) & PAGE_MASK, bsize);
                        vm_page_unlock_queues();
                        VM_OBJECT_UNLOCK(object);
                } else {
                        VM_OBJECT_LOCK(object);
                        vm_page_lock_queues();
                        vm_page_set_validclean(m, (i * bsize) & PAGE_MASK, bsize);
                        vm_page_unlock_queues();
                        VM_OBJECT_UNLOCK(object);
                        bzero((caddr_t)sf_buf_kva(sf) + i * bsize, bsize);
                }
        }
        sf_buf_free(sf);
        vm_page_lock_queues();
        pmap_clear_modify(m);
        vm_page_unlock_queues();
        if (error) {
                return VM_PAGER_ERROR;
        }
        return VM_PAGER_OK;

}


/*
 * old style vnode pager input routine
 */
static int
vnode_pager_input_old(object, m)
        vm_object_t object;
        vm_page_t m;
{
        struct uio auio;
        struct iovec aiov;
        int error;
        int size;
        struct sf_buf *sf;
        struct vnode *vp;

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        error = 0;

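        /*
         * This path avoids raw buffer I/O entirely: the page is mapped
         * through an sf_buf and filled with an ordinary VOP_READ().
         */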
        /*
         * Return failure if beyond current EOF
         */
        if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
                return VM_PAGER_BAD;
        } else {
                size = PAGE_SIZE;
                if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
                        size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
                vp = object->handle;
                VM_OBJECT_UNLOCK(object);

                /*
                 * Allocate a kernel virtual address and initialize so that
                 * we can use VOP_READ/WRITE routines.
                 */
                sf = sf_buf_alloc(m, 0);

                aiov.iov_base = (caddr_t)sf_buf_kva(sf);
                aiov.iov_len = size;
                auio.uio_iov = &aiov;
                auio.uio_iovcnt = 1;
                auio.uio_offset = IDX_TO_OFF(m->pindex);
                auio.uio_segflg = UIO_SYSSPACE;
                auio.uio_rw = UIO_READ;
                auio.uio_resid = size;
                auio.uio_td = curthread;

                error = VOP_READ(vp, &auio, 0, curthread->td_ucred);
                if (!error) {
                        int count = size - auio.uio_resid;

                        if (count == 0)
                                error = EINVAL;
                        else if (count != PAGE_SIZE)
                                bzero((caddr_t)sf_buf_kva(sf) + count,
                                    PAGE_SIZE - count);
                }
                sf_buf_free(sf);

                VM_OBJECT_LOCK(object);
        }
        vm_page_lock_queues();
        pmap_clear_modify(m);
        vm_page_undirty(m);
        vm_page_unlock_queues();
        if (!error)
                m->valid = VM_PAGE_BITS_ALL;
        return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}

/*
 * generic vnode pager input routine
 */

/*
 * Local media VFS's that do not implement their own VOP_GETPAGES
 * should have their VOP_GETPAGES call vnode_pager_generic_getpages()
 * to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_GETPAGES.
 */
static int
vnode_pager_getpages(object, m, count, reqpage)
        vm_object_t object;
        vm_page_t *m;
        int count;
        int reqpage;
{
        int rtval;
        struct vnode *vp;
        int bytes = count * PAGE_SIZE;
        int vfslocked;

        vp = object->handle;
        VM_OBJECT_UNLOCK(object);
        vfslocked = VFS_LOCK_GIANT(vp->v_mount);
        rtval = VOP_GETPAGES(vp, m, bytes, reqpage, 0);
        KASSERT(rtval != EOPNOTSUPP,
            ("vnode_pager: FS getpages not implemented\n"));
        VFS_UNLOCK_GIANT(vfslocked);
        VM_OBJECT_LOCK(object);
        return rtval;
}

/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_GETPAGES.
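 *
 * Note that the interface is byte-oriented: the bytecount argument is
 * converted to a page count with bytecount / PAGE_SIZE below.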
 */
int
vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
        struct vnode *vp;
        vm_page_t *m;
        int bytecount;
        int reqpage;
{
        vm_object_t object;
        vm_offset_t kva;
        off_t foff, tfoff, nextoff;
        int i, j, size, bsize, first;
        daddr_t firstaddr, reqblock;
        struct bufobj *bo;
        int runpg;
        int runend;
        struct buf *bp;
        int count;
        int error;

        object = vp->v_object;
        count = bytecount / PAGE_SIZE;

        KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
            ("vnode_pager_generic_getpages does not support devices"));
        if (vp->v_iflag & VI_DOOMED)
                return VM_PAGER_BAD;

        bsize = vp->v_mount->mnt_stat.f_iosize;

        /* get the UNDERLYING device for the file with VOP_BMAP() */

        /*
         * originally, we did not check for an error return value -- assuming
         * an fs always has a bmap entry point -- that assumption is wrong!!!
         */
        foff = IDX_TO_OFF(m[reqpage]->pindex);

        /*
         * if we can't bmap, use old VOP code
         */
        error = VOP_BMAP(vp, foff / bsize, &bo, &reqblock, NULL, NULL);
        if (error == EOPNOTSUPP) {
                VM_OBJECT_LOCK(object);
                vm_page_lock_queues();
                for (i = 0; i < count; i++)
                        if (i != reqpage)
                                vm_page_free(m[i]);
                vm_page_unlock_queues();
                PCPU_INC(cnt.v_vnodein);
                PCPU_INC(cnt.v_vnodepgsin);
                error = vnode_pager_input_old(object, m[reqpage]);
                VM_OBJECT_UNLOCK(object);
                return (error);
        } else if (error != 0) {
                VM_OBJECT_LOCK(object);
                vm_page_lock_queues();
                for (i = 0; i < count; i++)
                        if (i != reqpage)
                                vm_page_free(m[i]);
                vm_page_unlock_queues();
                VM_OBJECT_UNLOCK(object);
                return (VM_PAGER_ERROR);

        /*
         * if the blocksize is smaller than a page size, then use
         * special small filesystem code.  NFS sometimes has a small
         * blocksize, but it can handle large reads itself.
         */
        } else if ((PAGE_SIZE / bsize) > 1 &&
            (vp->v_mount->mnt_stat.f_type != nfs_mount_type)) {
                VM_OBJECT_LOCK(object);
                vm_page_lock_queues();
                for (i = 0; i < count; i++)
                        if (i != reqpage)
                                vm_page_free(m[i]);
                vm_page_unlock_queues();
                VM_OBJECT_UNLOCK(object);
                PCPU_INC(cnt.v_vnodein);
                PCPU_INC(cnt.v_vnodepgsin);
                return vnode_pager_input_smlfs(object, m[reqpage]);
        }

        /*
         * If we have a completely valid page available to us, we can
         * clean up and return.  Otherwise we have to re-read the
         * media.
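         * A hole reported by VOP_BMAP (reqblock == -1) is just as good:
         * the requested page is simply zero-filled below.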
         */
        VM_OBJECT_LOCK(object);
        if (m[reqpage]->valid == VM_PAGE_BITS_ALL) {
                vm_page_lock_queues();
                for (i = 0; i < count; i++)
                        if (i != reqpage)
                                vm_page_free(m[i]);
                vm_page_unlock_queues();
                VM_OBJECT_UNLOCK(object);
                return VM_PAGER_OK;
        } else if (reqblock == -1) {
                pmap_zero_page(m[reqpage]);
                vm_page_undirty(m[reqpage]);
                m[reqpage]->valid = VM_PAGE_BITS_ALL;
                vm_page_lock_queues();
                for (i = 0; i < count; i++)
                        if (i != reqpage)
                                vm_page_free(m[i]);
                vm_page_unlock_queues();
                VM_OBJECT_UNLOCK(object);
                return (VM_PAGER_OK);
        }
        m[reqpage]->valid = 0;
        VM_OBJECT_UNLOCK(object);

        /*
         * here on direct device I/O
         */
        firstaddr = -1;

        /*
         * calculate the run that includes the required page
         */
        for (first = 0, i = 0; i < count; i = runend) {
                if (vnode_pager_addr(vp, IDX_TO_OFF(m[i]->pindex), &firstaddr,
                    &runpg) != 0) {
                        VM_OBJECT_LOCK(object);
                        vm_page_lock_queues();
                        for (; i < count; i++)
                                if (i != reqpage)
                                        vm_page_free(m[i]);
                        vm_page_unlock_queues();
                        VM_OBJECT_UNLOCK(object);
                        return (VM_PAGER_ERROR);
                }
                if (firstaddr == -1) {
                        VM_OBJECT_LOCK(object);
                        if (i == reqpage && foff < object->un_pager.vnp.vnp_size) {
                                panic("vnode_pager_getpages: unexpected missing page: firstaddr: %jd, foff: 0x%jx%08jx, vnp_size: 0x%jx%08jx",
                                    (intmax_t)firstaddr, (uintmax_t)(foff >> 32),
                                    (uintmax_t)foff,
                                    (uintmax_t)
                                    (object->un_pager.vnp.vnp_size >> 32),
                                    (uintmax_t)object->un_pager.vnp.vnp_size);
                        }
                        vm_page_lock_queues();
                        vm_page_free(m[i]);
                        vm_page_unlock_queues();
                        VM_OBJECT_UNLOCK(object);
                        runend = i + 1;
                        first = runend;
                        continue;
                }
                runend = i + runpg;
                if (runend <= reqpage) {
                        VM_OBJECT_LOCK(object);
                        vm_page_lock_queues();
                        for (j = i; j < runend; j++)
                                vm_page_free(m[j]);
                        vm_page_unlock_queues();
                        VM_OBJECT_UNLOCK(object);
                } else {
                        if (runpg < (count - first)) {
                                VM_OBJECT_LOCK(object);
                                vm_page_lock_queues();
                                for (i = first + runpg; i < count; i++)
                                        vm_page_free(m[i]);
                                vm_page_unlock_queues();
                                VM_OBJECT_UNLOCK(object);
                                count = first + runpg;
                        }
                        break;
                }
                first = runend;
        }

        /*
         * the first and last page have been calculated now, move input pages
         * to be zero based...
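         * Pages in front of the run were already freed above, so after
         * the adjustment m[0] is the first page that will be read.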
         */
        if (first != 0) {
                m += first;
                count -= first;
                reqpage -= first;
        }

        /*
         * calculate the file virtual address for the transfer
         */
        foff = IDX_TO_OFF(m[0]->pindex);

        /*
         * calculate the size of the transfer
         */
        size = count * PAGE_SIZE;
        KASSERT(count > 0, ("zero count"));
        if ((foff + size) > object->un_pager.vnp.vnp_size)
                size = object->un_pager.vnp.vnp_size - foff;
        KASSERT(size > 0, ("zero size"));

        /*
         * round up physical size for real devices.
         */
        if (1) {
                int secmask = bo->bo_bsize - 1;
                KASSERT(secmask < PAGE_SIZE && secmask > 0,
                    ("vnode_pager_generic_getpages: sector size %d too large",
                    secmask + 1));
                size = (size + secmask) & ~secmask;
        }

        bp = getpbuf(&vnode_pbuf_freecnt);
        kva = (vm_offset_t) bp->b_data;

        /*
         * and map the pages to be read into the kva
         */
        pmap_qenter(kva, m, count);

        /* build a minimal buffer header */
        bp->b_iocmd = BIO_READ;
        bp->b_iodone = bdone;
        KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
        KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
        bp->b_rcred = crhold(curthread->td_ucred);
        bp->b_wcred = crhold(curthread->td_ucred);
        bp->b_blkno = firstaddr;
        pbgetbo(bo, bp);
        bp->b_bcount = size;
        bp->b_bufsize = size;
        bp->b_runningbufspace = bp->b_bufsize;
        atomic_add_long(&runningbufspace, bp->b_runningbufspace);

        PCPU_INC(cnt.v_vnodein);
        PCPU_ADD(cnt.v_vnodepgsin, count);

        /* do the input */
        bp->b_iooffset = dbtob(bp->b_blkno);
        bstrategy(bp);

        bwait(bp, PVM, "vnread");

        if ((bp->b_ioflags & BIO_ERROR) != 0)
                error = EIO;

        if (!error) {
                if (size != count * PAGE_SIZE)
                        bzero((caddr_t) kva + size, PAGE_SIZE * count - size);
        }
        pmap_qremove(kva, count);

        /*
         * free the buffer header back to the swap buffer pool
         */
        pbrelbo(bp);
        relpbuf(bp, &vnode_pbuf_freecnt);

        VM_OBJECT_LOCK(object);
        vm_page_lock_queues();
        for (i = 0, tfoff = foff; i < count; i++, tfoff = nextoff) {
                vm_page_t mt;

                nextoff = tfoff + PAGE_SIZE;
                mt = m[i];

                if (nextoff <= object->un_pager.vnp.vnp_size) {
                        /*
                         * Read filled up entire page.
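                         * The page must be unmapped and clean here; the
                         * assertions below verify both before the page is
                         * marked fully valid.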
                         */
                        mt->valid = VM_PAGE_BITS_ALL;
                        KASSERT(mt->dirty == 0,
                            ("vnode_pager_generic_getpages: page %p is dirty",
                            mt));
                        KASSERT(!pmap_page_is_mapped(mt),
                            ("vnode_pager_generic_getpages: page %p is mapped",
                            mt));
                } else {
                        /*
                         * Read did not fill up entire page.  Since this
                         * is getpages, the page may be mapped, so we have
                         * to zero the invalid portions of the page even
                         * though we aren't setting them valid.
                         *
                         * Currently we do not set the entire page valid,
                         * we just try to clear the piece that we couldn't
                         * read.
                         */
                        vm_page_set_validclean(mt, 0,
                            object->un_pager.vnp.vnp_size - tfoff);
                        /* handled by vm_fault now */
                        /* vm_page_zero_invalid(mt, FALSE); */
                }

                if (i != reqpage) {

                        /*
                         * whether or not to leave the page activated is up in
                         * the air, but we should put the page on a page queue
                         * somewhere. (it already is in the object). Result:
                         * It appears that empirical results show that
                         * deactivating pages is best.
                         */

                        /*
                         * just in case someone was asking for this page we
                         * now tell them that it is ok to use
                         */
                        if (!error) {
                                if (mt->oflags & VPO_WANTED)
                                        vm_page_activate(mt);
                                else
                                        vm_page_deactivate(mt);
                                vm_page_wakeup(mt);
                        } else {
                                vm_page_free(mt);
                        }
                }
        }
        vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(object);
        if (error) {
                printf("vnode_pager_getpages: I/O read error\n");
        }
        return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}

/*
 * EOPNOTSUPP is no longer legal.  For local media VFS's that do not
 * implement their own VOP_PUTPAGES, their VOP_PUTPAGES should call
 * vnode_pager_generic_putpages() to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_PUTPAGES.
 */
static void
vnode_pager_putpages(object, m, count, sync, rtvals)
        vm_object_t object;
        vm_page_t *m;
        int count;
        boolean_t sync;
        int *rtvals;
{
        int rtval;
        struct vnode *vp;
        struct mount *mp;
        int bytes = count * PAGE_SIZE;

        /*
         * Force synchronous operation if we are extremely low on memory
         * to prevent a low-memory deadlock.  VOP operations often need to
         * allocate more memory to initiate the I/O ( i.e. do a BMAP
         * operation ).  The swapper handles the case by limiting the amount
         * of asynchronous I/O, but that sort of solution doesn't scale well
         * for the vnode pager without a lot of work.
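         *
         * (The check below compares free + cached pages against
         * v_pageout_free_min before forcing OBJPC_SYNC.)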
         *
         * Also, the backing vnode's iodone routine may not wake the pageout
         * daemon up.  This should probably be addressed XXX.
         */

        if ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min)
                sync |= OBJPC_SYNC;

        /*
         * Call device-specific putpages function
         */
        vp = object->handle;
        VM_OBJECT_UNLOCK(object);
        if (vp->v_type != VREG)
                mp = NULL;
        rtval = VOP_PUTPAGES(vp, m, bytes, sync, rtvals, 0);
        KASSERT(rtval != EOPNOTSUPP,
            ("vnode_pager: stale FS putpages\n"));
        VM_OBJECT_LOCK(object);
}


/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_PUTPAGES.
 *
 * This is typically called indirectly via the pageout daemon and
 * clustering has already typically occurred, so in general we ask the
 * underlying filesystem to write the data out asynchronously rather
 * than delayed.
 */
int
vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
        struct vnode *vp;
        vm_page_t *m;
        int bytecount;
        int flags;
        int *rtvals;
{
        int i;
        vm_object_t object;
        int count;

        int maxsize, ncount;
        vm_ooffset_t poffset;
        struct uio auio;
        struct iovec aiov;
        int error;
        int ioflags;
        int ppscheck = 0;
        static struct timeval lastfail;
        static int curfail;

        object = vp->v_object;
        count = bytecount / PAGE_SIZE;

        for (i = 0; i < count; i++)
                rtvals[i] = VM_PAGER_AGAIN;

        if ((int64_t)m[0]->pindex < 0) {
                printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%lx(%lx)\n",
                    (long)m[0]->pindex, (u_long)m[0]->dirty);
                rtvals[0] = VM_PAGER_BAD;
                return VM_PAGER_BAD;
        }

        maxsize = count * PAGE_SIZE;
        ncount = count;

        poffset = IDX_TO_OFF(m[0]->pindex);

        /*
         * If the page-aligned write is larger than the actual file we
         * have to invalidate pages occurring beyond the file EOF.  However,
         * there is an edge case where a file may not be page-aligned where
         * the last page is partially invalid.  In this case the filesystem
         * may not properly clear the dirty bits for the entire page (which
         * could be VM_PAGE_BITS_ALL due to the page having been mmap()d).
         * With the page locked we are free to fix-up the dirty bits here.
         *
         * We do not under any circumstances truncate the valid bits, as
         * this will screw up bogus page replacement.
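         *
         * Illustrative example: with 4K pages, vnp_size 10000 and
         * poffset 8192, maxsize is clamped to 1808, ncount to 1, and
         * the dirty bits for bytes 1808-4095 of the last page are
         * cleared below.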
         */
        if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
                if (object->un_pager.vnp.vnp_size > poffset) {
                        int pgoff;

                        maxsize = object->un_pager.vnp.vnp_size - poffset;
                        ncount = btoc(maxsize);
                        if ((pgoff = (int)maxsize & PAGE_MASK) != 0) {
                                vm_page_lock_queues();
                                vm_page_clear_dirty(m[ncount - 1], pgoff,
                                    PAGE_SIZE - pgoff);
                                vm_page_unlock_queues();
                        }
                } else {
                        maxsize = 0;
                        ncount = 0;
                }
                if (ncount < count) {
                        for (i = ncount; i < count; i++) {
                                rtvals[i] = VM_PAGER_BAD;
                        }
                }
        }

        /*
         * pageouts are already clustered, use IO_ASYNC to force a bawrite()
         * rather than a bdwrite() to prevent paging I/O from saturating
         * the buffer cache.  Dummy-up the sequential heuristic to cause
         * large ranges to cluster.  If neither IO_SYNC nor IO_ASYNC is set,
         * the system decides how to cluster.
         */
        ioflags = IO_VMIO;
        if (flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL))
                ioflags |= IO_SYNC;
        else if ((flags & VM_PAGER_CLUSTER_OK) == 0)
                ioflags |= IO_ASYNC;
        ioflags |= (flags & VM_PAGER_PUT_INVAL) ? IO_INVAL: 0;
        ioflags |= IO_SEQMAX << IO_SEQSHIFT;

        aiov.iov_base = (caddr_t) 0;
        aiov.iov_len = maxsize;
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        auio.uio_offset = poffset;
        auio.uio_segflg = UIO_NOCOPY;
        auio.uio_rw = UIO_WRITE;
        auio.uio_resid = maxsize;
        auio.uio_td = (struct thread *) 0;
        error = VOP_WRITE(vp, &auio, ioflags, curthread->td_ucred);
        PCPU_INC(cnt.v_vnodeout);
        PCPU_ADD(cnt.v_vnodepgsout, ncount);

        if (error) {
                if ((ppscheck = ppsratecheck(&lastfail, &curfail, 1)))
                        printf("vnode_pager_putpages: I/O error %d\n", error);
        }
        if (auio.uio_resid) {
                if (ppscheck || ppsratecheck(&lastfail, &curfail, 1))
                        printf("vnode_pager_putpages: residual I/O %d at %lu\n",
                            auio.uio_resid, (u_long)m[0]->pindex);
        }
        for (i = 0; i < ncount; i++) {
                rtvals[i] = VM_PAGER_OK;
        }
        return rtvals[0];
}