vnode_pager.c revision 86092

/*
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1993, 1994 John S. Dyson
 * Copyright (c) 1995, David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vnode_pager.c	7.5 (Berkeley) 4/20/91
 * $FreeBSD: head/sys/vm/vnode_pager.c 86092 2001-11-05 18:58:47Z dillon $
 */

/*
 * Page to/from files (vnodes).
 */

/*
 * TODO:
 *	Implement VOP_GETPAGES/PUTPAGES interface for filesystems.  Will
 *	greatly re-simplify the vnode_pager.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vmmeter.h>
#include <sys/conf.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>

static void vnode_pager_init __P((void));
static vm_offset_t vnode_pager_addr __P((struct vnode *vp, vm_ooffset_t address,
					 int *run));
static void vnode_pager_iodone __P((struct buf *bp));
static int vnode_pager_input_smlfs __P((vm_object_t object, vm_page_t m));
static int vnode_pager_input_old __P((vm_object_t object, vm_page_t m));
static void vnode_pager_dealloc __P((vm_object_t));
static int vnode_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
static void vnode_pager_putpages __P((vm_object_t, vm_page_t *, int, boolean_t, int *));
static boolean_t vnode_pager_haspage __P((vm_object_t, vm_pindex_t, int *, int *));

struct pagerops vnodepagerops = {
	vnode_pager_init,
	vnode_pager_alloc,
	vnode_pager_dealloc,
	vnode_pager_getpages,
	vnode_pager_putpages,
	vnode_pager_haspage,
	NULL
};

int vnode_pbuf_freecnt;

void
vnode_pager_init(void)
{

	vnode_pbuf_freecnt = nswbuf / 2 + 1;
}

/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 */
vm_object_t
vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
		  vm_ooffset_t offset)
{
	vm_object_t object;
	struct vnode *vp;

	GIANT_REQUIRED;

	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return (NULL);

	vp = (struct vnode *) handle;

	/*
	 * Prevent race condition when allocating the object. This
	 * can happen with NFS vnodes since the nfsnode isn't locked.
	 */
	while (vp->v_flag & VOLOCK) {
		vp->v_flag |= VOWANT;
		tsleep(vp, PVM, "vnpobj", 0);
	}
	vp->v_flag |= VOLOCK;

	/*
	 * If the object is being terminated, wait for it to
	 * go away.
	 */
	while (((object = vp->v_object) != NULL) &&
		(object->flags & OBJ_DEAD)) {
		tsleep(object, PVM, "vadead", 0);
	}

	if (vp->v_usecount == 0)
		panic("vnode_pager_alloc: no vnode reference");

	if (object == NULL) {
		/*
		 * Add an object of the appropriate size
		 */
		object = vm_object_allocate(OBJT_VNODE, OFF_TO_IDX(round_page(size)));
		object->flags = 0;

		object->un_pager.vnp.vnp_size = size;

		object->handle = handle;
		vp->v_object = object;
		vp->v_usecount++;
	} else {
		object->ref_count++;
		vp->v_usecount++;
	}

	vp->v_flag &= ~VOLOCK;
	if (vp->v_flag & VOWANT) {
		vp->v_flag &= ~VOWANT;
		wakeup(vp);
	}
	return (object);
}
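
/*
 * Editor's note (illustrative, not part of the original source): the
 * object size passed to vm_object_allocate() above is in pages, while
 * vnp_size stays in bytes.  For example, with 4K pages a 10000-byte
 * file yields OFF_TO_IDX(round_page(10000)) = OFF_TO_IDX(12288) = 3,
 * i.e. a three-page VM object whose vnp_size remains exactly 10000.
 */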

static void
vnode_pager_dealloc(object)
	vm_object_t object;
{
	struct vnode *vp = object->handle;

	GIANT_REQUIRED;
	if (vp == NULL)
		panic("vnode_pager_dealloc: pager already dealloced");

	vm_object_pip_wait(object, "vnpdea");

	object->handle = NULL;
	object->type = OBJT_DEAD;
	vp->v_object = NULL;
	vp->v_flag &= ~(VTEXT | VOBJBUF);
}

static boolean_t
vnode_pager_haspage(object, pindex, before, after)
	vm_object_t object;
	vm_pindex_t pindex;
	int *before;
	int *after;
{
	struct vnode *vp = object->handle;
	daddr_t bn;
	int err;
	daddr_t reqblock;
	int poff;
	int bsize;
	int pagesperblock, blocksperpage;

	GIANT_REQUIRED;
	/*
	 * If no vp or vp is doomed or marked transparent to VM, we do not
	 * have the page.
	 */
	if ((vp == NULL) || (vp->v_flag & VDOOMED))
		return FALSE;

	/*
	 * If filesystem no longer mounted or offset beyond end of file we do
	 * not have the page.
	 */
	if ((vp->v_mount == NULL) ||
	    (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size))
		return FALSE;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	pagesperblock = bsize / PAGE_SIZE;
	blocksperpage = 0;
	if (pagesperblock > 0) {
		reqblock = pindex / pagesperblock;
	} else {
		blocksperpage = (PAGE_SIZE / bsize);
		reqblock = pindex * blocksperpage;
	}
	err = VOP_BMAP(vp, reqblock, (struct vnode **) 0, &bn,
	    after, before);
	if (err)
		return TRUE;
	if (bn == -1)
		return FALSE;
	if (pagesperblock > 0) {
		poff = pindex - (reqblock * pagesperblock);
		if (before) {
			*before *= pagesperblock;
			*before += poff;
		}
		if (after) {
			int numafter;
			*after *= pagesperblock;
			numafter = pagesperblock - (poff + 1);
			if (IDX_TO_OFF(pindex + numafter) > object->un_pager.vnp.vnp_size) {
				numafter = OFF_TO_IDX((object->un_pager.vnp.vnp_size - IDX_TO_OFF(pindex)));
			}
			*after += numafter;
		}
	} else {
		if (before) {
			*before /= blocksperpage;
		}

		if (after) {
			*after /= blocksperpage;
		}
	}
	return TRUE;
}
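
/*
 * Editor's note (illustrative, not part of the original source): a
 * worked example of the block/page scaling above, assuming 4K pages
 * and an 8K-block filesystem, so pagesperblock = 2.  A query for
 * pindex 5 maps to reqblock = 5 / 2 = 2 with poff = 1.  If VOP_BMAP()
 * reports one contiguous block before and one after, then
 * *before = 1 * 2 + 1 = 3 pages, and *after = 1 * 2 + (2 - (1 + 1)) =
 * 2 pages, further clipped so the run never extends past EOF.
 */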

/*
 * Lets the VM system know about a change in size for a file.
 * We adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
void
vnode_pager_setsize(vp, nsize)
	struct vnode *vp;
	vm_ooffset_t nsize;
{
	vm_pindex_t nobjsize;
	vm_object_t object = vp->v_object;

	GIANT_REQUIRED;

	if (object == NULL)
		return;

	/*
	 * Hasn't changed size
	 */
	if (nsize == object->un_pager.vnp.vnp_size)
		return;

	nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);

	/*
	 * File has shrunk. Toss any cached pages beyond the new EOF.
	 */
	if (nsize < object->un_pager.vnp.vnp_size) {
		vm_freeze_copyopts(object, OFF_TO_IDX(nsize), object->size);
		if (nobjsize < object->size) {
			vm_object_page_remove(object, nobjsize, object->size,
			    FALSE);
		}
		/*
		 * this gets rid of garbage at the end of a page that is now
		 * only partially backed by the vnode...
		 */
		if (nsize & PAGE_MASK) {
			vm_offset_t kva;
			vm_page_t m;

			m = vm_page_lookup(object, OFF_TO_IDX(nsize));
			if (m) {
				int base = (int)nsize & PAGE_MASK;
				int size = PAGE_SIZE - base;

				/*
				 * Clear out partial-page garbage in case
				 * the page has been mapped.
				 */
				kva = vm_pager_map_page(m);
				bzero((caddr_t)kva + base, size);
				vm_pager_unmap_page(kva);

				/*
				 * Clear out partial-page dirty bits.  This
				 * has the side effect of setting the valid
				 * bits, but that is ok.  There are a bunch
				 * of places in the VM system where we expected
				 * m->dirty == VM_PAGE_BITS_ALL.  The file EOF
				 * case is one of them.  If the page is still
				 * partially dirty, make it fully dirty.
				 */
				vm_page_set_validclean(m, base, size);
				if (m->dirty != 0)
					m->dirty = VM_PAGE_BITS_ALL;
			}
		}
	}
	object->un_pager.vnp.vnp_size = nsize;
	object->size = nobjsize;
}
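
/*
 * Editor's note (illustrative, not part of the original source): if a
 * file shrinks from 12288 bytes to nsize = 10000 with 4K pages, then
 * nobjsize = OFF_TO_IDX(10000 + 4095) = 3, base = 10000 & 4095 = 1808,
 * and bytes 1808..4095 of the final page are zeroed and marked
 * valid/clean above, so stale data cannot leak past the new EOF.
 */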

/*
 * calculate the linear (byte) disk address of specified virtual
 * file address
 */
static vm_offset_t
vnode_pager_addr(vp, address, run)
	struct vnode *vp;
	vm_ooffset_t address;
	int *run;
{
	int rtaddress;
	int bsize;
	daddr_t block;
	struct vnode *rtvp;
	int err;
	daddr_t vblock;
	int voffset;

	GIANT_REQUIRED;
	if ((int) address < 0)
		return -1;

	if (vp->v_mount == NULL)
		return -1;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	vblock = address / bsize;
	voffset = address % bsize;

	err = VOP_BMAP(vp, vblock, &rtvp, &block, run, NULL);

	if (err || (block == -1))
		rtaddress = -1;
	else {
		rtaddress = block + voffset / DEV_BSIZE;
		if (run) {
			*run += 1;
			*run *= bsize / PAGE_SIZE;
			*run -= voffset / PAGE_SIZE;
		}
	}

	return rtaddress;
}

/*
 * interrupt routine for I/O completion
 */
static void
vnode_pager_iodone(bp)
	struct buf *bp;
{
	bp->b_flags |= B_DONE;
	wakeup(bp);
}
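
/*
 * Editor's note (illustrative, not part of the original source): a
 * worked example of vnode_pager_addr() above, assuming 4K pages, an
 * 8K-block filesystem and DEV_BSIZE = 512.  For byte offset 20000:
 * vblock = 20000 / 8192 = 2 and voffset = 20000 % 8192 = 3616; if
 * VOP_BMAP() maps logical block 2 to device block 1000, then
 * rtaddress = 1000 + 3616 / 512 = 1007, in DEV_BSIZE units as
 * expected by b_blkno.
 */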

/*
 * small block file system vnode pager input
 */
static int
vnode_pager_input_smlfs(object, m)
	vm_object_t object;
	vm_page_t m;
{
	int i;
	int s;
	struct vnode *dp, *vp;
	struct buf *bp;
	vm_offset_t kva;
	int fileaddr;
	vm_offset_t bsize;
	int error = 0;

	GIANT_REQUIRED;

	vp = object->handle;
	if (vp->v_mount == NULL)
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	VOP_BMAP(vp, 0, &dp, 0, NULL, NULL);

	kva = vm_pager_map_page(m);

	for (i = 0; i < PAGE_SIZE / bsize; i++) {
		vm_ooffset_t address;

		if (vm_page_bits(i * bsize, bsize) & m->valid)
			continue;

		address = IDX_TO_OFF(m->pindex) + i * bsize;
		if (address >= object->un_pager.vnp.vnp_size) {
			fileaddr = -1;
		} else {
			fileaddr = vnode_pager_addr(vp, address, NULL);
		}
		if (fileaddr != -1) {
			bp = getpbuf(&vnode_pbuf_freecnt);

			/* build a minimal buffer header */
			bp->b_iocmd = BIO_READ;
			bp->b_iodone = vnode_pager_iodone;
			KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
			KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
			bp->b_rcred = crhold(curthread->td_proc->p_ucred);
			bp->b_wcred = crhold(curthread->td_proc->p_ucred);
			bp->b_data = (caddr_t) kva + i * bsize;
			bp->b_blkno = fileaddr;
			pbgetvp(dp, bp);
			bp->b_bcount = bsize;
			bp->b_bufsize = bsize;
			bp->b_runningbufspace = bp->b_bufsize;
			runningbufspace += bp->b_runningbufspace;

			/* do the input */
			BUF_STRATEGY(bp);

			/* we definitely need to be at splvm here */

			s = splvm();
			while ((bp->b_flags & B_DONE) == 0) {
				tsleep(bp, PVM, "vnsrd", 0);
			}
			splx(s);
			if ((bp->b_ioflags & BIO_ERROR) != 0)
				error = EIO;

			/*
			 * free the buffer header back to the swap buffer pool
			 */
			relpbuf(bp, &vnode_pbuf_freecnt);
			if (error)
				break;

			vm_page_set_validclean(m, (i * bsize) & PAGE_MASK, bsize);
		} else {
			vm_page_set_validclean(m, (i * bsize) & PAGE_MASK, bsize);
			bzero((caddr_t) kva + i * bsize, bsize);
		}
	}
	vm_pager_unmap_page(kva);
	pmap_clear_modify(m);
	vm_page_flag_clear(m, PG_ZERO);
	if (error) {
		return VM_PAGER_ERROR;
	}
	return VM_PAGER_OK;
}
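
/*
 * Editor's note (illustrative, not part of the original source): with
 * 4K pages and a 1K-block filesystem, the loop above walks i = 0..3
 * and issues a separate synchronous read for each 1K piece of the
 * page that is not already valid; pieces that lie in a file hole
 * (fileaddr == -1) are simply zeroed and marked valid/clean instead.
 */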

/*
 * old style vnode pager input routine
 */
static int
vnode_pager_input_old(object, m)
	vm_object_t object;
	vm_page_t m;
{
	struct uio auio;
	struct iovec aiov;
	int error;
	int size;
	vm_offset_t kva;
	struct vnode *vp;

	GIANT_REQUIRED;
	error = 0;

	/*
	 * Return failure if beyond current EOF
	 */
	if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
		return VM_PAGER_BAD;
	} else {
		size = PAGE_SIZE;
		if (IDX_TO_OFF(m->pindex) + size >
		    object->un_pager.vnp.vnp_size)
			size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);

		/*
		 * Allocate a kernel virtual address and initialize so that
		 * we can use VOP_READ/WRITE routines.
		 */
		kva = vm_pager_map_page(m);

		vp = object->handle;
		aiov.iov_base = (caddr_t) kva;
		aiov.iov_len = size;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = IDX_TO_OFF(m->pindex);
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_READ;
		auio.uio_resid = size;
		auio.uio_td = curthread;

		error = VOP_READ(vp, &auio, 0, curthread->td_proc->p_ucred);
		if (!error) {
			int count = size - auio.uio_resid;

			if (count == 0)
				error = EINVAL;
			else if (count != PAGE_SIZE)
				bzero((caddr_t) kva + count, PAGE_SIZE - count);
		}
		vm_pager_unmap_page(kva);
	}
	pmap_clear_modify(m);
	vm_page_undirty(m);
	vm_page_flag_clear(m, PG_ZERO);
	if (!error)
		m->valid = VM_PAGE_BITS_ALL;
	return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}

/*
 * generic vnode pager input routine
 */

/*
 * Local media VFS's that do not implement their own VOP_GETPAGES
 * should have their VOP_GETPAGES call vnode_pager_generic_getpages()
 * to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_GETPAGES.
 */
static int
vnode_pager_getpages(object, m, count, reqpage)
	vm_object_t object;
	vm_page_t *m;
	int count;
	int reqpage;
{
	int rtval;
	struct vnode *vp;
	int bytes = count * PAGE_SIZE;

	GIANT_REQUIRED;
	vp = object->handle;
	rtval = VOP_GETPAGES(vp, m, bytes, reqpage, 0);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: FS getpages not implemented\n"));
	return rtval;
}
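
/*
 * Editor's sketch (hypothetical, not part of the original source): a
 * local-media filesystem with no special getpages logic of its own
 * would typically forward its VOP_GETPAGES to the generic code below,
 * along these lines ("xxxfs" is a made-up name):
 *
 *	static int
 *	xxxfs_getpages(struct vop_getpages_args *ap)
 *	{
 *		return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
 *		    ap->a_count, ap->a_reqpage));
 *	}
 */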

/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_GETPAGES.
 */
int
vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
	struct vnode *vp;
	vm_page_t *m;
	int bytecount;
	int reqpage;
{
	vm_object_t object;
	vm_offset_t kva;
	off_t foff, tfoff, nextoff;
	int i, size, bsize, first, firstaddr;
	struct vnode *dp;
	int runpg;
	int runend;
	struct buf *bp;
	int s;
	int count;
	int error = 0;

	GIANT_REQUIRED;
	object = vp->v_object;
	count = bytecount / PAGE_SIZE;

	if (vp->v_mount == NULL)
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	/* get the UNDERLYING device for the file with VOP_BMAP() */

	/*
	 * originally, we did not check for an error return value -- assuming
	 * an fs always has a bmap entry point -- that assumption is wrong!!!
	 */
	foff = IDX_TO_OFF(m[reqpage]->pindex);

	/*
	 * if we can't bmap, use old VOP code
	 */
	if (VOP_BMAP(vp, 0, &dp, 0, NULL, NULL)) {
		for (i = 0; i < count; i++) {
			if (i != reqpage) {
				vm_page_free(m[i]);
			}
		}
		cnt.v_vnodein++;
		cnt.v_vnodepgsin++;
		return vnode_pager_input_old(object, m[reqpage]);

		/*
		 * if the blocksize is smaller than a page size, then use
		 * special small filesystem code.  NFS sometimes has a small
		 * blocksize, but it can handle large reads itself.
		 */
	} else if ((PAGE_SIZE / bsize) > 1 &&
	    (vp->v_mount->mnt_stat.f_type != nfs_mount_type)) {
		for (i = 0; i < count; i++) {
			if (i != reqpage) {
				vm_page_free(m[i]);
			}
		}
		cnt.v_vnodein++;
		cnt.v_vnodepgsin++;
		return vnode_pager_input_smlfs(object, m[reqpage]);
	}

	/*
	 * If we have a completely valid page available to us, we can
	 * clean up and return.  Otherwise we have to re-read the
	 * media.
	 */

	if (m[reqpage]->valid == VM_PAGE_BITS_ALL) {
		for (i = 0; i < count; i++) {
			if (i != reqpage)
				vm_page_free(m[i]);
		}
		return VM_PAGER_OK;
	}
	m[reqpage]->valid = 0;

	/*
	 * here on direct device I/O
	 */

	firstaddr = -1;
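
	/*
	 * Editor's note (illustrative, not part of the original source):
	 * the loop below asks VOP_BMAP (via vnode_pager_addr) how many
	 * pages are physically contiguous starting at each candidate
	 * page.  Pages that fall outside the run containing reqpage are
	 * freed, so the surviving m[0..count-1] describes one contiguous
	 * disk extent that can be read with a single strategy call.
	 */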

	/*
	 * calculate the run that includes the required page
	 */
	for (first = 0, i = 0; i < count; i = runend) {
		firstaddr = vnode_pager_addr(vp,
		    IDX_TO_OFF(m[i]->pindex), &runpg);
		if (firstaddr == -1) {
			if (i == reqpage && foff < object->un_pager.vnp.vnp_size) {
				/* XXX no %qd in kernel. */
				panic("vnode_pager_getpages: unexpected missing page: firstaddr: %d, foff: 0x%lx%08lx, vnp_size: 0x%lx%08lx",
				    firstaddr, (u_long)(foff >> 32),
				    (u_long)(u_int32_t)foff,
				    (u_long)(u_int32_t)
				    (object->un_pager.vnp.vnp_size >> 32),
				    (u_long)(u_int32_t)
				    object->un_pager.vnp.vnp_size);
			}
			vm_page_free(m[i]);
			runend = i + 1;
			first = runend;
			continue;
		}
		runend = i + runpg;
		if (runend <= reqpage) {
			int j;
			for (j = i; j < runend; j++) {
				vm_page_free(m[j]);
			}
		} else {
			if (runpg < (count - first)) {
				for (i = first + runpg; i < count; i++)
					vm_page_free(m[i]);
				count = first + runpg;
			}
			break;
		}
		first = runend;
	}

	/*
	 * the first and last page have been calculated now, move input pages
	 * to be zero based...
	 */
	if (first != 0) {
		for (i = first; i < count; i++) {
			m[i - first] = m[i];
		}
		count -= first;
		reqpage -= first;
	}

	/*
	 * calculate the file virtual address for the transfer
	 */
	foff = IDX_TO_OFF(m[0]->pindex);

	/*
	 * calculate the size of the transfer
	 */
	size = count * PAGE_SIZE;
	if ((foff + size) > object->un_pager.vnp.vnp_size)
		size = object->un_pager.vnp.vnp_size - foff;
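
	/*
	 * Editor's note (illustrative, not part of the original source):
	 * e.g. reading 4 pages at foff = 16384 from a 20000-byte file
	 * clips the transfer to size = 20000 - 16384 = 3616 bytes; with
	 * 512-byte sectors the device path below then rounds this back
	 * up to (3616 + 511) & ~511 = 4096 bytes.
	 */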

	/*
	 * round up physical size for real devices.
	 */
	if (dp->v_type == VBLK || dp->v_type == VCHR) {
		int secmask = dp->v_rdev->si_bsize_phys - 1;
		KASSERT(secmask < PAGE_SIZE, ("vnode_pager_generic_getpages: sector size %d too large\n", secmask + 1));
		size = (size + secmask) & ~secmask;
	}

	bp = getpbuf(&vnode_pbuf_freecnt);
	kva = (vm_offset_t) bp->b_data;

	/*
	 * and map the pages to be read into the kva
	 */
	pmap_qenter(kva, m, count);

	/* build a minimal buffer header */
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = vnode_pager_iodone;
	/* B_PHYS is not set, but it is nice to fill this in */
	KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
	KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
	bp->b_rcred = crhold(curthread->td_proc->p_ucred);
	bp->b_wcred = crhold(curthread->td_proc->p_ucred);
	bp->b_blkno = firstaddr;
	pbgetvp(dp, bp);
	bp->b_bcount = size;
	bp->b_bufsize = size;
	bp->b_runningbufspace = bp->b_bufsize;
	runningbufspace += bp->b_runningbufspace;

	cnt.v_vnodein++;
	cnt.v_vnodepgsin += count;

	/* do the input */
	BUF_STRATEGY(bp);

	s = splvm();
	/* we definitely need to be at splvm here */

	while ((bp->b_flags & B_DONE) == 0) {
		tsleep(bp, PVM, "vnread", 0);
	}
	splx(s);
	if ((bp->b_ioflags & BIO_ERROR) != 0)
		error = EIO;

	if (!error) {
		if (size != count * PAGE_SIZE)
			bzero((caddr_t) kva + size, PAGE_SIZE * count - size);
	}
	pmap_qremove(kva, count);

	/*
	 * free the buffer header back to the swap buffer pool
	 */
	relpbuf(bp, &vnode_pbuf_freecnt);
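
	/*
	 * Editor's note (illustrative, not part of the original source):
	 * the bzero() above handles a transfer clipped at EOF: if size
	 * was left at, say, 3616 bytes for a one-page (count = 1)
	 * mapping, bytes 3616..4095 of the kva are cleared so no stale
	 * buffer contents reach the page.
	 */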

	for (i = 0, tfoff = foff; i < count; i++, tfoff = nextoff) {
		vm_page_t mt;

		nextoff = tfoff + PAGE_SIZE;
		mt = m[i];

		if (nextoff <= object->un_pager.vnp.vnp_size) {
			/*
			 * Read filled up entire page.
			 */
			mt->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(mt);	/* should be an assert? XXX */
			pmap_clear_modify(mt);
		} else {
			/*
			 * Read did not fill up entire page.  Since this
			 * is getpages, the page may be mapped, so we have
			 * to zero the invalid portions of the page even
			 * though we aren't setting them valid.
			 *
			 * Currently we do not set the entire page valid,
			 * we just try to clear the piece that we couldn't
			 * read.
			 */
			vm_page_set_validclean(mt, 0,
			    object->un_pager.vnp.vnp_size - tfoff);
			/* handled by vm_fault now */
			/* vm_page_zero_invalid(mt, FALSE); */
		}

		vm_page_flag_clear(mt, PG_ZERO);
		if (i != reqpage) {

			/*
			 * whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere. (it already is in the object). Result:
			 * empirical results show that deactivating pages is
			 * best.
			 */

			/*
			 * just in case someone was asking for this page we
			 * now tell them that it is ok to use
			 */
			if (!error) {
				if (mt->flags & PG_WANTED)
					vm_page_activate(mt);
				else
					vm_page_deactivate(mt);
				vm_page_wakeup(mt);
			} else {
				vm_page_free(mt);
			}
		}
	}
	if (error) {
		printf("vnode_pager_getpages: I/O read error\n");
	}
	return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}
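
/*
 * Editor's note (illustrative, not part of the original source): in
 * the completion loop above, a page straddling EOF is only partially
 * validated.  For vnp_size = 20000 and tfoff = 16384, only bytes
 * 0..3615 of that page are marked valid and clean; vm_fault takes
 * care of zeroing the invalid remainder when the page is used.
 */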

/*
 * EOPNOTSUPP is no longer legal.  For local media VFS's that do not
 * implement their own VOP_PUTPAGES, their VOP_PUTPAGES should call
 * vnode_pager_generic_putpages() to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_PUTPAGES.
 */
static void
vnode_pager_putpages(object, m, count, sync, rtvals)
	vm_object_t object;
	vm_page_t *m;
	int count;
	boolean_t sync;
	int *rtvals;
{
	int rtval;
	struct vnode *vp;
	struct mount *mp;
	int bytes = count * PAGE_SIZE;

	GIANT_REQUIRED;
	/*
	 * Force synchronous operation if we are extremely low on memory
	 * to prevent a low-memory deadlock.  VOP operations often need to
	 * allocate more memory to initiate the I/O (i.e. do a BMAP
	 * operation).  The swapper handles the case by limiting the amount
	 * of asynchronous I/O, but that sort of solution doesn't scale well
	 * for the vnode pager without a lot of work.
	 *
	 * Also, the backing vnode's iodone routine may not wake the pageout
	 * daemon up.  This should probably be addressed. XXX
	 */
	if ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min)
		sync |= OBJPC_SYNC;

	/*
	 * Call device-specific putpages function
	 */
	vp = object->handle;
	if (vp->v_type != VREG)
		mp = NULL;
	(void)vn_start_write(vp, &mp, V_WAIT);
	rtval = VOP_PUTPAGES(vp, m, bytes, sync, rtvals, 0);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: stale FS putpages\n"));
	vn_finished_write(mp);
}
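
/*
 * Editor's sketch (hypothetical, not part of the original source): as
 * with getpages, a local-media filesystem without its own putpages
 * logic would forward VOP_PUTPAGES to the generic routine below
 * ("xxxfs" is a made-up name):
 *
 *	static int
 *	xxxfs_putpages(struct vop_putpages_args *ap)
 *	{
 *		return (vnode_pager_generic_putpages(ap->a_vp, ap->a_m,
 *		    ap->a_count, ap->a_sync, ap->a_rtvals));
 *	}
 */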

/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_PUTPAGES.
 *
 * This is typically called indirectly via the pageout daemon and
 * clustering has already typically occurred, so in general we ask the
 * underlying filesystem to write the data out asynchronously rather
 * than delayed.
 */
int
vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
	struct vnode *vp;
	vm_page_t *m;
	int bytecount;
	int flags;
	int *rtvals;
{
	int i;
	vm_object_t object;
	int count;

	int maxsize, ncount;
	vm_ooffset_t poffset;
	struct uio auio;
	struct iovec aiov;
	int error;
	int ioflags;

	GIANT_REQUIRED;
	object = vp->v_object;
	count = bytecount / PAGE_SIZE;

	for (i = 0; i < count; i++)
		rtvals[i] = VM_PAGER_AGAIN;

	if ((int) m[0]->pindex < 0) {
		printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%lx(%x)\n",
		    (long)m[0]->pindex, m[0]->dirty);
		rtvals[0] = VM_PAGER_BAD;
		return VM_PAGER_BAD;
	}

	maxsize = count * PAGE_SIZE;
	ncount = count;

	poffset = IDX_TO_OFF(m[0]->pindex);

	/*
	 * If the page-aligned write is larger than the actual file we
	 * have to invalidate pages occurring beyond the file EOF.  However,
	 * there is an edge case where a file may not be page-aligned where
	 * the last page is partially invalid.  In this case the filesystem
	 * may not properly clear the dirty bits for the entire page (which
	 * could be VM_PAGE_BITS_ALL due to the page having been mmap()d).
	 * With the page locked we are free to fix-up the dirty bits here.
	 */
	if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
		if (object->un_pager.vnp.vnp_size > poffset) {
			int pgoff;

			maxsize = object->un_pager.vnp.vnp_size - poffset;
			ncount = btoc(maxsize);
			if ((pgoff = (int)maxsize & PAGE_MASK) != 0) {
				vm_page_clear_dirty(m[ncount - 1], pgoff,
				    PAGE_SIZE - pgoff);
			}
		} else {
			maxsize = 0;
			ncount = 0;
		}
		if (ncount < count) {
			for (i = ncount; i < count; i++) {
				rtvals[i] = VM_PAGER_BAD;
			}
		}
	}

	/*
	 * pageouts are already clustered, use IO_ASYNC to force a bawrite()
	 * rather than a bdwrite() to prevent paging I/O from saturating
	 * the buffer cache.
	 */
	ioflags = IO_VMIO;
	ioflags |= (flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL)) ? IO_SYNC : IO_ASYNC;
	ioflags |= (flags & VM_PAGER_PUT_INVAL) ? IO_INVAL : 0;

	aiov.iov_base = (caddr_t) 0;
	aiov.iov_len = maxsize;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = poffset;
	auio.uio_segflg = UIO_NOCOPY;
	auio.uio_rw = UIO_WRITE;
	auio.uio_resid = maxsize;
	auio.uio_td = (struct thread *) 0;
	error = VOP_WRITE(vp, &auio, ioflags, curthread->td_proc->p_ucred);
	cnt.v_vnodeout++;
	cnt.v_vnodepgsout += ncount;

	if (error) {
		printf("vnode_pager_putpages: I/O error %d\n", error);
	}
	if (auio.uio_resid) {
		printf("vnode_pager_putpages: residual I/O %d at %lu\n",
		    auio.uio_resid, (u_long)m[0]->pindex);
	}
	for (i = 0; i < ncount; i++) {
		rtvals[i] = VM_PAGER_OK;
	}
	return rtvals[0];
}

struct vnode *
vnode_pager_lock(object)
	vm_object_t object;
{
	struct thread *td = curthread;	/* XXX */

	GIANT_REQUIRED;

	for (; object != NULL; object = object->backing_object) {
		if (object->type != OBJT_VNODE)
			continue;
		if (object->flags & OBJ_DEAD) {
			return NULL;
		}

		/* XXX; If object->handle can change, we need to cache it. */
		while (vget(object->handle,
		    LK_NOPAUSE | LK_SHARED | LK_RETRY | LK_CANRECURSE, td)) {
			if ((object->flags & OBJ_DEAD) || (object->type != OBJT_VNODE))
				return NULL;
			printf("vnode_pager_lock: retrying\n");
		}
		return object->handle;
	}
	return NULL;
}