/*
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1993, 1994 John S. Dyson
 * Copyright (c) 1995, David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vnode_pager.c	7.5 (Berkeley) 4/20/91
 *	$FreeBSD: head/sys/vm/vnode_pager.c 21987 1997-01-24 22:20:23Z dg $
 */

/*
 * Page to/from files (vnodes).
 */

/*
 * TODO:
 *	Implement VOP_GETPAGES/PUTPAGES interface for filesystems.  Will
 *	greatly simplify the vnode_pager.
 */
53
54#include <sys/param.h>
55#include <sys/systm.h>
56#include <sys/kernel.h>
57#include <sys/proc.h>
58#include <sys/malloc.h>
59#include <sys/vnode.h>
60#include <sys/uio.h>
61#include <sys/mount.h>
62#include <sys/buf.h>
63#include <sys/vmmeter.h>
64
65#include <vm/vm.h>
66#include <vm/vm_param.h>
67#include <vm/vm_prot.h>
68#include <vm/vm_object.h>
69#include <vm/vm_page.h>
70#include <vm/vm_pager.h>
71#include <vm/vnode_pager.h>
72#include <vm/vm_extern.h>
73
74static vm_offset_t vnode_pager_addr __P((struct vnode *vp, vm_ooffset_t address,
75					 int *run));
76static void vnode_pager_iodone __P((struct buf *bp));
77static int vnode_pager_input_smlfs __P((vm_object_t object, vm_page_t m));
78static int vnode_pager_input_old __P((vm_object_t object, vm_page_t m));
79static void vnode_pager_dealloc __P((vm_object_t));
80static int vnode_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
81static int vnode_pager_putpages __P((vm_object_t, vm_page_t *, int, boolean_t, int *));
82static boolean_t vnode_pager_haspage __P((vm_object_t, vm_pindex_t, int *, int *));
83
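/*
 * Pager operations vector for vnode-backed VM objects.  The slots
 * correspond, in order, to the function pointers of struct pagerops
 * (see <vm/vm_pager.h>); the NULL slots are hooks this pager does
 * not need.
 */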
struct pagerops vnodepagerops = {
	NULL,
	vnode_pager_alloc,
	vnode_pager_dealloc,
	vnode_pager_getpages,
	vnode_pager_putpages,
	vnode_pager_haspage,
	NULL
};

static int vnode_pager_leaf_getpages __P((vm_object_t object, vm_page_t *m,
					  int count, int reqpage));
static int vnode_pager_leaf_putpages __P((vm_object_t object, vm_page_t *m,
					  int count, boolean_t sync,
					  int *rtvals));

/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 */
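/*
 * Usage sketch (hypothetical caller, not taken from this file): a
 * mapping path that already holds a vnode reference might do
 *
 *	vm_object_t obj;
 *
 *	obj = vnode_pager_alloc((void *) vp,
 *	    OFF_TO_IDX(round_page(filesize)), VM_PROT_READ, 0);
 *
 * Note that "size" is in pages: vnp_size below is computed as
 * size * PAGE_SIZE.
 */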
vm_object_t
vnode_pager_alloc(handle, size, prot, offset)
	void *handle;
	vm_size_t size;
	vm_prot_t prot;
	vm_ooffset_t offset;
{
	vm_object_t object;
	struct vnode *vp;

	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return (NULL);

	vp = (struct vnode *) handle;

	/*
	 * Prevent race condition when allocating the object. This
	 * can happen with NFS vnodes since the nfsnode isn't locked.
	 */
	while (vp->v_flag & VOLOCK) {
		vp->v_flag |= VOWANT;
		tsleep(vp, PVM, "vnpobj", 0);
	}
	vp->v_flag |= VOLOCK;

	/*
	 * If the object is being terminated, wait for it to
	 * go away.
	 */
	while (((object = vp->v_object) != NULL) &&
		(object->flags & OBJ_DEAD)) {
		tsleep(object, PVM, "vadead", 0);
	}

	if (object == NULL) {
		/*
		 * Allocate an object of the appropriate size.
		 */
		object = vm_object_allocate(OBJT_VNODE, size);
		if (vp->v_type == VREG)
			object->flags = OBJ_CANPERSIST;
		else
			object->flags = 0;

		if (vp->v_usecount == 0)
			panic("vnode_pager_alloc: no vnode reference");
		/*
		 * Hold a reference to the vnode and initialize object data.
		 */
		vp->v_usecount++;
		object->un_pager.vnp.vnp_size = (vm_ooffset_t) size * PAGE_SIZE;

		object->handle = handle;
		vp->v_object = object;
	} else {
		/*
		 * vm_object_reference() will remove the object from the cache if
		 * found and gain a reference to the object.
		 */
		vm_object_reference(object);
	}

	if (vp->v_type == VREG)
		vp->v_flag |= VVMIO;

	vp->v_flag &= ~VOLOCK;
	if (vp->v_flag & VOWANT) {
		vp->v_flag &= ~VOWANT;
		wakeup(vp);
	}
	return (object);
}

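/*
 * Disassociate the VM object from its vnode: drain any paging in
 * progress, clear the back-pointers, and drop the vnode reference
 * taken in vnode_pager_alloc().
 */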
static void
vnode_pager_dealloc(object)
	vm_object_t object;
{
	register struct vnode *vp = object->handle;

	if (vp == NULL)
		panic("vnode_pager_dealloc: pager already dealloced");

	if (object->paging_in_progress) {
		int s = splbio();
		while (object->paging_in_progress) {
			object->flags |= OBJ_PIPWNT;
			tsleep(object, PVM, "vnpdea", 0);
		}
		splx(s);
	}

	object->handle = NULL;

	vp->v_object = NULL;
	vp->v_flag &= ~(VTEXT | VVMIO);
	vp->v_flag |= VAGE;
	vrele(vp);
}

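/*
 * Report whether the pager has the page at pindex.  On success,
 * *before and *after are set to the number of contiguous pages
 * that can be transferred before and after it in a single I/O,
 * based on the block map returned by VOP_BMAP().
 */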
static boolean_t
vnode_pager_haspage(object, pindex, before, after)
	vm_object_t object;
	vm_pindex_t pindex;
	int *before;
	int *after;
{
	struct vnode *vp = object->handle;
	daddr_t bn;
	int err;
	daddr_t reqblock;
	int poff;
	int bsize;
	int pagesperblock, blocksperpage;

	/*
	 * If the filesystem is no longer mounted, or the offset is beyond
	 * end of file, we do not have the page.
	 */
	if ((vp->v_mount == NULL) ||
		(IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size))
		return FALSE;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	pagesperblock = bsize / PAGE_SIZE;
	blocksperpage = 0;
	if (pagesperblock > 0) {
		reqblock = pindex / pagesperblock;
	} else {
		blocksperpage = (PAGE_SIZE / bsize);
		reqblock = pindex * blocksperpage;
	}
	err = VOP_BMAP(vp, reqblock, (struct vnode **) 0, &bn,
		after, before);
	if (err)
		return TRUE;
	if (bn == -1)
		return FALSE;
	if (pagesperblock > 0) {
		poff = pindex - (reqblock * pagesperblock);
		if (before) {
			*before *= pagesperblock;
			*before += poff;
		}
		if (after) {
			int numafter;
			*after *= pagesperblock;
			numafter = pagesperblock - (poff + 1);
			if (IDX_TO_OFF(pindex + numafter) > object->un_pager.vnp.vnp_size) {
				numafter = OFF_TO_IDX((object->un_pager.vnp.vnp_size - IDX_TO_OFF(pindex)));
			}
			*after += numafter;
		}
	} else {
		if (before) {
			*before /= blocksperpage;
		}

		if (after) {
			*after /= blocksperpage;
		}
	}
	return TRUE;
}

/*
 * Lets the VM system know about a change in size for a file.
 * We adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
void
vnode_pager_setsize(vp, nsize)
	struct vnode *vp;
	vm_ooffset_t nsize;
{
	vm_object_t object = vp->v_object;

	if (object == NULL)
		return;

	/*
	 * Hasn't changed size
	 */
	if (nsize == object->un_pager.vnp.vnp_size)
		return;

	/*
	 * File has shrunk. Toss any cached pages beyond the new EOF.
	 */
	if (nsize < object->un_pager.vnp.vnp_size) {
		vm_ooffset_t nsizerounded;
		nsizerounded = IDX_TO_OFF(OFF_TO_IDX(nsize + PAGE_MASK));
		if (nsizerounded < object->un_pager.vnp.vnp_size) {
			vm_object_page_remove(object,
				OFF_TO_IDX(nsize + PAGE_MASK),
				OFF_TO_IDX(object->un_pager.vnp.vnp_size),
				FALSE);
		}
		/*
		 * this gets rid of garbage at the end of a page that is now
		 * only partially backed by the vnode...
		 */
		if (nsize & PAGE_MASK) {
			vm_offset_t kva;
			vm_page_t m;

			m = vm_page_lookup(object, OFF_TO_IDX(nsize));
			if (m) {
				kva = vm_pager_map_page(m);
				bzero((caddr_t) kva + (nsize & PAGE_MASK),
				    (int) (round_page(nsize) - nsize));
				vm_pager_unmap_page(kva);
			}
		}
	}
	object->un_pager.vnp.vnp_size = nsize;
	object->size = OFF_TO_IDX(nsize + PAGE_MASK);
}

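/*
 * At unmount time, uncache the object of every vnode associated
 * with the mount point.
 */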
void
vnode_pager_umount(mp)
	register struct mount *mp;
{
	struct vnode *vp, *nvp;

loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
		/*
		 * Vnode can be reclaimed by getnewvnode() while we
		 * traverse the list.
		 */
		if (vp->v_mount != mp)
			goto loop;

		/*
		 * Save the next pointer now since uncaching may terminate the
		 * object and render vnode invalid
		 */
		nvp = vp->v_mntvnodes.le_next;

		if (vp->v_object != NULL) {
			VOP_LOCK(vp);
			vnode_pager_uncache(vp);
			VOP_UNLOCK(vp);
		}
	}
}

/*
 * Remove vnode associated object from the object cache.
 * This routine must be called with the vnode locked.
 *
 * XXX the vnode is unlocked around the call to pager_cache() below.
 * We must do this since uncaching the object may result in its
 * destruction which may initiate paging activity which may necessitate
 * re-locking the vnode.
 */
void
vnode_pager_uncache(vp)
	struct vnode *vp;
{
	vm_object_t object;

	/*
	 * Not a mapped vnode
	 */
	object = vp->v_object;
	if (object == NULL)
		return;

	vm_object_reference(object);

	/*
	 * XXX We really should handle locking on
	 * VBLK devices...
	 */
	if (vp->v_type != VBLK)
		VOP_UNLOCK(vp);
	pager_cache(object, FALSE);
	if (vp->v_type != VBLK)
		VOP_LOCK(vp);
	return;
}

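/*
 * Wake up any sleepers on a pager-busied page and free it.
 */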
void
vnode_pager_freepage(m)
	vm_page_t m;
{
	PAGE_WAKEUP(m);
	vm_page_free(m);
}

/*
 * calculate the linear (byte) disk address of specified virtual
 * file address
 */
static vm_offset_t
vnode_pager_addr(vp, address, run)
	struct vnode *vp;
	vm_ooffset_t address;
	int *run;
{
	int rtaddress;
	int bsize;
	daddr_t block;
	struct vnode *rtvp;
	int err;
	daddr_t vblock;
	int voffset;

	if ((int) address < 0)
		return -1;

	if (vp->v_mount == NULL)
		return -1;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	vblock = address / bsize;
	voffset = address % bsize;

	err = VOP_BMAP(vp, vblock, &rtvp, &block, run, NULL);

	if (err || (block == -1))
		rtaddress = -1;
	else {
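		/*
		 * Convert the mapped block to a DEV_BSIZE-unit disk address
		 * and, if a run was requested, convert the run length from
		 * filesystem blocks to pages relative to this offset.
		 */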
		rtaddress = block + voffset / DEV_BSIZE;
		if (run) {
			*run += 1;
			*run *= bsize/PAGE_SIZE;
			*run -= voffset/PAGE_SIZE;
		}
	}

	return rtaddress;
}

/*
 * interrupt routine for I/O completion
 */
static void
vnode_pager_iodone(bp)
	struct buf *bp;
{
	bp->b_flags |= B_DONE;
	wakeup(bp);
}

/*
 * small block file system vnode pager input
 */
static int
vnode_pager_input_smlfs(object, m)
	vm_object_t object;
	vm_page_t m;
{
	int i;
	int s;
	struct vnode *dp, *vp;
	struct buf *bp;
	vm_offset_t kva;
	int fileaddr;
	vm_offset_t bsize;
	int error = 0;

	vp = object->handle;
	if (vp->v_mount == NULL)
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	VOP_BMAP(vp, 0, &dp, 0, NULL, NULL);

	kva = vm_pager_map_page(m);

	for (i = 0; i < PAGE_SIZE / bsize; i++) {

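		/*
		 * Skip filesystem blocks whose range is already valid in
		 * the page.
		 */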
		if ((vm_page_bits(IDX_TO_OFF(m->pindex) + i * bsize, bsize) & m->valid))
			continue;

		fileaddr = vnode_pager_addr(vp,
			IDX_TO_OFF(m->pindex) + i * bsize, (int *)0);
		if (fileaddr != -1) {
			bp = getpbuf();

			/* build a minimal buffer header */
			bp->b_flags = B_BUSY | B_READ | B_CALL;
			bp->b_iodone = vnode_pager_iodone;
			bp->b_proc = curproc;
			bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred;
			if (bp->b_rcred != NOCRED)
				crhold(bp->b_rcred);
			if (bp->b_wcred != NOCRED)
				crhold(bp->b_wcred);
			bp->b_un.b_addr = (caddr_t) kva + i * bsize;
			bp->b_blkno = fileaddr;
			pbgetvp(dp, bp);
			bp->b_bcount = bsize;
			bp->b_bufsize = bsize;

			/* do the input */
			VOP_STRATEGY(bp);

			/* we definitely need to be at splbio here */
			s = splbio();
			while ((bp->b_flags & B_DONE) == 0) {
				tsleep(bp, PVM, "vnsrd", 0);
			}
			splx(s);
			if ((bp->b_flags & B_ERROR) != 0)
				error = EIO;

			/*
			 * free the buffer header back to the swap buffer pool
			 */
			relpbuf(bp);
			if (error)
				break;

			vm_page_set_validclean(m, (i * bsize) & PAGE_MASK, bsize);
		} else {
			vm_page_set_validclean(m, (i * bsize) & PAGE_MASK, bsize);
			bzero((caddr_t) kva + i * bsize, bsize);
		}
	}
	vm_pager_unmap_page(kva);
	pmap_clear_modify(VM_PAGE_TO_PHYS(m));
	m->flags &= ~PG_ZERO;
	if (error) {
		return VM_PAGER_ERROR;
	}
	return VM_PAGER_OK;
}

/*
 * old style vnode pager input routine, using VOP_READ().  this is
 * the fallback when the filesystem does not support VOP_BMAP().
 */
static int
vnode_pager_input_old(object, m)
	vm_object_t object;
	vm_page_t m;
{
	struct uio auio;
	struct iovec aiov;
	int error;
	int size;
	vm_offset_t kva;

	error = 0;

	/*
	 * Return failure if beyond current EOF
	 */
	if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
		return VM_PAGER_BAD;
	} else {
		size = PAGE_SIZE;
		if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
			size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);

		/*
		 * Allocate a kernel virtual address and initialize so that
		 * we can use VOP_READ/WRITE routines.
		 */
		kva = vm_pager_map_page(m);

		aiov.iov_base = (caddr_t) kva;
		aiov.iov_len = size;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = IDX_TO_OFF(m->pindex);
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_READ;
		auio.uio_resid = size;
		auio.uio_procp = (struct proc *) 0;

		error = VOP_READ(object->handle, &auio, 0, curproc->p_ucred);
		if (!error) {
			register int count = size - auio.uio_resid;

			if (count == 0)
				error = EINVAL;
			else if (count != PAGE_SIZE)
				bzero((caddr_t) kva + count, PAGE_SIZE - count);
		}
		vm_pager_unmap_page(kva);
	}
	pmap_clear_modify(VM_PAGE_TO_PHYS(m));
	m->dirty = 0;
	m->flags &= ~PG_ZERO;
	return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}

/*
 * generic vnode pager input routine: hand the request to the filesystem
 * via VOP_GETPAGES(), falling back to vnode_pager_leaf_getpages() if the
 * filesystem does not implement it
 */
static int
vnode_pager_getpages(object, m, count, reqpage)
	vm_object_t object;
	vm_page_t *m;
	int count;
	int reqpage;
{
	int rtval;
	struct vnode *vp;

	if (object->flags & OBJ_VNODE_GONE)
		return VM_PAGER_ERROR;
	vp = object->handle;
	rtval = VOP_GETPAGES(vp, m, count*PAGE_SIZE, reqpage, 0);
	if (rtval == EOPNOTSUPP)
		return vnode_pager_leaf_getpages(object, m, count, reqpage);
	else
		return rtval;
}

static int
vnode_pager_leaf_getpages(object, m, count, reqpage)
	vm_object_t object;
	vm_page_t *m;
	int count;
	int reqpage;
{
	vm_offset_t kva;
	off_t foff;
	int i, size, bsize, first, firstaddr;
	struct vnode *dp, *vp;
	int runpg;
	int runend;
	struct buf *bp;
	int s;
	int error = 0;

	vp = object->handle;
	if (vp->v_mount == NULL)
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	/* get the UNDERLYING device for the file with VOP_BMAP() */

	/*
	 * originally, we did not check for an error return value -- assuming
	 * an fs always has a bmap entry point -- that assumption is wrong!!!
	 */
	foff = IDX_TO_OFF(m[reqpage]->pindex);

	/*
	 * if we can't bmap, use old VOP code
	 */
	if (VOP_BMAP(vp, 0, &dp, 0, NULL, NULL)) {
		for (i = 0; i < count; i++) {
			if (i != reqpage) {
				vnode_pager_freepage(m[i]);
			}
		}
		cnt.v_vnodein++;
		cnt.v_vnodepgsin++;
		return vnode_pager_input_old(object, m[reqpage]);

		/*
		 * if the blocksize is smaller than a page size, then use
		 * special small filesystem code.  NFS sometimes has a small
		 * blocksize, but it can handle large reads itself.
		 */
	} else if ((PAGE_SIZE / bsize) > 1 &&
	    (vp->v_mount->mnt_stat.f_type != MOUNT_NFS)) {

		for (i = 0; i < count; i++) {
			if (i != reqpage) {
				vnode_pager_freepage(m[i]);
			}
		}
		cnt.v_vnodein++;
		cnt.v_vnodepgsin++;
		return vnode_pager_input_smlfs(object, m[reqpage]);
	}
	/*
	 * if ANY DEV_BSIZE blocks are valid on a large filesystem block,
	 * then the entire page is valid --
	 */
	if (m[reqpage]->valid) {
		m[reqpage]->valid = VM_PAGE_BITS_ALL;
		for (i = 0; i < count; i++) {
			if (i != reqpage)
				vnode_pager_freepage(m[i]);
		}
		return VM_PAGER_OK;
	}

	/*
	 * here on direct device I/O
	 */

	firstaddr = -1;
	/*
	 * calculate the run that includes the required page
	 */
	for (first = 0, i = 0; i < count; i = runend) {
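		/*
		 * vnode_pager_addr() returns the disk address of the page
		 * and, in runpg, the number of contiguous pages the block
		 * map can deliver starting with this one.
		 */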
		firstaddr = vnode_pager_addr(vp,
			IDX_TO_OFF(m[i]->pindex), &runpg);
		if (firstaddr == -1) {
			if (i == reqpage && foff < object->un_pager.vnp.vnp_size) {
				panic("vnode_pager_getpages: unexpected missing page: firstaddr: %d, foff: %ld, vnp_size: %ld",
				    firstaddr, (long) foff,
				    (long) object->un_pager.vnp.vnp_size);
			}
			vnode_pager_freepage(m[i]);
			runend = i + 1;
			first = runend;
			continue;
		}
		runend = i + runpg;
		if (runend <= reqpage) {
			int j;
			for (j = i; j < runend; j++) {
				vnode_pager_freepage(m[j]);
			}
		} else {
			if (runpg < (count - first)) {
				for (i = first + runpg; i < count; i++)
					vnode_pager_freepage(m[i]);
				count = first + runpg;
			}
			break;
		}
		first = runend;
	}

	/*
	 * the first and last page have been calculated now, move input pages
	 * to be zero based...
	 */
	if (first != 0) {
		for (i = first; i < count; i++) {
			m[i - first] = m[i];
		}
		count -= first;
		reqpage -= first;
	}

	/*
	 * calculate the file virtual address for the transfer
	 */
	foff = IDX_TO_OFF(m[0]->pindex);

	/*
	 * calculate the size of the transfer
	 */
	size = count * PAGE_SIZE;
	if ((foff + size) > object->un_pager.vnp.vnp_size)
		size = object->un_pager.vnp.vnp_size - foff;

	/*
	 * round up physical size for real devices
	 */
	if (dp->v_type == VBLK || dp->v_type == VCHR)
		size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);

	bp = getpbuf();
	kva = (vm_offset_t) bp->b_data;

	/*
	 * and map the pages to be read into the kva
	 */
	pmap_qenter(kva, m, count);

	/* build a minimal buffer header */
	bp->b_flags = B_BUSY | B_READ | B_CALL;
	bp->b_iodone = vnode_pager_iodone;
	/* B_PHYS is not set, but it is nice to fill this in */
	bp->b_proc = curproc;
	bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred;
	if (bp->b_rcred != NOCRED)
		crhold(bp->b_rcred);
	if (bp->b_wcred != NOCRED)
		crhold(bp->b_wcred);
	bp->b_blkno = firstaddr;
	pbgetvp(dp, bp);
	bp->b_bcount = size;
	bp->b_bufsize = size;

	cnt.v_vnodein++;
	cnt.v_vnodepgsin += count;

	/* do the input */
	VOP_STRATEGY(bp);

	s = splbio();
	/* we definitely need to be at splbio here */

	while ((bp->b_flags & B_DONE) == 0) {
		tsleep(bp, PVM, "vnread", 0);
	}
	splx(s);
	if ((bp->b_flags & B_ERROR) != 0)
		error = EIO;

	if (!error) {
		if (size != count * PAGE_SIZE)
			bzero((caddr_t) kva + size, PAGE_SIZE * count - size);
	}
	pmap_qremove(kva, count);

	/*
	 * free the buffer header back to the swap buffer pool
	 */
	relpbuf(bp);

	for (i = 0; i < count; i++) {
		pmap_clear_modify(VM_PAGE_TO_PHYS(m[i]));
		m[i]->dirty = 0;
		m[i]->valid = VM_PAGE_BITS_ALL;
		m[i]->flags &= ~PG_ZERO;
		if (i != reqpage) {

			/*
			 * whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere. (it already is in the object). Result:
			 * It appears that empirical results show that
			 * deactivating pages is best.
			 */

			/*
			 * just in case someone was asking for this page we
			 * now tell them that it is ok to use
			 */
			if (!error) {
				vm_page_deactivate(m[i]);
				PAGE_WAKEUP(m[i]);
			} else {
				vnode_pager_freepage(m[i]);
			}
		}
	}
	if (error) {
		printf("vnode_pager_getpages: I/O read error\n");
	}
	return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}

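/*
 * generic vnode pager output routine: hand the request to the filesystem
 * via VOP_PUTPAGES(), falling back to vnode_pager_leaf_putpages() if the
 * filesystem does not implement it
 */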
static int
vnode_pager_putpages(object, m, count, sync, rtvals)
	vm_object_t object;
	vm_page_t *m;
	int count;
	boolean_t sync;
	int *rtvals;
{
	int rtval;
	struct vnode *vp;

	if (object->flags & OBJ_VNODE_GONE)
		return VM_PAGER_ERROR;

	vp = object->handle;
	rtval = VOP_PUTPAGES(vp, m, count*PAGE_SIZE, sync, rtvals, 0);
	if (rtval == EOPNOTSUPP)
		return vnode_pager_leaf_putpages(object, m, count, sync, rtvals);
	else
		return rtval;
}

/*
 * leaf vnode pager output routine: write the pages with VOP_WRITE()
 */
static int
vnode_pager_leaf_putpages(object, m, count, sync, rtvals)
	vm_object_t object;
	vm_page_t *m;
	int count;
	boolean_t sync;
	int *rtvals;
{
	int i;
	struct vnode *vp;
	int maxsize, ncount;
	vm_ooffset_t poffset;
	struct uio auio;
	struct iovec aiov;
	int error;

	vp = object->handle;
	for (i = 0; i < count; i++)
		rtvals[i] = VM_PAGER_AGAIN;

	if ((int) m[0]->pindex < 0) {
		printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%lx(%x)\n",
			(u_long) m[0]->pindex, m[0]->dirty);
		rtvals[0] = VM_PAGER_BAD;
		return VM_PAGER_BAD;
	}

	maxsize = count * PAGE_SIZE;
	ncount = count;

	poffset = IDX_TO_OFF(m[0]->pindex);
	if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
		if (object->un_pager.vnp.vnp_size > poffset)
			maxsize = object->un_pager.vnp.vnp_size - poffset;
		else
			maxsize = 0;
		ncount = btoc(maxsize);
		if (ncount < count) {
			for (i = ncount; i < count; i++) {
				rtvals[i] = VM_PAGER_BAD;
			}
#ifdef BOGUS
			if (ncount == 0) {
				printf("vnode_pager_putpages: write past end of file: %ld, %lu\n",
					(long) poffset,
					(unsigned long) object->un_pager.vnp.vnp_size);
				return rtvals[0];
			}
#endif
		}
	}

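	/*
	 * Soft-busy the pages (bump the busy count) while clearing PG_BUSY,
	 * so the write path below can operate on them while they remain
	 * protected from being freed.
	 */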
	for (i = 0; i < count; i++) {
		m[i]->busy++;
		m[i]->flags &= ~PG_BUSY;
	}

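	/*
	 * Write directly from the VM pages: with UIO_NOCOPY the uiomove
	 * is a no-op, so no data copy into a separate buffer is needed.
	 */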
	aiov.iov_base = (caddr_t) 0;
	aiov.iov_len = maxsize;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = poffset;
	auio.uio_segflg = UIO_NOCOPY;
	auio.uio_rw = UIO_WRITE;
	auio.uio_resid = maxsize;
	auio.uio_procp = (struct proc *) 0;
	error = VOP_WRITE(vp, &auio, IO_VMIO|(sync?IO_SYNC:0), curproc->p_ucred);
	cnt.v_vnodeout++;
	cnt.v_vnodepgsout += ncount;

	if (error) {
		printf("vnode_pager_putpages: I/O error %d\n", error);
	}
	if (auio.uio_resid) {
		printf("vnode_pager_putpages: residual I/O %d at %ld\n",
			auio.uio_resid, (long) m[0]->pindex);
	}
	for (i = 0; i < count; i++) {
		m[i]->busy--;
		if (i < ncount) {
			rtvals[i] = VM_PAGER_OK;
		}
		if ((m[i]->busy == 0) && (m[i]->flags & PG_WANTED))
			wakeup(m[i]);
	}
	return rtvals[0];
}

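/*
 * Walk down the chain of backing objects and return the underlying
 * vnode, locked, for the first vnode-backed object found.
 */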
struct vnode *
vnode_pager_lock(object)
	vm_object_t object;
{
	for (; object != NULL; object = object->backing_object) {
		if (object->type != OBJT_VNODE)
			continue;

		VOP_LOCK(object->handle);
		return object->handle;
	}
	return NULL;
}