vnode_pager.c revision 4207
1/*
2 * Copyright (c) 1990 University of Utah.
3 * Copyright (c) 1991 The Regents of the University of California.
4 * All rights reserved.
5 * Copyright (c) 1993,1994 John S. Dyson
6 *
7 * This code is derived from software contributed to Berkeley by
8 * the Systems Programming Group of the University of Utah Computer
9 * Science Department.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 *    must display the following acknowledgement:
21 *	This product includes software developed by the University of
22 *	California, Berkeley and its contributors.
23 * 4. Neither the name of the University nor the names of its contributors
24 *    may be used to endorse or promote products derived from this software
25 *    without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
28 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
31 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * SUCH DAMAGE.
38 *
39 *	from: @(#)vnode_pager.c	7.5 (Berkeley) 4/20/91
40 *	$Id: vnode_pager.c,v 1.14 1994/10/15 13:33:09 davidg Exp $
41 */
42
43/*
44 * Page to/from files (vnodes).
45 *
46 * TODO:
47 *	pageouts
48 *	fix credential use (uses current process credentials now)
49 */
50
51/*
52 * MODIFICATIONS:
53 * John S. Dyson  08 Dec 93
54 *
55 * This file in conjunction with some vm_fault mods, eliminate the performance
56 * advantage for using the buffer cache and minimize memory copies.
57 *
58 * 1) Supports multiple - block reads
59 * 2) Bypasses buffer cache for reads
60 *
61 * TODO:
62 *
63 * 1) Totally bypass buffer cache for reads
64 *    (Currently will still sometimes use buffer cache for reads)
65 * 2) Bypass buffer cache for writes
66 *    (Code does not support it, but mods are simple)
67 */
68
69#include <sys/param.h>
70#include <sys/systm.h>
71#include <sys/proc.h>
72#include <sys/malloc.h>
73#include <sys/vnode.h>
74#include <sys/uio.h>
75#include <sys/mount.h>
76
77#include <vm/vm.h>
78#include <vm/vm_page.h>
79#include <vm/vnode_pager.h>
80
81#include <sys/buf.h>
82#include <miscfs/specfs/specdev.h>
83
/*
 * Forward declarations for the pager entry points below; all but
 * vnode_pager_alloc use old-style (K&R) declarations.
 */
int     vnode_pager_putmulti();

void    vnode_pager_init();
vm_pager_t vnode_pager_alloc(caddr_t, vm_offset_t, vm_prot_t, vm_offset_t);
void    vnode_pager_dealloc();
int     vnode_pager_getpage();
int     vnode_pager_getmulti();
int     vnode_pager_putpage();
boolean_t vnode_pager_haspage();

/*
 * Operations vector through which the VM system dispatches into the
 * vnode pager.
 * NOTE(review): vnode_pager_putpage is declared "int" above but defined
 * "boolean_t" below -- harmless with K&R linkage, but worth unifying.
 */
struct pagerops vnodepagerops = {
	vnode_pager_init,
	vnode_pager_alloc,
	vnode_pager_dealloc,
	vnode_pager_getpage,
	vnode_pager_getmulti,
	vnode_pager_putpage,
	vnode_pager_putmulti,
	vnode_pager_haspage
};
104
105
106
107static int vnode_pager_input(vn_pager_t vnp, vm_page_t * m, int count, int reqpage);
108static int vnode_pager_output(vn_pager_t vnp, vm_page_t * m, int count, int *rtvals);
109
110extern vm_map_t pager_map;
111
112struct pagerlst vnode_pager_list;	/* list of managed vnodes */
113
114#define MAXBP (PAGE_SIZE/DEV_BSIZE);
115
/*
 * One-time initialization: set up the empty list of active vnode pagers.
 */
void
vnode_pager_init()
{
	TAILQ_INIT(&vnode_pager_list);
}
121
/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 *
 * Returns NULL when no handle is given or the vnode's attributes cannot
 * be read.  On success the pager's object holds a reference (newly
 * created, or gained through vm_object_lookup() for an existing pager).
 * Note that the "size" argument is unused; the object is sized from the
 * file's actual attributes.
 */
vm_pager_t
vnode_pager_alloc(handle, size, prot, offset)
	caddr_t handle;
	vm_size_t size;
	vm_prot_t prot;
	vm_offset_t offset;
{
	register vm_pager_t pager;
	register vn_pager_t vnp;
	vm_object_t object;
	struct vattr vattr;
	struct vnode *vp;
	struct proc *p = curproc;	/* XXX */

	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return (NULL);

	/*
	 * Vnodes keep a pointer to any associated pager so no need to lookup
	 * with vm_pager_lookup.
	 */
	vp = (struct vnode *) handle;
	object = (vm_object_t) vp->v_vmdata;
	pager = NULL;
	if( object != NULL)
		pager = object->pager;
	if (pager == NULL) {

		/*
		 * Allocate pager structures
		 */
		pager = (vm_pager_t) malloc(sizeof *pager, M_VMPAGER, M_WAITOK);
		if (pager == NULL)
			return (NULL);
		vnp = (vn_pager_t) malloc(sizeof *vnp, M_VMPGDATA, M_WAITOK);
		if (vnp == NULL) {
			free((caddr_t) pager, M_VMPAGER);
			return (NULL);
		}

		/*
		 * And an object of the appropriate size (rounded up to a
		 * whole number of pages from the file's current size).
		 */
		if (VOP_GETATTR(vp, &vattr, p->p_ucred, p) == 0) {
			object = vm_object_allocate(round_page(vattr.va_size));
			vm_object_enter(object, pager);
			vm_object_setpager(object, pager, 0, TRUE);
		} else {
			/* couldn't stat the file -- undo the allocations */
			free((caddr_t) vnp, M_VMPGDATA);
			free((caddr_t) pager, M_VMPAGER);
			return (NULL);
		}

		/*
		 * Hold a reference to the vnode and initialize pager data.
		 * The reference is dropped in vnode_pager_dealloc().
		 */
		VREF(vp);
		vnp->vnp_flags = 0;
		vnp->vnp_vp = vp;
		vnp->vnp_size = vattr.va_size;

		TAILQ_INSERT_TAIL(&vnode_pager_list, pager, pg_list);
		pager->pg_handle = handle;
		pager->pg_type = PG_VNODE;
		pager->pg_ops = &vnodepagerops;
		pager->pg_data = (caddr_t) vnp;
		vp->v_vmdata = (caddr_t) object;
	} else {

		/*
		 * vm_object_lookup() will remove the object from the cache if
		 * found and also gain a reference to the object.
		 */
		(void) vm_object_lookup(pager);
	}
	return (pager);
}
206
/*
 * Tear down a vnode pager: detach the VM object from the vnode, drop
 * the vnode reference taken in vnode_pager_alloc(), unlink the pager
 * from the managed list and free its structures.
 */
void
vnode_pager_dealloc(pager)
	vm_pager_t pager;
{
	register vn_pager_t vnp = (vn_pager_t) pager->pg_data;
	register struct vnode *vp;

	vp = vnp->vnp_vp;
	if (vp) {
		vp->v_vmdata = NULL;
		/* the vnode no longer backs executable text or VM-I/O pages */
		vp->v_flag &= ~(VTEXT|VVMIO);
		vrele(vp);
	}
	TAILQ_REMOVE(&vnode_pager_list, pager, pg_list);
	free((caddr_t) vnp, M_VMPGDATA);
	free((caddr_t) pager, M_VMPAGER);
}
224
225int
226vnode_pager_getmulti(pager, m, count, reqpage, sync)
227	vm_pager_t pager;
228	vm_page_t *m;
229	int     count;
230	int     reqpage;
231	boolean_t sync;
232{
233
234	return vnode_pager_input((vn_pager_t) pager->pg_data, m, count, reqpage);
235}
236
237int
238vnode_pager_getpage(pager, m, sync)
239	vm_pager_t pager;
240	vm_page_t m;
241	boolean_t sync;
242{
243
244	vm_page_t marray[1];
245
246	if (pager == NULL)
247		return FALSE;
248	marray[0] = m;
249
250	return vnode_pager_input((vn_pager_t) pager->pg_data, marray, 1, 0);
251}
252
253boolean_t
254vnode_pager_putpage(pager, m, sync)
255	vm_pager_t pager;
256	vm_page_t m;
257	boolean_t sync;
258{
259	vm_page_t marray[1];
260	int     rtvals[1];
261
262	if (pager == NULL)
263		return FALSE;
264	marray[0] = m;
265	vnode_pager_output((vn_pager_t) pager->pg_data, marray, 1, rtvals);
266	return rtvals[0];
267}
268
269int
270vnode_pager_putmulti(pager, m, c, sync, rtvals)
271	vm_pager_t pager;
272	vm_page_t *m;
273	int     c;
274	boolean_t sync;
275	int    *rtvals;
276{
277	return vnode_pager_output((vn_pager_t) pager->pg_data, m, c, rtvals);
278}
279
280
/*
 * Report whether backing store exists for the page at "offset".
 * Pages past EOF certainly have none; otherwise VOP_BMAP() is asked
 * whether a disk block is allocated there.
 */
boolean_t
vnode_pager_haspage(pager, offset)
	vm_pager_t pager;
	vm_offset_t offset;
{
	register vn_pager_t vnp = (vn_pager_t) pager->pg_data;
	daddr_t bn;
	int     err;

	/*
	 * Offset beyond end of file, do not have the page
	 */
	if (offset >= vnp->vnp_size) {
		return (FALSE);
	}

	/*
	 * Read the index to find the disk block to read from.  If there is no
	 * block, report that we don't have this data.
	 *
	 * Assumes that the vnode has whole page or nothing.
	 */
	err = VOP_BMAP(vnp->vnp_vp,
		       offset / vnp->vnp_vp->v_mount->mnt_stat.f_iosize,
		       (struct vnode **) 0, &bn, 0);
/*
	printf("vnode_pager_haspage: (%d)0x%x: err: %d, bn: %d\n",
		offset, offset, err, bn);
*/
	/*
	 * NOTE(review): a bmap failure answers TRUE (claim the page exists),
	 * presumably so the fault path proceeds into the pager read code
	 * which can then report the error -- confirm this is intentional.
	 */
	if (err) {
		return (TRUE);
	}
	/* bn < 0 marks a hole: no allocated block backs this page */
	return ((long) bn < 0 ? FALSE : TRUE);
}
315
316/*
317 * Lets the VM system know about a change in size for a file.
318 * If this vnode is mapped into some address space (i.e. we have a pager
319 * for it) we adjust our own internal size and flush any cached pages in
320 * the associated object that are affected by the size change.
321 *
322 * Note: this routine may be invoked as a result of a pager put
323 * operation (possibly at object termination time), so we must be careful.
324 */
void
vnode_pager_setsize(vp, nsize)
	struct vnode *vp;
	u_long  nsize;
{
	register vn_pager_t vnp;
	register vm_object_t object;
	vm_pager_t pager;

	/*
	 * Not a mapped vnode
	 */
	if (vp == NULL || vp->v_type != VREG || vp->v_vmdata == NULL)
		return;

	/*
	 * Hasn't changed size
	 */
	object = (vm_object_t) vp->v_vmdata;
	if( object == NULL)
		return;
	if( (pager = object->pager) == NULL)
		return;
	vnp = (vn_pager_t) pager->pg_data;
	if (nsize == vnp->vnp_size)
		return;

	/*
	 * No object. This can happen during object termination since
	 * vm_object_page_clean is called after the object has been removed
	 * from the hash table, and clean may cause vnode write operations
	 * which can wind up back here.
	 */
	object = vm_object_lookup(pager);
	if (object == NULL)
		return;

	/*
	 * File has shrunk. Toss any cached pages beyond the new EOF.
	 */
	if (nsize < vnp->vnp_size) {
		vm_object_lock(object);
		vm_object_page_remove(object,
			     round_page((vm_offset_t) nsize), vnp->vnp_size);
		vm_object_unlock(object);

		/*
		 * this gets rid of garbage at the end of a page that is now
		 * only partially backed by the vnode...
		 */
		if (nsize & PAGE_MASK) {
			vm_offset_t kva;
			vm_page_t m;

			m = vm_page_lookup(object, trunc_page((vm_offset_t) nsize));
			if (m) {
				kva = vm_pager_map_page(m);
				bzero((caddr_t) kva + (nsize & PAGE_MASK),
				      round_page(nsize) - nsize);
				vm_pager_unmap_page(kva);
			}
		}
	} else {

		/*
		 * this allows the filesystem and VM cache to stay in sync if
		 * the VM page hasn't been modified...  After the page is
		 * removed -- it will be faulted back in from the filesystem
		 * cache.
		 */
		if (vnp->vnp_size & PAGE_MASK) {
			vm_page_t m;

			m = vm_page_lookup(object, trunc_page(vnp->vnp_size));
			if (m && (m->flags & PG_CLEAN)) {
				vm_object_lock(object);
				/*
				 * NOTE(review): start == end here, so this
				 * removes an empty range.  It may have been
				 * meant to remove the page containing the
				 * old EOF -- verify against the behavior of
				 * vm_object_page_remove().
				 */
				vm_object_page_remove(object,
					       vnp->vnp_size, vnp->vnp_size);
				vm_object_unlock(object);
			}
		}
	}
	vnp->vnp_size = (vm_offset_t) nsize;
	object->size = round_page(nsize);

	/* drop the reference gained by vm_object_lookup() above */
	vm_object_deallocate(object);
}
412
413void
414vnode_pager_umount(mp)
415	register struct mount *mp;
416{
417	register vm_pager_t pager, npager;
418	struct vnode *vp;
419
420	pager = vnode_pager_list.tqh_first;
421	while (pager) {
422
423		/*
424		 * Save the next pointer now since uncaching may terminate the
425		 * object and render pager invalid
426		 */
427		vp = ((vn_pager_t) pager->pg_data)->vnp_vp;
428		npager = pager->pg_list.tqe_next;
429		if (mp == (struct mount *) 0 || vp->v_mount == mp)
430			(void) vnode_pager_uncache(vp);
431		pager = npager;
432	}
433}
434
435/*
436 * Remove vnode associated object from the object cache.
437 *
438 * Note: this routine may be invoked as a result of a pager put
439 * operation (possibly at object termination time), so we must be careful.
440 */
boolean_t
vnode_pager_uncache(vp)
	register struct vnode *vp;
{
	register vm_object_t object;
	boolean_t uncached, locked;
	vm_pager_t pager;

	/*
	 * Not a mapped vnode
	 */
	object = (vm_object_t) vp->v_vmdata;
	if( object == NULL)
		return(TRUE);
	pager = object->pager;
	if (pager == NULL)
		return (TRUE);

	/*
	 * Unlock the vnode if it is currently locked. We do this since
	 * uncaching the object may result in its destruction which may
	 * initiate paging activity which may necessitate locking the vnode.
	 */
	locked = VOP_ISLOCKED(vp);
	if (locked)
		VOP_UNLOCK(vp);

	/*
	 * Must use vm_object_lookup() as it actually removes the object from
	 * the cache list.
	 */
	object = vm_object_lookup(pager);
	if (object) {
		/* only truly uncached if nobody else still references it */
		uncached = (object->ref_count <= 1);
		pager_cache(object, FALSE);
	} else
		uncached = TRUE;
	/* restore the lock state we found the vnode in */
	if (locked)
		VOP_LOCK(vp);
	return (uncached);
}
482
483
/*
 * Release a page that was allocated for a read that will not happen:
 * wake anyone sleeping on it, then free it.
 */
void
vnode_pager_freepage(m)
	vm_page_t m;
{
	PAGE_WAKEUP(m);
	vm_page_free(m);
}
491
/*
 * calculate the linear (byte) disk address of specified virtual
 * file address; returns -1 when the block is unmapped (a hole) or
 * VOP_BMAP() fails.
 */
vm_offset_t
vnode_pager_addr(vp, address)
	struct vnode *vp;
	vm_offset_t address;
{
	/* NOTE(review): int truncates byte addresses on very large files */
	int     rtaddress;
	int     bsize;
	vm_offset_t block;
	struct vnode *rtvp;
	int     err;
	int     vblock, voffset;

	/* split the file address into logical block + offset within it */
	bsize = vp->v_mount->mnt_stat.f_iosize;
	vblock = address / bsize;
	voffset = address % bsize;

	err = VOP_BMAP(vp, vblock, &rtvp, &block, 0);

	if (err)
		rtaddress = -1;
	else
		/* device block number scaled to bytes, plus intra-block offset */
		rtaddress = block * DEV_BSIZE + voffset;

	return rtaddress;
}
521
522/*
523 * interrupt routine for I/O completion
524 */
525void
526vnode_pager_iodone(bp)
527	struct buf *bp;
528{
529	bp->b_flags |= B_DONE;
530	wakeup((caddr_t) bp);
531	if( bp->b_flags & B_ASYNC) {
532		vm_offset_t paddr;
533		vm_page_t m;
534		vm_object_t obj = 0;
535		int i;
536		int npages;
537
538		paddr = (vm_offset_t) bp->b_data;
539		if( bp->b_bufsize != bp->b_bcount)
540			bzero( bp->b_data + bp->b_bcount,
541				bp->b_bufsize - bp->b_bcount);
542
543		npages = (bp->b_bufsize + PAGE_SIZE - 1) / PAGE_SIZE;
544/*
545		printf("bcount: %d, bufsize: %d, npages: %d\n",
546			bp->b_bcount, bp->b_bufsize, npages);
547*/
548		for( i = 0; i < npages; i++) {
549			m = PHYS_TO_VM_PAGE(pmap_kextract(paddr + i * PAGE_SIZE));
550			obj = m->object;
551			if( m) {
552				m->flags |= PG_CLEAN;
553				m->flags &= ~(PG_LAUNDRY|PG_FAKE);
554				PAGE_WAKEUP(m);
555			} else {
556				panic("vnode_pager_iodone: page is gone!!!");
557			}
558		}
559		pmap_qremove( paddr, npages);
560		if( obj) {
561			--obj->paging_in_progress;
562			if( obj->paging_in_progress == 0)
563				wakeup((caddr_t) obj);
564		} else {
565			panic("vnode_pager_iodone: object is gone???");
566		}
567		HOLDRELE(bp->b_vp);
568		relpbuf(bp);
569	}
570}
571
572/*
573 * small block file system vnode pager input
574 */
575int
576vnode_pager_input_smlfs(vnp, m)
577	vn_pager_t vnp;
578	vm_page_t m;
579{
580	int     i;
581	int     s;
582	vm_offset_t paging_offset;
583	struct vnode *dp, *vp;
584	struct buf *bp;
585	vm_offset_t foff;
586	vm_offset_t kva;
587	int     fileaddr;
588	int     block;
589	vm_offset_t bsize;
590	int     error = 0;
591
592	paging_offset = m->object->paging_offset;
593	vp = vnp->vnp_vp;
594	bsize = vp->v_mount->mnt_stat.f_iosize;
595	foff = m->offset + paging_offset;
596
597	VOP_BMAP(vp, foff, &dp, 0, 0);
598
599	kva = vm_pager_map_page(m);
600
601	for (i = 0; i < PAGE_SIZE / bsize; i++) {
602
603		/*
604		 * calculate logical block and offset
605		 */
606		block = foff / bsize + i;
607		s = splbio();
608		while ((bp = incore(vp, block)) != 0) {
609			int     amount;
610
611			/*
612			 * wait until the buffer is avail or gone
613			 */
614			if (bp->b_flags & B_BUSY) {
615				bp->b_flags |= B_WANTED;
616				tsleep((caddr_t) bp, PVM, "vnwblk", 0);
617				continue;
618			}
619			amount = bsize;
620			if ((foff + bsize) > vnp->vnp_size)
621				amount = vnp->vnp_size - foff;
622
623			/*
624			 * make sure that this page is in the buffer
625			 */
626			if ((amount > 0) && amount <= bp->b_bcount) {
627				bp->b_flags |= B_BUSY;
628				splx(s);
629
630				/*
631				 * copy the data from the buffer
632				 */
633				bcopy(bp->b_un.b_addr, (caddr_t) kva + i * bsize, amount);
634				if (amount < bsize) {
635					bzero((caddr_t) kva + amount, bsize - amount);
636				}
637				bp->b_flags &= ~B_BUSY;
638				wakeup((caddr_t) bp);
639				goto nextblock;
640			}
641			break;
642		}
643		splx(s);
644		fileaddr = vnode_pager_addr(vp, foff + i * bsize);
645		if (fileaddr != -1) {
646			bp = getpbuf();
647			VHOLD(vp);
648
649			/* build a minimal buffer header */
650			bp->b_flags = B_BUSY | B_READ | B_CALL;
651			bp->b_iodone = vnode_pager_iodone;
652			bp->b_proc = curproc;
653			bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred;
654			if (bp->b_rcred != NOCRED)
655				crhold(bp->b_rcred);
656			if (bp->b_wcred != NOCRED)
657				crhold(bp->b_wcred);
658			bp->b_un.b_addr = (caddr_t) kva + i * bsize;
659			bp->b_blkno = fileaddr / DEV_BSIZE;
660			bgetvp(dp, bp);
661			bp->b_bcount = bsize;
662			bp->b_bufsize = bsize;
663
664			/* do the input */
665			VOP_STRATEGY(bp);
666
667			/* we definitely need to be at splbio here */
668
669			s = splbio();
670			while ((bp->b_flags & B_DONE) == 0) {
671				tsleep((caddr_t) bp, PVM, "vnsrd", 0);
672			}
673			splx(s);
674			if ((bp->b_flags & B_ERROR) != 0)
675				error = EIO;
676
677			/*
678			 * free the buffer header back to the swap buffer pool
679			 */
680			relpbuf(bp);
681			HOLDRELE(vp);
682			if (error)
683				break;
684		} else {
685			bzero((caddr_t) kva + i * bsize, bsize);
686		}
687nextblock:
688	}
689	vm_pager_unmap_page(kva);
690	if (error) {
691		return VM_PAGER_ERROR;
692	}
693	pmap_clear_modify(VM_PAGE_TO_PHYS(m));
694	m->flags |= PG_CLEAN;
695	m->flags &= ~PG_LAUNDRY;
696	return VM_PAGER_OK;
697
698}
699
700
/*
 * old style vnode pager input routine: read one page through VOP_READ()
 * into a temporary kernel mapping of the page.  (The comment here used
 * to say "output"; this is the read fallback used when a filesystem has
 * no usable bmap.)
 */
int
vnode_pager_input_old(vnp, m)
	vn_pager_t vnp;
	vm_page_t m;
{
	struct uio auio;
	struct iovec aiov;
	int     error;
	int     size;
	vm_offset_t foff;
	vm_offset_t kva;

	error = 0;
	foff = m->offset + m->object->paging_offset;

	/*
	 * Return failure if beyond current EOF
	 */
	if (foff >= vnp->vnp_size) {
		return VM_PAGER_BAD;
	} else {
		/* clip the read to the end of the file */
		size = PAGE_SIZE;
		if (foff + size > vnp->vnp_size)
			size = vnp->vnp_size - foff;
/*
 * Allocate a kernel virtual address and initialize so that
 * we can use VOP_READ/WRITE routines.
 */
		kva = vm_pager_map_page(m);
		aiov.iov_base = (caddr_t) kva;
		aiov.iov_len = size;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = foff;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_READ;
		auio.uio_resid = size;
		auio.uio_procp = (struct proc *) 0;

		/* XXX uses the current process credentials (see file TODO) */
		error = VOP_READ(vnp->vnp_vp, &auio, 0, curproc->p_ucred);
		if (!error) {
			register int count = size - auio.uio_resid;

			if (count == 0)
				error = EINVAL;
			else if (count != PAGE_SIZE)
				/* zero-fill the part of the page past EOF */
				bzero((caddr_t) kva + count, PAGE_SIZE - count);
		}
		vm_pager_unmap_page(kva);
	}
	pmap_clear_modify(VM_PAGE_TO_PHYS(m));
	m->flags |= PG_CLEAN;
	m->flags &= ~PG_LAUNDRY;
	return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}
759
760/*
761 * generic vnode pager input routine
762 */
763int
764vnode_pager_input(vnp, m, count, reqpage)
765	register vn_pager_t vnp;
766	vm_page_t *m;
767	int     count, reqpage;
768{
769	int     i;
770	vm_offset_t kva, foff;
771	int     size, sizea;
772	vm_object_t object;
773	vm_offset_t paging_offset;
774	struct vnode *dp, *vp;
775	int     bsize;
776
777	int     first, last;
778	int     reqaddr, firstaddr;
779	int     block, offset;
780
781	struct buf *bp, *bpa;
782	int	counta;
783	int     s;
784	int     failflag;
785
786	int     error = 0;
787
788	object = m[reqpage]->object;	/* all vm_page_t items are in same
789					 * object */
790	paging_offset = object->paging_offset;
791
792	vp = vnp->vnp_vp;
793	bsize = vp->v_mount->mnt_stat.f_iosize;
794
795	/* get the UNDERLYING device for the file with VOP_BMAP() */
796
797	/*
798	 * originally, we did not check for an error return value -- assuming
799	 * an fs always has a bmap entry point -- that assumption is wrong!!!
800	 */
801	foff = m[reqpage]->offset + paging_offset;
802
803	/*
804	 * if we can't bmap, use old VOP code
805	 */
806	if (VOP_BMAP(vp, foff, &dp, 0, 0)) {
807		for (i = 0; i < count; i++) {
808			if (i != reqpage) {
809				vnode_pager_freepage(m[i]);
810			}
811		}
812		cnt.v_vnodein++;
813		cnt.v_vnodepgsin++;
814		return vnode_pager_input_old(vnp, m[reqpage]);
815
816		/*
817		 * if the blocksize is smaller than a page size, then use
818		 * special small filesystem code.  NFS sometimes has a small
819		 * blocksize, but it can handle large reads itself.
820		 */
821	} else if ((PAGE_SIZE / bsize) > 1 &&
822		   (vp->v_mount->mnt_stat.f_type != MOUNT_NFS)) {
823
824		for (i = 0; i < count; i++) {
825			if (i != reqpage) {
826				vnode_pager_freepage(m[i]);
827			}
828		}
829		cnt.v_vnodein++;
830		cnt.v_vnodepgsin++;
831		return vnode_pager_input_smlfs(vnp, m[reqpage]);
832	}
833/*
834 * here on direct device I/O
835 */
836
837
838#ifdef NOTYET
839	if( (vp->v_flag & VVMIO) == 0) {
840#endif
841	/*
842	 * This pathetic hack gets data from the buffer cache, if it's there.
843	 * I believe that this is not really necessary, and the ends can be
844	 * gotten by defaulting to the normal vfs read behavior, but this
845	 * might be more efficient, because the will NOT invoke read-aheads
846	 * and one of the purposes of this code is to bypass the buffer cache
847	 * and keep from flushing it by reading in a program.
848	 */
849
850		/*
851		 * calculate logical block and offset
852		 */
853		block = foff / bsize;
854		offset = foff % bsize;
855		s = splbio();
856
857		/*
858		 * if we have a buffer in core, then try to use it
859		 */
860		while ((bp = incore(vp, block)) != 0) {
861			int     amount;
862
863			/*
864			 * wait until the buffer is avail or gone
865			 */
866			if (bp->b_flags & B_BUSY) {
867				bp->b_flags |= B_WANTED;
868				tsleep((caddr_t) bp, PVM, "vnwblk", 0);
869				continue;
870			}
871			amount = PAGE_SIZE;
872			if ((foff + amount) > vnp->vnp_size)
873				amount = vnp->vnp_size - foff;
874
875			/*
876			 * make sure that this page is in the buffer
877			 */
878			if ((amount > 0) && (offset + amount) <= bp->b_bcount) {
879				bp->b_flags |= B_BUSY;
880				splx(s);
881				kva = kmem_alloc_wait( pager_map, PAGE_SIZE);
882
883				/*
884				 * map the requested page
885				 */
886				pmap_qenter(kva, &m[reqpage], 1);
887
888				/*
889				 * copy the data from the buffer
890				 */
891				bcopy(bp->b_un.b_addr + offset, (caddr_t) kva, amount);
892				if (amount < PAGE_SIZE) {
893					bzero((caddr_t) kva + amount, PAGE_SIZE - amount);
894				}
895
896				/*
897				 * unmap the page and free the kva
898				 */
899				pmap_qremove( kva, 1);
900				kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
901
902				/*
903				 * release the buffer back to the block subsystem
904				 */
905				bp->b_flags &= ~B_BUSY;
906				wakeup((caddr_t) bp);
907
908				/*
909				 * we did not have to do any work to get the requested
910				 * page, the read behind/ahead does not justify a read
911				 */
912				for (i = 0; i < count; i++) {
913					if (i != reqpage) {
914						vnode_pager_freepage(m[i]);
915					}
916				}
917				count = 1;
918				reqpage = 0;
919				m[0] = m[reqpage];
920
921				/*
922				 * sorry for the goto
923				 */
924				goto finishup;
925			}
926
927			/*
928			 * buffer is nowhere to be found, read from the disk
929			 */
930			break;
931		}
932		splx(s);
933#ifdef NOTYET
934	}
935#endif
936
937	reqaddr = vnode_pager_addr(vp, foff);
938	s = splbio();
939
940	/*
941	 * Make sure that our I/O request is contiguous. Scan backward and
942	 * stop for the first discontiguous entry or stop for a page being in
943	 * buffer cache.
944	 */
945	failflag = 0;
946	first = reqpage;
947	for (i = reqpage - 1; i >= 0; --i) {
948		if (failflag ||
949#ifdef NOTYET
950		    ((vp->v_flag & VVMIO) == 0 && incore(vp, (foff + (i - reqpage) * PAGE_SIZE) / bsize)) ||
951#else
952		    (incore(vp, (foff + (i - reqpage) * PAGE_SIZE) / bsize)) ||
953#endif
954		    (vnode_pager_addr(vp, m[i]->offset + paging_offset))
955		    != reqaddr + (i - reqpage) * PAGE_SIZE) {
956			vnode_pager_freepage(m[i]);
957			failflag = 1;
958		} else {
959			first = i;
960		}
961	}
962
963	/*
964	 * Scan forward and stop for the first non-contiguous entry or stop
965	 * for a page being in buffer cache.
966	 */
967	failflag = 0;
968	last = reqpage + 1;
969	for (i = reqpage + 1; i < count; i++) {
970		if (failflag ||
971#ifdef NOTYET
972		    ((vp->v_flag & VVMIO) == 0 && incore(vp, (foff + (i - reqpage) * PAGE_SIZE) / bsize)) ||
973#else
974		    (incore(vp, (foff + (i - reqpage) * PAGE_SIZE) / bsize)) ||
975#endif
976		    (vnode_pager_addr(vp, m[i]->offset + paging_offset))
977		    != reqaddr + (i - reqpage) * PAGE_SIZE) {
978			vnode_pager_freepage(m[i]);
979			failflag = 1;
980		} else {
981			last = i + 1;
982		}
983	}
984	splx(s);
985
986	/*
987	 * the first and last page have been calculated now, move input pages
988	 * to be zero based...
989	 */
990	count = last;
991	if (first != 0) {
992		for (i = first; i < count; i++) {
993			m[i - first] = m[i];
994		}
995		count -= first;
996		reqpage -= first;
997	}
998
999	/*
1000	 * calculate the file virtual address for the transfer
1001	 */
1002	foff = m[0]->offset + paging_offset;
1003
1004	/*
1005	 * and get the disk physical address (in bytes)
1006	 */
1007	firstaddr = vnode_pager_addr(vp, foff);
1008
1009	/*
1010	 * calculate the size of the transfer
1011	 */
1012	size = count * PAGE_SIZE;
1013	if ((foff + size) > vnp->vnp_size)
1014		size = vnp->vnp_size - foff;
1015
1016	/*
1017	 * round up physical size for real devices
1018	 */
1019	if (dp->v_type == VBLK || dp->v_type == VCHR)
1020		size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
1021
1022	counta = 0;
1023	if( count*PAGE_SIZE > bsize)
1024		counta = (count - reqpage) - 1;
1025	bpa = 0;
1026	sizea = 0;
1027	if( counta) {
1028		bpa = getpbuf();
1029		count -= counta;
1030		sizea = size - count*PAGE_SIZE;
1031		size = count * PAGE_SIZE;
1032	}
1033
1034	bp = getpbuf();
1035	kva = (vm_offset_t)bp->b_data;
1036
1037	/*
1038	 * and map the pages to be read into the kva
1039	 */
1040	pmap_qenter(kva, m, count);
1041	VHOLD(vp);
1042
1043	/* build a minimal buffer header */
1044	bp->b_flags = B_BUSY | B_READ | B_CALL;
1045	bp->b_iodone = vnode_pager_iodone;
1046	/* B_PHYS is not set, but it is nice to fill this in */
1047	bp->b_proc = curproc;
1048	bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred;
1049	if (bp->b_rcred != NOCRED)
1050		crhold(bp->b_rcred);
1051	if (bp->b_wcred != NOCRED)
1052		crhold(bp->b_wcred);
1053	bp->b_blkno = firstaddr / DEV_BSIZE;
1054	bgetvp(dp, bp);
1055	bp->b_bcount = size;
1056	bp->b_bufsize = size;
1057
1058	cnt.v_vnodein++;
1059	cnt.v_vnodepgsin += count;
1060
1061	/* do the input */
1062	VOP_STRATEGY(bp);
1063
1064	if( counta) {
1065		for(i=0;i<counta;i++) {
1066			vm_page_deactivate(m[count+i]);
1067		}
1068		pmap_qenter((vm_offset_t)bpa->b_data, &m[count], counta);
1069		++m[count]->object->paging_in_progress;
1070		VHOLD(vp);
1071		bpa->b_flags = B_BUSY | B_READ | B_CALL | B_ASYNC;
1072		bpa->b_iodone = vnode_pager_iodone;
1073		/* B_PHYS is not set, but it is nice to fill this in */
1074		bpa->b_proc = curproc;
1075		bpa->b_rcred = bpa->b_wcred = bpa->b_proc->p_ucred;
1076		if (bpa->b_rcred != NOCRED)
1077			crhold(bpa->b_rcred);
1078		if (bpa->b_wcred != NOCRED)
1079			crhold(bpa->b_wcred);
1080		bpa->b_blkno = (firstaddr + count * PAGE_SIZE) / DEV_BSIZE;
1081		bgetvp(dp, bpa);
1082		bpa->b_bcount = sizea;
1083		bpa->b_bufsize = counta*PAGE_SIZE;
1084
1085		cnt.v_vnodepgsin += counta;
1086		VOP_STRATEGY(bpa);
1087	}
1088
1089	s = splbio();
1090	/* we definitely need to be at splbio here */
1091
1092	while ((bp->b_flags & B_DONE) == 0) {
1093		tsleep((caddr_t) bp, PVM, "vnread", 0);
1094	}
1095	splx(s);
1096	if ((bp->b_flags & B_ERROR) != 0)
1097		error = EIO;
1098
1099	if (!error) {
1100		if (size != count * PAGE_SIZE)
1101			bzero((caddr_t) kva + size, PAGE_SIZE * count - size);
1102	}
1103	pmap_qremove( kva, count);
1104
1105	/*
1106	 * free the buffer header back to the swap buffer pool
1107	 */
1108	relpbuf(bp);
1109	HOLDRELE(vp);
1110
1111finishup:
1112	for (i = 0; i < count; i++) {
1113		pmap_clear_modify(VM_PAGE_TO_PHYS(m[i]));
1114		m[i]->flags |= PG_CLEAN;
1115		m[i]->flags &= ~PG_LAUNDRY;
1116		if (i != reqpage) {
1117
1118			/*
1119			 * whether or not to leave the page activated is up in
1120			 * the air, but we should put the page on a page queue
1121			 * somewhere. (it already is in the object). Result:
1122			 * It appears that emperical results show that
1123			 * deactivating pages is best.
1124			 */
1125
1126			/*
1127			 * just in case someone was asking for this page we
1128			 * now tell them that it is ok to use
1129			 */
1130			if (!error) {
1131				vm_page_deactivate(m[i]);
1132				PAGE_WAKEUP(m[i]);
1133				m[i]->flags &= ~PG_FAKE;
1134			} else {
1135				vnode_pager_freepage(m[i]);
1136			}
1137		}
1138	}
1139	if (error) {
1140		printf("vnode_pager_input: I/O read error\n");
1141	}
1142	return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
1143}
1144
/*
 * old-style vnode pager output routine: write one page back through
 * VOP_WRITE() using a temporary kernel mapping of the page.
 */
int
vnode_pager_output_old(vnp, m)
	register vn_pager_t vnp;
	vm_page_t m;
{
	vm_offset_t foff;
	vm_offset_t kva;
	vm_offset_t size;
	struct iovec aiov;
	struct uio auio;
	struct vnode *vp;
	int     error;

	vp = vnp->vnp_vp;
	foff = m->offset + m->object->paging_offset;

	/*
	 * Return failure if beyond current EOF
	 */
	if (foff >= vnp->vnp_size) {
		return VM_PAGER_BAD;
	} else {
		/* clip the write to the end of the file */
		size = PAGE_SIZE;
		if (foff + size > vnp->vnp_size)
			size = vnp->vnp_size - foff;
/*
 * Allocate a kernel virtual address and initialize so that
 * we can use VOP_WRITE routines.
 */
		kva = vm_pager_map_page(m);
		aiov.iov_base = (caddr_t) kva;
		aiov.iov_len = size;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = foff;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_resid = size;
		auio.uio_procp = (struct proc *) 0;

		/* XXX uses the current process credentials (see file TODO) */
		error = VOP_WRITE(vp, &auio, 0, curproc->p_ucred);

		if (!error) {
			/* a write that moved zero bytes is treated as failure */
			if ((size - auio.uio_resid) == 0) {
				error = EINVAL;
			}
		}
		vm_pager_unmap_page(kva);
		return error ? VM_PAGER_ERROR: VM_PAGER_OK;
	}
}
1199
/*
 * vnode pager output on a small-block file system: write the mapped
 * portions of one page, a filesystem block at a time, directly to the
 * underlying device.  Unmapped blocks (holes) are skipped.
 */
int
vnode_pager_output_smlfs(vnp, m)
	vn_pager_t vnp;
	vm_page_t m;
{
	int     i;
	int     s;
	vm_offset_t paging_offset;
	struct vnode *dp, *vp;
	struct buf *bp;
	vm_offset_t foff;
	vm_offset_t kva;
	int     fileaddr;
	vm_offset_t bsize;
	int     error = 0;

	paging_offset = m->object->paging_offset;
	vp = vnp->vnp_vp;
	bsize = vp->v_mount->mnt_stat.f_iosize;
	foff = m->offset + paging_offset;

	/* find the underlying device vnode */
	VOP_BMAP(vp, foff, &dp, 0, 0);
	kva = vm_pager_map_page(m);
	for (i = 0; !error && i < (PAGE_SIZE / bsize); i++) {

		/*
		 * calculate logical block and offset
		 */
		fileaddr = vnode_pager_addr(vp, foff + i * bsize);
		if (fileaddr != -1) {
			/*
			 * invalidate any stale copy of this block in the
			 * buffer cache, since we are writing around it
			 */
			s = splbio();
			bp = incore(vp, (foff / bsize) + i);
			if (bp) {
				bp = getblk(vp, (foff / bsize) + i, bp->b_bufsize, 0, 0);
				bp->b_flags |= B_INVAL;
				brelse(bp);
			}
			splx(s);

			bp = getpbuf();
			VHOLD(vp);

			/* build a minimal buffer header */
			bp->b_flags = B_BUSY | B_CALL | B_WRITE;
			bp->b_iodone = vnode_pager_iodone;
			bp->b_proc = curproc;
			bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred;
			if (bp->b_rcred != NOCRED)
				crhold(bp->b_rcred);
			if (bp->b_wcred != NOCRED)
				crhold(bp->b_wcred);
			bp->b_un.b_addr = (caddr_t) kva + i * bsize;
			bp->b_blkno = fileaddr / DEV_BSIZE;
			bgetvp(dp, bp);
			++dp->v_numoutput;
			/* for NFS */
			bp->b_dirtyoff = 0;
			bp->b_dirtyend = bsize;
			bp->b_bcount = bsize;
			bp->b_bufsize = bsize;

			/* do the output (the old comment said "input") */
			VOP_STRATEGY(bp);

			/* we definitely need to be at splbio here */

			s = splbio();
			while ((bp->b_flags & B_DONE) == 0) {
				tsleep((caddr_t) bp, PVM, "vnswrt", 0);
			}
			splx(s);
			if ((bp->b_flags & B_ERROR) != 0)
				error = EIO;

			/*
			 * free the buffer header back to the swap buffer pool
			 */
			relpbuf(bp);
			HOLDRELE(vp);
		}
	}
	vm_pager_unmap_page(kva);
	if (error)
		return VM_PAGER_ERROR;
	else
		return VM_PAGER_OK;
}
1290
1291/*
1292 * generic vnode pager output routine
1293 */
1294int
1295vnode_pager_output(vnp, m, count, rtvals)
1296	vn_pager_t vnp;
1297	vm_page_t *m;
1298	int     count;
1299	int    *rtvals;
1300{
1301	int     i, j;
1302	vm_offset_t kva, foff;
1303	int     size;
1304	vm_object_t object;
1305	vm_offset_t paging_offset;
1306	struct vnode *dp, *vp;
1307	struct buf *bp;
1308	vm_offset_t reqaddr;
1309	int     bsize;
1310	int     s;
1311
1312	int     error = 0;
1313
1314retryoutput:
1315	object = m[0]->object;	/* all vm_page_t items are in same object */
1316	paging_offset = object->paging_offset;
1317
1318	vp = vnp->vnp_vp;
1319	bsize = vp->v_mount->mnt_stat.f_iosize;
1320
1321	for (i = 0; i < count; i++)
1322		rtvals[i] = VM_PAGER_AGAIN;
1323
1324	/*
1325	 * if the filesystem does not have a bmap, then use the old code
1326	 */
1327	if (VOP_BMAP(vp, m[0]->offset + paging_offset, &dp, 0, 0)) {
1328
1329		rtvals[0] = vnode_pager_output_old(vnp, m[0]);
1330
1331		pmap_clear_modify(VM_PAGE_TO_PHYS(m[0]));
1332		m[0]->flags |= PG_CLEAN;
1333		m[0]->flags &= ~PG_LAUNDRY;
1334		cnt.v_vnodeout++;
1335		cnt.v_vnodepgsout++;
1336		return rtvals[0];
1337	}
1338
1339	/*
1340	 * if the filesystem has a small blocksize, then use the small block
1341	 * filesystem output code
1342	 */
1343	if ((bsize < PAGE_SIZE) &&
1344	    (vp->v_mount->mnt_stat.f_type != MOUNT_NFS)) {
1345
1346		for (i = 0; i < count; i++) {
1347			rtvals[i] = vnode_pager_output_smlfs(vnp, m[i]);
1348			if (rtvals[i] == VM_PAGER_OK) {
1349				pmap_clear_modify(VM_PAGE_TO_PHYS(m[i]));
1350				m[i]->flags |= PG_CLEAN;
1351				m[i]->flags &= ~PG_LAUNDRY;
1352			}
1353		}
1354		cnt.v_vnodeout++;
1355		cnt.v_vnodepgsout += count;
1356		return rtvals[0];
1357	}
1358
1359	for (i = 0; i < count; i++) {
1360		foff = m[i]->offset + paging_offset;
1361		if (foff >= vnp->vnp_size) {
1362			for (j = i; j < count; j++)
1363				rtvals[j] = VM_PAGER_BAD;
1364			count = i;
1365			break;
1366		}
1367	}
1368	if (count == 0) {
1369		return rtvals[0];
1370	}
1371	foff = m[0]->offset + paging_offset;
1372	reqaddr = vnode_pager_addr(vp, foff);
1373
1374	/*
1375	 * Scan forward and stop for the first non-contiguous entry or stop
1376	 * for a page being in buffer cache.
1377	 */
1378	for (i = 1; i < count; i++) {
1379		if (vnode_pager_addr(vp, m[i]->offset + paging_offset)
1380		    != reqaddr + i * PAGE_SIZE) {
1381			count = i;
1382			break;
1383		}
1384	}
1385
1386	/*
1387	 * calculate the size of the transfer
1388	 */
1389	size = count * PAGE_SIZE;
1390	if ((foff + size) > vnp->vnp_size)
1391		size = vnp->vnp_size - foff;
1392
1393	/*
1394	 * round up physical size for real devices
1395	 */
1396	if (dp->v_type == VBLK || dp->v_type == VCHR)
1397		size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
1398
1399	bp = getpbuf();
1400	kva = (vm_offset_t)bp->b_data;
1401	/*
1402	 * and map the pages to be read into the kva
1403	 */
1404	pmap_qenter(kva, m, count);
1405#if 0
1406	printf("vnode: writing foff: %d, devoff: %d, size: %d\n",
1407		foff, reqaddr, size);
1408#endif
1409
1410	/*
1411	 * next invalidate the incore vfs_bio data
1412	 */
1413#ifdef NOTYET
1414	if( (vp->v_flag & VVMIO) == 0) {
1415#endif
1416		for (i = 0; i < count; i++) {
1417			int     filblock = (foff + i * PAGE_SIZE) / bsize;
1418			struct buf *fbp;
1419
1420			s = splbio();
1421			fbp = incore(vp, filblock);
1422			if (fbp) {
1423				fbp = getblk(vp, filblock, fbp->b_bufsize, 0, 0);
1424				if (fbp->b_flags & B_DELWRI) {
1425					if (fbp->b_bufsize <= PAGE_SIZE)
1426						fbp->b_flags &= ~B_DELWRI;
1427					else {
1428						bwrite(fbp);
1429						fbp = getblk(vp, filblock,
1430							     fbp->b_bufsize, 0, 0);
1431					}
1432				}
1433				fbp->b_flags |= B_INVAL;
1434				brelse(fbp);
1435			}
1436			splx(s);
1437		}
1438#ifdef NOTYET
1439	}
1440#endif
1441
1442
1443	VHOLD(vp);
1444	/* build a minimal buffer header */
1445	bp->b_flags = B_BUSY | B_WRITE | B_CALL;
1446	bp->b_iodone = vnode_pager_iodone;
1447	/* B_PHYS is not set, but it is nice to fill this in */
1448	bp->b_proc = curproc;
1449	bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred;
1450
1451	if (bp->b_rcred != NOCRED)
1452		crhold(bp->b_rcred);
1453	if (bp->b_wcred != NOCRED)
1454		crhold(bp->b_wcred);
1455	bp->b_blkno = reqaddr / DEV_BSIZE;
1456	bgetvp(dp, bp);
1457	++dp->v_numoutput;
1458
1459	/* for NFS */
1460	bp->b_dirtyoff = 0;
1461	bp->b_dirtyend = size;
1462
1463	bp->b_bcount = size;
1464	bp->b_bufsize = size;
1465
1466	cnt.v_vnodeout++;
1467	cnt.v_vnodepgsout += count;
1468
1469	/* do the output */
1470	VOP_STRATEGY(bp);
1471
1472	s = splbio();
1473
1474	/* we definitely need to be at splbio here */
1475
1476	while ((bp->b_flags & B_DONE) == 0) {
1477		tsleep((caddr_t) bp, PVM, "vnwrite", 0);
1478	}
1479	splx(s);
1480
1481	if ((bp->b_flags & B_ERROR) != 0)
1482		error = EIO;
1483
1484	pmap_qremove( kva, count);
1485
1486	/*
1487	 * free the buffer header back to the swap buffer pool
1488	 */
1489	relpbuf(bp);
1490	HOLDRELE(vp);
1491
1492	if (!error) {
1493		for (i = 0; i < count; i++) {
1494			pmap_clear_modify(VM_PAGE_TO_PHYS(m[i]));
1495			m[i]->flags |= PG_CLEAN;
1496			m[i]->flags &= ~PG_LAUNDRY;
1497			rtvals[i] = VM_PAGER_OK;
1498		}
1499	} else if (count != 1) {
1500		error = 0;
1501		count = 1;
1502		goto retryoutput;
1503	}
1504	if (error) {
1505		printf("vnode_pager_output: I/O write error\n");
1506	}
1507	return (error ? VM_PAGER_ERROR: VM_PAGER_OK);
1508}
1509