vnode_pager.c revision 143505
/*-
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1993, 1994 John S. Dyson
 * Copyright (c) 1995, David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vnode_pager.c	7.5 (Berkeley) 4/20/91
 */

/*
 * Page to/from files (vnodes).
 */

/*
 * TODO:
 *	Implement VOP_GETPAGES/PUTPAGES interface for filesystems. Will
 *	greatly re-simplify the vnode_pager.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vnode_pager.c 143505 2005-03-13 12:05:05Z jeff $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vmmeter.h>
#include <sys/limits.h>
#include <sys/conf.h>
#include <sys/sf_buf.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>

static void vnode_pager_init(void);
static vm_offset_t vnode_pager_addr(struct vnode *vp, vm_ooffset_t address,
					 int *run);
static int vnode_pager_input_smlfs(vm_object_t object, vm_page_t m);
static int vnode_pager_input_old(vm_object_t object, vm_page_t m);
static void vnode_pager_dealloc(vm_object_t);
static int vnode_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void vnode_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t, int *);
static boolean_t vnode_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static vm_object_t vnode_pager_alloc(void *, vm_ooffset_t, vm_prot_t, vm_ooffset_t);

struct pagerops vnodepagerops = {
	.pgo_init =	vnode_pager_init,
	.pgo_alloc =	vnode_pager_alloc,
	.pgo_dealloc =	vnode_pager_dealloc,
	.pgo_getpages =	vnode_pager_getpages,
	.pgo_putpages =	vnode_pager_putpages,
	.pgo_haspage =	vnode_pager_haspage,
};

int vnode_pbuf_freecnt;

static void
vnode_pager_init(void)
{

	vnode_pbuf_freecnt = nswbuf / 2 + 1;
}

/* Create the VM system backing object for this vnode */
int
vnode_create_vobject(struct vnode *vp, size_t isize, struct thread *td)
{
	vm_object_t object;
	vm_ooffset_t size = isize;
	struct vattr va;

	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

	while ((object = vp->v_object) != NULL) {
		VM_OBJECT_LOCK(object);
		if (!(object->flags & OBJ_DEAD)) {
			VM_OBJECT_UNLOCK(object);
			return (0);
		}
		VOP_UNLOCK(vp, 0, td);
		vm_object_set_flag(object, OBJ_DISCONNECTWNT);
		msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "vodead", 0);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}

	if (size == 0) {
		if (vn_isdisk(vp, NULL)) {
			size = IDX_TO_OFF(INT_MAX);
		} else {
			if (VOP_GETATTR(vp, &va, td->td_ucred, td) != 0)
				return (0);
			size = va.va_size;
		}
	}

	object = vnode_pager_alloc(vp, size, 0, 0);
	/*
	 * Dereference the reference we just created.  This assumes
	 * that the object is associated with the vp.
	 */
	VM_OBJECT_LOCK(object);
	object->ref_count--;
	VM_OBJECT_UNLOCK(object);
	vrele(vp);

	KASSERT(vp->v_object != NULL, ("vnode_create_vobject: NULL object"));

	return (0);
}

void
vnode_destroy_vobject(struct vnode *vp)
{
	struct vm_object *obj;

	obj = vp->v_object;
	if (obj == NULL)
		return;
	ASSERT_VOP_LOCKED(vp, "vnode_destroy_vobject");
	vp->v_object = NULL;
	VM_OBJECT_LOCK(obj);
	if (obj->ref_count == 0) {
		/*
		 * vclean() may be called twice. The first time
		 * removes the primary reference to the object,
		 * the second time goes one further and is a
		 * special-case to terminate the object.
		 *
		 * don't double-terminate the object
		 */
		if ((obj->flags & OBJ_DEAD) == 0)
			vm_object_terminate(obj);
		else
			VM_OBJECT_UNLOCK(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
		VM_OBJECT_UNLOCK(obj);
	}
}


/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 *
 * MPSAFE
 */
vm_object_t
vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
		  vm_ooffset_t offset)
{
	vm_object_t object;
	struct vnode *vp;

	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return (NULL);

	vp = (struct vnode *) handle;

	ASSERT_VOP_LOCKED(vp, "vnode_pager_alloc");

	/*
	 * Prevent race condition when allocating the object. This
	 * can happen with NFS vnodes since the nfsnode isn't locked.
	 */
	VI_LOCK(vp);
	while (vp->v_iflag & VI_OLOCK) {
		vp->v_iflag |= VI_OWANT;
		msleep(vp, VI_MTX(vp), PVM, "vnpobj", 0);
	}
	vp->v_iflag |= VI_OLOCK;
	VI_UNLOCK(vp);

	/*
	 * If the object is being terminated, wait for it to
	 * go away.
	 */
	while ((object = vp->v_object) != NULL) {
		VM_OBJECT_LOCK(object);
		if ((object->flags & OBJ_DEAD) == 0)
			break;
		vm_object_set_flag(object, OBJ_DISCONNECTWNT);
		msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "vadead", 0);
	}

	if (vp->v_usecount == 0)
		panic("vnode_pager_alloc: no vnode reference");

	if (object == NULL) {
		/*
		 * And an object of the appropriate size
		 */
		object = vm_object_allocate(OBJT_VNODE, OFF_TO_IDX(round_page(size)));

		object->un_pager.vnp.vnp_size = size;

		object->handle = handle;
		vp->v_object = object;
	} else {
		object->ref_count++;
		VM_OBJECT_UNLOCK(object);
	}
	VI_LOCK(vp);
	vp->v_usecount++;
	vp->v_iflag &= ~VI_OLOCK;
	if (vp->v_iflag & VI_OWANT) {
		vp->v_iflag &= ~VI_OWANT;
		wakeup(vp);
	}
	VI_UNLOCK(vp);
	return (object);
}

/*
 *	The object must be locked.
 */
static void
vnode_pager_dealloc(object)
	vm_object_t object;
{
	struct vnode *vp = object->handle;

	if (vp == NULL)
		panic("vnode_pager_dealloc: pager already dealloced");

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	vm_object_pip_wait(object, "vnpdea");

	object->handle = NULL;
	object->type = OBJT_DEAD;
	if (object->flags & OBJ_DISCONNECTWNT) {
		vm_object_clear_flag(object, OBJ_DISCONNECTWNT);
		wakeup(object);
	}
	ASSERT_VOP_LOCKED(vp, "vnode_pager_dealloc");
	vp->v_object = NULL;
	vp->v_vflag &= ~VV_TEXT;
}

static boolean_t
vnode_pager_haspage(object, pindex, before, after)
	vm_object_t object;
	vm_pindex_t pindex;
	int *before;
	int *after;
{
	struct vnode *vp = object->handle;
	daddr_t bn;
	int err;
	daddr_t reqblock;
	int poff;
	int bsize;
	int pagesperblock, blocksperpage;
	int vfslocked;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	/*
	 * If no vp or vp is doomed or marked transparent to VM, we do not
	 * have the page.
	 */
	if (vp == NULL)
		return FALSE;

	VI_LOCK(vp);
	if (vp->v_iflag & VI_DOOMED) {
		VI_UNLOCK(vp);
		return FALSE;
	}
	VI_UNLOCK(vp);
	/*
	 * If filesystem no longer mounted or offset beyond end of file we do
	 * not have the page.
	 */
	if ((vp->v_mount == NULL) ||
	    (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size))
		return FALSE;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	pagesperblock = bsize / PAGE_SIZE;
	blocksperpage = 0;
	if (pagesperblock > 0) {
		reqblock = pindex / pagesperblock;
	} else {
		blocksperpage = (PAGE_SIZE / bsize);
		reqblock = pindex * blocksperpage;
	}
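	/*
	 * Illustration (not from the original source): with 4K pages and
	 * an 8K filesystem block, pagesperblock is 2 and page index 5
	 * lives in filesystem block 2; with a 1K block, blocksperpage is
	 * 4 and page index 5 starts at filesystem block 20.
	 */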
	VM_OBJECT_UNLOCK(object);
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	err = VOP_BMAP(vp, reqblock, NULL, &bn, after, before);
	VFS_UNLOCK_GIANT(vfslocked);
	VM_OBJECT_LOCK(object);
	if (err)
		return TRUE;
	if (bn == -1)
		return FALSE;
	if (pagesperblock > 0) {
		poff = pindex - (reqblock * pagesperblock);
		if (before) {
			*before *= pagesperblock;
			*before += poff;
		}
		if (after) {
			int numafter;
			*after *= pagesperblock;
			numafter = pagesperblock - (poff + 1);
			if (IDX_TO_OFF(pindex + numafter) >
			    object->un_pager.vnp.vnp_size) {
				numafter =
				    OFF_TO_IDX(object->un_pager.vnp.vnp_size) -
				    pindex;
			}
			*after += numafter;
		}
	} else {
		if (before) {
			*before /= blocksperpage;
		}

		if (after) {
			*after /= blocksperpage;
		}
	}
	return TRUE;
}

/*
 * Lets the VM system know about a change in size for a file.
 * We adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
void
vnode_pager_setsize(vp, nsize)
	struct vnode *vp;
	vm_ooffset_t nsize;
{
	vm_object_t object;
	vm_page_t m;
	vm_pindex_t nobjsize;

	if ((object = vp->v_object) == NULL)
		return;
	VM_OBJECT_LOCK(object);
	if (nsize == object->un_pager.vnp.vnp_size) {
		/*
		 * Hasn't changed size
		 */
		VM_OBJECT_UNLOCK(object);
		return;
	}
	nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);
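	/*
	 * Worked example (an illustration, not from the original source):
	 * with 4K pages and nsize 10000, nobjsize is 3, so pages at index
	 * 3 and up are tossed below and bytes 1808-4095 of the page at
	 * index 2 are zeroed because the new EOF falls inside it.
	 */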
	if (nsize < object->un_pager.vnp.vnp_size) {
		/*
		 * File has shrunk. Toss any cached pages beyond the new EOF.
		 */
		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    FALSE);
		/*
		 * this gets rid of garbage at the end of a page that is now
		 * only partially backed by the vnode.
		 *
		 * XXX for some reason (I don't know yet), if we take a
		 * completely invalid page and mark it partially valid
		 * it can screw up NFS reads, so we don't allow the case.
		 */
		if ((nsize & PAGE_MASK) &&
		    (m = vm_page_lookup(object, OFF_TO_IDX(nsize))) != NULL &&
		    m->valid != 0) {
			int base = (int)nsize & PAGE_MASK;
			int size = PAGE_SIZE - base;

			/*
			 * Clear out partial-page garbage in case
			 * the page has been mapped.
			 */
			pmap_zero_page_area(m, base, size);

			/*
			 * XXX work around SMP data integrity race
			 * by unmapping the page from user processes.
			 * The garbage we just cleared may be mapped
			 * to a user process running on another cpu
			 * and this code is not running through normal
			 * I/O channels which handle SMP issues for
			 * us, so unmap page to synchronize all cpus.
			 *
			 * XXX should vm_pager_unmap_page() have
			 * dealt with this?
			 */
			vm_page_lock_queues();
			pmap_remove_all(m);

			/*
			 * Clear out partial-page dirty bits.  This
			 * has the side effect of setting the valid
			 * bits, but that is ok.  There are a bunch
			 * of places in the VM system where we expected
			 * m->dirty == VM_PAGE_BITS_ALL.  The file EOF
			 * case is one of them.  If the page is still
			 * partially dirty, make it fully dirty.
			 *
			 * note that we do not clear out the valid
			 * bits.  This would prevent bogus_page
			 * replacement from working properly.
			 */
			vm_page_set_validclean(m, base, size);
			if (m->dirty != 0)
				m->dirty = VM_PAGE_BITS_ALL;
			vm_page_unlock_queues();
		}
	}
	object->un_pager.vnp.vnp_size = nsize;
	object->size = nobjsize;
	VM_OBJECT_UNLOCK(object);
}

/*
 * calculate the linear (byte) disk address of specified virtual
 * file address
 */
static vm_offset_t
vnode_pager_addr(vp, address, run)
	struct vnode *vp;
	vm_ooffset_t address;
	int *run;
{
	int rtaddress;
	int bsize;
	daddr_t block;
	int err;
	daddr_t vblock;
	int voffset;

	if (address < 0)
		return -1;

	if (vp->v_mount == NULL)
		return -1;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	vblock = address / bsize;
	voffset = address % bsize;
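	/*
	 * Illustration (not from the original source): with bsize 8192
	 * and address 20480, vblock is 2 and voffset is 4096, so the
	 * result is the DEV_BSIZE-unit block VOP_BMAP() reports for
	 * logical block 2 plus 4096 / DEV_BSIZE.
	 */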

	err = VOP_BMAP(vp, vblock, NULL, &block, run, NULL);

	if (err || (block == -1))
		rtaddress = -1;
	else {
		rtaddress = block + voffset / DEV_BSIZE;
		if (run) {
			*run += 1;
			*run *= bsize/PAGE_SIZE;
			*run -= voffset/PAGE_SIZE;
		}
	}

	return rtaddress;
}

/*
 * small block filesystem vnode pager input
 */
static int
vnode_pager_input_smlfs(object, m)
	vm_object_t object;
	vm_page_t m;
{
	int i;
	struct vnode *vp;
	struct bufobj *bo;
	struct buf *bp;
	struct sf_buf *sf;
	int fileaddr;
	vm_offset_t bsize;
	int error = 0;

	vp = object->handle;
	if (vp->v_mount == NULL)
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	VOP_BMAP(vp, 0, &bo, 0, NULL, NULL);

	sf = sf_buf_alloc(m, 0);

	for (i = 0; i < PAGE_SIZE / bsize; i++) {
		vm_ooffset_t address;

		if (vm_page_bits(i * bsize, bsize) & m->valid)
			continue;

		address = IDX_TO_OFF(m->pindex) + i * bsize;
		if (address >= object->un_pager.vnp.vnp_size) {
			fileaddr = -1;
		} else {
			fileaddr = vnode_pager_addr(vp, address, NULL);
		}
		if (fileaddr != -1) {
			bp = getpbuf(&vnode_pbuf_freecnt);

			/* build a minimal buffer header */
			bp->b_iocmd = BIO_READ;
			bp->b_iodone = bdone;
			KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
			KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
			bp->b_rcred = crhold(curthread->td_ucred);
			bp->b_wcred = crhold(curthread->td_ucred);
			bp->b_data = (caddr_t)sf_buf_kva(sf) + i * bsize;
			bp->b_blkno = fileaddr;
			pbgetbo(bo, bp);
			bp->b_bcount = bsize;
			bp->b_bufsize = bsize;
			bp->b_runningbufspace = bp->b_bufsize;
			runningbufspace += bp->b_runningbufspace;

			/* do the input */
			bp->b_iooffset = dbtob(bp->b_blkno);
			bstrategy(bp);

			/* we definitely need to be at splvm here */

			bwait(bp, PVM, "vnsrd");

			if ((bp->b_ioflags & BIO_ERROR) != 0)
				error = EIO;

			/*
			 * free the buffer header back to the swap buffer pool
			 */
			pbrelbo(bp);
			relpbuf(bp, &vnode_pbuf_freecnt);
			if (error)
				break;

			VM_OBJECT_LOCK(object);
			vm_page_lock_queues();
			vm_page_set_validclean(m, (i * bsize) & PAGE_MASK, bsize);
			vm_page_unlock_queues();
			VM_OBJECT_UNLOCK(object);
		} else {
			VM_OBJECT_LOCK(object);
			vm_page_lock_queues();
			vm_page_set_validclean(m, (i * bsize) & PAGE_MASK, bsize);
			vm_page_unlock_queues();
			VM_OBJECT_UNLOCK(object);
			bzero((caddr_t)sf_buf_kva(sf) + i * bsize, bsize);
		}
	}
	sf_buf_free(sf);
	vm_page_lock_queues();
	pmap_clear_modify(m);
	vm_page_unlock_queues();
	if (error) {
		return VM_PAGER_ERROR;
	}
	return VM_PAGER_OK;

}


/*
 * old style vnode pager input routine
 */
static int
vnode_pager_input_old(object, m)
	vm_object_t object;
	vm_page_t m;
{
	struct uio auio;
	struct iovec aiov;
	int error;
	int size;
	struct sf_buf *sf;
	struct vnode *vp;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	error = 0;

	/*
	 * Return failure if beyond current EOF
	 */
	if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
		return VM_PAGER_BAD;
	} else {
		size = PAGE_SIZE;
		if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
			size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
		vp = object->handle;
		VM_OBJECT_UNLOCK(object);

		/*
		 * Allocate a kernel virtual address and initialize so that
		 * we can use VOP_READ/WRITE routines.
		 */
		sf = sf_buf_alloc(m, 0);

		aiov.iov_base = (caddr_t)sf_buf_kva(sf);
		aiov.iov_len = size;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = IDX_TO_OFF(m->pindex);
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_READ;
		auio.uio_resid = size;
		auio.uio_td = curthread;

		error = VOP_READ(vp, &auio, 0, curthread->td_ucred);
		if (!error) {
			int count = size - auio.uio_resid;

			if (count == 0)
				error = EINVAL;
			else if (count != PAGE_SIZE)
				bzero((caddr_t)sf_buf_kva(sf) + count,
				    PAGE_SIZE - count);
		}
		sf_buf_free(sf);

		VM_OBJECT_LOCK(object);
	}
	vm_page_lock_queues();
	pmap_clear_modify(m);
	vm_page_undirty(m);
	vm_page_unlock_queues();
	if (!error)
		m->valid = VM_PAGE_BITS_ALL;
	return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}

/*
 * generic vnode pager input routine
 */

/*
 * Local media VFS's that do not implement their own VOP_GETPAGES
 * should have their VOP_GETPAGES call vnode_pager_generic_getpages()
 * to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_GETPAGES.
 */
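/*
 * A hypothetical local media filesystem (the names below are
 * illustrative, not from this file) would therefore wire up its vnode
 * operation as:
 *
 *	static int
 *	myfs_getpages(struct vop_getpages_args *ap)
 *	{
 *
 *		return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
 *		    ap->a_count, ap->a_reqpage));
 *	}
 */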
static int
vnode_pager_getpages(object, m, count, reqpage)
	vm_object_t object;
	vm_page_t *m;
	int count;
	int reqpage;
{
	int rtval;
	struct vnode *vp;
	int bytes = count * PAGE_SIZE;
	int vfslocked;

	vp = object->handle;
	VM_OBJECT_UNLOCK(object);
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	rtval = VOP_GETPAGES(vp, m, bytes, reqpage, 0);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: FS getpages not implemented\n"));
	VFS_UNLOCK_GIANT(vfslocked);
	VM_OBJECT_LOCK(object);
	return rtval;
}

/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_GETPAGES.
 */
int
vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
	struct vnode *vp;
	vm_page_t *m;
	int bytecount;
	int reqpage;
{
	vm_object_t object;
	vm_offset_t kva;
	off_t foff, tfoff, nextoff;
	int i, j, size, bsize, first, firstaddr;
	struct bufobj *bo;
	int runpg;
	int runend;
	struct buf *bp;
	int count;
	int error = 0;

	object = vp->v_object;
	count = bytecount / PAGE_SIZE;

	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
	    ("vnode_pager_generic_getpages does not support devices"));
	if (vp->v_mount == NULL)
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	/* get the UNDERLYING device for the file with VOP_BMAP() */

	/*
	 * originally, we did not check for an error return value -- assuming
	 * an fs always has a bmap entry point -- that assumption is wrong!!!
	 */
	foff = IDX_TO_OFF(m[reqpage]->pindex);

	/*
	 * if we can't bmap, use old VOP code
	 */
	if (VOP_BMAP(vp, 0, &bo, 0, NULL, NULL)) {
		VM_OBJECT_LOCK(object);
		vm_page_lock_queues();
		for (i = 0; i < count; i++)
			if (i != reqpage)
				vm_page_free(m[i]);
		vm_page_unlock_queues();
		cnt.v_vnodein++;
		cnt.v_vnodepgsin++;
		error = vnode_pager_input_old(object, m[reqpage]);
		VM_OBJECT_UNLOCK(object);
		return (error);

		/*
		 * if the blocksize is smaller than a page size, then use
		 * special small filesystem code.  NFS sometimes has a small
		 * blocksize, but it can handle large reads itself.
		 */
	} else if ((PAGE_SIZE / bsize) > 1 &&
	    (vp->v_mount->mnt_stat.f_type != nfs_mount_type)) {
		VM_OBJECT_LOCK(object);
		vm_page_lock_queues();
		for (i = 0; i < count; i++)
			if (i != reqpage)
				vm_page_free(m[i]);
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
		cnt.v_vnodein++;
		cnt.v_vnodepgsin++;
		return vnode_pager_input_smlfs(object, m[reqpage]);
	}

	/*
	 * If we have a completely valid page available to us, we can
	 * clean up and return.  Otherwise we have to re-read the
	 * media.
	 */
	VM_OBJECT_LOCK(object);
	if (m[reqpage]->valid == VM_PAGE_BITS_ALL) {
		vm_page_lock_queues();
		for (i = 0; i < count; i++)
			if (i != reqpage)
				vm_page_free(m[i]);
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
		return VM_PAGER_OK;
	}
	m[reqpage]->valid = 0;
	VM_OBJECT_UNLOCK(object);

	/*
	 * here on direct device I/O
	 */
	firstaddr = -1;

	/*
	 * calculate the run that includes the required page
	 */
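	/*
	 * For instance (an illustration, not from the original source),
	 * with count = 8 and reqpage = 4: a contiguous run covering only
	 * pages 0-2 ends before the required page and is freed, and a run
	 * starting at page 3 with runpg = 3 covers the required page, so
	 * pages 6 and 7 are freed and the transfer becomes pages 3-5.
	 */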
	for (first = 0, i = 0; i < count; i = runend) {
		firstaddr = vnode_pager_addr(vp,
			IDX_TO_OFF(m[i]->pindex), &runpg);
		if (firstaddr == -1) {
			VM_OBJECT_LOCK(object);
			if (i == reqpage && foff < object->un_pager.vnp.vnp_size) {
				panic("vnode_pager_getpages: unexpected missing page: firstaddr: %d, foff: 0x%jx%08jx, vnp_size: 0x%jx%08jx",
				    firstaddr, (uintmax_t)(foff >> 32),
				    (uintmax_t)foff,
				    (uintmax_t)
				    (object->un_pager.vnp.vnp_size >> 32),
				    (uintmax_t)object->un_pager.vnp.vnp_size);
			}
			vm_page_lock_queues();
			vm_page_free(m[i]);
			vm_page_unlock_queues();
			VM_OBJECT_UNLOCK(object);
			runend = i + 1;
			first = runend;
			continue;
		}
		runend = i + runpg;
		if (runend <= reqpage) {
			VM_OBJECT_LOCK(object);
			vm_page_lock_queues();
			for (j = i; j < runend; j++)
				vm_page_free(m[j]);
			vm_page_unlock_queues();
			VM_OBJECT_UNLOCK(object);
		} else {
			if (runpg < (count - first)) {
				VM_OBJECT_LOCK(object);
				vm_page_lock_queues();
				for (i = first + runpg; i < count; i++)
					vm_page_free(m[i]);
				vm_page_unlock_queues();
				VM_OBJECT_UNLOCK(object);
				count = first + runpg;
			}
			break;
		}
		first = runend;
	}

	/*
	 * the first and last page have been calculated now, move input pages
	 * to be zero based...
	 */
	if (first != 0) {
		for (i = first; i < count; i++) {
			m[i - first] = m[i];
		}
		count -= first;
		reqpage -= first;
	}

	/*
	 * calculate the file virtual address for the transfer
	 */
	foff = IDX_TO_OFF(m[0]->pindex);

	/*
	 * calculate the size of the transfer
	 */
	size = count * PAGE_SIZE;
	KASSERT(count > 0, ("zero count"));
	if ((foff + size) > object->un_pager.vnp.vnp_size)
		size = object->un_pager.vnp.vnp_size - foff;
	KASSERT(size > 0, ("zero size"));

	/*
	 * round up physical size for real devices.
	 */
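	/*
	 * e.g. (illustration): with 512-byte sectors, secmask is 511, so
	 * a 6000-byte size rounds up to 6144 while an already-aligned
	 * size is unchanged.
	 */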
	{
		int secmask = bo->bo_bsize - 1;
		KASSERT(secmask < PAGE_SIZE && secmask > 0,
		    ("vnode_pager_generic_getpages: sector size %d too large",
		    secmask + 1));
		size = (size + secmask) & ~secmask;
	}

	bp = getpbuf(&vnode_pbuf_freecnt);
	kva = (vm_offset_t) bp->b_data;

	/*
	 * and map the pages to be read into the kva
	 */
	pmap_qenter(kva, m, count);

	/* build a minimal buffer header */
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = bdone;
	KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
	KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
	bp->b_rcred = crhold(curthread->td_ucred);
	bp->b_wcred = crhold(curthread->td_ucred);
	bp->b_blkno = firstaddr;
	pbgetbo(bo, bp);
	bp->b_bcount = size;
	bp->b_bufsize = size;
	bp->b_runningbufspace = bp->b_bufsize;
	runningbufspace += bp->b_runningbufspace;

	cnt.v_vnodein++;
	cnt.v_vnodepgsin += count;

	/* do the input */
	bp->b_iooffset = dbtob(bp->b_blkno);
	bstrategy(bp);

	bwait(bp, PVM, "vnread");

	if ((bp->b_ioflags & BIO_ERROR) != 0)
		error = EIO;

	if (!error) {
		if (size != count * PAGE_SIZE)
			bzero((caddr_t) kva + size, PAGE_SIZE * count - size);
	}
	pmap_qremove(kva, count);

	/*
	 * free the buffer header back to the swap buffer pool
	 */
	pbrelbo(bp);
	relpbuf(bp, &vnode_pbuf_freecnt);

	VM_OBJECT_LOCK(object);
	vm_page_lock_queues();
	for (i = 0, tfoff = foff; i < count; i++, tfoff = nextoff) {
		vm_page_t mt;

		nextoff = tfoff + PAGE_SIZE;
		mt = m[i];

		if (nextoff <= object->un_pager.vnp.vnp_size) {
			/*
			 * Read filled up entire page.
			 */
			mt->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(mt);	/* should be an assert? XXX */
			pmap_clear_modify(mt);
		} else {
			/*
			 * Read did not fill up entire page.  Since this
			 * is getpages, the page may be mapped, so we have
			 * to zero the invalid portions of the page even
			 * though we aren't setting them valid.
			 *
			 * Currently we do not set the entire page valid,
			 * we just try to clear the piece that we couldn't
			 * read.
			 */
			vm_page_set_validclean(mt, 0,
			    object->un_pager.vnp.vnp_size - tfoff);
			/* handled by vm_fault now */
			/* vm_page_zero_invalid(mt, FALSE); */
		}

		if (i != reqpage) {

			/*
			 * whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere. (it already is in the object). Result:
			 * It appears that empirical results show that
			 * deactivating pages is best.
			 */

			/*
			 * just in case someone was asking for this page we
			 * now tell them that it is ok to use
			 */
			if (!error) {
				if (mt->flags & PG_WANTED)
					vm_page_activate(mt);
				else
					vm_page_deactivate(mt);
				vm_page_wakeup(mt);
			} else {
				vm_page_free(mt);
			}
		}
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(object);
	if (error) {
		printf("vnode_pager_getpages: I/O read error\n");
	}
	return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}

/*
 * EOPNOTSUPP is no longer legal.  For local media VFS's that do not
 * implement their own VOP_PUTPAGES, their VOP_PUTPAGES should call
 * vnode_pager_generic_putpages() to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_PUTPAGES.
 */
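/*
 * The corresponding sketch for a hypothetical local media filesystem
 * (again, the names are illustrative, not from this file):
 *
 *	static int
 *	myfs_putpages(struct vop_putpages_args *ap)
 *	{
 *
 *		return (vnode_pager_generic_putpages(ap->a_vp, ap->a_m,
 *		    ap->a_count, ap->a_sync, ap->a_rtvals));
 *	}
 */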
static void
vnode_pager_putpages(object, m, count, sync, rtvals)
	vm_object_t object;
	vm_page_t *m;
	int count;
	boolean_t sync;
	int *rtvals;
{
	int rtval;
	struct vnode *vp;
	struct mount *mp;
	int bytes = count * PAGE_SIZE;

	/*
	 * Force synchronous operation if we are extremely low on memory
	 * to prevent a low-memory deadlock.  VOP operations often need to
	 * allocate more memory to initiate the I/O ( i.e. do a BMAP
	 * operation ).  The swapper handles the case by limiting the amount
	 * of asynchronous I/O, but that sort of solution doesn't scale well
	 * for the vnode pager without a lot of work.
	 *
	 * Also, the backing vnode's iodone routine may not wake the pageout
	 * daemon up.  This should probably be addressed XXX.
	 */

	if ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min)
		sync |= OBJPC_SYNC;

	/*
	 * Call device-specific putpages function
	 */
	vp = object->handle;
	VM_OBJECT_UNLOCK(object);
	if (vp->v_type != VREG)
		mp = NULL;
	(void)vn_start_write(vp, &mp, V_WAIT);
	rtval = VOP_PUTPAGES(vp, m, bytes, sync, rtvals, 0);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: stale FS putpages\n"));
	vn_finished_write(mp);
	VM_OBJECT_LOCK(object);
}


/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_PUTPAGES.
 *
 * This is typically called indirectly via the pageout daemon and
 * clustering has already typically occurred, so in general we ask the
 * underlying filesystem to write the data out asynchronously rather
 * than delayed.
 */
int
vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
	struct vnode *vp;
	vm_page_t *m;
	int bytecount;
	int flags;
	int *rtvals;
{
	int i;
	vm_object_t object;
	int count;

	int maxsize, ncount;
	vm_ooffset_t poffset;
	struct uio auio;
	struct iovec aiov;
	int error;
	int ioflags;

	object = vp->v_object;
	count = bytecount / PAGE_SIZE;

	for (i = 0; i < count; i++)
		rtvals[i] = VM_PAGER_AGAIN;

	if ((int64_t)m[0]->pindex < 0) {
		printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%lx(%lx)\n",
			(long)m[0]->pindex, (u_long)m[0]->dirty);
		rtvals[0] = VM_PAGER_BAD;
		return VM_PAGER_BAD;
	}

	maxsize = count * PAGE_SIZE;
	ncount = count;

	poffset = IDX_TO_OFF(m[0]->pindex);

	/*
	 * If the page-aligned write is larger than the actual file we
	 * have to invalidate pages occurring beyond the file EOF.  However,
	 * there is an edge case where a file may not be page-aligned where
	 * the last page is partially invalid.  In this case the filesystem
	 * may not properly clear the dirty bits for the entire page (which
	 * could be VM_PAGE_BITS_ALL due to the page having been mmap()d).
	 * With the page locked we are free to fix-up the dirty bits here.
	 *
	 * We do not under any circumstances truncate the valid bits, as
	 * this will screw up bogus page replacement.
	 */
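	/*
	 * Worked example (an illustration, not from the original source):
	 * with 4K pages, poffset 8192 and vnp_size 10000, maxsize becomes
	 * 1808 and ncount 1, the dirty bits for bytes 1808-4095 of the
	 * last page are cleared, and any remaining pages get VM_PAGER_BAD.
	 */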
	if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
		if (object->un_pager.vnp.vnp_size > poffset) {
			int pgoff;

			maxsize = object->un_pager.vnp.vnp_size - poffset;
			ncount = btoc(maxsize);
			if ((pgoff = (int)maxsize & PAGE_MASK) != 0) {
				vm_page_lock_queues();
				vm_page_clear_dirty(m[ncount - 1], pgoff,
					PAGE_SIZE - pgoff);
				vm_page_unlock_queues();
			}
		} else {
			maxsize = 0;
			ncount = 0;
		}
		if (ncount < count) {
			for (i = ncount; i < count; i++) {
				rtvals[i] = VM_PAGER_BAD;
			}
		}
	}

	/*
	 * pageouts are already clustered, use IO_ASYNC to force a bawrite()
	 * rather than a bdwrite() to prevent paging I/O from saturating
	 * the buffer cache.  Dummy-up the sequential heuristic to cause
	 * large ranges to cluster.  If neither IO_SYNC nor IO_ASYNC is set,
	 * the system decides how to cluster.
	 */
	ioflags = IO_VMIO;
	if (flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL))
		ioflags |= IO_SYNC;
	else if ((flags & VM_PAGER_CLUSTER_OK) == 0)
		ioflags |= IO_ASYNC;
	ioflags |= (flags & VM_PAGER_PUT_INVAL) ? IO_INVAL: 0;
	ioflags |= IO_SEQMAX << IO_SEQSHIFT;
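	/*
	 * The write below uses UIO_NOCOPY: the data comes from the pages
	 * already resident in the VM object, so iov_base is never
	 * dereferenced and is left NULL.
	 */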

	aiov.iov_base = (caddr_t) 0;
	aiov.iov_len = maxsize;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = poffset;
	auio.uio_segflg = UIO_NOCOPY;
	auio.uio_rw = UIO_WRITE;
	auio.uio_resid = maxsize;
	auio.uio_td = (struct thread *) 0;
	error = VOP_WRITE(vp, &auio, ioflags, curthread->td_ucred);
	cnt.v_vnodeout++;
	cnt.v_vnodepgsout += ncount;

	if (error) {
		printf("vnode_pager_putpages: I/O error %d\n", error);
	}
	if (auio.uio_resid) {
		printf("vnode_pager_putpages: residual I/O %d at %lu\n",
		    auio.uio_resid, (u_long)m[0]->pindex);
	}
	for (i = 0; i < ncount; i++) {
		rtvals[i] = VM_PAGER_OK;
	}
	return rtvals[0];
}

struct vnode *
vnode_pager_lock(vm_object_t first_object)
{
	struct vnode *vp;
	vm_object_t backing_object, object;

	VM_OBJECT_LOCK_ASSERT(first_object, MA_OWNED);
	for (object = first_object; object != NULL; object = backing_object) {
		if (object->type != OBJT_VNODE) {
			if ((backing_object = object->backing_object) != NULL)
				VM_OBJECT_LOCK(backing_object);
			if (object != first_object)
				VM_OBJECT_UNLOCK(object);
			continue;
		}
	retry:
		if (object->flags & OBJ_DEAD) {
			if (object != first_object)
				VM_OBJECT_UNLOCK(object);
			return NULL;
		}
		vp = object->handle;
		VI_LOCK(vp);
		VM_OBJECT_UNLOCK(object);
		if (first_object != object)
			VM_OBJECT_UNLOCK(first_object);
		if (vget(vp, LK_CANRECURSE | LK_INTERLOCK | LK_NOPAUSE |
		    LK_RETRY | LK_SHARED, curthread)) {
			VM_OBJECT_LOCK(first_object);
			if (object != first_object)
				VM_OBJECT_LOCK(object);
			if (object->type != OBJT_VNODE) {
				if (object != first_object)
					VM_OBJECT_UNLOCK(object);
				return NULL;
			}
			printf("vnode_pager_lock: retrying\n");
			goto retry;
		}
		VM_OBJECT_LOCK(first_object);
		return (vp);
	}
	return NULL;
}