/*
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1993, 1994 John S. Dyson
 * Copyright (c) 1995, David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vnode_pager.c	7.5 (Berkeley) 4/20/91
 * $Id: vnode_pager.c,v 1.44 1995/07/13 08:48:47 davidg Exp $
 */

/*
 * Page to/from files (vnodes).
 */

/*
 * TODO:
 *	Implement VOP_GETPAGES/PUTPAGES interface for filesystems.  Will
 *	greatly simplify the vnode_pager.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/mount.h>
#include <sys/buf.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

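/*
 * Pager operations vector for vnode-backed objects; entries that the
 * vnode pager does not implement are left NULL.
 */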
struct pagerops vnodepagerops = {
	NULL,
	vnode_pager_alloc,
	vnode_pager_dealloc,
	vnode_pager_getpages,
	vnode_pager_putpages,
	vnode_pager_haspage,
	NULL
};

/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 */
vm_object_t
vnode_pager_alloc(handle, size, prot, offset)
	void *handle;
	vm_size_t size;
	vm_prot_t prot;
	vm_offset_t offset;
{
	vm_object_t object;
	struct vnode *vp;

	/*
	 * Pageout to a vnode without a handle: can't do that yet.
	 */
	if (handle == NULL)
		return (NULL);

	vp = (struct vnode *) handle;

	/*
	 * Prevent race condition when allocating the object. This
	 * can happen with NFS vnodes since the nfsnode isn't locked.
	 */
	while (vp->v_flag & VOLOCK) {
		vp->v_flag |= VOWANT;
		tsleep(vp, PVM, "vnpobj", 0);
	}
	vp->v_flag |= VOLOCK;

	/*
	 * If the object is being terminated, wait for it to
	 * go away.
	 */
	while (((object = vp->v_object) != NULL) && (object->flags & OBJ_DEAD)) {
		tsleep(object, PVM, "vadead", 0);
	}

	if (object == NULL) {
		/*
		 * Allocate an object of the appropriate size.
		 */
		object = vm_object_allocate(OBJT_VNODE, round_page(size));
		object->flags = OBJ_CANPERSIST;

		/*
		 * Hold a reference to the vnode and initialize object data.
		 */
		VREF(vp);
		object->un_pager.vnp.vnp_size = size;

		object->handle = handle;
		vp->v_object = object;
	} else {
		/*
		 * vm_object_reference() will remove the object from the cache if
		 * found and gain a reference to the object.
		 */
		vm_object_reference(object);
	}

	if (vp->v_type == VREG)
		vp->v_flag |= VVMIO;

	vp->v_flag &= ~VOLOCK;
	if (vp->v_flag & VOWANT) {
		vp->v_flag &= ~VOWANT;
		wakeup(vp);
	}
	return (object);
}

void
vnode_pager_dealloc(object)
	vm_object_t object;
{
	register struct vnode *vp = object->handle;

	if (vp == NULL)
		panic("vnode_pager_dealloc: pager already dealloced");

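	/*
	 * Wait for any paging activity that is still in progress on this
	 * object to drain before tearing down the vnode association.
	 */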
	if (object->paging_in_progress) {
		int s = splbio();
		while (object->paging_in_progress) {
			object->flags |= OBJ_PIPWNT;
			tsleep(object, PVM, "vnpdea", 0);
		}
		splx(s);
	}

	object->handle = NULL;

	vp->v_object = NULL;
	vp->v_flag &= ~(VTEXT | VVMIO);
	vp->v_flag |= VAGE;
	vrele(vp);
}

boolean_t
vnode_pager_haspage(object, offset, before, after)
	vm_object_t object;
	vm_offset_t offset;
	int *before;
	int *after;
{
	struct vnode *vp = object->handle;
	daddr_t bn;
	int err, run;
	daddr_t startblock, reqblock;

	/*
	 * If the filesystem is no longer mounted, or if the offset is
	 * beyond end of file, we do not have the page.
	 */
	if ((vp->v_mount == NULL) || (offset >= object->un_pager.vnp.vnp_size))
		return FALSE;

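	/*
	 * Convert the byte offset to a filesystem block number and back up
	 * by at most PFCLUSTER_BEHIND blocks so that read-behind clustering
	 * can be checked as well.
	 */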
	startblock = reqblock = offset / vp->v_mount->mnt_stat.f_iosize;
	if (startblock > PFCLUSTER_BEHIND)
		startblock -= PFCLUSTER_BEHIND;
	else
		startblock = 0;

	if (before != NULL) {
		/*
		 * Loop looking for a contiguous chunk that includes the
		 * requested page.
		 */
		while (TRUE) {
			err = VOP_BMAP(vp, startblock, (struct vnode **) 0, &bn, &run, NULL);
			if (err || bn == -1) {
				if (startblock < reqblock) {
					startblock++;
					continue;
				}
				*before = 0;
				if (after != NULL)
					*after = 0;
				return err ? TRUE : FALSE;
			}
			if ((startblock + run) < reqblock) {
				startblock += run + 1;
				continue;
			}
			*before = reqblock - startblock;
			if (after != NULL)
				*after = run;
			return TRUE;
		}
	}

	err = VOP_BMAP(vp, reqblock, (struct vnode **) 0, &bn, after, NULL);
	if (err)
		return TRUE;
	return ((long) bn < 0 ? FALSE : TRUE);
}

/*
 * Lets the VM system know about a change in size for a file.
 * We adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
void
vnode_pager_setsize(vp, nsize)
	struct vnode *vp;
	u_long nsize;
{
	vm_object_t object = vp->v_object;

	if (object == NULL)
		return;

	/*
	 * Hasn't changed size
	 */
	if (nsize == object->un_pager.vnp.vnp_size)
		return;

	/*
	 * File has shrunk. Toss any cached pages beyond the new EOF.
	 */
	if (nsize < object->un_pager.vnp.vnp_size) {
		if (round_page((vm_offset_t) nsize) < object->un_pager.vnp.vnp_size) {
			vm_object_page_remove(object,
			    round_page((vm_offset_t) nsize), object->un_pager.vnp.vnp_size, FALSE);
		}
		/*
		 * this gets rid of garbage at the end of a page that is now
		 * only partially backed by the vnode...
		 */
		if (nsize & PAGE_MASK) {
			vm_offset_t kva;
			vm_page_t m;

			m = vm_page_lookup(object, trunc_page((vm_offset_t) nsize));
			if (m) {
				kva = vm_pager_map_page(m);
				bzero((caddr_t) kva + (nsize & PAGE_MASK),
				    round_page(nsize) - nsize);
				vm_pager_unmap_page(kva);
			}
		}
	}
	object->un_pager.vnp.vnp_size = (vm_offset_t) nsize;
	object->size = round_page(nsize);
}

void
vnode_pager_umount(mp)
	register struct mount *mp;
{
	struct vnode *vp, *nvp;

loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
		/*
		 * Vnode can be reclaimed by getnewvnode() while we
		 * traverse the list.
		 */
		if (vp->v_mount != mp)
			goto loop;

		/*
		 * Save the next pointer now, since uncaching may terminate
		 * the object and render the vnode invalid.
		 */
		nvp = vp->v_mntvnodes.le_next;

		if (vp->v_object != NULL) {
			VOP_LOCK(vp);
			vnode_pager_uncache(vp);
			VOP_UNLOCK(vp);
		}
	}
}

/*
 * Remove the object associated with a vnode from the object cache.
 * This routine must be called with the vnode locked.
 *
 * XXX we temporarily unlock the vnode: uncaching the object may result
 * in its destruction, which may initiate paging activity, which may in
 * turn necessitate re-locking the vnode.
 */
void
vnode_pager_uncache(vp)
	struct vnode *vp;
{
	vm_object_t object;

	/*
	 * Not a mapped vnode
	 */
	object = vp->v_object;
	if (object == NULL)
		return;

	vm_object_reference(object);
	VOP_UNLOCK(vp);
	pager_cache(object, FALSE);
	VOP_LOCK(vp);
	return;
}

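/*
 * Wake up anyone sleeping on the page and release it to the free list.
 */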
void
vnode_pager_freepage(m)
	vm_page_t m;
{
	PAGE_WAKEUP(m);
	vm_page_free(m);
}

/*
 * calculate the disk address (in DEV_BSIZE units) that backs the
 * specified virtual file address
 */
vm_offset_t
vnode_pager_addr(vp, address, run)
	struct vnode *vp;
	vm_offset_t address;
	int *run;
{
	int rtaddress;
	int bsize;
	vm_offset_t block;
	struct vnode *rtvp;
	int err;
	int vblock, voffset;

	if ((int) address < 0)
		return -1;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	vblock = address / bsize;
	voffset = address % bsize;

	err = VOP_BMAP(vp, vblock, &rtvp, &block, run, NULL);

	if (err || (block == -1))
		rtaddress = -1;
	else {
		rtaddress = block + voffset / DEV_BSIZE;
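		/*
		 * Convert the run length returned by VOP_BMAP from
		 * filesystem blocks to pages, discounting the pages that
		 * fall before the requested offset within the first block.
		 */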
		if (run) {
			*run += 1;
			*run *= bsize / PAGE_SIZE;
			*run -= voffset / PAGE_SIZE;
		}
	}

	return rtaddress;
}

/*
 * interrupt routine for I/O completion
 */
void
vnode_pager_iodone(bp)
	struct buf *bp;
{
	bp->b_flags |= B_DONE;
	wakeup(bp);
}
408
409/*
410 * small block file system vnode pager input
411 */
412int
413vnode_pager_input_smlfs(object, m)
414 vm_object_t object;
415 vm_page_t m;
416{
417 int i;
418 int s;
419 struct vnode *dp, *vp;
420 struct buf *bp;
421 vm_offset_t kva;
422 int fileaddr;
423 vm_offset_t bsize;
424 int error = 0;
425
426 vp = object->handle;
427 bsize = vp->v_mount->mnt_stat.f_iosize;
428
429
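	/* Find the underlying device vnode for the file. */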
	VOP_BMAP(vp, 0, &dp, 0, NULL, NULL);

	kva = vm_pager_map_page(m);

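	/*
	 * Read the page one filesystem block at a time, skipping pieces
	 * that are already valid.
	 */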
	for (i = 0; i < PAGE_SIZE / bsize; i++) {

		if ((vm_page_bits(m->offset + i * bsize, bsize) & m->valid))
			continue;

		fileaddr = vnode_pager_addr(vp, m->offset + i * bsize, (int *)0);
		if (fileaddr != -1) {
			bp = getpbuf();

			/* build a minimal buffer header */
			bp->b_flags = B_BUSY | B_READ | B_CALL;
			bp->b_iodone = vnode_pager_iodone;
			bp->b_proc = curproc;
			bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred;
			if (bp->b_rcred != NOCRED)
				crhold(bp->b_rcred);
			if (bp->b_wcred != NOCRED)
				crhold(bp->b_wcred);
			bp->b_un.b_addr = (caddr_t) kva + i * bsize;
			bp->b_blkno = fileaddr;
			pbgetvp(dp, bp);
			bp->b_bcount = bsize;
			bp->b_bufsize = bsize;

			/* do the input */
			VOP_STRATEGY(bp);

			/* we definitely need to be at splbio here */
			s = splbio();
			while ((bp->b_flags & B_DONE) == 0) {
				tsleep(bp, PVM, "vnsrd", 0);
			}
			splx(s);
			if ((bp->b_flags & B_ERROR) != 0)
				error = EIO;

			/*
			 * free the buffer header back to the swap buffer pool
			 */
			relpbuf(bp);
			if (error)
				break;

			vm_page_set_clean(m, (i * bsize) & (PAGE_SIZE-1), bsize);
			vm_page_set_valid(m, (i * bsize) & (PAGE_SIZE-1), bsize);
		} else {
			vm_page_set_clean(m, (i * bsize) & (PAGE_SIZE-1), bsize);
			bzero((caddr_t) kva + i * bsize, bsize);
		}
	}
	vm_pager_unmap_page(kva);
	pmap_clear_modify(VM_PAGE_TO_PHYS(m));
	if (error) {
		return VM_PAGER_ERROR;
	}
	return VM_PAGER_OK;
}

/*
 * old style vnode pager input routine
 */
int
vnode_pager_input_old(object, m)
	vm_object_t object;
	vm_page_t m;
{
	struct uio auio;
	struct iovec aiov;
	int error;
	int size;
	vm_offset_t kva;

	error = 0;

	/*
	 * Return failure if beyond current EOF
	 */
	if (m->offset >= object->un_pager.vnp.vnp_size) {
		return VM_PAGER_BAD;
	} else {
		size = PAGE_SIZE;
		if (m->offset + size > object->un_pager.vnp.vnp_size)
			size = object->un_pager.vnp.vnp_size - m->offset;

		/*
		 * Allocate a kernel virtual address and initialize so that
		 * we can use VOP_READ/WRITE routines.
		 */
		kva = vm_pager_map_page(m);

		aiov.iov_base = (caddr_t) kva;
		aiov.iov_len = size;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = m->offset;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_READ;
		auio.uio_resid = size;
		auio.uio_procp = (struct proc *) 0;

		error = VOP_READ(object->handle, &auio, 0, curproc->p_ucred);
		if (!error) {
			register int count = size - auio.uio_resid;

			if (count == 0)
				error = EINVAL;
			else if (count != PAGE_SIZE)
				bzero((caddr_t) kva + count, PAGE_SIZE - count);
		}
		vm_pager_unmap_page(kva);
	}
	pmap_clear_modify(VM_PAGE_TO_PHYS(m));
	m->dirty = 0;
	return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}

/*
 * generic vnode pager input routine
 */
int
vnode_pager_getpages(object, m, count, reqpage)
	vm_object_t object;
	vm_page_t *m;
	int count;
	int reqpage;
{
	vm_offset_t kva, foff;
	int i, size, bsize, first, firstaddr;
	struct vnode *dp, *vp;
	int runpg;
	int runend;
	struct buf *bp;
	int s;
	int error = 0;

	vp = object->handle;
	bsize = vp->v_mount->mnt_stat.f_iosize;

	/* get the UNDERLYING device for the file with VOP_BMAP() */

	/*
	 * originally, we did not check for an error return value -- assuming
	 * an fs always has a bmap entry point -- that assumption is wrong!!!
	 */
	foff = m[reqpage]->offset;

	/*
	 * if we can't bmap, use old VOP code
	 */
	if (VOP_BMAP(vp, 0, &dp, 0, NULL, NULL)) {
		for (i = 0; i < count; i++) {
			if (i != reqpage) {
				vnode_pager_freepage(m[i]);
			}
		}
		cnt.v_vnodein++;
		cnt.v_vnodepgsin++;
		return vnode_pager_input_old(object, m[reqpage]);

	/*
	 * if the blocksize is smaller than the page size, then use
	 * special small filesystem code.  NFS sometimes has a small
	 * blocksize, but it can handle large reads itself.
	 */
	} else if ((PAGE_SIZE / bsize) > 1 &&
	    (vp->v_mount->mnt_stat.f_type != MOUNT_NFS)) {

		for (i = 0; i < count; i++) {
			if (i != reqpage) {
				vnode_pager_freepage(m[i]);
			}
		}
		cnt.v_vnodein++;
		cnt.v_vnodepgsin++;
		return vnode_pager_input_smlfs(object, m[reqpage]);
	}
	/*
	 * If ANY DEV_BSIZE blocks are valid on a large filesystem block,
	 * then the entire page is valid.
	 */
	if (m[reqpage]->valid) {
		m[reqpage]->valid = VM_PAGE_BITS_ALL;
		for (i = 0; i < count; i++) {
			if (i != reqpage)
				vnode_pager_freepage(m[i]);
		}
		return VM_PAGER_OK;
	}

	/*
	 * here on direct device I/O
	 */

	firstaddr = -1;
	/*
	 * calculate the run that includes the required page
	 */
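	/*
	 * Scan the pages with vnode_pager_addr() until we find the
	 * contiguous disk run that contains the requested page; pages that
	 * fall outside that run are freed, and firstaddr is left as the
	 * disk address of the first page of the run.
	 */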
	for (first = 0, i = 0; i < count; i = runend) {
		firstaddr = vnode_pager_addr(vp, m[i]->offset, &runpg);
		if (firstaddr == -1) {
			if (i == reqpage && foff < object->un_pager.vnp.vnp_size) {
				panic("vnode_pager_getpages: unexpected missing page: firstaddr: %d, foff: %ld, vnp_size: %d",
				    firstaddr, foff, object->un_pager.vnp.vnp_size);
			}
			vnode_pager_freepage(m[i]);
			runend = i + 1;
			first = runend;
			continue;
		}
		runend = i + runpg;
		if (runend <= reqpage) {
			int j;
			for (j = i; j < runend; j++) {
				vnode_pager_freepage(m[j]);
			}
		} else {
			if (runpg < (count - first)) {
				for (i = first + runpg; i < count; i++)
					vnode_pager_freepage(m[i]);
				count = first + runpg;
			}
			break;
		}
		first = runend;
	}

	/*
	 * The first and last pages have been calculated; now shift the
	 * input pages so that the run is zero based.
	 */
	if (first != 0) {
		for (i = first; i < count; i++) {
			m[i - first] = m[i];
		}
		count -= first;
		reqpage -= first;
	}

	/*
	 * calculate the file virtual address for the transfer
	 */
	foff = m[0]->offset;

	/*
	 * calculate the size of the transfer
	 */
	size = count * PAGE_SIZE;
	if ((foff + size) > object->un_pager.vnp.vnp_size)
		size = object->un_pager.vnp.vnp_size - foff;

	/*
	 * round up physical size for real devices
	 */
	if (dp->v_type == VBLK || dp->v_type == VCHR)
		size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);

	bp = getpbuf();
	kva = (vm_offset_t) bp->b_data;

	/*
	 * and map the pages to be read into the kva
	 */
	pmap_qenter(kva, m, count);

	/* build a minimal buffer header */
	bp->b_flags = B_BUSY | B_READ | B_CALL;
	bp->b_iodone = vnode_pager_iodone;
	/* B_PHYS is not set, but it is nice to fill this in */
	bp->b_proc = curproc;
	bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred;
	if (bp->b_rcred != NOCRED)
		crhold(bp->b_rcred);
	if (bp->b_wcred != NOCRED)
		crhold(bp->b_wcred);
	bp->b_blkno = firstaddr;
	pbgetvp(dp, bp);
	bp->b_bcount = size;
	bp->b_bufsize = size;

	cnt.v_vnodein++;
	cnt.v_vnodepgsin += count;

	/* do the input */
	VOP_STRATEGY(bp);

	s = splbio();
	/* we definitely need to be at splbio here */

	while ((bp->b_flags & B_DONE) == 0) {
		tsleep(bp, PVM, "vnread", 0);
	}
	splx(s);
	if ((bp->b_flags & B_ERROR) != 0)
		error = EIO;

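	/*
	 * If the transfer came up short (the file ends inside the last
	 * page), zero the remainder of the mapped pages so that no stale
	 * data is left behind.
	 */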
	if (!error) {
		if (size != count * PAGE_SIZE)
			bzero((caddr_t) kva + size, PAGE_SIZE * count - size);
	}
	pmap_qremove(kva, count);

	/*
	 * free the buffer header back to the swap buffer pool
	 */
	relpbuf(bp);

	for (i = 0; i < count; i++) {
		pmap_clear_modify(VM_PAGE_TO_PHYS(m[i]));
		m[i]->dirty = 0;
		m[i]->valid = VM_PAGE_BITS_ALL;
		if (i != reqpage) {

			/*
			 * Whether or not to leave the page activated is up
			 * in the air, but we should put the page on a page
			 * queue somewhere (it already is in the object).
			 * Empirical results show that deactivating the
			 * pages works best.
			 */

			/*
			 * Just in case someone was asking for this page, we
			 * now tell them that it is OK to use.
			 */
			if (!error) {
				vm_page_deactivate(m[i]);
				PAGE_WAKEUP(m[i]);
			} else {
				vnode_pager_freepage(m[i]);
			}
		}
	}
	if (error) {
		printf("vnode_pager_getpages: I/O read error\n");
	}
	return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}

/*
 * generic vnode pager output routine
 */
int
vnode_pager_putpages(object, m, count, sync, rtvals)
	vm_object_t object;
	vm_page_t *m;
	int count;
	boolean_t sync;
	int *rtvals;
{
	int i;

	struct vnode *vp;
	int maxsize, ncount;
	struct uio auio;
	struct iovec aiov;
	int error;

	vp = object->handle;
	for (i = 0; i < count; i++)
		rtvals[i] = VM_PAGER_AGAIN;

	if ((int) m[0]->offset < 0) {
		printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%x(%x)\n", m[0]->offset, m[0]->dirty);
		rtvals[0] = VM_PAGER_BAD;
		return VM_PAGER_BAD;
	}

	maxsize = count * PAGE_SIZE;
	ncount = count;

	if (maxsize + m[0]->offset > object->un_pager.vnp.vnp_size) {
		if (object->un_pager.vnp.vnp_size > m[0]->offset)
			maxsize = object->un_pager.vnp.vnp_size - m[0]->offset;
		else
			maxsize = 0;
		ncount = (maxsize + PAGE_SIZE - 1) / PAGE_SIZE;
		if (ncount < count) {
			for (i = ncount; i < count; i++) {
				rtvals[i] = VM_PAGER_BAD;
			}
			if (ncount == 0) {
				printf("vnode_pager_putpages: write past end of file: %d, %d\n",
				    m[0]->offset, object->un_pager.vnp.vnp_size);
				return rtvals[0];
			}
		}
	}

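	/*
	 * Trade the PG_BUSY flag for a hold on the busy count while the
	 * pages are written out through VOP_WRITE() below.
	 */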
	for (i = 0; i < count; i++) {
		m[i]->busy++;
		m[i]->flags &= ~PG_BUSY;
	}

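	/*
	 * Build a UIO_NOCOPY write request: no data is copied through
	 * iov_base, since the pages being written are already resident in
	 * the object (the write is flagged IO_VMIO).
	 */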
	aiov.iov_base = (caddr_t) 0;
	aiov.iov_len = maxsize;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = m[0]->offset;
	auio.uio_segflg = UIO_NOCOPY;
	auio.uio_rw = UIO_WRITE;
	auio.uio_resid = maxsize;
	auio.uio_procp = (struct proc *) 0;
	error = VOP_WRITE(vp, &auio, IO_VMIO, curproc->p_ucred);
	cnt.v_vnodeout++;
	cnt.v_vnodepgsout += ncount;

	if (error) {
		printf("vnode_pager_putpages: I/O error %d\n", error);
	}
	if (auio.uio_resid) {
		printf("vnode_pager_putpages: residual I/O %d at %d\n", auio.uio_resid, m[0]->offset);
	}
	for (i = 0; i < count; i++) {
		m[i]->busy--;
		if (i < ncount) {
			rtvals[i] = VM_PAGER_OK;
		}
		if ((m[i]->busy == 0) && (m[i]->flags & PG_WANTED))
			wakeup(m[i]);
	}
	return rtvals[0];
}

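/*
 * Find and lock the vnode, if any, that ultimately backs the given
 * object chain.  Returns the locked vnode, or NULL if the chain contains
 * no vnode-backed object.
 */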
struct vnode *
vnode_pager_lock(object)
	vm_object_t object;
{
	for (; object != NULL; object = object->backing_object) {
		if (object->type != OBJT_VNODE)
			continue;

		VOP_LOCK(object->handle);
		return object->handle;
	}
	return NULL;
}