--- sys/vm/vnode_pager.c	(revision 76117)
+++ sys/vm/vnode_pager.c	(revision 76827)
 /*
  * Copyright (c) 1990 University of Utah.
  * Copyright (c) 1991 The Regents of the University of California.
  * All rights reserved.
  * Copyright (c) 1993, 1994 John S. Dyson
  * Copyright (c) 1995, David Greenman
  *
  * This code is derived from software contributed to Berkeley by

--- 24 unchanged lines hidden ---

  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
- * $FreeBSD: head/sys/vm/vnode_pager.c 76117 2001-04-29 02:45:39Z grog $
+ * $FreeBSD: head/sys/vm/vnode_pager.c 76827 2001-05-19 01:28:09Z alfred $
  */

 /*
  * Page to/from files (vnodes).
  */

 /*
  * TODO:

--- 48 unchanged lines hidden ---

  */
 vm_object_t
 vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
     vm_ooffset_t offset)
 {
 	vm_object_t object;
 	struct vnode *vp;

+	mtx_assert(&vm_mtx, MA_OWNED);
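
Note: the mtx_assert() additions in this revision document each function's
locking contract and enforce it in kernels built with INVARIANTS, where a
failed assertion panics; otherwise they compile away. A minimal illustration
of the two forms used in this diff (hypothetical mutex name):

	mtx_assert(&some_mtx, MA_OWNED);	/* caller must hold the lock */
	mtx_assert(&some_mtx, MA_NOTOWNED);	/* caller must not hold it */
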
 	/*
 	 * Pageout to vnode, no can do yet.
 	 */
 	if (handle == NULL)
 		return (NULL);

 	/*
 	 * XXX hack - This initialization should be put somewhere else.
 	 */
 	if (vnode_pbuf_freecnt < 0) {
 		vnode_pbuf_freecnt = nswbuf / 2 + 1;
 	}

 	vp = (struct vnode *) handle;

 	/*
 	 * Prevent race condition when allocating the object. This
 	 * can happen with NFS vnodes since the nfsnode isn't locked.
 	 */
+	mtx_unlock(&vm_mtx);
+	mtx_lock(&Giant);
 	while (vp->v_flag & VOLOCK) {
 		vp->v_flag |= VOWANT;
 		tsleep(vp, PVM, "vnpobj", 0);
 	}
 	vp->v_flag |= VOLOCK;
+	mtx_unlock(&Giant);
+	mtx_lock(&vm_mtx);
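
Note: this hunk never holds vm_mtx and Giant simultaneously; the thread
drops the lock it has before taking the other, then reverses the swap, so
no lock order between the two is ever established. A minimal sketch of the
hand-over pattern (hypothetical lock names, not code from this file):

	mtx_unlock(&inner_mtx);		/* drop the held lock first... */
	mtx_lock(&outer_mtx);		/* ...then take the other one */
	/* work that must run under outer_mtx only */
	mtx_unlock(&outer_mtx);
	mtx_lock(&inner_mtx);		/* restore the original state */
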

 	/*
 	 * If the object is being terminated, wait for it to
 	 * go away.
 	 */
 	while (((object = vp->v_object) != NULL) &&
 	    (object->flags & OBJ_DEAD)) {
-		tsleep(object, PVM, "vadead", 0);
+		msleep(object, &vm_mtx, PVM, "vadead", 0);
 	}
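
Note: msleep(9) differs from tsleep(9) in that it atomically releases the
given mutex while the thread sleeps and reacquires it before returning, so
the OBJ_DEAD test and the sleep form one race-free unit now that vm_mtx
protects the object. The usual idiom, as a minimal sketch (hypothetical
names):

	mtx_lock(&m);
	while (resource_is_busy(r))
		msleep(r, &m, PVM, "rwait", 0);	/* m dropped while asleep */
	/* m is held again and the condition has been rechecked */
	mtx_unlock(&m);
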

 	if (vp->v_usecount == 0)
 		panic("vnode_pager_alloc: no vnode reference");

 	if (object == NULL) {
 		/*
 		 * And an object of the appropriate size

--- 6 unchanged lines hidden ---

 		object->handle = handle;
 		vp->v_object = object;
 		vp->v_usecount++;
 	} else {
 		object->ref_count++;
 		vp->v_usecount++;
 	}

+	mtx_unlock(&vm_mtx);
+	mtx_lock(&Giant);
 	vp->v_flag &= ~VOLOCK;
 	if (vp->v_flag & VOWANT) {
 		vp->v_flag &= ~VOWANT;
 		wakeup(vp);
 	}
+	mtx_unlock(&Giant);
+	mtx_lock(&vm_mtx);
 	return (object);
 }

 static void
 vnode_pager_dealloc(object)
 	vm_object_t object;
 {
 	register struct vnode *vp = object->handle;

--- 43 unchanged lines hidden ---

 	pagesperblock = bsize / PAGE_SIZE;
 	blocksperpage = 0;
 	if (pagesperblock > 0) {
 		reqblock = pindex / pagesperblock;
 	} else {
 		blocksperpage = (PAGE_SIZE / bsize);
 		reqblock = pindex * blocksperpage;
 	}
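
Note: the block above covers both block-size regimes. For example, with
PAGE_SIZE 4096 and bsize 8192, pagesperblock is 2 and page index 5 lies in
file block 5 / 2 = 2; with bsize 512, pagesperblock is 0, blocksperpage is
4096 / 512 = 8, and page index 5 starts at block 5 * 8 = 40.
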
+	mtx_unlock(&vm_mtx);
+	mtx_lock(&Giant);
 	err = VOP_BMAP(vp, reqblock, (struct vnode **) 0, &bn,
 	    after, before);
+	mtx_unlock(&Giant);
+	mtx_lock(&vm_mtx);
 	if (err)
 		return TRUE;
 	if (bn == -1)
 		return FALSE;
 	if (pagesperblock > 0) {
 		poff = pindex - (reqblock * pagesperblock);
 		if (before) {
 			*before *= pagesperblock;

--- 46 unchanged lines hidden ---

 		return;

 	nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);

 	/*
 	 * File has shrunk. Toss any cached pages beyond the new EOF.
 	 */
 	if (nsize < object->un_pager.vnp.vnp_size) {
+		int hadvmlock;
+
+		hadvmlock = mtx_owned(&vm_mtx);
+		if (!hadvmlock)
+			mtx_lock(&vm_mtx);
 		vm_freeze_copyopts(object, OFF_TO_IDX(nsize), object->size);
 		if (nobjsize < object->size) {
 			vm_object_page_remove(object, nobjsize, object->size,
 			    FALSE);
 		}
 		/*
 		 * this gets rid of garbage at the end of a page that is now
 		 * only partially backed by the vnode...

--- 24 unchanged lines hidden ---

 				 * case is one of them. If the page is still
 				 * partially dirty, make it fully dirty.
 				 */
 				vm_page_set_validclean(m, base, size);
 				if (m->dirty != 0)
 					m->dirty = VM_PAGE_BITS_ALL;
 			}
 		}
+		if (!hadvmlock)
+			mtx_unlock(&vm_mtx);
 	}
 	object->un_pager.vnp.vnp_size = nsize;
 	object->size = nobjsize;
 }
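
Note: the hadvmlock logic added above is a conditional-locking pattern:
mtx_owned(9) reports whether the current thread already holds vm_mtx, the
function takes the lock only if it was not held, and releases it only if it
took it, making vnode_pager_setsize() callable from either locking context.
A minimal sketch (hypothetical names):

	int held = mtx_owned(&m);	/* did the caller already lock m? */

	if (!held)
		mtx_lock(&m);
	/* ... critical section ... */
	if (!held)
		mtx_unlock(&m);		/* release only what we acquired */
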

 /*
  * calculate the linear (byte) disk address of specified virtual
  * file address

--- 201 unchanged lines hidden ---

 	return error ? VM_PAGER_ERROR : VM_PAGER_OK;
 }

 /*
  * generic vnode pager input routine
  */

 /*
- * EOPNOTSUPP is no longer legal. For local media VFS's that do not
- * implement their own VOP_GETPAGES, their VOP_GETPAGES should call to
+ * Local media VFS's that do not implement their own VOP_GETPAGES
+ * should have their VOP_GETPAGES call
  * vnode_pager_generic_getpages() to implement the previous behaviour.
  *
  * All other FS's should use the bypass to get to the local media
  * backing vp's VOP_GETPAGES.
  */
 static int
 vnode_pager_getpages(object, m, count, reqpage)
 	vm_object_t object;
 	vm_page_t *m;
 	int count;
 	int reqpage;
 {
 	int rtval;
 	struct vnode *vp;
 	int bytes = count * PAGE_SIZE;

+	mtx_assert(&vm_mtx, MA_OWNED);
 	vp = object->handle;
-	/*
-	 * XXX temporary diagnostic message to help track stale FS code,
-	 * Returning EOPNOTSUPP from here may make things unhappy.
-	 */
 	rtval = VOP_GETPAGES(vp, m, bytes, reqpage, 0);
-	if (rtval == EOPNOTSUPP) {
-		printf("vnode_pager: *** WARNING *** stale FS getpages\n");
-		rtval = vnode_pager_generic_getpages( vp, m, bytes, reqpage);
-	}
+	KASSERT(rtval != EOPNOTSUPP,
+	    ("vnode_pager: FS getpages not implemented\n"));
 	return rtval;
 }
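
Note: here and in the putpages path below, a tolerant runtime fallback
(warn, then call the generic implementation) becomes a hard invariant.
KASSERT(9) panics with the supplied message when its condition is false in
an INVARIANTS kernel and costs nothing otherwise, so a filesystem that still
returns EOPNOTSUPP now fails loudly during development instead of being
silently papered over. The general shape, with hypothetical names:

	error = VOP_SOMEOP(vp, args);
	KASSERT(error != EOPNOTSUPP,	/* checked under INVARIANTS only */
	    ("caller: VOP_SOMEOP not implemented"));
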


 /*
  * This is now called from local media FS's to operate against their
  * own vnodes if they fail to implement VOP_GETPAGES.
  */

--- 305 unchanged lines hidden ---


 	/*
 	 * Call device-specific putpages function
 	 */

 	vp = object->handle;
 	if (vp->v_type != VREG)
 		mp = NULL;
+	mtx_unlock(&vm_mtx);
+	mtx_lock(&Giant);
 	(void)vn_start_write(vp, &mp, V_WAIT);
+	mtx_unlock(&Giant);
+	mtx_lock(&vm_mtx);
 	rtval = VOP_PUTPAGES(vp, m, bytes, sync, rtvals, 0);
-	if (rtval == EOPNOTSUPP) {
-		printf("vnode_pager: *** WARNING *** stale FS putpages\n");
-		rtval = vnode_pager_generic_putpages( vp, m, bytes, sync, rtvals);
-	}
+	KASSERT(rtval != EOPNOTSUPP,
+	    ("vnode_pager: stale FS putpages\n"));
+	mtx_unlock(&vm_mtx);
+	mtx_lock(&Giant);
 	vn_finished_write(mp);
+	mtx_unlock(&Giant);
+	mtx_lock(&vm_mtx);
 }
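
Note: vn_start_write(9) and vn_finished_write(9) bracket the page-out for
filesystem write-suspension accounting (V_WAIT sleeps until writes are
permitted), and at this point in the SMPng transition they are still
Giant-locked VFS code, hence the lock swaps around each call. The bracket,
reduced to its shape and ignoring the locking:

	struct mount *mp;

	(void)vn_start_write(vp, &mp, V_WAIT);	/* may sleep; sets mp */
	error = VOP_PUTPAGES(vp, m, bytes, sync, rtvals, 0);
	vn_finished_write(mp);
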


 /*
  * This is now called from local media FS's to operate against their
  * own vnodes if they fail to implement VOP_PUTPAGES.
  *
  * This is typically called indirectly via the pageout daemon and

--- 86 unchanged lines hidden ---

 }

 struct vnode *
 vnode_pager_lock(object)
 	vm_object_t object;
 {
 	struct proc *p = curproc;	/* XXX */

+	mtx_assert(&vm_mtx, MA_NOTOWNED);
+	mtx_assert(&Giant, MA_OWNED);
 	for (; object != NULL; object = object->backing_object) {
 		if (object->type != OBJT_VNODE)
 			continue;
 		if (object->flags & OBJ_DEAD)
 			return NULL;

 		while (vget(object->handle,
 		    LK_NOPAUSE | LK_SHARED | LK_RETRY | LK_CANRECURSE, p)) {
 			if ((object->flags & OBJ_DEAD) || (object->type != OBJT_VNODE))
 				return NULL;
 			printf("vnode_pager_lock: retrying\n");
 		}
 		return object->handle;
 	}
 	return NULL;
 }