vnode_pager.c (9456) | vnode_pager.c (9507) |
---|---|
1/* 2 * Copyright (c) 1990 University of Utah. 3 * Copyright (c) 1991 The Regents of the University of California. 4 * All rights reserved. | 1/* 2 * Copyright (c) 1990 University of Utah. 3 * Copyright (c) 1991 The Regents of the University of California. 4 * All rights reserved. |
5 * Copyright (c) 1993,1994 John S. Dyson | 5 * Copyright (c) 1993, 1994 John S. Dyson 6 * Copyright (c) 1995, David Greenman |
6 * 7 * This code is derived from software contributed to Berkeley by 8 * the Systems Programming Group of the University of Utah Computer 9 * Science Department. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: --- 18 unchanged lines hidden (view full) --- 32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 37 * SUCH DAMAGE. 38 * 39 * from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91 | 7 * 8 * This code is derived from software contributed to Berkeley by 9 * the Systems Programming Group of the University of Utah Computer 10 * Science Department. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: --- 18 unchanged lines hidden (view full) --- 33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 38 * SUCH DAMAGE. 39 * 40 * from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91 |
40 * $Id: vnode_pager.c,v 1.42 1995/07/06 11:48:48 davidg Exp $ | 41 * $Id: vnode_pager.c,v 1.43 1995/07/09 06:58:03 davidg Exp $ |
41 */ 42 43/* 44 * Page to/from files (vnodes). | 42 */ 43 44/* 45 * Page to/from files (vnodes). |
45 * 46 * TODO: 47 * pageouts 48 * fix credential use (uses current process credentials now) | |
49 */ 50 51/* | 46 */ 47 48/* |
52 * 1) Supports multiple - block reads/writes 53 * 2) Bypasses buffer cache for reads 54 * | |
55 * TODO: | 49 * TODO: |
56 * Implement getpage/putpage interface for filesystems. Should | 50 * Implement VOP_GETPAGES/PUTPAGES interface for filesystems. Will |
57 * greatly re-simplify the vnode_pager. | 51 * greatly re-simplify the vnode_pager. |
58 * | |
59 */ 60 61#include <sys/param.h> 62#include <sys/systm.h> 63#include <sys/kernel.h> 64#include <sys/proc.h> 65#include <sys/malloc.h> 66#include <sys/vnode.h> 67#include <sys/uio.h> 68#include <sys/mount.h> | 52 */ 53 54#include <sys/param.h> 55#include <sys/systm.h> 56#include <sys/kernel.h> 57#include <sys/proc.h> 58#include <sys/malloc.h> 59#include <sys/vnode.h> 60#include <sys/uio.h> 61#include <sys/mount.h> |
62#include <sys/buf.h> |
|
69 70#include <vm/vm.h> 71#include <vm/vm_page.h> | 63 64#include <vm/vm.h> 65#include <vm/vm_page.h> |
66#include <vm/vm_pager.h> |
|
72#include <vm/vnode_pager.h> 73 | 67#include <vm/vnode_pager.h> 68 |
74#include <sys/buf.h> 75#include <miscfs/specfs/specdev.h> 76 77int vnode_pager_putmulti(); 78 79void vnode_pager_init(); 80void vnode_pager_dealloc(); 81int vnode_pager_getpage(); 82int vnode_pager_getmulti(); 83int vnode_pager_putpage(); 84boolean_t vnode_pager_haspage(); 85 | |
86struct pagerops vnodepagerops = { | 69struct pagerops vnodepagerops = { |
87 vnode_pager_init, | 70 NULL, |
88 vnode_pager_alloc, 89 vnode_pager_dealloc, | 71 vnode_pager_alloc, 72 vnode_pager_dealloc, |
90 vnode_pager_getpage, 91 vnode_pager_getmulti, 92 vnode_pager_putpage, 93 vnode_pager_putmulti, 94 vnode_pager_haspage | 73 vnode_pager_getpages, 74 vnode_pager_putpages, 75 vnode_pager_haspage, 76 NULL |
95}; 96 | 77}; 78 |
97 98 99static int vnode_pager_input(vn_pager_t vnp, vm_page_t * m, int count, int reqpage); 100static int vnode_pager_output(vn_pager_t vnp, vm_page_t * m, int count, int *rtvals); 101 102extern vm_map_t pager_map; 103 104struct pagerlst vnode_pager_list; /* list of managed vnodes */ 105 106#define MAXBP (PAGE_SIZE/DEV_BSIZE); 107 108void 109vnode_pager_init() 110{ 111 TAILQ_INIT(&vnode_pager_list); 112} 113 | |
114/* 115 * Allocate (or lookup) pager for a vnode. 116 * Handle is a vnode pointer. 117 */ | 79/* 80 * Allocate (or lookup) pager for a vnode. 81 * Handle is a vnode pointer. 82 */ |
118vm_pager_t | 83vm_object_t |
119vnode_pager_alloc(handle, size, prot, offset) 120 void *handle; 121 vm_size_t size; 122 vm_prot_t prot; 123 vm_offset_t offset; 124{ | 84vnode_pager_alloc(handle, size, prot, offset) 85 void *handle; 86 vm_size_t size; 87 vm_prot_t prot; 88 vm_offset_t offset; 89{ |
125 register vm_pager_t pager; 126 register vn_pager_t vnp; | |
127 vm_object_t object; 128 struct vnode *vp; 129 130 /* 131 * Pageout to vnode, no can do yet. 132 */ 133 if (handle == NULL) 134 return (NULL); --- 9 unchanged lines hidden (view full) --- 144 tsleep(vp, PVM, "vnpobj", 0); 145 } 146 vp->v_flag |= VOLOCK; 147 148 /* 149 * If the object is being terminated, wait for it to 150 * go away. 151 */ | 90 vm_object_t object; 91 struct vnode *vp; 92 93 /* 94 * Pageout to vnode, no can do yet. 95 */ 96 if (handle == NULL) 97 return (NULL); --- 9 unchanged lines hidden (view full) --- 107 tsleep(vp, PVM, "vnpobj", 0); 108 } 109 vp->v_flag |= VOLOCK; 110 111 /* 112 * If the object is being terminated, wait for it to 113 * go away. 114 */ |
152 while (((object = vp->v_object) != NULL) && (object->flags & OBJ_DEAD)) | 115 while (((object = vp->v_object) != NULL) && (object->flags & OBJ_DEAD)) { |
153 tsleep(object, PVM, "vadead", 0); | 116 tsleep(object, PVM, "vadead", 0); |
117 } |
|
154 | 118 |
155 pager = NULL; 156 if (object != NULL) 157 pager = object->pager; 158 if (pager == NULL) { 159 | 119 if (object == NULL) { |
160 /* | 120 /* |
161 * Allocate pager structures 162 */ 163 pager = (vm_pager_t) malloc(sizeof *pager, M_VMPAGER, M_WAITOK); 164 vnp = (vn_pager_t) malloc(sizeof *vnp, M_VMPGDATA, M_WAITOK); 165 166 /* | |
167 * And an object of the appropriate size 168 */ | 121 * And an object of the appropriate size 122 */ |
169 object = vm_object_allocate(round_page(size)); | 123 object = vm_object_allocate(OBJT_VNODE, round_page(size)); |
170 object->flags = OBJ_CANPERSIST; | 124 object->flags = OBJ_CANPERSIST; |
171 vm_object_enter(object, pager); 172 object->pager = pager; | |
173 174 /* | 125 126 /* |
175 * Hold a reference to the vnode and initialize pager data. | 127 * Hold a reference to the vnode and initialize object data. |
176 */ 177 VREF(vp); | 128 */ 129 VREF(vp); |
178 vnp->vnp_flags = 0; 179 vnp->vnp_vp = vp; 180 vnp->vnp_size = size; | 130 object->un_pager.vnp.vnp_size = size; |
181 | 131 |
182 TAILQ_INSERT_TAIL(&vnode_pager_list, pager, pg_list); 183 pager->pg_handle = handle; 184 pager->pg_type = PG_VNODE; 185 pager->pg_ops = &vnodepagerops; 186 pager->pg_data = (caddr_t) vnp; 187 vp->v_object = (caddr_t) object; | 132 object->handle = handle; 133 vp->v_object = object; |
188 } else { | 134 } else { |
189 | |
190 /* | 135 /* |
191 * vm_object_lookup() will remove the object from the cache if 192 * found and also gain a reference to the object. | 136 * vm_object_reference() will remove the object from the cache if 137 * found and gain a reference to the object. |
193 */ | 138 */ |
194 (void) vm_object_lookup(pager); | 139 vm_object_reference(object); |
195 } 196 197 if (vp->v_type == VREG) 198 vp->v_flag |= VVMIO; 199 200 vp->v_flag &= ~VOLOCK; 201 if (vp->v_flag & VOWANT) { 202 vp->v_flag &= ~VOWANT; 203 wakeup(vp); 204 } | 140 } 141 142 if (vp->v_type == VREG) 143 vp->v_flag |= VVMIO; 144 145 vp->v_flag &= ~VOLOCK; 146 if (vp->v_flag & VOWANT) { 147 vp->v_flag &= ~VOWANT; 148 wakeup(vp); 149 } |
205 return (pager); | 150 return (object); |
206} 207 208void | 151} 152 153void |
209vnode_pager_dealloc(pager) 210 vm_pager_t pager; 211{ 212 register vn_pager_t vnp = (vn_pager_t) pager->pg_data; 213 register struct vnode *vp; | 154vnode_pager_dealloc(object) |
214 vm_object_t object; | 155 vm_object_t object; |
156{ 157 register struct vnode *vp = object->handle; |
|
215 | 158 |
216 vp = vnp->vnp_vp; 217 if (vp) { 218 int s = splbio(); | 159 if (vp == NULL) 160 panic("vnode_pager_dealloc: pager already dealloced"); |
219 | 161 |
220 object = vp->v_object; 221 if (object) { 222 while (object->paging_in_progress) { 223 object->flags |= OBJ_PIPWNT; 224 tsleep(object, PVM, "vnpdea", 0); 225 } | 162 if (object->paging_in_progress) { 163 int s = splbio(); 164 while (object->paging_in_progress) { 165 object->flags |= OBJ_PIPWNT; 166 tsleep(object, PVM, "vnpdea", 0); |
226 } 227 splx(s); | 167 } 168 splx(s); |
228 229 vp->v_object = NULL; 230 vp->v_flag &= ~(VTEXT | VVMIO); 231 vp->v_flag |= VAGE; 232 vrele(vp); | |
233 } | 169 } |
234 TAILQ_REMOVE(&vnode_pager_list, pager, pg_list); 235 free((caddr_t) vnp, M_VMPGDATA); 236 free((caddr_t) pager, M_VMPAGER); 237} | |
238 | 170 |
239int 240vnode_pager_getmulti(pager, m, count, reqpage, sync) 241 vm_pager_t pager; 242 vm_page_t *m; 243 int count; 244 int reqpage; 245 boolean_t sync; 246{ | 171 object->handle = NULL; |
247 | 172 |
248 return vnode_pager_input((vn_pager_t) pager->pg_data, m, count, reqpage); | 173 vp->v_object = NULL; 174 vp->v_flag &= ~(VTEXT | VVMIO); 175 vp->v_flag |= VAGE; 176 vrele(vp); |
249} 250 | 177} 178 |
251int 252vnode_pager_getpage(pager, m, sync) 253 vm_pager_t pager; 254 vm_page_t m; 255 boolean_t sync; 256{ 257 258 vm_page_t marray[1]; 259 260 if (pager == NULL) 261 return FALSE; 262 marray[0] = m; 263 264 return vnode_pager_input((vn_pager_t) pager->pg_data, marray, 1, 0); 265} 266 | |
267boolean_t | 179boolean_t |
268vnode_pager_putpage(pager, m, sync) 269 vm_pager_t pager; 270 vm_page_t m; 271 boolean_t sync; 272{ 273 vm_page_t marray[1]; 274 int rtvals[1]; 275 276 if (pager == NULL) 277 return FALSE; 278 marray[0] = m; 279 vnode_pager_output((vn_pager_t) pager->pg_data, marray, 1, rtvals); 280 return rtvals[0]; 281} 282 283int 284vnode_pager_putmulti(pager, m, c, sync, rtvals) 285 vm_pager_t pager; 286 vm_page_t *m; 287 int c; 288 boolean_t sync; 289 int *rtvals; 290{ 291 return vnode_pager_output((vn_pager_t) pager->pg_data, m, c, rtvals); 292} 293 294 295boolean_t 296vnode_pager_haspage(pager, offset) 297 vm_pager_t pager; | 180vnode_pager_haspage(object, offset, before, after) 181 vm_object_t object; |
298 vm_offset_t offset; | 182 vm_offset_t offset; |
183 int *before; 184 int *after; |
|
299{ | 185{ |
300 register vn_pager_t vnp = (vn_pager_t) pager->pg_data; 301 register struct vnode *vp = vnp->vnp_vp; | 186 struct vnode *vp = object->handle; |
302 daddr_t bn; | 187 daddr_t bn; |
303 int err; 304 daddr_t block; | 188 int err, run; 189 daddr_t startblock, reqblock; |
305 306 /* 307 * If filesystem no longer mounted or offset beyond end of file we do 308 * not have the page. 309 */ | 190 191 /* 192 * If filesystem no longer mounted or offset beyond end of file we do 193 * not have the page. 194 */ |
310 if ((vp->v_mount == NULL) || (offset >= vnp->vnp_size)) | 195 if ((vp->v_mount == NULL) || (offset >= object->un_pager.vnp.vnp_size)) |
311 return FALSE; 312 | 196 return FALSE; 197 |
313 block = offset / vp->v_mount->mnt_stat.f_iosize; 314 if (incore(vp, block)) 315 return TRUE; | 198 startblock = reqblock = offset / vp->v_mount->mnt_stat.f_iosize; 199 if (startblock > PFCLUSTER_BEHIND) 200 startblock -= PFCLUSTER_BEHIND; 201 else 202 startblock = 0;; |
316 | 203 |
317 /* 318 * Read the index to find the disk block to read from. If there is no 319 * block, report that we don't have this data. 320 * 321 * Assumes that the vnode has whole page or nothing. 322 */ 323 err = VOP_BMAP(vp, block, (struct vnode **) 0, &bn, 0); | 204 if (before != NULL) { 205 /* 206 * Loop looking for a contiguous chunk that includes the 207 * requested page. 208 */ 209 while (TRUE) { 210 err = VOP_BMAP(vp, startblock, (struct vnode **) 0, &bn, &run); 211 if (err || bn == -1) { 212 if (startblock < reqblock) { 213 startblock++; 214 continue; 215 } 216 *before = 0; 217 if (after != NULL) 218 *after = 0; 219 return err ? TRUE : FALSE; 220 } 221 if ((startblock + run) < reqblock) { 222 startblock += run + 1; 223 continue; 224 } 225 *before = reqblock - startblock; 226 if (after != NULL) 227 *after = run; 228 return TRUE; 229 } 230 } 231 232 err = VOP_BMAP(vp, reqblock, (struct vnode **) 0, &bn, after); |
324 if (err) | 233 if (err) |
325 return (TRUE); | 234 return TRUE; |
326 return ((long) bn < 0 ? FALSE : TRUE); 327} 328 329/* 330 * Lets the VM system know about a change in size for a file. | 235 return ((long) bn < 0 ? FALSE : TRUE); 236} 237 238/* 239 * Lets the VM system know about a change in size for a file. |
331 * If this vnode is mapped into some address space (i.e. we have a pager 332 * for it) we adjust our own internal size and flush any cached pages in | 240 * We adjust our own internal size and flush any cached pages in |
333 * the associated object that are affected by the size change. 334 * 335 * Note: this routine may be invoked as a result of a pager put 336 * operation (possibly at object termination time), so we must be careful. 337 */ 338void 339vnode_pager_setsize(vp, nsize) 340 struct vnode *vp; 341 u_long nsize; 342{ | 241 * the associated object that are affected by the size change. 242 * 243 * Note: this routine may be invoked as a result of a pager put 244 * operation (possibly at object termination time), so we must be careful. 245 */ 246void 247vnode_pager_setsize(vp, nsize) 248 struct vnode *vp; 249 u_long nsize; 250{ |
343 register vn_pager_t vnp; 344 register vm_object_t object; 345 vm_pager_t pager; | 251 vm_object_t object = vp->v_object; |
346 | 252 |
347 /* 348 * Not a mapped vnode 349 */ 350 if (vp == NULL || vp->v_type != VREG || vp->v_object == NULL) | 253 if (object == NULL) |
351 return; 352 353 /* 354 * Hasn't changed size 355 */ | 254 return; 255 256 /* 257 * Hasn't changed size 258 */ |
356 object = vp->v_object; 357 if (object == NULL) | 259 if (nsize == object->un_pager.vnp.vnp_size) |
358 return; | 260 return; |
359 if ((pager = object->pager) == NULL) 360 return; 361 vnp = (vn_pager_t) pager->pg_data; 362 if (nsize == vnp->vnp_size) 363 return; | |
364 365 /* 366 * File has shrunk. Toss any cached pages beyond the new EOF. 367 */ | 261 262 /* 263 * File has shrunk. Toss any cached pages beyond the new EOF. 264 */ |
368 if (nsize < vnp->vnp_size) { 369 if (round_page((vm_offset_t) nsize) < vnp->vnp_size) { 370 vm_object_lock(object); | 265 if (nsize < object->un_pager.vnp.vnp_size) { 266 if (round_page((vm_offset_t) nsize) < object->un_pager.vnp.vnp_size) { |
371 vm_object_page_remove(object, | 267 vm_object_page_remove(object, |
372 round_page((vm_offset_t) nsize), vnp->vnp_size, FALSE); 373 vm_object_unlock(object); | 268 round_page((vm_offset_t) nsize), object->un_pager.vnp.vnp_size, FALSE); |
374 } 375 /* 376 * this gets rid of garbage at the end of a page that is now 377 * only partially backed by the vnode... 378 */ 379 if (nsize & PAGE_MASK) { 380 vm_offset_t kva; 381 vm_page_t m; 382 383 m = vm_page_lookup(object, trunc_page((vm_offset_t) nsize)); 384 if (m) { 385 kva = vm_pager_map_page(m); 386 bzero((caddr_t) kva + (nsize & PAGE_MASK), 387 round_page(nsize) - nsize); 388 vm_pager_unmap_page(kva); 389 } 390 } 391 } | 269 } 270 /* 271 * this gets rid of garbage at the end of a page that is now 272 * only partially backed by the vnode... 273 */ 274 if (nsize & PAGE_MASK) { 275 vm_offset_t kva; 276 vm_page_t m; 277 278 m = vm_page_lookup(object, trunc_page((vm_offset_t) nsize)); 279 if (m) { 280 kva = vm_pager_map_page(m); 281 bzero((caddr_t) kva + (nsize & PAGE_MASK), 282 round_page(nsize) - nsize); 283 vm_pager_unmap_page(kva); 284 } 285 } 286 } |
392 vnp->vnp_size = (vm_offset_t) nsize; | 287 object->un_pager.vnp.vnp_size = (vm_offset_t) nsize; |
393 object->size = round_page(nsize); 394} 395 396void 397vnode_pager_umount(mp) 398 register struct mount *mp; 399{ | 288 object->size = round_page(nsize); 289} 290 291void 292vnode_pager_umount(mp) 293 register struct mount *mp; 294{ |
400 register vm_pager_t pager, npager; 401 struct vnode *vp; | 295 struct vnode *vp, *nvp; |
402 | 296 |
403 for (pager = vnode_pager_list.tqh_first; pager != NULL; pager = npager) { | 297loop: 298 for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) { |
404 /* | 299 /* |
300 * Vnode can be reclaimed by getnewvnode() while we 301 * traverse the list. 302 */ 303 if (vp->v_mount != mp) 304 goto loop; 305 306 /* |
|
405 * Save the next pointer now since uncaching may terminate the | 307 * Save the next pointer now since uncaching may terminate the |
406 * object and render pager invalid | 308 * object and render vnode invalid |
407 */ | 309 */ |
408 npager = pager->pg_list.tqe_next; 409 vp = ((vn_pager_t) pager->pg_data)->vnp_vp; 410 if (mp == (struct mount *) 0 || vp->v_mount == mp) { | 310 nvp = vp->v_mntvnodes.le_next; 311 312 if (vp->v_object != NULL) { |
411 VOP_LOCK(vp); | 313 VOP_LOCK(vp); |
412 (void) vnode_pager_uncache(vp); | 314 vnode_pager_uncache(vp); |
413 VOP_UNLOCK(vp); 414 } 415 } 416} 417 418/* 419 * Remove vnode associated object from the object cache. 420 * This routine must be called with the vnode locked. 421 * 422 * XXX unlock the vnode. 423 * We must do this since uncaching the object may result in its 424 * destruction which may initiate paging activity which may necessitate 425 * re-locking the vnode. 426 */ | 315 VOP_UNLOCK(vp); 316 } 317 } 318} 319 320/* 321 * Remove vnode associated object from the object cache. 322 * This routine must be called with the vnode locked. 323 * 324 * XXX unlock the vnode. 325 * We must do this since uncaching the object may result in its 326 * destruction which may initiate paging activity which may necessitate 327 * re-locking the vnode. 328 */ |
427boolean_t | 329void |
428vnode_pager_uncache(vp) | 330vnode_pager_uncache(vp) |
429 register struct vnode *vp; | 331 struct vnode *vp; |
430{ | 332{ |
431 register vm_object_t object; 432 boolean_t uncached; 433 vm_pager_t pager; | 333 vm_object_t object; |
434 435 /* 436 * Not a mapped vnode 437 */ 438 object = vp->v_object; 439 if (object == NULL) | 334 335 /* 336 * Not a mapped vnode 337 */ 338 object = vp->v_object; 339 if (object == NULL) |
440 return (TRUE); | 340 return; |
441 | 341 |
442 pager = object->pager; 443 if (pager == NULL) 444 return (TRUE); 445 446#ifdef DEBUG 447 if (!VOP_ISLOCKED(vp)) { 448 extern int (**nfsv2_vnodeop_p)(); 449 450 if (vp->v_op != nfsv2_vnodeop_p) 451 panic("vnode_pager_uncache: vnode not locked!"); 452 } 453#endif 454 /* 455 * Must use vm_object_lookup() as it actually removes the object from 456 * the cache list. 457 */ 458 object = vm_object_lookup(pager); 459 if (object) { 460 uncached = (object->ref_count <= 1); 461 VOP_UNLOCK(vp); 462 pager_cache(object, FALSE); 463 VOP_LOCK(vp); 464 } else 465 uncached = TRUE; 466 return (uncached); | 342 vm_object_reference(object); 343 VOP_UNLOCK(vp); 344 pager_cache(object, FALSE); 345 VOP_LOCK(vp); 346 return; |
467} 468 469 470void 471vnode_pager_freepage(m) 472 vm_page_t m; 473{ 474 PAGE_WAKEUP(m); --- 43 unchanged lines hidden (view full) --- 518/* 519 * interrupt routine for I/O completion 520 */ 521void 522vnode_pager_iodone(bp) 523 struct buf *bp; 524{ 525 bp->b_flags |= B_DONE; | 347} 348 349 350void 351vnode_pager_freepage(m) 352 vm_page_t m; 353{ 354 PAGE_WAKEUP(m); --- 43 unchanged lines hidden (view full) --- 398/* 399 * interrupt routine for I/O completion 400 */ 401void 402vnode_pager_iodone(bp) 403 struct buf *bp; 404{ 405 bp->b_flags |= B_DONE; |
526 wakeup((caddr_t) bp); | 406 wakeup(bp); |
527} 528 529/* 530 * small block file system vnode pager input 531 */ 532int | 407} 408 409/* 410 * small block file system vnode pager input 411 */ 412int |
533vnode_pager_input_smlfs(vnp, m) 534 vn_pager_t vnp; | 413vnode_pager_input_smlfs(object, m) 414 vm_object_t object; |
535 vm_page_t m; 536{ 537 int i; 538 int s; 539 struct vnode *dp, *vp; 540 struct buf *bp; 541 vm_offset_t kva; 542 int fileaddr; | 415 vm_page_t m; 416{ 417 int i; 418 int s; 419 struct vnode *dp, *vp; 420 struct buf *bp; 421 vm_offset_t kva; 422 int fileaddr; |
543 int block; | |
544 vm_offset_t bsize; 545 int error = 0; 546 | 423 vm_offset_t bsize; 424 int error = 0; 425 |
547 vp = vnp->vnp_vp; | 426 vp = object->handle; |
548 bsize = vp->v_mount->mnt_stat.f_iosize; 549 550 551 VOP_BMAP(vp, 0, &dp, 0, 0); 552 553 kva = vm_pager_map_page(m); 554 555 for (i = 0; i < PAGE_SIZE / bsize; i++) { --- 41 unchanged lines hidden (view full) --- 597 break; 598 599 vm_page_set_clean(m, (i * bsize) & (PAGE_SIZE-1), bsize); 600 vm_page_set_valid(m, (i * bsize) & (PAGE_SIZE-1), bsize); 601 } else { 602 vm_page_set_clean(m, (i * bsize) & (PAGE_SIZE-1), bsize); 603 bzero((caddr_t) kva + i * bsize, bsize); 604 } | 427 bsize = vp->v_mount->mnt_stat.f_iosize; 428 429 430 VOP_BMAP(vp, 0, &dp, 0, 0); 431 432 kva = vm_pager_map_page(m); 433 434 for (i = 0; i < PAGE_SIZE / bsize; i++) { --- 41 unchanged lines hidden (view full) --- 476 break; 477 478 vm_page_set_clean(m, (i * bsize) & (PAGE_SIZE-1), bsize); 479 vm_page_set_valid(m, (i * bsize) & (PAGE_SIZE-1), bsize); 480 } else { 481 vm_page_set_clean(m, (i * bsize) & (PAGE_SIZE-1), bsize); 482 bzero((caddr_t) kva + i * bsize, bsize); 483 } |
605nextblock: | |
606 } 607 vm_pager_unmap_page(kva); 608 pmap_clear_modify(VM_PAGE_TO_PHYS(m)); 609 if (error) { 610 return VM_PAGER_ERROR; 611 } 612 return VM_PAGER_OK; 613 614} 615 616 617/* 618 * old style vnode pager output routine 619 */ 620int | 484 } 485 vm_pager_unmap_page(kva); 486 pmap_clear_modify(VM_PAGE_TO_PHYS(m)); 487 if (error) { 488 return VM_PAGER_ERROR; 489 } 490 return VM_PAGER_OK; 491 492} 493 494 495/* 496 * old style vnode pager output routine 497 */ 498int |
621vnode_pager_input_old(vnp, m) 622 vn_pager_t vnp; | 499vnode_pager_input_old(object, m) 500 vm_object_t object; |
623 vm_page_t m; 624{ 625 struct uio auio; 626 struct iovec aiov; 627 int error; 628 int size; 629 vm_offset_t kva; 630 631 error = 0; 632 633 /* 634 * Return failure if beyond current EOF 635 */ | 501 vm_page_t m; 502{ 503 struct uio auio; 504 struct iovec aiov; 505 int error; 506 int size; 507 vm_offset_t kva; 508 509 error = 0; 510 511 /* 512 * Return failure if beyond current EOF 513 */ |
636 if (m->offset >= vnp->vnp_size) { | 514 if (m->offset >= object->un_pager.vnp.vnp_size) { |
637 return VM_PAGER_BAD; 638 } else { 639 size = PAGE_SIZE; | 515 return VM_PAGER_BAD; 516 } else { 517 size = PAGE_SIZE; |
640 if (m->offset + size > vnp->vnp_size) 641 size = vnp->vnp_size - m->offset; | 518 if (m->offset + size > object->un_pager.vnp.vnp_size) 519 size = object->un_pager.vnp.vnp_size - m->offset; |
642 643 /* 644 * Allocate a kernel virtual address and initialize so that 645 * we can use VOP_READ/WRITE routines. 646 */ 647 kva = vm_pager_map_page(m); 648 649 aiov.iov_base = (caddr_t) kva; 650 aiov.iov_len = size; 651 auio.uio_iov = &aiov; 652 auio.uio_iovcnt = 1; 653 auio.uio_offset = m->offset; 654 auio.uio_segflg = UIO_SYSSPACE; 655 auio.uio_rw = UIO_READ; 656 auio.uio_resid = size; 657 auio.uio_procp = (struct proc *) 0; 658 | 520 521 /* 522 * Allocate a kernel virtual address and initialize so that 523 * we can use VOP_READ/WRITE routines. 524 */ 525 kva = vm_pager_map_page(m); 526 527 aiov.iov_base = (caddr_t) kva; 528 aiov.iov_len = size; 529 auio.uio_iov = &aiov; 530 auio.uio_iovcnt = 1; 531 auio.uio_offset = m->offset; 532 auio.uio_segflg = UIO_SYSSPACE; 533 auio.uio_rw = UIO_READ; 534 auio.uio_resid = size; 535 auio.uio_procp = (struct proc *) 0; 536 |
659 error = VOP_READ(vnp->vnp_vp, &auio, 0, curproc->p_ucred); | 537 error = VOP_READ(object->handle, &auio, 0, curproc->p_ucred); |
660 if (!error) { 661 register int count = size - auio.uio_resid; 662 663 if (count == 0) 664 error = EINVAL; 665 else if (count != PAGE_SIZE) 666 bzero((caddr_t) kva + count, PAGE_SIZE - count); 667 } 668 vm_pager_unmap_page(kva); 669 } 670 pmap_clear_modify(VM_PAGE_TO_PHYS(m)); 671 m->dirty = 0; 672 return error ? VM_PAGER_ERROR : VM_PAGER_OK; 673} 674 675/* 676 * generic vnode pager input routine 677 */ 678int | 538 if (!error) { 539 register int count = size - auio.uio_resid; 540 541 if (count == 0) 542 error = EINVAL; 543 else if (count != PAGE_SIZE) 544 bzero((caddr_t) kva + count, PAGE_SIZE - count); 545 } 546 vm_pager_unmap_page(kva); 547 } 548 pmap_clear_modify(VM_PAGE_TO_PHYS(m)); 549 m->dirty = 0; 550 return error ? VM_PAGER_ERROR : VM_PAGER_OK; 551} 552 553/* 554 * generic vnode pager input routine 555 */ 556int |
679vnode_pager_input(vnp, m, count, reqpage) 680 register vn_pager_t vnp; | 557vnode_pager_getpages(object, m, count, reqpage) 558 vm_object_t object; |
681 vm_page_t *m; | 559 vm_page_t *m; |
682 int count, reqpage; | 560 int count; 561 int reqpage; |
683{ | 562{ |
684 int i; | |
685 vm_offset_t kva, foff; | 563 vm_offset_t kva, foff; |
686 int size; 687 vm_object_t object; | 564 int i, size, bsize, first, firstaddr; |
688 struct vnode *dp, *vp; | 565 struct vnode *dp, *vp; |
689 int bsize; 690 691 int first, last; 692 int firstaddr; 693 int block, offset; | |
694 int runpg; 695 int runend; | 566 int runpg; 567 int runend; |
696 | |
697 struct buf *bp; 698 int s; | 568 struct buf *bp; 569 int s; |
699 int failflag; 700 | |
701 int error = 0; 702 | 570 int error = 0; 571 |
703 object = m[reqpage]->object; /* all vm_page_t items are in same 704 * object */ 705 706 vp = vnp->vnp_vp; | 572 vp = object->handle; |
707 bsize = vp->v_mount->mnt_stat.f_iosize; 708 709 /* get the UNDERLYING device for the file with VOP_BMAP() */ 710 711 /* 712 * originally, we did not check for an error return value -- assuming 713 * an fs always has a bmap entry point -- that assumption is wrong!!! 714 */ --- 5 unchanged lines hidden (view full) --- 720 if (VOP_BMAP(vp, 0, &dp, 0, 0)) { 721 for (i = 0; i < count; i++) { 722 if (i != reqpage) { 723 vnode_pager_freepage(m[i]); 724 } 725 } 726 cnt.v_vnodein++; 727 cnt.v_vnodepgsin++; | 573 bsize = vp->v_mount->mnt_stat.f_iosize; 574 575 /* get the UNDERLYING device for the file with VOP_BMAP() */ 576 577 /* 578 * originally, we did not check for an error return value -- assuming 579 * an fs always has a bmap entry point -- that assumption is wrong!!! 580 */ --- 5 unchanged lines hidden (view full) --- 586 if (VOP_BMAP(vp, 0, &dp, 0, 0)) { 587 for (i = 0; i < count; i++) { 588 if (i != reqpage) { 589 vnode_pager_freepage(m[i]); 590 } 591 } 592 cnt.v_vnodein++; 593 cnt.v_vnodepgsin++; |
728 return vnode_pager_input_old(vnp, m[reqpage]); | 594 return vnode_pager_input_old(object, m[reqpage]); |
729 730 /* 731 * if the blocksize is smaller than a page size, then use 732 * special small filesystem code. NFS sometimes has a small 733 * blocksize, but it can handle large reads itself. 734 */ 735 } else if ((PAGE_SIZE / bsize) > 1 && 736 (vp->v_mount->mnt_stat.f_type != MOUNT_NFS)) { 737 738 for (i = 0; i < count; i++) { 739 if (i != reqpage) { 740 vnode_pager_freepage(m[i]); 741 } 742 } 743 cnt.v_vnodein++; 744 cnt.v_vnodepgsin++; | 595 596 /* 597 * if the blocksize is smaller than a page size, then use 598 * special small filesystem code. NFS sometimes has a small 599 * blocksize, but it can handle large reads itself. 600 */ 601 } else if ((PAGE_SIZE / bsize) > 1 && 602 (vp->v_mount->mnt_stat.f_type != MOUNT_NFS)) { 603 604 for (i = 0; i < count; i++) { 605 if (i != reqpage) { 606 vnode_pager_freepage(m[i]); 607 } 608 } 609 cnt.v_vnodein++; 610 cnt.v_vnodepgsin++; |
745 return vnode_pager_input_smlfs(vnp, m[reqpage]); | 611 return vnode_pager_input_smlfs(object, m[reqpage]); |
746 } 747 /* 748 * if ANY DEV_BSIZE blocks are valid on a large filesystem block 749 * then, the entire page is valid -- 750 */ 751 if (m[reqpage]->valid) { 752 m[reqpage]->valid = VM_PAGE_BITS_ALL; 753 for (i = 0; i < count; i++) { --- 9 unchanged lines hidden (view full) --- 763 764 firstaddr = -1; 765 /* 766 * calculate the run that includes the required page 767 */ 768 for(first = 0, i = 0; i < count; i = runend) { 769 firstaddr = vnode_pager_addr(vp, m[i]->offset, &runpg); 770 if (firstaddr == -1) { | 612 } 613 /* 614 * if ANY DEV_BSIZE blocks are valid on a large filesystem block 615 * then, the entire page is valid -- 616 */ 617 if (m[reqpage]->valid) { 618 m[reqpage]->valid = VM_PAGE_BITS_ALL; 619 for (i = 0; i < count; i++) { --- 9 unchanged lines hidden (view full) --- 629 630 firstaddr = -1; 631 /* 632 * calculate the run that includes the required page 633 */ 634 for(first = 0, i = 0; i < count; i = runend) { 635 firstaddr = vnode_pager_addr(vp, m[i]->offset, &runpg); 636 if (firstaddr == -1) { |
771 if( i == reqpage && foff < vnp->vnp_size) { 772 printf("vnode_pager_input: unexpected missing page: firstaddr: %d, foff: %d, vnp_size: %d\n", 773 firstaddr, foff, vnp->vnp_size); 774 panic("vnode_pager_input:..."); | 637 if (i == reqpage && foff < object->un_pager.vnp.vnp_size) { 638 panic("vnode_pager_putpages: unexpected missing page: firstaddr: %d, foff: %ld, vnp_size: %d", 639 firstaddr, foff, object->un_pager.vnp.vnp_size); |
775 } 776 vnode_pager_freepage(m[i]); 777 runend = i + 1; 778 first = runend; 779 continue; 780 } 781 runend = i + runpg; | 640 } 641 vnode_pager_freepage(m[i]); 642 runend = i + 1; 643 first = runend; 644 continue; 645 } 646 runend = i + runpg; |
782 if( runend <= reqpage) { | 647 if (runend <= reqpage) { |
783 int j; | 648 int j; |
784 for(j = i; j < runend; j++) { | 649 for (j = i; j < runend; j++) { |
785 vnode_pager_freepage(m[j]); 786 } 787 } else { | 650 vnode_pager_freepage(m[j]); 651 } 652 } else { |
788 if( runpg < (count - first)) { 789 for(i=first + runpg; i < count; i++) | 653 if (runpg < (count - first)) { 654 for (i = first + runpg; i < count; i++) |
790 vnode_pager_freepage(m[i]); 791 count = first + runpg; 792 } 793 break; 794 } 795 first = runend; 796 } 797 --- 13 unchanged lines hidden (view full) --- 811 * calculate the file virtual address for the transfer 812 */ 813 foff = m[0]->offset; 814 815 /* 816 * calculate the size of the transfer 817 */ 818 size = count * PAGE_SIZE; | 655 vnode_pager_freepage(m[i]); 656 count = first + runpg; 657 } 658 break; 659 } 660 first = runend; 661 } 662 --- 13 unchanged lines hidden (view full) --- 676 * calculate the file virtual address for the transfer 677 */ 678 foff = m[0]->offset; 679 680 /* 681 * calculate the size of the transfer 682 */ 683 size = count * PAGE_SIZE; |
819 if ((foff + size) > vnp->vnp_size) 820 size = vnp->vnp_size - foff; | 684 if ((foff + size) > object->un_pager.vnp.vnp_size) 685 size = object->un_pager.vnp.vnp_size - foff; |
821 822 /* 823 * round up physical size for real devices 824 */ 825 if (dp->v_type == VBLK || dp->v_type == VCHR) 826 size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 827 828 bp = getpbuf(); --- 41 unchanged lines hidden (view full) --- 870 } 871 pmap_qremove(kva, count); 872 873 /* 874 * free the buffer header back to the swap buffer pool 875 */ 876 relpbuf(bp); 877 | 686 687 /* 688 * round up physical size for real devices 689 */ 690 if (dp->v_type == VBLK || dp->v_type == VCHR) 691 size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 692 693 bp = getpbuf(); --- 41 unchanged lines hidden (view full) --- 735 } 736 pmap_qremove(kva, count); 737 738 /* 739 * free the buffer header back to the swap buffer pool 740 */ 741 relpbuf(bp); 742 |
878finishup: | |
879 for (i = 0; i < count; i++) { 880 pmap_clear_modify(VM_PAGE_TO_PHYS(m[i])); 881 m[i]->dirty = 0; 882 m[i]->valid = VM_PAGE_BITS_ALL; 883 if (i != reqpage) { 884 885 /* 886 * whether or not to leave the page activated is up in --- 11 unchanged lines hidden (view full) --- 898 vm_page_deactivate(m[i]); 899 PAGE_WAKEUP(m[i]); 900 } else { 901 vnode_pager_freepage(m[i]); 902 } 903 } 904 } 905 if (error) { | 743 for (i = 0; i < count; i++) { 744 pmap_clear_modify(VM_PAGE_TO_PHYS(m[i])); 745 m[i]->dirty = 0; 746 m[i]->valid = VM_PAGE_BITS_ALL; 747 if (i != reqpage) { 748 749 /* 750 * whether or not to leave the page activated is up in --- 11 unchanged lines hidden (view full) --- 762 vm_page_deactivate(m[i]); 763 PAGE_WAKEUP(m[i]); 764 } else { 765 vnode_pager_freepage(m[i]); 766 } 767 } 768 } 769 if (error) { |
906 printf("vnode_pager_input: I/O read error\n"); | 770 printf("vnode_pager_getpages: I/O read error\n"); |
907 } 908 return (error ? VM_PAGER_ERROR : VM_PAGER_OK); 909} 910 911/* 912 * generic vnode pager output routine 913 */ 914int | 771 } 772 return (error ? VM_PAGER_ERROR : VM_PAGER_OK); 773} 774 775/* 776 * generic vnode pager output routine 777 */ 778int |
915vnode_pager_output(vnp, m, count, rtvals) 916 vn_pager_t vnp; | 779vnode_pager_putpages(object, m, count, sync, rtvals) 780 vm_object_t object; |
917 vm_page_t *m; 918 int count; | 781 vm_page_t *m; 782 int count; |
783 boolean_t sync; |
|
919 int *rtvals; 920{ 921 int i; 922 923 struct vnode *vp; 924 int maxsize, ncount; 925 struct uio auio; 926 struct iovec aiov; 927 int error; 928 | 784 int *rtvals; 785{ 786 int i; 787 788 struct vnode *vp; 789 int maxsize, ncount; 790 struct uio auio; 791 struct iovec aiov; 792 int error; 793 |
 929 vp = vnp->vnp_vp; | 794 	vp = object->handle;
930 for (i = 0; i < count; i++) 931 rtvals[i] = VM_PAGER_AGAIN; 932 933 if ((int) m[0]->offset < 0) { | 795 for (i = 0; i < count; i++) 796 rtvals[i] = VM_PAGER_AGAIN; 797 798 if ((int) m[0]->offset < 0) { |
934 printf("vnode_pager_output: attempt to write meta-data!!! -- 0x%x(%x)\n", m[0]->offset, m[0]->dirty); | 799 printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%x(%x)\n", m[0]->offset, m[0]->dirty); |
935 rtvals[0] = VM_PAGER_BAD; 936 return VM_PAGER_BAD; 937 } 938 939 maxsize = count * PAGE_SIZE; 940 ncount = count; 941 | 800 rtvals[0] = VM_PAGER_BAD; 801 return VM_PAGER_BAD; 802 } 803 804 maxsize = count * PAGE_SIZE; 805 ncount = count; 806 |
942 if (maxsize + m[0]->offset > vnp->vnp_size) { 943 if (vnp->vnp_size > m[0]->offset) 944 maxsize = vnp->vnp_size - m[0]->offset; | 807 if (maxsize + m[0]->offset > object->un_pager.vnp.vnp_size) { 808 if (object->un_pager.vnp.vnp_size > m[0]->offset) 809 maxsize = object->un_pager.vnp.vnp_size - m[0]->offset; |
945 else 946 maxsize = 0; 947 ncount = (maxsize + PAGE_SIZE - 1) / PAGE_SIZE; 948 if (ncount < count) { 949 for (i = ncount; i < count; i++) { 950 rtvals[i] = VM_PAGER_BAD; 951 } 952 if (ncount == 0) { | 810 else 811 maxsize = 0; 812 ncount = (maxsize + PAGE_SIZE - 1) / PAGE_SIZE; 813 if (ncount < count) { 814 for (i = ncount; i < count; i++) { 815 rtvals[i] = VM_PAGER_BAD; 816 } 817 if (ncount == 0) { |
953 printf("vnode_pager_output: write past end of file: %d, %d\n", 954 m[0]->offset, vnp->vnp_size); | 818 printf("vnode_pager_putpages: write past end of file: %d, %d\n", 819 m[0]->offset, object->un_pager.vnp.vnp_size); |
955 return rtvals[0]; 956 } 957 } 958 } 959 960 for (i = 0; i < count; i++) { 961 m[i]->busy++; 962 m[i]->flags &= ~PG_BUSY; --- 8 unchanged lines hidden (view full) --- 971 auio.uio_rw = UIO_WRITE; 972 auio.uio_resid = maxsize; 973 auio.uio_procp = (struct proc *) 0; 974 error = VOP_WRITE(vp, &auio, IO_VMIO, curproc->p_ucred); 975 cnt.v_vnodeout++; 976 cnt.v_vnodepgsout += ncount; 977 978 if (error) { | 820 return rtvals[0]; 821 } 822 } 823 } 824 825 for (i = 0; i < count; i++) { 826 m[i]->busy++; 827 m[i]->flags &= ~PG_BUSY; --- 8 unchanged lines hidden (view full) --- 836 auio.uio_rw = UIO_WRITE; 837 auio.uio_resid = maxsize; 838 auio.uio_procp = (struct proc *) 0; 839 error = VOP_WRITE(vp, &auio, IO_VMIO, curproc->p_ucred); 840 cnt.v_vnodeout++; 841 cnt.v_vnodepgsout += ncount; 842 843 if (error) { |
979 printf("vnode_pager_output: I/O error %d\n", error); | 844 printf("vnode_pager_putpages: I/O error %d\n", error); |
980 } 981 if (auio.uio_resid) { | 845 } 846 if (auio.uio_resid) { |
982 printf("vnode_pager_output: residual I/O %d at %d\n", auio.uio_resid, m[0]->offset); | 847 printf("vnode_pager_putpages: residual I/O %d at %d\n", auio.uio_resid, m[0]->offset); |
983 } 984 for (i = 0; i < count; i++) { 985 m[i]->busy--; 986 if (i < ncount) { 987 rtvals[i] = VM_PAGER_OK; 988 } 989 if ((m[i]->busy == 0) && (m[i]->flags & PG_WANTED)) | 848 } 849 for (i = 0; i < count; i++) { 850 m[i]->busy--; 851 if (i < ncount) { 852 rtvals[i] = VM_PAGER_OK; 853 } 854 if ((m[i]->busy == 0) && (m[i]->flags & PG_WANTED)) |
990 wakeup((caddr_t) m[i]); | 855 wakeup(m[i]); |
991 } 992 return rtvals[0]; 993} 994 995struct vnode * | 856 } 857 return rtvals[0]; 858} 859 860struct vnode * |
996vnode_pager_lock(vm_object_t object) { 997 998 for(;object;object=object->shadow) { 999 vn_pager_t vnp; 1000 if( !object->pager || (object->pager->pg_type != PG_VNODE)) | 861vnode_pager_lock(object) 862 vm_object_t object; 863{ 864 for (; object != NULL; object = object->backing_object) { 865 if (object->type != OBJT_VNODE) |
1001 continue; 1002 | 866 continue; 867 |
1003 vnp = (vn_pager_t) object->pager->pg_data; 1004 VOP_LOCK(vnp->vnp_vp); 1005 return vnp->vnp_vp; | 868 VOP_LOCK(object->handle); 869 return object->handle; |
1006 } | 870 } |
1007 return (struct vnode *)NULL; | 871 return NULL; |
1008} | 872} |
/*
 * Release the vnode lock acquired by vnode_pager_lock().
 */
void
vnode_pager_unlock(struct vnode *vp) {
	VOP_UNLOCK(vp);
}