/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_fault.c	8.4 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_fault.c,v 1.37 1995/11/20 12:19:53 phk Exp $
| * $Id: vm_fault.c,v 1.38 1995/12/07 12:48:10 davidg Exp $
 */

/*
 *	Page fault handling module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/resource.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

int vm_fault_additional_pages __P((vm_page_t, int, int, vm_page_t *, int *));

#define VM_FAULT_READ_AHEAD 4
#define VM_FAULT_READ_BEHIND 3
#define VM_FAULT_READ (VM_FAULT_READ_AHEAD+VM_FAULT_READ_BEHIND+1)

/*
 *	vm_fault:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
 */
int
vm_fault(map, vaddr, fault_type, change_wiring)
	vm_map_t map;
	vm_offset_t vaddr;
	vm_prot_t fault_type;
	boolean_t change_wiring;
{
	vm_object_t first_object;
	vm_offset_t first_offset;
|	vm_pindex_t first_pindex;
	vm_map_entry_t entry;
	register vm_object_t object;
	register vm_offset_t offset;
|	register vm_pindex_t pindex;
	vm_page_t m;
	vm_page_t first_m;
	vm_prot_t prot;
	int result;
	boolean_t wired;
	boolean_t su;
	boolean_t lookup_still_valid;
	vm_page_t old_m;
	vm_object_t next_object;
	vm_page_t marray[VM_FAULT_READ];
	int hardfault = 0;
	struct vnode *vp = NULL;

	cnt.v_vm_faults++;	/* needs lock XXX */
/*
 * Recovery actions
 */
#define FREE_PAGE(m) { \
	PAGE_WAKEUP(m); \
	vm_page_free(m); \
}

#define RELEASE_PAGE(m) { \
	PAGE_WAKEUP(m); \
	if ((m->flags & PG_ACTIVE) == 0) vm_page_activate(m); \
}

#define UNLOCK_MAP { \
	if (lookup_still_valid) { \
		vm_map_lookup_done(map, entry); \
		lookup_still_valid = FALSE; \
	} \
}

#define UNLOCK_THINGS { \
	vm_object_pip_wakeup(object); \
	if (object != first_object) { \
		FREE_PAGE(first_m); \
		vm_object_pip_wakeup(first_object); \
	} \
	UNLOCK_MAP; \
	if (vp != NULL) VOP_UNLOCK(vp); \
}

#define UNLOCK_AND_DEALLOCATE { \
	UNLOCK_THINGS; \
	vm_object_deallocate(first_object); \
}


RetryFault:;

	/*
	 * Find the backing store object and offset into it to begin the
	 * search.
	 */

	if ((result = vm_map_lookup(&map, vaddr,
	    fault_type, &entry, &first_object,
	    &first_offset, &prot, &wired, &su)) != KERN_SUCCESS) {
|	    &first_pindex, &prot, &wired, &su)) != KERN_SUCCESS) {
		return (result);
	}

	vp = vnode_pager_lock(first_object);

	lookup_still_valid = TRUE;

	if (wired)
		fault_type = prot;

	first_m = NULL;

	/*
	 * Make a reference to this object to prevent its disposal while we
	 * are messing with it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their shadows (and copies),
	 * they will stay around as well.
	 */

	first_object->ref_count++;
	first_object->paging_in_progress++;

	/*
	 * INVARIANTS (through entire routine):
	 *
	 * 1)	At all times, we must either have the object lock or a busy
	 * page in some object to prevent some other process from trying to
	 * bring in the same page.
	 *
	 * Note that we cannot hold any locks during the pager access or when
	 * waiting for memory, so we use a busy page then.
	 *
	 * Note also that we aren't as concerned about more than one thread
	 * attempting to pager_data_unlock the same page at once, so we don't
	 * hold the page as busy then, but do record the highest unlock value
	 * so far. [Unlock requests may also be delivered out of order.]
	 *
	 * 2)	Once we have a busy page, we must remove it from the pageout
	 * queues, so that the pageout daemon will not grab it away.
	 *
	 * 3)	To prevent another process from racing us down the shadow chain
	 * and entering a new page in the top object before we do, we must
	 * keep a busy page in the top object while following the shadow
	 * chain.
	 *
	 * 4)	We must increment paging_in_progress on any object for which
	 * we have a busy page, to prevent vm_object_collapse from removing
	 * the busy page without our noticing.
	 */

	/*
	 * Search for the page at object/offset.
	 */

	object = first_object;
	offset = first_offset;
|	pindex = first_pindex;
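
	/*
	 * From here on, (object, pindex) is the cursor for the search: it
	 * starts at the top-level object and the page index returned by
	 * vm_map_lookup, and the loop below advances it down the
	 * backing-object (shadow) chain until a resident page is found or
	 * the chain ends and a zero-filled page is supplied.
	 */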

	/*
	 * See whether this page is resident
	 */

	while (TRUE) {
		m = vm_page_lookup(object, offset);
|		m = vm_page_lookup(object, pindex);
		if (m != NULL) {
			/*
			 * If the page is being brought in, wait for it and
			 * then retry.
			 */
			if ((m->flags & PG_BUSY) || m->busy) {
				int s;

				UNLOCK_THINGS;
				s = splhigh();
				if ((m->flags & PG_BUSY) || m->busy) {
					m->flags |= PG_WANTED | PG_REFERENCED;
					cnt.v_intrans++;
					tsleep(m, PSWP, "vmpfw", 0);
				}
				splx(s);
				vm_object_deallocate(first_object);
				goto RetryFault;
			}

			/*
			 * Mark page busy for other processes, and the pagedaemon.
			 */
			m->flags |= PG_BUSY;
			if ((m->flags & PG_CACHE) &&
			    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_reserved) {
				UNLOCK_AND_DEALLOCATE;
				VM_WAIT;
				PAGE_WAKEUP(m);
				goto RetryFault;
			}

			if (m->valid && ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) &&
			    m->object != kernel_object && m->object != kmem_object) {
				goto readrest;
			}
			break;
		}
		if (((object->type != OBJT_DEFAULT) && (!change_wiring || wired))
		    || (object == first_object)) {

			if (offset >= object->size) {
|			if (pindex >= object->size) {
				UNLOCK_AND_DEALLOCATE;
				return (KERN_PROTECTION_FAILURE);
			}

			/*
			 * Allocate a new page for this object/offset pair.
			 */
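			/*
			 * When no vnode pager is behind first_object
			 * (vp == NULL), VM_ALLOC_ZERO additionally asks for a
			 * page that is already zeroed if one is available;
			 * such a page comes back with PG_ZERO set, which lets
			 * the zero-fill case further down skip the explicit
			 * vm_page_zero_fill() call.  Presumably a vnode-backed
			 * page is skipped here because the pager is about to
			 * fill it from backing store anyway.
			 */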
			m = vm_page_alloc(object, offset,
|			m = vm_page_alloc(object, pindex,
			    vp ? VM_ALLOC_NORMAL : (VM_ALLOC_NORMAL | VM_ALLOC_ZERO));

			if (m == NULL) {
				UNLOCK_AND_DEALLOCATE;
				VM_WAIT;
				goto RetryFault;
			}
		}
readrest:
		if (object->type != OBJT_DEFAULT && (!change_wiring || wired)) {
			int rv;
			int faultcount;
			int reqpage;

			/*
			 * Now we find out if any other pages should be paged
			 * in at this time.  This routine checks to see if the
			 * pages surrounding this fault reside in the same
			 * object as the page for this fault.  If they do,
			 * they are faulted into the object as well.  The
			 * array "marray" returned contains an array of
			 * vm_page_t structs where one of them is the
			 * vm_page_t passed to the routine.  The reqpage
			 * return value is the index into the marray for the
			 * vm_page_t passed to the routine.
			 */
			faultcount = vm_fault_additional_pages(
			    m, VM_FAULT_READ_BEHIND, VM_FAULT_READ_AHEAD,
			    marray, &reqpage);

			/*
			 * Call the pager to retrieve the data, if any, after
			 * releasing the lock on the map.
			 */
			UNLOCK_MAP;

			rv = faultcount ?
			    vm_pager_get_pages(object, marray, faultcount,
				reqpage) : VM_PAGER_FAIL;

			if (rv == VM_PAGER_OK) {
				/*
				 * Found the page. Leave it busy while we play
				 * with it.
				 */

				/*
				 * Relookup in case pager changed page. Pager
				 * is responsible for disposition of old page
				 * if moved.
				 */
				m = vm_page_lookup(object, offset);
|				m = vm_page_lookup(object, pindex);
				if (!m) {
					UNLOCK_AND_DEALLOCATE;
					goto RetryFault;
				}

				hardfault++;
				break;
			}
			/*
			 * Remove the bogus page (which does not exist at this
			 * object/offset); before doing so, we must get back
			 * our object lock to preserve our invariant.
			 *
			 * Also wake up any other process that may want to bring
			 * in this page.
			 *
			 * If this is the top-level object, we must leave the
			 * busy page to prevent another process from rushing
			 * past us, and inserting the page in that object at
			 * the same time that we are.
			 */

			if (rv == VM_PAGER_ERROR)
				printf("vm_fault: pager input (probably hardware) error, PID %d failure\n",
				    curproc->p_pid);
			/*
			 * Data outside the range of the pager or an I/O error
			 */
			/*
			 * XXX - the check for kernel_map is a kludge to work
			 * around having the machine panic on a kernel space
			 * fault w/ I/O error.
			 */
			if (((map != kernel_map) && (rv == VM_PAGER_ERROR)) || (rv == VM_PAGER_BAD)) {
				FREE_PAGE(m);
				UNLOCK_AND_DEALLOCATE;
				return ((rv == VM_PAGER_ERROR) ? KERN_FAILURE : KERN_PROTECTION_FAILURE);
			}
			if (object != first_object) {
				FREE_PAGE(m);
				/*
				 * XXX - we cannot just fall out at this
				 * point, m has been freed and is invalid!
				 */
			}
		}
		/*
		 * We get here if the object has default pager (or unwiring) or the
		 * pager doesn't have the page.
		 */
		if (object == first_object)
			first_m = m;

		/*
		 * Move on to the next object. Lock the next object before
		 * unlocking the current one.
		 */

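		/*
		 * backing_object_offset is a byte offset of this object
		 * within its backing object, while the fault is tracked as a
		 * page index, so the offset is converted with OFF_TO_IDX
		 * (essentially a division by PAGE_SIZE) before being added
		 * to pindex.
		 */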
		offset += object->backing_object_offset;
|		pindex += OFF_TO_IDX(object->backing_object_offset);
		next_object = object->backing_object;
		if (next_object == NULL) {
			/*
			 * If there's no object left, fill the page in the top
			 * object with zeros.
			 */
			if (object != first_object) {
				vm_object_pip_wakeup(object);

				object = first_object;
				offset = first_offset;
|				pindex = first_pindex;
				m = first_m;
			}
			first_m = NULL;

			if ((m->flags & PG_ZERO) == 0)
				vm_page_zero_fill(m);
			m->valid = VM_PAGE_BITS_ALL;
			cnt.v_zfod++;
			break;
		} else {
			if (object != first_object) {
				vm_object_pip_wakeup(object);
			}
			object = next_object;
			object->paging_in_progress++;
		}
	}

	if ((m->flags & PG_BUSY) == 0)
		panic("vm_fault: not busy after main loop");

	/*
	 * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
	 * is held.]
	 */

	old_m = m;	/* save page that would be copied */

	/*
	 * If the page is being written, but isn't already owned by the
	 * top-level object, we have to copy it into a new page owned by the
	 * top-level object.
	 */

	if (object != first_object) {
		/*
		 * We only really need to copy if we want to write it.
		 */

		if (fault_type & VM_PROT_WRITE) {

			/*
			 * If we try to collapse first_object at this point,
			 * we may deadlock when we try to get the lock on an
			 * intermediate object (since we have the bottom
			 * object locked).  We can't unlock the bottom object,
			 * because the page we found may move (by collapse) if
			 * we do.
			 *
			 * Instead, we first copy the page.  Then, when we have
			 * no more use for the bottom object, we unlock it and
			 * try to collapse.
			 *
			 * Note that we copy the page even if we didn't need
			 * to... that's the breaks.
			 */

			/*
			 * We already have an empty page in first_object - use
			 * it.
			 */

			vm_page_copy(m, first_m);
			first_m->valid = VM_PAGE_BITS_ALL;

			/*
			 * If another map is truly sharing this page with us,
			 * we have to flush all uses of the original page,
			 * since we can't distinguish those which want the
			 * original from those which need the new copy.
			 *
			 * XXX If we know that only one map has access to this
			 * page, then we could avoid the pmap_page_protect()
			 * call.
			 */

			if ((m->flags & PG_ACTIVE) == 0)
				vm_page_activate(m);
			vm_page_protect(m, VM_PROT_NONE);

			/*
			 * We no longer need the old page or object.
			 */
			PAGE_WAKEUP(m);
			vm_object_pip_wakeup(object);

			/*
			 * Only use the new page below...
			 */

			cnt.v_cow_faults++;
			m = first_m;
			object = first_object;
			offset = first_offset;
|			pindex = first_pindex;

			/*
			 * Now that we've gotten the copy out of the way,
			 * let's try to collapse the top object.
			 *
			 * But we have to play ugly games with
			 * paging_in_progress to do that...
			 */
			vm_object_pip_wakeup(object);
			vm_object_collapse(object);
			object->paging_in_progress++;
		} else {
			prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * We must verify that the maps have not changed since our last
	 * lookup.
	 */

	if (!lookup_still_valid) {
		vm_object_t retry_object;
		vm_offset_t retry_offset;
|		vm_pindex_t retry_pindex;
		vm_prot_t retry_prot;

		/*
		 * Since map entries may be pageable, make sure we can take a
		 * page fault on them.
		 */

		/*
		 * To avoid trying to write_lock the map while another process
		 * has it read_locked (in vm_map_pageable), we do not try for
		 * write permission.  If the page is still writable, we will
		 * get write permission.  If it is not, or has been marked
		 * needs_copy, we enter the mapping without write permission,
		 * and will merely take another fault.
		 */
		result = vm_map_lookup(&map, vaddr, fault_type & ~VM_PROT_WRITE,
		    &entry, &retry_object, &retry_offset, &retry_prot, &wired, &su);
|		    &entry, &retry_object, &retry_pindex, &retry_prot, &wired, &su);

		/*
		 * If we don't need the page any longer, put it on the active
		 * list (the easiest thing to do here).  If no one needs it,
		 * pageout will grab it eventually.
		 */

		if (result != KERN_SUCCESS) {
			RELEASE_PAGE(m);
			UNLOCK_AND_DEALLOCATE;
			return (result);
		}
		lookup_still_valid = TRUE;

		if ((retry_object != first_object) ||
		    (retry_offset != first_offset)) {
|		    (retry_pindex != first_pindex)) {
			RELEASE_PAGE(m);
			UNLOCK_AND_DEALLOCATE;
			goto RetryFault;
		}
		/*
		 * Check whether the protection has changed or the object has
		 * been copied while we left the map unlocked.  Changing from
		 * read to write permission is OK - we leave the page
		 * write-protected, and catch the write fault.  Changing from
		 * write to read permission means that we can't mark the page
		 * write-enabled after all.
		 */
		prot &= retry_prot;
	}
	/*
	 * (the various bits we're fiddling with here are locked by the
	 * object's lock)
	 */

	/*
	 * It's critically important that a wired-down page be faulted only
	 * once in each map for which it is wired.
	 */

	/*
	 * Put this page into the physical map.  We had to do the unlock above
	 * because pmap_enter may cause other faults.  We don't put the page
	 * back on the active queue until later so that the page-out daemon
	 * won't find us (yet).
	 */

	if (prot & VM_PROT_WRITE) {
		m->flags |= PG_WRITEABLE;
		m->object->flags |= OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY;
		/*
		 * If the fault is a write, we know that this page is being
		 * written NOW.  This will save on the pmap_is_modified() calls
		 * later.
		 */
		if (fault_type & VM_PROT_WRITE) {
			m->dirty = VM_PAGE_BITS_ALL;
		}
	}

	m->flags |= PG_MAPPED|PG_REFERENCED;
	m->flags &= ~PG_ZERO;

	pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m), prot, wired);
#if 0
	if (change_wiring == 0 && wired == 0)
		pmap_prefault(map->pmap, vaddr, entry, first_object);
#endif

	/*
	 * If the page is not wired down, then put it where the pageout daemon
	 * can find it.
	 */
	if (change_wiring) {
		if (wired)
			vm_page_wire(m);
		else
			vm_page_unwire(m);
	} else {
		if ((m->flags & PG_ACTIVE) == 0)
			vm_page_activate(m);
	}

	if (curproc && (curproc->p_flag & P_INMEM) && curproc->p_stats) {
		if (hardfault) {
			curproc->p_stats->p_ru.ru_majflt++;
		} else {
			curproc->p_stats->p_ru.ru_minflt++;
		}
	}

	if ((m->flags & PG_BUSY) == 0)
		printf("page not busy: %d\n", m->offset);
|		printf("page not busy: %d\n", m->pindex);
	/*
	 * Unlock everything, and return
	 */

	PAGE_WAKEUP(m);
	UNLOCK_AND_DEALLOCATE;

	return (KERN_SUCCESS);

}

/*
 *	vm_fault_wire:
 *
 *	Wire down a range of virtual addresses in a map.
 */
int
vm_fault_wire(map, start, end)
	vm_map_t map;
	vm_offset_t start, end;
{

	register vm_offset_t va;
	register pmap_t pmap;
	int rv;

	pmap = vm_map_pmap(map);

	/*
	 * Inform the physical mapping system that the range of addresses may
	 * not fault, so that page tables and such can be locked down as well.
	 */

	pmap_pageable(pmap, start, end, FALSE);

	/*
	 * We simulate a fault to get the page and enter it in the physical
	 * map.
	 */

	for (va = start; va < end; va += PAGE_SIZE) {

		while (curproc != pageproc &&
		    (cnt.v_free_count <= cnt.v_pageout_free_min))
			VM_WAIT;

		rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE, TRUE);
		if (rv) {
			if (va != start)
				vm_fault_unwire(map, start, va);
			return (rv);
		}
	}
	return (KERN_SUCCESS);
}


/*
 *	vm_fault_unwire:
 *
 *	Unwire a range of virtual addresses in a map.
 */
void
vm_fault_unwire(map, start, end)
	vm_map_t map;
	vm_offset_t start, end;
{

	register vm_offset_t va, pa;
	register pmap_t pmap;

	pmap = vm_map_pmap(map);

	/*
	 * Since the pages are wired down, we must be able to get their
	 * mappings from the physical map system.
	 */

	for (va = start; va < end; va += PAGE_SIZE) {
		pa = pmap_extract(pmap, va);
		if (pa == (vm_offset_t) 0) {
			panic("unwire: page not in pmap");
		}
		pmap_change_wiring(pmap, va, FALSE);
		vm_page_unwire(PHYS_TO_VM_PAGE(pa));
	}

	/*
	 * Inform the physical mapping system that the range of addresses may
	 * fault, so that page tables and such may be unwired themselves.
	 */

	pmap_pageable(pmap, start, end, TRUE);

}

/*
 *	Routine:
 *		vm_fault_copy_entry
 *	Function:
 *		Copy all of the pages from a wired-down map entry to another.
 *
 *	In/out conditions:
 *		The source and destination maps must be locked for write.
 *		The source map entry must be wired down (or be a sharing map
 *		entry corresponding to a main map entry that is wired down).
 */

void
vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
	vm_map_t dst_map;
	vm_map_t src_map;
	vm_map_entry_t dst_entry;
	vm_map_entry_t src_entry;
{
	vm_object_t dst_object;
	vm_object_t src_object;
	vm_offset_t dst_offset;
	vm_offset_t src_offset;
|	vm_ooffset_t dst_offset;
|	vm_ooffset_t src_offset;
	vm_prot_t prot;
	vm_offset_t vaddr;
	vm_page_t dst_m;
	vm_page_t src_m;

#ifdef lint
	src_map++;
#endif /* lint */

	src_object = src_entry->object.vm_object;
	src_offset = src_entry->offset;

	/*
	 * Create the top-level object for the destination entry. (Doesn't
	 * actually shadow anything - we copy the pages directly.)
	 */
	dst_object = vm_object_allocate(OBJT_DEFAULT,
	    (vm_size_t) (dst_entry->end - dst_entry->start));
|	    (vm_size_t) OFF_TO_IDX(dst_entry->end - dst_entry->start));
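	/*
	 * The size handed to vm_object_allocate is a page count, so the
	 * byte length of the entry is converted with OFF_TO_IDX rather
	 * than being passed through directly.
	 */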

	dst_entry->object.vm_object = dst_object;
	dst_entry->offset = 0;

	prot = dst_entry->max_protection;

	/*
	 * Loop through all of the pages in the entry's range, copying each
	 * one from the source object (it should be there) to the destination
	 * object.
	 */
	for (vaddr = dst_entry->start, dst_offset = 0;
	    vaddr < dst_entry->end;
	    vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {

		/*
		 * Allocate a page in the destination object
		 */
		do {
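			/*
			 * Keep retrying until a destination page can be
			 * allocated; VM_WAIT sleeps until the system has
			 * more free pages available.
			 */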
			dst_m = vm_page_alloc(dst_object, dst_offset, VM_ALLOC_NORMAL);
|			dst_m = vm_page_alloc(dst_object,
|			    OFF_TO_IDX(dst_offset), VM_ALLOC_NORMAL);
			if (dst_m == NULL) {
				VM_WAIT;
			}
		} while (dst_m == NULL);

		/*
		 * Find the page in the source object, and copy it in.
		 * (Because the source is wired down, the page will be in
		 * memory.)
		 */
		src_m = vm_page_lookup(src_object, dst_offset + src_offset);
|		src_m = vm_page_lookup(src_object,
|		    OFF_TO_IDX(dst_offset + src_offset));
		if (src_m == NULL)
			panic("vm_fault_copy_wired: page missing");

		vm_page_copy(src_m, dst_m);

		/*
		 * Enter it in the pmap...
		 */

		dst_m->flags |= PG_WRITEABLE|PG_MAPPED;
		dst_m->flags &= ~PG_ZERO;
		pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m),
		    prot, FALSE);

		/*
		 * Mark it no longer busy, and put it on the active list.
		 */
		vm_page_activate(dst_m);
		PAGE_WAKEUP(dst_m);
	}
}


/*
 * This routine checks around the requested page for other pages that
 * might also be faulted in, and brackets the viable range of pages
 * to be paged in.
 *
 * Inputs:
 *	m, rbehind, rahead
 *
 * Outputs:
 *	marray (array of vm_page_t), reqpage (index of requested page)
 *
 * Return value:
 *	number of pages in marray
 */
int
vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
	vm_page_t m;
	int rbehind;
	int rahead;
	vm_page_t *marray;
	int *reqpage;
{
	int i;
	vm_object_t object;
	vm_offset_t offset, startoffset, endoffset, toffset, size;
|	vm_pindex_t pindex, startpindex, endpindex, tpindex;
|	vm_offset_t size;
	vm_page_t rtm;
	int treqpage;
	int cbehind, cahead;

	object = m->object;
	offset = m->offset;
|	pindex = m->pindex;
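
	/*
	 * vm_pager_has_page reports, via cbehind and cahead, how many pages
	 * immediately before and after the requested one the pager can also
	 * supply; those counts are used below to clamp the requested
	 * read-behind and read-ahead.
	 */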

	/*
	 * if the requested page is not available, then give up now
	 */

	if (!vm_pager_has_page(object,
	    object->paging_offset + offset, &cbehind, &cahead))
|	    OFF_TO_IDX(object->paging_offset) + pindex, &cbehind, &cahead))
		return 0;

	if ((cbehind == 0) && (cahead == 0)) {
		*reqpage = 0;
		marray[0] = m;
		return 1;
	}

	if (rahead > cahead) {
		rahead = cahead;
	}

	if (rbehind > cbehind) {
		rbehind = cbehind;
	}

	/*
	 * try to do any readahead that we might have free pages for.
	 */
	if ((rahead + rbehind) >
	    ((cnt.v_free_count + cnt.v_cache_count) - cnt.v_free_reserved)) {
		pagedaemon_wakeup();
		*reqpage = 0;
		marray[0] = m;
		return 1;
	}

	/*
	 * scan backward for the read behind pages -- in memory or on disk not
	 * in same object
	 */
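	/*
	 * The backward walk starts just below the faulting page and runs for
	 * at most rbehind pages, stopping early at the first page that is
	 * already resident; startpindex then marks the low end of the cluster
	 * handed to the pager.  The forward walk further down does the same
	 * for the read-ahead side, bounded by the object size.
	 */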
	toffset = offset - PAGE_SIZE;
	if (toffset < offset) {
		if (rbehind * PAGE_SIZE > offset)
			rbehind = offset / PAGE_SIZE;
		startoffset = offset - rbehind * PAGE_SIZE;
		while (toffset >= startoffset) {
			if (vm_page_lookup(object, toffset)) {
				startoffset = toffset + PAGE_SIZE;
|	tpindex = pindex - 1;
|	if (tpindex < pindex) {
|		if (rbehind > pindex)
|			rbehind = pindex;
|		startpindex = pindex - rbehind;
|		while (tpindex >= startpindex) {
|			if (vm_page_lookup(object, tpindex)) {
|				startpindex = tpindex + 1;
				break;
			}
			if (toffset == 0)
|			if (tpindex == 0)
				break;
			toffset -= PAGE_SIZE;
|			tpindex -= 1;
		}
	} else {
		startoffset = offset;
|		startpindex = pindex;
	}

	/*
	 * scan forward for the read ahead pages -- in memory or on disk not
	 * in same object
	 */
	toffset = offset + PAGE_SIZE;
	endoffset = offset + (rahead + 1) * PAGE_SIZE;
	if (endoffset > object->size)
		endoffset = object->size;
	while (toffset < endoffset) {
		if (vm_page_lookup(object, toffset)) {
|	tpindex = pindex + 1;
|	endpindex = pindex + (rahead + 1);
|	if (endpindex > object->size)
|		endpindex = object->size;
|	while (tpindex < endpindex) {
|		if (vm_page_lookup(object, tpindex)) {
			break;
		}
		toffset += PAGE_SIZE;
|		tpindex += 1;
	}
	endoffset = toffset;
|	endpindex = tpindex;

	/* calculate number of pages in the cluster */
	size = (endoffset - startoffset) / PAGE_SIZE;
|	size = endpindex - startpindex;

	/* calculate the index of the required page within the cluster */
	treqpage = (offset - startoffset) / PAGE_SIZE;
|	treqpage = pindex - startpindex;
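
	/*
	 * treqpage is where the originally faulted page m will sit inside
	 * marray; on the successful path below it is copied out through
	 * *reqpage so the caller can find m again in the returned array.
	 */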

	/* see if we have space (again) */
	if ((cnt.v_free_count + cnt.v_cache_count) >
	    (cnt.v_free_reserved + size)) {
		/*
		 * get our pages and don't block for them
		 */
		for (i = 0; i < size; i++) {
			if (i != treqpage) {
				rtm = vm_page_alloc(object,
				    startoffset + i * PAGE_SIZE,
|				    startpindex + i,
				    VM_ALLOC_NORMAL);
				if (rtm == NULL) {
					if (i < treqpage) {
						int j;
						for (j = 0; j < i; j++) {
							FREE_PAGE(marray[j]);
						}
						*reqpage = 0;
						marray[0] = m;
						return 1;
					} else {
						size = i;
						*reqpage = treqpage;
						return size;
					}
				}
				marray[i] = rtm;
			} else {
				marray[i] = m;
			}
		}

		*reqpage = treqpage;
		return size;
	}
	*reqpage = 0;
	marray[0] = m;
	return 1;
}