vm_fault.c, FreeBSD r90935 → r92029
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *

--- 52 unchanged lines hidden ---

 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 * School of Computer Science
 * Carnegie Mellon University
 * Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_fault.c 92029 2002-03-10 21:52:48Z eivind $
 */

/*
 *	Page fault handling module.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>

--- 73 unchanged lines hidden ---

#define unlock_and_deallocate(fs)	_unlock_things(fs, 1)

/*
 * TRYPAGER - used by vm_fault to calculate whether the pager for the
 * current object *might* contain the page.
 *
 * default objects are zero-fill, there is no real pager.
 */
#define TRYPAGER	(fs.object->type != OBJT_DEFAULT && \
			(((fault_flags & VM_FAULT_WIRE_MASK) == 0) || wired))
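
/*
 * Illustrative sketch, not part of the original file: the TRYPAGER test
 * rewritten as a hypothetical helper over explicit arguments.  The name
 * example_try_pager and its parameter list are assumptions of this
 * sketch; vm_fault itself uses the macro above on its local fault state.
 */
static __inline int
example_try_pager(vm_object_t object, int fault_flags, boolean_t wired)
{
	/* a real pager exists, and this is either a normal or a wired fault */
	return (object->type != OBJT_DEFAULT &&
	    (((fault_flags & VM_FAULT_WIRE_MASK) == 0) || wired));
}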

/*
 *	vm_fault:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.

--- 115 unchanged lines hidden ---

	if (wired)
		fault_type = prot;

	fs.first_m = NULL;

	/*
	 * Search for the page at object/offset.
	 */
	fs.object = fs.first_object;
	fs.pindex = fs.first_pindex;
	while (TRUE) {
		/*
		 * If the object is dead, we stop here
		 */
		if (fs.object->flags & OBJ_DEAD) {
			unlock_and_deallocate(&fs);
			return (KERN_PROTECTION_FAILURE);
		}

		/*
		 * See if page is resident
		 */
		fs.m = vm_page_lookup(fs.object, fs.pindex);
		if (fs.m != NULL) {
			int queue, s;
			/*
			 * Wait/Retry if the page is busy.  We have to do this
			 * if the page is busy via either PG_BUSY or
			 * vm_page_t->busy because the vm_pager may be using
			 * vm_page_t->busy for pageouts ( and even pageins if

--- 10 unchanged lines hidden ---

			 */
			if ((fs.m->flags & PG_BUSY) || fs.m->busy) {
				unlock_things(&fs);
				(void)vm_page_sleep_busy(fs.m, TRUE, "vmpfw");
				cnt.v_intrans++;
				vm_object_deallocate(fs.first_object);
				goto RetryFault;
			}
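
			/*
			 * (Illustrative note: after vm_page_sleep_busy()
			 * returns, the page may have been freed, renamed or
			 * paged out, which is why the code retries the whole
			 * lookup from RetryFault instead of re-checking fs.m.)
			 */
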
			queue = fs.m->queue;

			s = splvm();
			vm_pageq_remove_nowakeup(fs.m);
			splx(s);

			if ((queue - fs.m->pc) == PQ_CACHE && vm_page_count_severe()) {
				vm_page_activate(fs.m);
				unlock_and_deallocate(&fs);
				VM_WAITPFAULT;
				goto RetryFault;
			}

			/*
			 * Mark page busy for other processes, and the
			 * pagedaemon.  If it still isn't completely valid
			 * (readable), jump to readrest, else break-out ( we
			 * found the page ).
			 */
			vm_page_busy(fs.m);
			if (((fs.m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) &&
				fs.m->object != kernel_object && fs.m->object != kmem_object) {
				goto readrest;
			}

			break;
		}

		/*
		 * Page is not resident.  If this is the search termination
		 * or the pager might contain the page, allocate a new page.
		 */
		if (TRYPAGER || fs.object == fs.first_object) {
			if (fs.pindex >= fs.object->size) {
				unlock_and_deallocate(&fs);
				return (KERN_PROTECTION_FAILURE);
			}

			/*
			 * Allocate a new page for this object/offset pair.

--- 15 unchanged lines hidden ---

		 * We have found a valid page or we have allocated a new page.
		 * The page thus may not be valid or may not be entirely
		 * valid.
		 *
		 * Attempt to fault-in the page if there is a chance that the
		 * pager has it, and potentially fault in additional pages
		 * at the same time.
		 */
		if (TRYPAGER) {
			int rv;
			int reqpage;
			int ahead, behind;
			u_char behavior = vm_map_entry_behavior(fs.entry);

			if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
				ahead = 0;

--- 21 unchanged lines hidden ---

				else
					firstpindex = fs.first_pindex - 2 * VM_FAULT_READ;

				/*
				 * note: partially valid pages cannot be
				 * included in the lookahead - NFS piecemeal
				 * writes will barf on it badly.
				 */
				for (tmppindex = fs.first_pindex - 1;
					tmppindex >= firstpindex;
					--tmppindex) {
					vm_page_t mt;

					mt = vm_page_lookup(fs.first_object, tmppindex);
					if (mt == NULL || (mt->valid != VM_PAGE_BITS_ALL))
						break;
					if (mt->busy ||
						(mt->flags & (PG_BUSY | PG_FICTITIOUS | PG_UNMANAGED)) ||
						mt->hold_count ||
						mt->wire_count)
						continue;
					if (mt->dirty == 0)

--- 51 unchanged lines hidden ---

			 */

			/*
			 * Relookup in case pager changed page.  Pager
			 * is responsible for disposition of old page
			 * if moved.
			 */
			fs.m = vm_page_lookup(fs.object, fs.pindex);
			if (!fs.m) {
				unlock_and_deallocate(&fs);
				goto RetryFault;
			}

			hardfault++;
			break; /* break to PAGE HAS BEEN FOUND */
		}
		/*

--- 4 unchanged lines hidden ---

		 * Also wake up any other process that may want to bring
		 * in this page.
		 *
		 * If this is the top-level object, we must leave the
		 * busy page to prevent another process from rushing
		 * past us, and inserting the page in that object at
		 * the same time that we are.
		 */
			if (rv == VM_PAGER_ERROR)
				printf("vm_fault: pager read error, pid %d (%s)\n",
				    curproc->p_pid, curproc->p_comm);
			/*
			 * Data outside the range of the pager or an I/O error
			 */
			/*
			 * XXX - the check for kernel_map is a kludge to work

--- 23 unchanged lines hidden ---

		 */
		if (fs.object == fs.first_object)
			fs.first_m = fs.m;

		/*
		 * Move on to the next object.  Lock the next object before
		 * unlocking the current one.
		 */
		fs.pindex += OFF_TO_IDX(fs.object->backing_object_offset);
		next_object = fs.object->backing_object;
		if (next_object == NULL) {
			/*
			 * If there's no object left, fill the page in the top
			 * object with zeros.
			 */
			if (fs.object != fs.first_object) {

--- 34 unchanged lines hidden ---

	 * is held.]
	 */

	/*
	 * If the page is being written, but isn't already owned by the
	 * top-level object, we have to copy it into a new page owned by the
	 * top-level object.
	 */
	if (fs.object != fs.first_object) {
		/*
		 * We only really need to copy if we want to write it.
		 */
		if (fault_type & VM_PROT_WRITE) {
			/*
			 * This allows pages to be virtually copied from a
			 * backing_object into the first_object, where the
			 * backing object has no other refs to it, and cannot
			 * gain any more refs.  Instead of a bcopy, we just
			 * move the page from the backing object to the
			 * first object.  Note that we must mark the page

--- 61 unchanged lines hidden ---

				 */
				release_page(&fs);
			}

			/*
			 * fs.object != fs.first_object due to above
			 * conditional
			 */
			vm_object_pip_wakeup(fs.object);

			/*
			 * Only use the new page below...
			 */
			cnt.v_cow_faults++;
			fs.m = fs.first_m;
			fs.object = fs.first_object;
			fs.pindex = fs.first_pindex;

		} else {
			prot &= ~VM_PROT_WRITE;
		}
	}
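
	/*
	 * (Illustrative note: when the fault was a read, VM_PROT_WRITE was
	 * cleared from prot above, so the page is mapped read-only; the
	 * first write access faults again with VM_PROT_WRITE set and takes
	 * the copy branch.)
	 */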

	/*
	 * We must verify that the maps have not changed since our last
	 * lookup.
	 */
	if (!fs.lookup_still_valid &&
		(fs.map->timestamp != map_generation)) {
		vm_object_t retry_object;
		vm_pindex_t retry_pindex;
		vm_prot_t retry_prot;

		/*
		 * Since map entries may be pageable, make sure we can take a
		 * page fault on them.
		 */

		/*
		 * Unlock vnode before the lookup to avoid deadlock.  E.G.
		 * avoid a deadlock between the inode and exec_map that can
		 * occur due to locks being obtained in different orders.
		 */
		if (fs.vp != NULL) {
			vput(fs.vp);
			fs.vp = NULL;
		}

		if (fs.map->infork) {
			release_page(&fs);
			unlock_and_deallocate(&fs);

--- 12 unchanged lines hidden ---

			&fs.entry, &retry_object, &retry_pindex, &retry_prot, &wired);
		map_generation = fs.map->timestamp;

		/*
		 * If we don't need the page any longer, put it on the active
		 * list (the easiest thing to do here).  If no one needs it,
		 * pageout will grab it eventually.
		 */
		if (result != KERN_SUCCESS) {
			release_page(&fs);
			unlock_and_deallocate(&fs);
			return (result);
		}
		fs.lookup_still_valid = TRUE;

		if ((retry_object != fs.first_object) ||

--- 52 unchanged lines hidden ---

			vm_pager_page_unswapped(fs.m);
			splx(s);
		}
	}

	/*
	 * Page had better still be busy
	 */
	KASSERT(fs.m->flags & PG_BUSY,
		("vm_fault: page %p not busy!", fs.m));
	unlock_things(&fs);

	/*
	 * Sanity check: page must be completely valid or it is not fit to
	 * map into user space.  vm_pager_get_pages() ensures this.
	 */
	if (fs.m->valid != VM_PAGE_BITS_ALL) {
		vm_page_zero_invalid(fs.m, TRUE);
		printf("Warning: page %p partially invalid on fault\n", fs.m);
	}
	pmap_enter(fs.map->pmap, vaddr, fs.m, prot, wired);
	if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0)) {
		pmap_prefault(fs.map->pmap, vaddr, fs.entry);
	}
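
	/*
	 * (Illustrative note: pmap_prefault() opportunistically enters
	 * nearby already-resident pages to reduce future soft faults; it is
	 * skipped for wiring faults, which only need the single page.)
	 */
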
	vm_page_flag_clear(fs.m, PG_ZERO);
	vm_page_flag_set(fs.m, PG_MAPPED|PG_REFERENCED);
	if (fault_flags & VM_FAULT_HOLD)
		vm_page_hold(fs.m);

	/*
	 * If the page is not wired down, then put it where the pageout daemon
	 * can find it.
	 */
	if (fault_flags & VM_FAULT_WIRE_MASK) {
		if (wired)
			vm_page_wire(fs.m);
		else
			vm_page_unwire(fs.m, 1);
	} else {
		vm_page_activate(fs.m);
	}

--- 6 unchanged lines hidden ---

			curproc->p_stats->p_ru.ru_minflt++;
		}
	}
	mtx_unlock_spin(&sched_lock);

	/*
	 * Unlock everything, and return
	 */
	vm_page_wakeup(fs.m);
	vm_object_deallocate(fs.first_object);
	return (KERN_SUCCESS);
}
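
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * machine-dependent caller.  A trap handler that has already resolved the
 * faulting map and access type funnels into vm_fault() roughly like this;
 * example_trap_pfault and its is_write flag are assumptions of this sketch.
 */
static int
example_trap_pfault(vm_map_t map, vm_offset_t eva, int is_write)
{
	vm_prot_t ftype;

	ftype = is_write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	/* fault in the page containing eva; a KERN_* status is returned */
	return (vm_fault(map, trunc_page(eva), ftype, VM_FAULT_NORMAL));
}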

/*
 *	vm_fault_wire:
 *
 *	Wire down a range of virtual addresses in a map.

--- 9 unchanged lines hidden ---

	int rv;

	pmap = vm_map_pmap(map);

	/*
	 * Inform the physical mapping system that the range of addresses may
	 * not fault, so that page tables and such can be locked down as well.
	 */
	pmap_pageable(pmap, start, end, FALSE);

	/*
	 * We simulate a fault to get the page and enter it in the physical
	 * map.
	 */
	for (va = start; va < end; va += PAGE_SIZE) {
		rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE,
			VM_FAULT_CHANGE_WIRING);
		if (rv) {
			if (va != start)
				vm_fault_unwire(map, start, va);
			return (rv);
		}

--- 21 unchanged lines hidden ---

	GIANT_REQUIRED;

	pmap = vm_map_pmap(map);

	/*
	 * Inform the physical mapping system that the range of addresses may
	 * not fault, so that page tables and such can be locked down as well.
	 */
	pmap_pageable(pmap, start, end, FALSE);

	/*
	 * We simulate a fault to get the page and enter it in the physical
	 * map.
	 */
	for (va = start; va < end; va += PAGE_SIZE) {
		rv = vm_fault(map, va, VM_PROT_READ, VM_FAULT_USER_WIRE);

--- 22 unchanged lines hidden ---

	pmap_t pmap;

	pmap = vm_map_pmap(map);

	/*
	 * Since the pages are wired down, we must be able to get their
	 * mappings from the physical map system.
	 */
	for (va = start; va < end; va += PAGE_SIZE) {
		pa = pmap_extract(pmap, va);
		if (pa != (vm_offset_t) 0) {
			pmap_change_wiring(pmap, va, FALSE);
			vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
		}
	}

	/*
	 * Inform the physical mapping system that the range of addresses may
	 * fault, so that page tables and such may be unwired themselves.
	 */
	pmap_pageable(pmap, start, end, TRUE);
}
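
/*
 * Illustrative sketch, not part of the original file: how a caller along
 * the lines of vm_map_pageable() might pair the two routines.  Note that
 * vm_fault_wire() already unwires the partially wired prefix itself when
 * it fails; example_wire_range is a hypothetical name.
 */
static int
example_wire_range(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	int rv;

	rv = vm_fault_wire(map, start, end);	/* wire [start, end) page by page */
	if (rv != KERN_SUCCESS)
		return (rv);
	/* ... the range is now immune to pageout ... */
	vm_fault_unwire(map, start, end);	/* drop the wiring again */
	return (KERN_SUCCESS);
}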

/*
 *	Routine:
 *		vm_fault_copy_entry
 *	Function:
 *		Copy all of the pages from a wired-down map entry to another.
 *
 *	In/out conditions:
 *		The source and destination maps must be locked for write.
 *		The source map entry must be wired down (or be a sharing map
 *		entry corresponding to a main map entry that is wired down).
 */
void
vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
	vm_map_t dst_map;
	vm_map_t src_map;
	vm_map_entry_t dst_entry;
	vm_map_entry_t src_entry;
{
	vm_object_t dst_object;

--- 54 unchanged lines hidden ---

		if (src_m == NULL)
			panic("vm_fault_copy_wired: page missing");

		vm_page_copy(src_m, dst_m);

		/*
		 * Enter it in the pmap...
		 */
		vm_page_flag_clear(dst_m, PG_ZERO);
		pmap_enter(dst_map->pmap, vaddr, dst_m, prot, FALSE);
		vm_page_flag_set(dst_m, PG_WRITEABLE|PG_MAPPED);

		/*
		 * Mark it no longer busy, and put it on the active list.
		 */
		vm_page_activate(dst_m);

--- 44 unchanged lines hidden ---

		*reqpage = 0;
		marray[0] = m;
		return 1;
	}

	/*
	 * if the requested page is not available, then give up now
	 */
	if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) {
		return 0;
	}

	if ((cbehind == 0) && (cahead == 0)) {
		*reqpage = 0;
		marray[0] = m;
		return 1;

--- 24 unchanged lines hidden ---

	if (pindex > 0) {
		if (rbehind > pindex) {
			rbehind = pindex;
			startpindex = 0;
		} else {
			startpindex = pindex - rbehind;
		}
		for (tpindex = pindex - 1; tpindex >= startpindex; tpindex -= 1) {
			if (vm_page_lookup(object, tpindex)) {
				startpindex = tpindex + 1;
				break;
			}
			if (tpindex == 0)
				break;
		}
		for (i = 0, tpindex = startpindex; tpindex < pindex; i++, tpindex++) {

			rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
			if (rtm == NULL) {
				for (j = 0; j < i; j++) {
					vm_page_free(marray[j]);
				}
				marray[0] = m;
				*reqpage = 0;

--- 16 unchanged lines hidden ---

	/*
	 * scan forward for the read ahead pages
	 */
	endpindex = tpindex + rahead;
	if (endpindex > object->size)
		endpindex = object->size;
	for (; tpindex < endpindex; i++, tpindex++) {

		if (vm_page_lookup(object, tpindex)) {
			break;
		}

		rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
		if (rtm == NULL) {
			break;
		}

		marray[i] = rtm;
	}

	/* return number of pages in marray */
	return i;
}
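
/*
 * Illustrative sketch, not part of the original file: the clustering
 * contract of vm_fault_additional_pages() as a hypothetical caller sees
 * it.  Up to rbehind + rahead + 1 busied pages around the faulting page
 * are placed in marray[], and *reqpage indexes the original page within
 * that run; example_cluster and its parameters are assumptions.
 */
static int
example_cluster(vm_page_t m, int rbehind, int rahead, vm_page_t *marray)
{
	int faultcount, reqpage;

	faultcount = vm_fault_additional_pages(m, rbehind, rahead,
	    marray, &reqpage);
	/* marray[reqpage] == m; the run is ready for vm_pager_get_pages() */
	return (faultcount);
}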