--- sys/vm/vm_fault.c	(revision 92256)
+++ sys/vm/vm_fault.c	(revision 92588)
@@ -61,17 +61,17 @@
  * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
  * School of Computer Science
  * Carnegie Mellon University
  * Pittsburgh PA 15213-3890
  *
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $FreeBSD: head/sys/vm/vm_fault.c 92256 2002-03-14 02:10:14Z green $
+ * $FreeBSD: head/sys/vm/vm_fault.c 92588 2002-03-18 15:08:09Z green $
  */
 
 /*
  * Page fault handling module.
  */
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
@@ -106,40 +106,34 @@
 	vm_page_t m;
 	vm_object_t object;
 	vm_pindex_t pindex;
 	vm_page_t first_m;
 	vm_object_t first_object;
 	vm_pindex_t first_pindex;
 	vm_map_t map;
 	vm_map_entry_t entry;
-	enum {
-		LSV_FALSE,	/* the lookup's lock has been dropped */
-		LSV_TRUE,	/* the lookup's lock is still valid */
-		LSV_UPGRADED	/* the lookup's lock is now exclusive */
-	} lookup_still_valid;
+	int lookup_still_valid;
 	struct vnode *vp;
 };
 
 static __inline void
 release_page(struct faultstate *fs)
 {
 	vm_page_wakeup(fs->m);
 	vm_page_deactivate(fs->m);
 	fs->m = NULL;
 }
 
 static __inline void
 unlock_map(struct faultstate *fs)
 {
-	if (fs->lookup_still_valid != LSV_FALSE) {
-		if (fs->lookup_still_valid == LSV_UPGRADED)
-			vm_map_lock_downgrade(fs->map);
+	if (fs->lookup_still_valid) {
 		vm_map_lookup_done(fs->map, fs->entry);
-		fs->lookup_still_valid = LSV_FALSE;
+		fs->lookup_still_valid = FALSE;
 	}
 }
 
 static void
 _unlock_things(struct faultstate *fs, int dealloc)
 {
 	GIANT_REQUIRED;
 	vm_object_pip_wakeup(fs->object);
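
This hunk backs out the tri-state lookup_still_valid in favor of the earlier boolean. The enum let unlock_map() distinguish a map lock merely held since vm_map_lookup() (LSV_TRUE) from one the fault handler had upgraded to exclusive along the way (LSV_UPGRADED), and downgrade the latter before calling vm_map_lookup_done(). A standalone model of that bookkeeping, with a pthreads rwlock standing in for the vm_map lock (an illustrative sketch, not kernel code):

#include <pthread.h>

enum lookup_state {
	LSV_FALSE,	/* the lookup's lock has been dropped */
	LSV_TRUE,	/* the lookup's lock is still valid */
	LSV_UPGRADED	/* the lookup's lock is now exclusive */
};

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
static enum lookup_state lookup_still_valid = LSV_FALSE;

static void
unlock_map(void)
{
	if (lookup_still_valid != LSV_FALSE) {
		/*
		 * pthreads unlocks read and write locks the same way;
		 * the kernel code instead had to downgrade an
		 * LSV_UPGRADED lock before handing it back.
		 */
		pthread_rwlock_unlock(&map_lock);
		lookup_still_valid = LSV_FALSE;
	}
}

int
main(void)
{
	pthread_rwlock_rdlock(&map_lock);
	lookup_still_valid = LSV_TRUE;		/* lookup done, lock held */
	unlock_map();				/* lock dropped */

	if (pthread_rwlock_trywrlock(&map_lock) == 0)
		lookup_still_valid = LSV_UPGRADED; /* re-acquired exclusive */
	unlock_map();
	return (0);
}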

@@ -283,17 +277,17 @@
 	vm_object_pip_add(fs.first_object, 1);
 
 	if ((fault_type & VM_PROT_WRITE) &&
 		(fs.first_object->type == OBJT_VNODE)) {
 		vm_freeze_copyopts(fs.first_object,
 			fs.first_pindex, fs.first_pindex + 1);
 	}
 
-	fs.lookup_still_valid = LSV_TRUE;
+	fs.lookup_still_valid = TRUE;
 
 	if (wired)
 		fault_type = prot;
 
 	fs.first_m = NULL;
 
 	/*
 	 * Search for the page at object/offset.

@@ -657,21 +651,21 @@
 	/*
 	 * We don't chase down the shadow chain
 	 */
 	    (fs.object == fs.first_object->backing_object) &&
 
 	/*
 	 * grab the lock if we need to
 	 */
-	    (fs.lookup_still_valid != LSV_FALSE ||
-	     vm_map_try_lock(fs.map) == 0)
+	    (fs.lookup_still_valid ||
+	     lockmgr(&fs.map->lock, LK_EXCLUSIVE|LK_NOWAIT, (void *)0, curthread) == 0)
 	    ) {
-		if (fs.lookup_still_valid == LSV_FALSE)
-			fs.lookup_still_valid = LSV_UPGRADED;
+
+		fs.lookup_still_valid = 1;
 		/*
 		 * get rid of the unnecessary page
 		 */
 		vm_page_protect(fs.first_m, VM_PROT_NONE);
 		vm_page_free(fs.first_m);
 		fs.first_m = NULL;
 
 		/*
@@ -716,17 +710,17 @@
 			prot &= ~VM_PROT_WRITE;
 		}
 	}
 
 	/*
 	 * We must verify that the maps have not changed since our last
 	 * lookup.
 	 */
-	if (fs.lookup_still_valid == LSV_FALSE &&
+	if (!fs.lookup_still_valid &&
 	    (fs.map->timestamp != map_generation)) {
 		vm_object_t retry_object;
 		vm_pindex_t retry_pindex;
 		vm_prot_t retry_prot;
 
 		/*
 		 * Since map entries may be pageable, make sure we can take a
 		 * page fault on them.
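
The fs.map->timestamp != map_generation test above is a generation-counter check: the handler snapshots the map's timestamp at lookup time, and if the map lock was ever dropped during the fault, an unchanged counter proves nobody modified the map in the meantime. The same pattern in self-contained form (hypothetical names, not kernel code):

#include <stdbool.h>

struct map {
	unsigned timestamp;	/* bumped on every map modification */
};

/*
 * May a lookup taken when the counter read `map_generation` still be
 * trusted?  This mirrors the (negated) condition in the hunk above.
 */
static bool
lookup_is_current(const struct map *map, unsigned map_generation,
    bool lookup_still_valid)
{
	return (lookup_still_valid || map->timestamp == map_generation);
}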

@@ -765,17 +759,17 @@
 		 * list (the easiest thing to do here). If no one needs it,
 		 * pageout will grab it eventually.
 		 */
 		if (result != KERN_SUCCESS) {
 			release_page(&fs);
 			unlock_and_deallocate(&fs);
 			return (result);
 		}
-		fs.lookup_still_valid = LSV_TRUE;
+		fs.lookup_still_valid = TRUE;
 
 		if ((retry_object != fs.first_object) ||
 		    (retry_pindex != fs.first_pindex)) {
 			release_page(&fs);
 			unlock_and_deallocate(&fs);
 			goto RetryFault;
 		}
 		/*
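
This last hunk is the tail of the revalidation path: if the re-lookup succeeds but yields a different object or offset, the whole fault restarts at RetryFault. The control flow, reduced to a compilable skeleton (all helpers are hypothetical stand-ins, not kernel code):

/*
 * Skeleton of the RetryFault discipline in vm_fault(); the helper
 * bodies are stubs so the control flow compiles on its own.
 */
struct lookup { int object, pindex; };

static int do_lookup(struct lookup *l) { l->object = 1; l->pindex = 2; return (0); }
static int lock_was_dropped(void) { return (0); }
static int map_changed(void) { return (0); }

static int
fault(void)
{
	struct lookup first, retry;

retry_fault:
	if (do_lookup(&first) != 0)
		return (-1);

	/* ... fault processing; the map lock may be dropped ... */

	if (lock_was_dropped() && map_changed()) {
		if (do_lookup(&retry) != 0)
			return (-1);	/* cf. the KERN_SUCCESS test above */
		if (retry.object != first.object ||
		    retry.pindex != first.pindex)
			goto retry_fault;	/* mapping moved: start over */
	}
	return (0);
}

int
main(void)
{
	return (fault());
}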