/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_map.c 92466 2002-03-17 03:19:31Z alc $
| * $FreeBSD: head/sys/vm/vm_map.c 92588 2002-03-18 15:08:09Z green $
65 */ 66 67/* 68 * Virtual memory mapping module. 69 */ 70 71#include <sys/param.h> 72#include <sys/systm.h> 73#include <sys/ktr.h> 74#include <sys/lock.h> 75#include <sys/mutex.h> 76#include <sys/proc.h> 77#include <sys/vmmeter.h> 78#include <sys/mman.h> 79#include <sys/vnode.h> 80#include <sys/resourcevar.h> 81 82#include <vm/vm.h> 83#include <vm/vm_param.h> 84#include <vm/pmap.h> 85#include <vm/vm_map.h> 86#include <vm/vm_page.h> 87#include <vm/vm_object.h> 88#include <vm/vm_pager.h> 89#include <vm/vm_kern.h> 90#include <vm/vm_extern.h> 91#include <vm/vm_zone.h> 92#include <vm/swap_pager.h> 93 94/* 95 * Virtual memory maps provide for the mapping, protection, 96 * and sharing of virtual memory objects. In addition, 97 * this module provides for an efficient virtual copy of 98 * memory from one map to another. 99 * 100 * Synchronization is required prior to most operations. 101 * 102 * Maps consist of an ordered doubly-linked list of simple 103 * entries; a single hint is used to speed up lookups. 104 * 105 * Since portions of maps are specified by start/end addresses, 106 * which may not align with existing map entries, all 107 * routines merely "clip" entries to these start/end values. 108 * [That is, an entry is split into two, bordering at a 109 * start or end value.] Note that these clippings may not 110 * always be necessary (as the two resulting entries are then 111 * not changed); however, the clipping is done for convenience. 112 * 113 * As mentioned above, virtual copy operations are performed 114 * by copying VM object references from one map to 115 * another, and then marking both regions as copy-on-write. 116 */ 117 118/* 119 * vm_map_startup: 120 * 121 * Initialize the vm_map module. Must be called before 122 * any other vm_map routines. 123 * 124 * Map and entry structures are allocated from the general 125 * purpose memory pool with some exceptions: 126 * 127 * - The kernel map and kmem submap are allocated statically. 128 * - Kernel map entries are allocated out of a static pool. 129 * 130 * These restrictions are necessary since malloc() uses the 131 * maps and requires map entries. 132 */ 133 134static struct vm_zone kmapentzone_store, mapentzone_store, mapzone_store; 135static vm_zone_t mapentzone, kmapentzone, mapzone, vmspace_zone; 136static struct vm_object kmapentobj, mapentobj, mapobj; 137 138static struct vm_map_entry map_entry_init[MAX_MAPENT]; 139static struct vm_map_entry kmap_entry_init[MAX_KMAPENT]; 140static struct vm_map map_init[MAX_KMAP]; 141 142void 143vm_map_startup(void) 144{ 145 mapzone = &mapzone_store; 146 zbootinit(mapzone, "MAP", sizeof (struct vm_map), 147 map_init, MAX_KMAP); 148 kmapentzone = &kmapentzone_store; 149 zbootinit(kmapentzone, "KMAP ENTRY", sizeof (struct vm_map_entry), 150 kmap_entry_init, MAX_KMAPENT); 151 mapentzone = &mapentzone_store; 152 zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry), 153 map_entry_init, MAX_MAPENT); 154} 155 156/* 157 * Allocate a vmspace structure, including a vm_map and pmap, 158 * and initialize those structures. The refcnt is set to 1. 159 * The remaining fields must be initialized by the caller. 
160 */ 161struct vmspace * 162vmspace_alloc(min, max) 163 vm_offset_t min, max; 164{ 165 struct vmspace *vm; 166 167 GIANT_REQUIRED; 168 vm = zalloc(vmspace_zone); 169 CTR1(KTR_VM, "vmspace_alloc: %p", vm); 170 vm_map_init(&vm->vm_map, min, max); 171 pmap_pinit(vmspace_pmap(vm)); 172 vm->vm_map.pmap = vmspace_pmap(vm); /* XXX */ 173 vm->vm_refcnt = 1; 174 vm->vm_shm = NULL; 175 vm->vm_freer = NULL; 176 return (vm); 177} 178 179void 180vm_init2(void) 181{ 182 zinitna(kmapentzone, &kmapentobj, 183 NULL, 0, cnt.v_page_count / 4, ZONE_INTERRUPT, 1); 184 zinitna(mapentzone, &mapentobj, 185 NULL, 0, 0, 0, 1); 186 zinitna(mapzone, &mapobj, 187 NULL, 0, 0, 0, 1); 188 vmspace_zone = zinit("VMSPACE", sizeof (struct vmspace), 0, 0, 3); 189 pmap_init2(); 190 vm_object_init2(); 191} 192 193static __inline void 194vmspace_dofree(struct vmspace *vm) 195{ 196 CTR1(KTR_VM, "vmspace_free: %p", vm); 197 /* 198 * Lock the map, to wait out all other references to it. 199 * Delete all of the mappings and pages they hold, then call 200 * the pmap module to reclaim anything left. 201 */ 202 vm_map_lock(&vm->vm_map); 203 (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset, 204 vm->vm_map.max_offset); 205 vm_map_unlock(&vm->vm_map); 206 pmap_release(vmspace_pmap(vm)); 207 vm_map_destroy(&vm->vm_map); 208 zfree(vmspace_zone, vm); 209} 210 211void 212vmspace_free(struct vmspace *vm) 213{ 214 GIANT_REQUIRED; 215 216 if (vm->vm_refcnt == 0) 217 panic("vmspace_free: attempt to free already freed vmspace"); 218 219 if (--vm->vm_refcnt == 0) 220 vmspace_dofree(vm); 221} 222 223void 224vmspace_exitfree(struct proc *p) 225{ 226 GIANT_REQUIRED; 227 228 if (p == p->p_vmspace->vm_freer) 229 vmspace_dofree(p->p_vmspace); 230} 231 232/* 233 * vmspace_swap_count() - count the approximate swap useage in pages for a 234 * vmspace. 235 * 236 * Swap useage is determined by taking the proportional swap used by 237 * VM objects backing the VM map. To make up for fractional losses, 238 * if the VM object has any swap use at all the associated map entries 239 * count for at least 1 swap page. 240 */ 241int 242vmspace_swap_count(struct vmspace *vmspace) 243{ 244 vm_map_t map = &vmspace->vm_map; 245 vm_map_entry_t cur; 246 int count = 0; 247 248 for (cur = map->header.next; cur != &map->header; cur = cur->next) { 249 vm_object_t object; 250 251 if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 && 252 (object = cur->object.vm_object) != NULL && 253 object->type == OBJT_SWAP 254 ) { 255 int n = (cur->end - cur->start) / PAGE_SIZE; 256 257 if (object->un_pager.swp.swp_bcount) { 258 count += object->un_pager.swp.swp_bcount * 259 SWAP_META_PAGES * n / object->size + 1; 260 } 261 } 262 } 263 return (count); 264} 265 266u_char 267vm_map_entry_behavior(struct vm_map_entry *entry) 268{ 269 return entry->eflags & MAP_ENTRY_BEHAV_MASK; 270} 271 272void 273vm_map_entry_set_behavior(struct vm_map_entry *entry, u_char behavior) 274{ 275 entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) | 276 (behavior & MAP_ENTRY_BEHAV_MASK); 277} 278 279void
_vm_map_lock(vm_map_t map, const char *file, int line)
{
	vm_map_printf("locking map LK_EXCLUSIVE: %p\n", map);
	_sx_xlock(&map->lock, file, line);
	map->timestamp++;
}

int
_vm_map_try_lock(vm_map_t map, const char *file, int line)
{
	vm_map_printf("trying to lock map LK_EXCLUSIVE: %p\n", map);
	if (_sx_try_xlock(&map->lock, file, line)) {
		map->timestamp++;
		return (0);
	}
	return (EWOULDBLOCK);
}

void
_vm_map_unlock(vm_map_t map, const char *file, int line)
{
	vm_map_printf("locking map LK_RELEASE: %p\n", map);
	_sx_xunlock(&map->lock, file, line);
}

void
_vm_map_lock_read(vm_map_t map, const char *file, int line)
{
	vm_map_printf("locking map LK_SHARED: %p\n", map);
	_sx_slock(&map->lock, file, line);
}

void
_vm_map_unlock_read(vm_map_t map, const char *file, int line)
{
	vm_map_printf("locking map LK_RELEASE: %p\n", map);
	_sx_sunlock(&map->lock, file, line);
}

int
_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
{
	vm_map_printf("locking map LK_EXCLUPGRADE: %p\n", map);
	if (_sx_try_upgrade(&map->lock, file, line)) {
		map->timestamp++;
		return (0);
	}
	return (EWOULDBLOCK);
}

void
_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
{
	vm_map_printf("locking map LK_DOWNGRADE: %p\n", map);
	_sx_downgrade(&map->lock, file, line);
}

| void
| vm_map_lock(vm_map_t map)
| {
| 	vm_map_printf("locking map LK_EXCLUSIVE: %p\n", map);
| 	if (lockmgr(&map->lock, LK_EXCLUSIVE, NULL, curthread) != 0)
| 		panic("vm_map_lock: failed to get lock");
| 	map->timestamp++;
| }
|
| void
| vm_map_unlock(vm_map_t map)
| {
| 	vm_map_printf("locking map LK_RELEASE: %p\n", map);
| 	lockmgr(&(map)->lock, LK_RELEASE, NULL, curthread);
| }
|
| void
| vm_map_lock_read(vm_map_t map)
| {
| 	vm_map_printf("locking map LK_SHARED: %p\n", map);
| 	lockmgr(&(map)->lock, LK_SHARED, NULL, curthread);
| }
|
| void
| vm_map_unlock_read(vm_map_t map)
| {
| 	vm_map_printf("locking map LK_RELEASE: %p\n", map);
| 	lockmgr(&(map)->lock, LK_RELEASE, NULL, curthread);
| }
|
| static __inline__ int
| _vm_map_lock_upgrade(vm_map_t map, struct thread *td) {
| 	int error;
|
| 	vm_map_printf("locking map LK_EXCLUPGRADE: %p\n", map);
| 	error = lockmgr(&map->lock, LK_EXCLUPGRADE, NULL, td);
| 	if (error == 0)
| 		map->timestamp++;
| 	return error;
| }
|
| int
| vm_map_lock_upgrade(vm_map_t map)
| {
| 	return (_vm_map_lock_upgrade(map, curthread));
| }
|
| void
| vm_map_lock_downgrade(vm_map_t map)
| {
| 	vm_map_printf("locking map LK_DOWNGRADE: %p\n", map);
| 	lockmgr(&map->lock, LK_DOWNGRADE, NULL, curthread);
| }
|
| void
| vm_map_set_recursive(vm_map_t map)
| {
| 	mtx_lock((map)->lock.lk_interlock);
| 	map->lock.lk_flags |= LK_CANRECURSE;
| 	mtx_unlock((map)->lock.lk_interlock);
| }
|
| void
| vm_map_clear_recursive(vm_map_t map)
| {
| 	mtx_lock((map)->lock.lk_interlock);
| 	map->lock.lk_flags &= ~LK_CANRECURSE;
| 	mtx_unlock((map)->lock.lk_interlock);
| }
|
vm_offset_t
vm_map_min(vm_map_t map)
{
	return (map->min_offset);
}

vm_offset_t
vm_map_max(vm_map_t map)
{
	return (map->max_offset);
}

struct pmap *
vm_map_pmap(vm_map_t map)
{
	return (map->pmap);
}

struct pmap *
vmspace_pmap(struct vmspace *vmspace)
{
	return &vmspace->vm_pmap;
}

long
vmspace_resident_count(struct vmspace *vmspace)
{
	return pmap_resident_count(vmspace_pmap(vmspace));
}

/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
	vm_map_t result;

	GIANT_REQUIRED;

	result = zalloc(mapzone);
	CTR1(KTR_VM, "vm_map_create: %p", result);
	vm_map_init(result, min, max);
	result->pmap = pmap;
	return (result);
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 * The pmap is set elsewhere.
 */
void
vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max)
{
	GIANT_REQUIRED;

	map->header.next = map->header.prev = &map->header;
	map->nentries = 0;
	map->size = 0;
	map->system_map = 0;
	map->infork = 0;
	map->min_offset = min;
	map->max_offset = max;
	map->first_free = &map->header;
	map->hint = &map->header;
	map->timestamp = 0;
	sx_init(&map->lock, "thrd_sleep");
| 	lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
}

void
vm_map_destroy(map)
	struct vm_map *map;
{
	GIANT_REQUIRED;
	sx_destroy(&map->lock);
| 	lockdestroy(&map->lock);
417} 418 419/* 420 * vm_map_entry_dispose: [ internal use only ] 421 * 422 * Inverse of vm_map_entry_create. 423 */ 424static void 425vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry) 426{ 427 zfree((map->system_map || !mapentzone) ? kmapentzone : mapentzone, entry); 428} 429 430/* 431 * vm_map_entry_create: [ internal use only ] 432 * 433 * Allocates a VM map entry for insertion. 434 * No entry fields are filled in. 435 */ 436static vm_map_entry_t 437vm_map_entry_create(vm_map_t map) 438{ 439 vm_map_entry_t new_entry; 440 441 new_entry = zalloc((map->system_map || !mapentzone) ? 442 kmapentzone : mapentzone); 443 if (new_entry == NULL) 444 panic("vm_map_entry_create: kernel resources exhausted"); 445 return (new_entry); 446} 447 448/* 449 * vm_map_entry_{un,}link: 450 * 451 * Insert/remove entries from maps. 452 */ 453static __inline void 454vm_map_entry_link(vm_map_t map, 455 vm_map_entry_t after_where, 456 vm_map_entry_t entry) 457{ 458 459 CTR4(KTR_VM, 460 "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map, 461 map->nentries, entry, after_where); 462 map->nentries++; 463 entry->prev = after_where; 464 entry->next = after_where->next; 465 entry->next->prev = entry; 466 after_where->next = entry; 467} 468 469static __inline void 470vm_map_entry_unlink(vm_map_t map, 471 vm_map_entry_t entry) 472{ 473 vm_map_entry_t prev = entry->prev; 474 vm_map_entry_t next = entry->next; 475 476 next->prev = prev; 477 prev->next = next; 478 map->nentries--; 479 CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map, 480 map->nentries, entry); 481} 482 483/* 484 * SAVE_HINT: 485 * 486 * Saves the specified entry as the hint for 487 * future lookups. 488 */ 489#define SAVE_HINT(map,value) \ 490 (map)->hint = (value); 491 492/* 493 * vm_map_lookup_entry: [ internal use only ] 494 * 495 * Finds the map entry containing (or 496 * immediately preceding) the specified address 497 * in the given map; the entry is returned 498 * in the "entry" parameter. The boolean 499 * result indicates whether the address is 500 * actually contained in the map. 501 */ 502boolean_t 503vm_map_lookup_entry( 504 vm_map_t map, 505 vm_offset_t address, 506 vm_map_entry_t *entry) /* OUT */ 507{ 508 vm_map_entry_t cur; 509 vm_map_entry_t last; 510 511 GIANT_REQUIRED; 512 /* 513 * Start looking either from the head of the list, or from the hint. 514 */ 515 cur = map->hint; 516 517 if (cur == &map->header) 518 cur = cur->next; 519 520 if (address >= cur->start) { 521 /* 522 * Go from hint to end of list. 523 * 524 * But first, make a quick check to see if we are already looking 525 * at the entry we want (which is usually the case). Note also 526 * that we don't need to save the hint here... it is the same 527 * hint (unless we are at the header, in which case the hint 528 * didn't buy us anything anyway). 
529 */ 530 last = &map->header; 531 if ((cur != last) && (cur->end > address)) { 532 *entry = cur; 533 return (TRUE); 534 } 535 } else { 536 /* 537 * Go from start to hint, *inclusively* 538 */ 539 last = cur->next; 540 cur = map->header.next; 541 } 542 543 /* 544 * Search linearly 545 */ 546 while (cur != last) { 547 if (cur->end > address) { 548 if (address >= cur->start) { 549 /* 550 * Save this lookup for future hints, and 551 * return 552 */ 553 *entry = cur; 554 SAVE_HINT(map, cur); 555 return (TRUE); 556 } 557 break; 558 } 559 cur = cur->next; 560 } 561 *entry = cur->prev; 562 SAVE_HINT(map, *entry); 563 return (FALSE); 564} 565 566/* 567 * vm_map_insert: 568 * 569 * Inserts the given whole VM object into the target 570 * map at the specified address range. The object's 571 * size should match that of the address range. 572 * 573 * Requires that the map be locked, and leaves it so. 574 * 575 * If object is non-NULL, ref count must be bumped by caller 576 * prior to making call to account for the new entry. 577 */ 578int 579vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 580 vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, 581 int cow) 582{ 583 vm_map_entry_t new_entry; 584 vm_map_entry_t prev_entry; 585 vm_map_entry_t temp_entry; 586 vm_eflags_t protoeflags; 587 588 GIANT_REQUIRED; 589 590 /* 591 * Check that the start and end points are not bogus. 592 */ 593 if ((start < map->min_offset) || (end > map->max_offset) || 594 (start >= end)) 595 return (KERN_INVALID_ADDRESS); 596 597 /* 598 * Find the entry prior to the proposed starting address; if it's part 599 * of an existing entry, this range is bogus. 600 */ 601 if (vm_map_lookup_entry(map, start, &temp_entry)) 602 return (KERN_NO_SPACE); 603 604 prev_entry = temp_entry; 605 606 /* 607 * Assert that the next entry doesn't overlap the end point. 608 */ 609 if ((prev_entry->next != &map->header) && 610 (prev_entry->next->start < end)) 611 return (KERN_NO_SPACE); 612 613 protoeflags = 0; 614 615 if (cow & MAP_COPY_ON_WRITE) 616 protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY; 617 618 if (cow & MAP_NOFAULT) { 619 protoeflags |= MAP_ENTRY_NOFAULT; 620 621 KASSERT(object == NULL, 622 ("vm_map_insert: paradoxical MAP_NOFAULT request")); 623 } 624 if (cow & MAP_DISABLE_SYNCER) 625 protoeflags |= MAP_ENTRY_NOSYNC; 626 if (cow & MAP_DISABLE_COREDUMP) 627 protoeflags |= MAP_ENTRY_NOCOREDUMP; 628 629 if (object) { 630 /* 631 * When object is non-NULL, it could be shared with another 632 * process. We have to set or clear OBJ_ONEMAPPING 633 * appropriately. 634 */ 635 if ((object->ref_count > 1) || (object->shadow_count != 0)) { 636 vm_object_clear_flag(object, OBJ_ONEMAPPING); 637 } 638 } 639 else if ((prev_entry != &map->header) && 640 (prev_entry->eflags == protoeflags) && 641 (prev_entry->end == start) && 642 (prev_entry->wired_count == 0) && 643 ((prev_entry->object.vm_object == NULL) || 644 vm_object_coalesce(prev_entry->object.vm_object, 645 OFF_TO_IDX(prev_entry->offset), 646 (vm_size_t)(prev_entry->end - prev_entry->start), 647 (vm_size_t)(end - prev_entry->end)))) { 648 /* 649 * We were able to extend the object. Determine if we 650 * can extend the previous map entry to include the 651 * new range as well. 
652 */ 653 if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) && 654 (prev_entry->protection == prot) && 655 (prev_entry->max_protection == max)) { 656 map->size += (end - prev_entry->end); 657 prev_entry->end = end; 658 vm_map_simplify_entry(map, prev_entry); 659 return (KERN_SUCCESS); 660 } 661 662 /* 663 * If we can extend the object but cannot extend the 664 * map entry, we have to create a new map entry. We 665 * must bump the ref count on the extended object to 666 * account for it. object may be NULL. 667 */ 668 object = prev_entry->object.vm_object; 669 offset = prev_entry->offset + 670 (prev_entry->end - prev_entry->start); 671 vm_object_reference(object); 672 } 673 674 /* 675 * NOTE: if conditionals fail, object can be NULL here. This occurs 676 * in things like the buffer map where we manage kva but do not manage 677 * backing objects. 678 */ 679 680 /* 681 * Create a new entry 682 */ 683 new_entry = vm_map_entry_create(map); 684 new_entry->start = start; 685 new_entry->end = end; 686 687 new_entry->eflags = protoeflags; 688 new_entry->object.vm_object = object; 689 new_entry->offset = offset; 690 new_entry->avail_ssize = 0; 691 692 new_entry->inheritance = VM_INHERIT_DEFAULT; 693 new_entry->protection = prot; 694 new_entry->max_protection = max; 695 new_entry->wired_count = 0; 696 697 /* 698 * Insert the new entry into the list 699 */ 700 vm_map_entry_link(map, prev_entry, new_entry); 701 map->size += new_entry->end - new_entry->start; 702 703 /* 704 * Update the free space hint 705 */ 706 if ((map->first_free == prev_entry) && 707 (prev_entry->end >= new_entry->start)) { 708 map->first_free = new_entry; 709 } 710 711#if 0 712 /* 713 * Temporarily removed to avoid MAP_STACK panic, due to 714 * MAP_STACK being a huge hack. Will be added back in 715 * when MAP_STACK (and the user stack mapping) is fixed. 716 */ 717 /* 718 * It may be possible to simplify the entry 719 */ 720 vm_map_simplify_entry(map, new_entry); 721#endif 722 723 if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) { 724 pmap_object_init_pt(map->pmap, start, 725 object, OFF_TO_IDX(offset), end - start, 726 cow & MAP_PREFAULT_PARTIAL); 727 } 728 729 return (KERN_SUCCESS); 730} 731 732/* 733 * Find sufficient space for `length' bytes in the given map, starting at 734 * `start'. The map must be locked. Returns 0 on success, 1 on no space. 735 */ 736int 737vm_map_findspace( 738 vm_map_t map, 739 vm_offset_t start, 740 vm_size_t length, 741 vm_offset_t *addr) 742{ 743 vm_map_entry_t entry, next; 744 vm_offset_t end; 745 746 GIANT_REQUIRED; 747 if (start < map->min_offset) 748 start = map->min_offset; 749 if (start > map->max_offset) 750 return (1); 751 752 /* 753 * Look for the first possible address; if there's already something 754 * at this address, we have to start after it. 755 */ 756 if (start == map->min_offset) { 757 if ((entry = map->first_free) != &map->header) 758 start = entry->end; 759 } else { 760 vm_map_entry_t tmp; 761 762 if (vm_map_lookup_entry(map, start, &tmp)) 763 start = tmp->end; 764 entry = tmp; 765 } 766 767 /* 768 * Look through the rest of the map, trying to fit a new region in the 769 * gap between existing regions, or after the very last region. 770 */ 771 for (;; start = (entry = next)->end) { 772 /* 773 * Find the end of the proposed new region. Be sure we didn't 774 * go beyond the end of the map, or wrap around the address; 775 * if so, we lose. Otherwise, if this is the last entry, or 776 * if the proposed new region fits before the next entry, we 777 * win. 
778 */ 779 end = start + length; 780 if (end > map->max_offset || end < start) 781 return (1); 782 next = entry->next; 783 if (next == &map->header || next->start >= end) 784 break; 785 } 786 SAVE_HINT(map, entry); 787 *addr = start; 788 if (map == kernel_map) { 789 vm_offset_t ksize; 790 if ((ksize = round_page(start + length)) > kernel_vm_end) { 791 pmap_growkernel(ksize); 792 } 793 } 794 return (0); 795} 796 797/* 798 * vm_map_find finds an unallocated region in the target address 799 * map with the given length. The search is defined to be 800 * first-fit from the specified address; the region found is 801 * returned in the same parameter. 802 * 803 * If object is non-NULL, ref count must be bumped by caller 804 * prior to making call to account for the new entry. 805 */ 806int 807vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 808 vm_offset_t *addr, /* IN/OUT */ 809 vm_size_t length, boolean_t find_space, vm_prot_t prot, 810 vm_prot_t max, int cow) 811{ 812 vm_offset_t start; 813 int result, s = 0; 814 815 GIANT_REQUIRED; 816 817 start = *addr; 818 819 if (map == kmem_map) 820 s = splvm(); 821 822 vm_map_lock(map); 823 if (find_space) { 824 if (vm_map_findspace(map, start, length, addr)) { 825 vm_map_unlock(map); 826 if (map == kmem_map) 827 splx(s); 828 return (KERN_NO_SPACE); 829 } 830 start = *addr; 831 } 832 result = vm_map_insert(map, object, offset, 833 start, start + length, prot, max, cow); 834 vm_map_unlock(map); 835 836 if (map == kmem_map) 837 splx(s); 838 839 return (result); 840} 841 842/* 843 * vm_map_simplify_entry: 844 * 845 * Simplify the given map entry by merging with either neighbor. This 846 * routine also has the ability to merge with both neighbors. 847 * 848 * The map must be locked. 849 * 850 * This routine guarentees that the passed entry remains valid (though 851 * possibly extended). When merging, this routine may delete one or 852 * both neighbors. 
853 */ 854void 855vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry) 856{ 857 vm_map_entry_t next, prev; 858 vm_size_t prevsize, esize; 859 860 GIANT_REQUIRED; 861 862 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) 863 return; 864 865 prev = entry->prev; 866 if (prev != &map->header) { 867 prevsize = prev->end - prev->start; 868 if ( (prev->end == entry->start) && 869 (prev->object.vm_object == entry->object.vm_object) && 870 (!prev->object.vm_object || 871 (prev->offset + prevsize == entry->offset)) && 872 (prev->eflags == entry->eflags) && 873 (prev->protection == entry->protection) && 874 (prev->max_protection == entry->max_protection) && 875 (prev->inheritance == entry->inheritance) && 876 (prev->wired_count == entry->wired_count)) { 877 if (map->first_free == prev) 878 map->first_free = entry; 879 if (map->hint == prev) 880 map->hint = entry; 881 vm_map_entry_unlink(map, prev); 882 entry->start = prev->start; 883 entry->offset = prev->offset; 884 if (prev->object.vm_object) 885 vm_object_deallocate(prev->object.vm_object); 886 vm_map_entry_dispose(map, prev); 887 } 888 } 889 890 next = entry->next; 891 if (next != &map->header) { 892 esize = entry->end - entry->start; 893 if ((entry->end == next->start) && 894 (next->object.vm_object == entry->object.vm_object) && 895 (!entry->object.vm_object || 896 (entry->offset + esize == next->offset)) && 897 (next->eflags == entry->eflags) && 898 (next->protection == entry->protection) && 899 (next->max_protection == entry->max_protection) && 900 (next->inheritance == entry->inheritance) && 901 (next->wired_count == entry->wired_count)) { 902 if (map->first_free == next) 903 map->first_free = entry; 904 if (map->hint == next) 905 map->hint = entry; 906 vm_map_entry_unlink(map, next); 907 entry->end = next->end; 908 if (next->object.vm_object) 909 vm_object_deallocate(next->object.vm_object); 910 vm_map_entry_dispose(map, next); 911 } 912 } 913} 914/* 915 * vm_map_clip_start: [ internal use only ] 916 * 917 * Asserts that the given entry begins at or after 918 * the specified address; if necessary, 919 * it splits the entry into two. 920 */ 921#define vm_map_clip_start(map, entry, startaddr) \ 922{ \ 923 if (startaddr > entry->start) \ 924 _vm_map_clip_start(map, entry, startaddr); \ 925} 926 927/* 928 * This routine is called only when it is known that 929 * the entry must be split. 930 */ 931static void 932_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start) 933{ 934 vm_map_entry_t new_entry; 935 936 /* 937 * Split off the front portion -- note that we must insert the new 938 * entry BEFORE this one, so that this entry has the specified 939 * starting address. 940 */ 941 vm_map_simplify_entry(map, entry); 942 943 /* 944 * If there is no object backing this entry, we might as well create 945 * one now. If we defer it, an object can get created after the map 946 * is clipped, and individual objects will be created for the split-up 947 * map. This is a bit of a hack, but is also about the best place to 948 * put this improvement. 
949 */ 950 if (entry->object.vm_object == NULL && !map->system_map) { 951 vm_object_t object; 952 object = vm_object_allocate(OBJT_DEFAULT, 953 atop(entry->end - entry->start)); 954 entry->object.vm_object = object; 955 entry->offset = 0; 956 } 957 958 new_entry = vm_map_entry_create(map); 959 *new_entry = *entry; 960 961 new_entry->end = start; 962 entry->offset += (start - entry->start); 963 entry->start = start; 964 965 vm_map_entry_link(map, entry->prev, new_entry); 966 967 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 968 vm_object_reference(new_entry->object.vm_object); 969 } 970} 971 972/* 973 * vm_map_clip_end: [ internal use only ] 974 * 975 * Asserts that the given entry ends at or before 976 * the specified address; if necessary, 977 * it splits the entry into two. 978 */ 979#define vm_map_clip_end(map, entry, endaddr) \ 980{ \ 981 if (endaddr < entry->end) \ 982 _vm_map_clip_end(map, entry, endaddr); \ 983} 984 985/* 986 * This routine is called only when it is known that 987 * the entry must be split. 988 */ 989static void 990_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end) 991{ 992 vm_map_entry_t new_entry; 993 994 /* 995 * If there is no object backing this entry, we might as well create 996 * one now. If we defer it, an object can get created after the map 997 * is clipped, and individual objects will be created for the split-up 998 * map. This is a bit of a hack, but is also about the best place to 999 * put this improvement. 1000 */ 1001 if (entry->object.vm_object == NULL && !map->system_map) { 1002 vm_object_t object; 1003 object = vm_object_allocate(OBJT_DEFAULT, 1004 atop(entry->end - entry->start)); 1005 entry->object.vm_object = object; 1006 entry->offset = 0; 1007 } 1008 1009 /* 1010 * Create a new entry and insert it AFTER the specified entry 1011 */ 1012 new_entry = vm_map_entry_create(map); 1013 *new_entry = *entry; 1014 1015 new_entry->start = entry->end = end; 1016 new_entry->offset += (end - entry->start); 1017 1018 vm_map_entry_link(map, entry, new_entry); 1019 1020 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 1021 vm_object_reference(new_entry->object.vm_object); 1022 } 1023} 1024 1025/* 1026 * VM_MAP_RANGE_CHECK: [ internal use only ] 1027 * 1028 * Asserts that the starting and ending region 1029 * addresses fall within the valid range of the map. 1030 */ 1031#define VM_MAP_RANGE_CHECK(map, start, end) \ 1032 { \ 1033 if (start < vm_map_min(map)) \ 1034 start = vm_map_min(map); \ 1035 if (end > vm_map_max(map)) \ 1036 end = vm_map_max(map); \ 1037 if (start > end) \ 1038 start = end; \ 1039 } 1040 1041/* 1042 * vm_map_submap: [ kernel use only ] 1043 * 1044 * Mark the given range as handled by a subordinate map. 1045 * 1046 * This range must have been created with vm_map_find, 1047 * and no other operations may have been performed on this 1048 * range prior to calling vm_map_submap. 1049 * 1050 * Only a limited number of operations can be performed 1051 * within this rage after calling vm_map_submap: 1052 * vm_fault 1053 * [Don't try vm_map_copy!] 1054 * 1055 * To remove a submapping, one must first remove the 1056 * range from the superior map, and then destroy the 1057 * submap (if desired). [Better yet, don't try it.] 
1058 */ 1059int 1060vm_map_submap( 1061 vm_map_t map, 1062 vm_offset_t start, 1063 vm_offset_t end, 1064 vm_map_t submap) 1065{ 1066 vm_map_entry_t entry; 1067 int result = KERN_INVALID_ARGUMENT; 1068 1069 GIANT_REQUIRED; 1070 1071 vm_map_lock(map); 1072 1073 VM_MAP_RANGE_CHECK(map, start, end); 1074 1075 if (vm_map_lookup_entry(map, start, &entry)) { 1076 vm_map_clip_start(map, entry, start); 1077 } else 1078 entry = entry->next; 1079 1080 vm_map_clip_end(map, entry, end); 1081 1082 if ((entry->start == start) && (entry->end == end) && 1083 ((entry->eflags & MAP_ENTRY_COW) == 0) && 1084 (entry->object.vm_object == NULL)) { 1085 entry->object.sub_map = submap; 1086 entry->eflags |= MAP_ENTRY_IS_SUB_MAP; 1087 result = KERN_SUCCESS; 1088 } 1089 vm_map_unlock(map); 1090 1091 return (result); 1092} 1093 1094/* 1095 * vm_map_protect: 1096 * 1097 * Sets the protection of the specified address 1098 * region in the target map. If "set_max" is 1099 * specified, the maximum protection is to be set; 1100 * otherwise, only the current protection is affected. 1101 */ 1102int 1103vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, 1104 vm_prot_t new_prot, boolean_t set_max) 1105{ 1106 vm_map_entry_t current; 1107 vm_map_entry_t entry; 1108 1109 GIANT_REQUIRED; 1110 vm_map_lock(map); 1111 1112 VM_MAP_RANGE_CHECK(map, start, end); 1113 1114 if (vm_map_lookup_entry(map, start, &entry)) { 1115 vm_map_clip_start(map, entry, start); 1116 } else { 1117 entry = entry->next; 1118 } 1119 1120 /* 1121 * Make a first pass to check for protection violations. 1122 */ 1123 current = entry; 1124 while ((current != &map->header) && (current->start < end)) { 1125 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 1126 vm_map_unlock(map); 1127 return (KERN_INVALID_ARGUMENT); 1128 } 1129 if ((new_prot & current->max_protection) != new_prot) { 1130 vm_map_unlock(map); 1131 return (KERN_PROTECTION_FAILURE); 1132 } 1133 current = current->next; 1134 } 1135 1136 /* 1137 * Go back and fix up protections. [Note that clipping is not 1138 * necessary the second time.] 1139 */ 1140 current = entry; 1141 while ((current != &map->header) && (current->start < end)) { 1142 vm_prot_t old_prot; 1143 1144 vm_map_clip_end(map, current, end); 1145 1146 old_prot = current->protection; 1147 if (set_max) 1148 current->protection = 1149 (current->max_protection = new_prot) & 1150 old_prot; 1151 else 1152 current->protection = new_prot; 1153 1154 /* 1155 * Update physical map if necessary. Worry about copy-on-write 1156 * here -- CHECK THIS XXX 1157 */ 1158 if (current->protection != old_prot) { 1159#define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \ 1160 VM_PROT_ALL) 1161 pmap_protect(map->pmap, current->start, 1162 current->end, 1163 current->protection & MASK(current)); 1164#undef MASK 1165 } 1166 vm_map_simplify_entry(map, current); 1167 current = current->next; 1168 } 1169 vm_map_unlock(map); 1170 return (KERN_SUCCESS); 1171} 1172 1173/* 1174 * vm_map_madvise: 1175 * 1176 * This routine traverses a processes map handling the madvise 1177 * system call. Advisories are classified as either those effecting 1178 * the vm_map_entry structure, or those effecting the underlying 1179 * objects. 
1180 */ 1181int 1182vm_map_madvise( 1183 vm_map_t map, 1184 vm_offset_t start, 1185 vm_offset_t end, 1186 int behav) 1187{ 1188 vm_map_entry_t current, entry; 1189 int modify_map = 0; 1190 1191 GIANT_REQUIRED; 1192 1193 /* 1194 * Some madvise calls directly modify the vm_map_entry, in which case 1195 * we need to use an exclusive lock on the map and we need to perform 1196 * various clipping operations. Otherwise we only need a read-lock 1197 * on the map. 1198 */ 1199 switch(behav) { 1200 case MADV_NORMAL: 1201 case MADV_SEQUENTIAL: 1202 case MADV_RANDOM: 1203 case MADV_NOSYNC: 1204 case MADV_AUTOSYNC: 1205 case MADV_NOCORE: 1206 case MADV_CORE: 1207 modify_map = 1; 1208 vm_map_lock(map); 1209 break; 1210 case MADV_WILLNEED: 1211 case MADV_DONTNEED: 1212 case MADV_FREE: 1213 vm_map_lock_read(map); 1214 break; 1215 default: 1216 return (KERN_INVALID_ARGUMENT); 1217 } 1218 1219 /* 1220 * Locate starting entry and clip if necessary. 1221 */ 1222 VM_MAP_RANGE_CHECK(map, start, end); 1223 1224 if (vm_map_lookup_entry(map, start, &entry)) { 1225 if (modify_map) 1226 vm_map_clip_start(map, entry, start); 1227 } else { 1228 entry = entry->next; 1229 } 1230 1231 if (modify_map) { 1232 /* 1233 * madvise behaviors that are implemented in the vm_map_entry. 1234 * 1235 * We clip the vm_map_entry so that behavioral changes are 1236 * limited to the specified address range. 1237 */ 1238 for (current = entry; 1239 (current != &map->header) && (current->start < end); 1240 current = current->next 1241 ) { 1242 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 1243 continue; 1244 1245 vm_map_clip_end(map, current, end); 1246 1247 switch (behav) { 1248 case MADV_NORMAL: 1249 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL); 1250 break; 1251 case MADV_SEQUENTIAL: 1252 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL); 1253 break; 1254 case MADV_RANDOM: 1255 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM); 1256 break; 1257 case MADV_NOSYNC: 1258 current->eflags |= MAP_ENTRY_NOSYNC; 1259 break; 1260 case MADV_AUTOSYNC: 1261 current->eflags &= ~MAP_ENTRY_NOSYNC; 1262 break; 1263 case MADV_NOCORE: 1264 current->eflags |= MAP_ENTRY_NOCOREDUMP; 1265 break; 1266 case MADV_CORE: 1267 current->eflags &= ~MAP_ENTRY_NOCOREDUMP; 1268 break; 1269 default: 1270 break; 1271 } 1272 vm_map_simplify_entry(map, current); 1273 } 1274 vm_map_unlock(map); 1275 } else { 1276 vm_pindex_t pindex; 1277 int count; 1278 1279 /* 1280 * madvise behaviors that are implemented in the underlying 1281 * vm_object. 1282 * 1283 * Since we don't clip the vm_map_entry, we have to clip 1284 * the vm_object pindex and count. 
1285 */ 1286 for (current = entry; 1287 (current != &map->header) && (current->start < end); 1288 current = current->next 1289 ) { 1290 vm_offset_t useStart; 1291 1292 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 1293 continue; 1294 1295 pindex = OFF_TO_IDX(current->offset); 1296 count = atop(current->end - current->start); 1297 useStart = current->start; 1298 1299 if (current->start < start) { 1300 pindex += atop(start - current->start); 1301 count -= atop(start - current->start); 1302 useStart = start; 1303 } 1304 if (current->end > end) 1305 count -= atop(current->end - end); 1306 1307 if (count <= 0) 1308 continue; 1309 1310 vm_object_madvise(current->object.vm_object, 1311 pindex, count, behav); 1312 if (behav == MADV_WILLNEED) { 1313 pmap_object_init_pt( 1314 map->pmap, 1315 useStart, 1316 current->object.vm_object, 1317 pindex, 1318 (count << PAGE_SHIFT), 1319 MAP_PREFAULT_MADVISE 1320 ); 1321 } 1322 } 1323 vm_map_unlock_read(map); 1324 } 1325 return (0); 1326} 1327 1328 1329/* 1330 * vm_map_inherit: 1331 * 1332 * Sets the inheritance of the specified address 1333 * range in the target map. Inheritance 1334 * affects how the map will be shared with 1335 * child maps at the time of vm_map_fork. 1336 */ 1337int 1338vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, 1339 vm_inherit_t new_inheritance) 1340{ 1341 vm_map_entry_t entry; 1342 vm_map_entry_t temp_entry; 1343 1344 GIANT_REQUIRED; 1345 1346 switch (new_inheritance) { 1347 case VM_INHERIT_NONE: 1348 case VM_INHERIT_COPY: 1349 case VM_INHERIT_SHARE: 1350 break; 1351 default: 1352 return (KERN_INVALID_ARGUMENT); 1353 } 1354 1355 vm_map_lock(map); 1356 1357 VM_MAP_RANGE_CHECK(map, start, end); 1358 1359 if (vm_map_lookup_entry(map, start, &temp_entry)) { 1360 entry = temp_entry; 1361 vm_map_clip_start(map, entry, start); 1362 } else 1363 entry = temp_entry->next; 1364 1365 while ((entry != &map->header) && (entry->start < end)) { 1366 vm_map_clip_end(map, entry, end); 1367 1368 entry->inheritance = new_inheritance; 1369 1370 vm_map_simplify_entry(map, entry); 1371 1372 entry = entry->next; 1373 } 1374 1375 vm_map_unlock(map); 1376 return (KERN_SUCCESS); 1377} 1378 1379/* 1380 * Implement the semantics of mlock 1381 */ 1382int 1383vm_map_user_pageable( 1384 vm_map_t map, 1385 vm_offset_t start, 1386 vm_offset_t end, 1387 boolean_t new_pageable) 1388{ 1389 vm_map_entry_t entry; 1390 vm_map_entry_t start_entry; 1391 vm_offset_t estart; 1392 vm_offset_t eend; 1393 int rv; 1394 1395 vm_map_lock(map); 1396 VM_MAP_RANGE_CHECK(map, start, end); 1397 1398 if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) { 1399 vm_map_unlock(map); 1400 return (KERN_INVALID_ADDRESS); 1401 } 1402 1403 if (new_pageable) { 1404 1405 entry = start_entry; 1406 vm_map_clip_start(map, entry, start); 1407 1408 /* 1409 * Now decrement the wiring count for each region. If a region 1410 * becomes completely unwired, unwire its physical pages and 1411 * mappings. 
1412 */ 1413 while ((entry != &map->header) && (entry->start < end)) { 1414 if (entry->eflags & MAP_ENTRY_USER_WIRED) { 1415 vm_map_clip_end(map, entry, end); 1416 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 1417 entry->wired_count--; 1418 if (entry->wired_count == 0) 1419 vm_fault_unwire(map, entry->start, entry->end); 1420 } 1421 vm_map_simplify_entry(map,entry); 1422 entry = entry->next; 1423 } 1424 } else { 1425 1426 entry = start_entry; 1427 1428 while ((entry != &map->header) && (entry->start < end)) { 1429 1430 if (entry->eflags & MAP_ENTRY_USER_WIRED) { 1431 entry = entry->next; 1432 continue; 1433 } 1434 1435 if (entry->wired_count != 0) { 1436 entry->wired_count++; 1437 entry->eflags |= MAP_ENTRY_USER_WIRED; 1438 entry = entry->next; 1439 continue; 1440 } 1441 1442 /* Here on entry being newly wired */ 1443 1444 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 1445 int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY; 1446 if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) { 1447 1448 vm_object_shadow(&entry->object.vm_object, 1449 &entry->offset, 1450 atop(entry->end - entry->start)); 1451 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 1452 1453 } else if (entry->object.vm_object == NULL && 1454 !map->system_map) { 1455 1456 entry->object.vm_object = 1457 vm_object_allocate(OBJT_DEFAULT, 1458 atop(entry->end - entry->start)); 1459 entry->offset = (vm_offset_t) 0; 1460 1461 } 1462 } 1463 1464 vm_map_clip_start(map, entry, start); 1465 vm_map_clip_end(map, entry, end); 1466 1467 entry->wired_count++; 1468 entry->eflags |= MAP_ENTRY_USER_WIRED; 1469 estart = entry->start; 1470 eend = entry->end; 1471 1472 /* First we need to allow map modifications */
| 429} 430 431/* 432 * vm_map_entry_dispose: [ internal use only ] 433 * 434 * Inverse of vm_map_entry_create. 435 */ 436static void 437vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry) 438{ 439 zfree((map->system_map || !mapentzone) ? kmapentzone : mapentzone, entry); 440} 441 442/* 443 * vm_map_entry_create: [ internal use only ] 444 * 445 * Allocates a VM map entry for insertion. 446 * No entry fields are filled in. 447 */ 448static vm_map_entry_t 449vm_map_entry_create(vm_map_t map) 450{ 451 vm_map_entry_t new_entry; 452 453 new_entry = zalloc((map->system_map || !mapentzone) ? 454 kmapentzone : mapentzone); 455 if (new_entry == NULL) 456 panic("vm_map_entry_create: kernel resources exhausted"); 457 return (new_entry); 458} 459 460/* 461 * vm_map_entry_{un,}link: 462 * 463 * Insert/remove entries from maps. 464 */ 465static __inline void 466vm_map_entry_link(vm_map_t map, 467 vm_map_entry_t after_where, 468 vm_map_entry_t entry) 469{ 470 471 CTR4(KTR_VM, 472 "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map, 473 map->nentries, entry, after_where); 474 map->nentries++; 475 entry->prev = after_where; 476 entry->next = after_where->next; 477 entry->next->prev = entry; 478 after_where->next = entry; 479} 480 481static __inline void 482vm_map_entry_unlink(vm_map_t map, 483 vm_map_entry_t entry) 484{ 485 vm_map_entry_t prev = entry->prev; 486 vm_map_entry_t next = entry->next; 487 488 next->prev = prev; 489 prev->next = next; 490 map->nentries--; 491 CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map, 492 map->nentries, entry); 493} 494 495/* 496 * SAVE_HINT: 497 * 498 * Saves the specified entry as the hint for 499 * future lookups. 500 */ 501#define SAVE_HINT(map,value) \ 502 (map)->hint = (value); 503 504/* 505 * vm_map_lookup_entry: [ internal use only ] 506 * 507 * Finds the map entry containing (or 508 * immediately preceding) the specified address 509 * in the given map; the entry is returned 510 * in the "entry" parameter. The boolean 511 * result indicates whether the address is 512 * actually contained in the map. 513 */ 514boolean_t 515vm_map_lookup_entry( 516 vm_map_t map, 517 vm_offset_t address, 518 vm_map_entry_t *entry) /* OUT */ 519{ 520 vm_map_entry_t cur; 521 vm_map_entry_t last; 522 523 GIANT_REQUIRED; 524 /* 525 * Start looking either from the head of the list, or from the hint. 526 */ 527 cur = map->hint; 528 529 if (cur == &map->header) 530 cur = cur->next; 531 532 if (address >= cur->start) { 533 /* 534 * Go from hint to end of list. 535 * 536 * But first, make a quick check to see if we are already looking 537 * at the entry we want (which is usually the case). Note also 538 * that we don't need to save the hint here... it is the same 539 * hint (unless we are at the header, in which case the hint 540 * didn't buy us anything anyway). 
541 */ 542 last = &map->header; 543 if ((cur != last) && (cur->end > address)) { 544 *entry = cur; 545 return (TRUE); 546 } 547 } else { 548 /* 549 * Go from start to hint, *inclusively* 550 */ 551 last = cur->next; 552 cur = map->header.next; 553 } 554 555 /* 556 * Search linearly 557 */ 558 while (cur != last) { 559 if (cur->end > address) { 560 if (address >= cur->start) { 561 /* 562 * Save this lookup for future hints, and 563 * return 564 */ 565 *entry = cur; 566 SAVE_HINT(map, cur); 567 return (TRUE); 568 } 569 break; 570 } 571 cur = cur->next; 572 } 573 *entry = cur->prev; 574 SAVE_HINT(map, *entry); 575 return (FALSE); 576} 577 578/* 579 * vm_map_insert: 580 * 581 * Inserts the given whole VM object into the target 582 * map at the specified address range. The object's 583 * size should match that of the address range. 584 * 585 * Requires that the map be locked, and leaves it so. 586 * 587 * If object is non-NULL, ref count must be bumped by caller 588 * prior to making call to account for the new entry. 589 */ 590int 591vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 592 vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, 593 int cow) 594{ 595 vm_map_entry_t new_entry; 596 vm_map_entry_t prev_entry; 597 vm_map_entry_t temp_entry; 598 vm_eflags_t protoeflags; 599 600 GIANT_REQUIRED; 601 602 /* 603 * Check that the start and end points are not bogus. 604 */ 605 if ((start < map->min_offset) || (end > map->max_offset) || 606 (start >= end)) 607 return (KERN_INVALID_ADDRESS); 608 609 /* 610 * Find the entry prior to the proposed starting address; if it's part 611 * of an existing entry, this range is bogus. 612 */ 613 if (vm_map_lookup_entry(map, start, &temp_entry)) 614 return (KERN_NO_SPACE); 615 616 prev_entry = temp_entry; 617 618 /* 619 * Assert that the next entry doesn't overlap the end point. 620 */ 621 if ((prev_entry->next != &map->header) && 622 (prev_entry->next->start < end)) 623 return (KERN_NO_SPACE); 624 625 protoeflags = 0; 626 627 if (cow & MAP_COPY_ON_WRITE) 628 protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY; 629 630 if (cow & MAP_NOFAULT) { 631 protoeflags |= MAP_ENTRY_NOFAULT; 632 633 KASSERT(object == NULL, 634 ("vm_map_insert: paradoxical MAP_NOFAULT request")); 635 } 636 if (cow & MAP_DISABLE_SYNCER) 637 protoeflags |= MAP_ENTRY_NOSYNC; 638 if (cow & MAP_DISABLE_COREDUMP) 639 protoeflags |= MAP_ENTRY_NOCOREDUMP; 640 641 if (object) { 642 /* 643 * When object is non-NULL, it could be shared with another 644 * process. We have to set or clear OBJ_ONEMAPPING 645 * appropriately. 646 */ 647 if ((object->ref_count > 1) || (object->shadow_count != 0)) { 648 vm_object_clear_flag(object, OBJ_ONEMAPPING); 649 } 650 } 651 else if ((prev_entry != &map->header) && 652 (prev_entry->eflags == protoeflags) && 653 (prev_entry->end == start) && 654 (prev_entry->wired_count == 0) && 655 ((prev_entry->object.vm_object == NULL) || 656 vm_object_coalesce(prev_entry->object.vm_object, 657 OFF_TO_IDX(prev_entry->offset), 658 (vm_size_t)(prev_entry->end - prev_entry->start), 659 (vm_size_t)(end - prev_entry->end)))) { 660 /* 661 * We were able to extend the object. Determine if we 662 * can extend the previous map entry to include the 663 * new range as well. 
664 */ 665 if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) && 666 (prev_entry->protection == prot) && 667 (prev_entry->max_protection == max)) { 668 map->size += (end - prev_entry->end); 669 prev_entry->end = end; 670 vm_map_simplify_entry(map, prev_entry); 671 return (KERN_SUCCESS); 672 } 673 674 /* 675 * If we can extend the object but cannot extend the 676 * map entry, we have to create a new map entry. We 677 * must bump the ref count on the extended object to 678 * account for it. object may be NULL. 679 */ 680 object = prev_entry->object.vm_object; 681 offset = prev_entry->offset + 682 (prev_entry->end - prev_entry->start); 683 vm_object_reference(object); 684 } 685 686 /* 687 * NOTE: if conditionals fail, object can be NULL here. This occurs 688 * in things like the buffer map where we manage kva but do not manage 689 * backing objects. 690 */ 691 692 /* 693 * Create a new entry 694 */ 695 new_entry = vm_map_entry_create(map); 696 new_entry->start = start; 697 new_entry->end = end; 698 699 new_entry->eflags = protoeflags; 700 new_entry->object.vm_object = object; 701 new_entry->offset = offset; 702 new_entry->avail_ssize = 0; 703 704 new_entry->inheritance = VM_INHERIT_DEFAULT; 705 new_entry->protection = prot; 706 new_entry->max_protection = max; 707 new_entry->wired_count = 0; 708 709 /* 710 * Insert the new entry into the list 711 */ 712 vm_map_entry_link(map, prev_entry, new_entry); 713 map->size += new_entry->end - new_entry->start; 714 715 /* 716 * Update the free space hint 717 */ 718 if ((map->first_free == prev_entry) && 719 (prev_entry->end >= new_entry->start)) { 720 map->first_free = new_entry; 721 } 722 723#if 0 724 /* 725 * Temporarily removed to avoid MAP_STACK panic, due to 726 * MAP_STACK being a huge hack. Will be added back in 727 * when MAP_STACK (and the user stack mapping) is fixed. 728 */ 729 /* 730 * It may be possible to simplify the entry 731 */ 732 vm_map_simplify_entry(map, new_entry); 733#endif 734 735 if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) { 736 pmap_object_init_pt(map->pmap, start, 737 object, OFF_TO_IDX(offset), end - start, 738 cow & MAP_PREFAULT_PARTIAL); 739 } 740 741 return (KERN_SUCCESS); 742} 743 744/* 745 * Find sufficient space for `length' bytes in the given map, starting at 746 * `start'. The map must be locked. Returns 0 on success, 1 on no space. 747 */ 748int 749vm_map_findspace( 750 vm_map_t map, 751 vm_offset_t start, 752 vm_size_t length, 753 vm_offset_t *addr) 754{ 755 vm_map_entry_t entry, next; 756 vm_offset_t end; 757 758 GIANT_REQUIRED; 759 if (start < map->min_offset) 760 start = map->min_offset; 761 if (start > map->max_offset) 762 return (1); 763 764 /* 765 * Look for the first possible address; if there's already something 766 * at this address, we have to start after it. 767 */ 768 if (start == map->min_offset) { 769 if ((entry = map->first_free) != &map->header) 770 start = entry->end; 771 } else { 772 vm_map_entry_t tmp; 773 774 if (vm_map_lookup_entry(map, start, &tmp)) 775 start = tmp->end; 776 entry = tmp; 777 } 778 779 /* 780 * Look through the rest of the map, trying to fit a new region in the 781 * gap between existing regions, or after the very last region. 782 */ 783 for (;; start = (entry = next)->end) { 784 /* 785 * Find the end of the proposed new region. Be sure we didn't 786 * go beyond the end of the map, or wrap around the address; 787 * if so, we lose. Otherwise, if this is the last entry, or 788 * if the proposed new region fits before the next entry, we 789 * win. 
790 */ 791 end = start + length; 792 if (end > map->max_offset || end < start) 793 return (1); 794 next = entry->next; 795 if (next == &map->header || next->start >= end) 796 break; 797 } 798 SAVE_HINT(map, entry); 799 *addr = start; 800 if (map == kernel_map) { 801 vm_offset_t ksize; 802 if ((ksize = round_page(start + length)) > kernel_vm_end) { 803 pmap_growkernel(ksize); 804 } 805 } 806 return (0); 807} 808 809/* 810 * vm_map_find finds an unallocated region in the target address 811 * map with the given length. The search is defined to be 812 * first-fit from the specified address; the region found is 813 * returned in the same parameter. 814 * 815 * If object is non-NULL, ref count must be bumped by caller 816 * prior to making call to account for the new entry. 817 */ 818int 819vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 820 vm_offset_t *addr, /* IN/OUT */ 821 vm_size_t length, boolean_t find_space, vm_prot_t prot, 822 vm_prot_t max, int cow) 823{ 824 vm_offset_t start; 825 int result, s = 0; 826 827 GIANT_REQUIRED; 828 829 start = *addr; 830 831 if (map == kmem_map) 832 s = splvm(); 833 834 vm_map_lock(map); 835 if (find_space) { 836 if (vm_map_findspace(map, start, length, addr)) { 837 vm_map_unlock(map); 838 if (map == kmem_map) 839 splx(s); 840 return (KERN_NO_SPACE); 841 } 842 start = *addr; 843 } 844 result = vm_map_insert(map, object, offset, 845 start, start + length, prot, max, cow); 846 vm_map_unlock(map); 847 848 if (map == kmem_map) 849 splx(s); 850 851 return (result); 852} 853 854/* 855 * vm_map_simplify_entry: 856 * 857 * Simplify the given map entry by merging with either neighbor. This 858 * routine also has the ability to merge with both neighbors. 859 * 860 * The map must be locked. 861 * 862 * This routine guarentees that the passed entry remains valid (though 863 * possibly extended). When merging, this routine may delete one or 864 * both neighbors. 
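 *
 * (For instance, after vm_map_protect() restores a clipped range to the
 * protection of its neighbors, the equality checks below are satisfied
 * again and the pieces can be merged back into one entry.)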
865 */ 866void 867vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry) 868{ 869 vm_map_entry_t next, prev; 870 vm_size_t prevsize, esize; 871 872 GIANT_REQUIRED; 873 874 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) 875 return; 876 877 prev = entry->prev; 878 if (prev != &map->header) { 879 prevsize = prev->end - prev->start; 880 if ( (prev->end == entry->start) && 881 (prev->object.vm_object == entry->object.vm_object) && 882 (!prev->object.vm_object || 883 (prev->offset + prevsize == entry->offset)) && 884 (prev->eflags == entry->eflags) && 885 (prev->protection == entry->protection) && 886 (prev->max_protection == entry->max_protection) && 887 (prev->inheritance == entry->inheritance) && 888 (prev->wired_count == entry->wired_count)) { 889 if (map->first_free == prev) 890 map->first_free = entry; 891 if (map->hint == prev) 892 map->hint = entry; 893 vm_map_entry_unlink(map, prev); 894 entry->start = prev->start; 895 entry->offset = prev->offset; 896 if (prev->object.vm_object) 897 vm_object_deallocate(prev->object.vm_object); 898 vm_map_entry_dispose(map, prev); 899 } 900 } 901 902 next = entry->next; 903 if (next != &map->header) { 904 esize = entry->end - entry->start; 905 if ((entry->end == next->start) && 906 (next->object.vm_object == entry->object.vm_object) && 907 (!entry->object.vm_object || 908 (entry->offset + esize == next->offset)) && 909 (next->eflags == entry->eflags) && 910 (next->protection == entry->protection) && 911 (next->max_protection == entry->max_protection) && 912 (next->inheritance == entry->inheritance) && 913 (next->wired_count == entry->wired_count)) { 914 if (map->first_free == next) 915 map->first_free = entry; 916 if (map->hint == next) 917 map->hint = entry; 918 vm_map_entry_unlink(map, next); 919 entry->end = next->end; 920 if (next->object.vm_object) 921 vm_object_deallocate(next->object.vm_object); 922 vm_map_entry_dispose(map, next); 923 } 924 } 925} 926/* 927 * vm_map_clip_start: [ internal use only ] 928 * 929 * Asserts that the given entry begins at or after 930 * the specified address; if necessary, 931 * it splits the entry into two. 932 */ 933#define vm_map_clip_start(map, entry, startaddr) \ 934{ \ 935 if (startaddr > entry->start) \ 936 _vm_map_clip_start(map, entry, startaddr); \ 937} 938 939/* 940 * This routine is called only when it is known that 941 * the entry must be split. 942 */ 943static void 944_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start) 945{ 946 vm_map_entry_t new_entry; 947 948 /* 949 * Split off the front portion -- note that we must insert the new 950 * entry BEFORE this one, so that this entry has the specified 951 * starting address. 952 */ 953 vm_map_simplify_entry(map, entry); 954 955 /* 956 * If there is no object backing this entry, we might as well create 957 * one now. If we defer it, an object can get created after the map 958 * is clipped, and individual objects will be created for the split-up 959 * map. This is a bit of a hack, but is also about the best place to 960 * put this improvement. 
961 */ 962 if (entry->object.vm_object == NULL && !map->system_map) { 963 vm_object_t object; 964 object = vm_object_allocate(OBJT_DEFAULT, 965 atop(entry->end - entry->start)); 966 entry->object.vm_object = object; 967 entry->offset = 0; 968 } 969 970 new_entry = vm_map_entry_create(map); 971 *new_entry = *entry; 972 973 new_entry->end = start; 974 entry->offset += (start - entry->start); 975 entry->start = start; 976 977 vm_map_entry_link(map, entry->prev, new_entry); 978 979 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 980 vm_object_reference(new_entry->object.vm_object); 981 } 982} 983 984/* 985 * vm_map_clip_end: [ internal use only ] 986 * 987 * Asserts that the given entry ends at or before 988 * the specified address; if necessary, 989 * it splits the entry into two. 990 */ 991#define vm_map_clip_end(map, entry, endaddr) \ 992{ \ 993 if (endaddr < entry->end) \ 994 _vm_map_clip_end(map, entry, endaddr); \ 995} 996 997/* 998 * This routine is called only when it is known that 999 * the entry must be split. 1000 */ 1001static void 1002_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end) 1003{ 1004 vm_map_entry_t new_entry; 1005 1006 /* 1007 * If there is no object backing this entry, we might as well create 1008 * one now. If we defer it, an object can get created after the map 1009 * is clipped, and individual objects will be created for the split-up 1010 * map. This is a bit of a hack, but is also about the best place to 1011 * put this improvement. 1012 */ 1013 if (entry->object.vm_object == NULL && !map->system_map) { 1014 vm_object_t object; 1015 object = vm_object_allocate(OBJT_DEFAULT, 1016 atop(entry->end - entry->start)); 1017 entry->object.vm_object = object; 1018 entry->offset = 0; 1019 } 1020 1021 /* 1022 * Create a new entry and insert it AFTER the specified entry 1023 */ 1024 new_entry = vm_map_entry_create(map); 1025 *new_entry = *entry; 1026 1027 new_entry->start = entry->end = end; 1028 new_entry->offset += (end - entry->start); 1029 1030 vm_map_entry_link(map, entry, new_entry); 1031 1032 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 1033 vm_object_reference(new_entry->object.vm_object); 1034 } 1035} 1036 1037/* 1038 * VM_MAP_RANGE_CHECK: [ internal use only ] 1039 * 1040 * Asserts that the starting and ending region 1041 * addresses fall within the valid range of the map. 1042 */ 1043#define VM_MAP_RANGE_CHECK(map, start, end) \ 1044 { \ 1045 if (start < vm_map_min(map)) \ 1046 start = vm_map_min(map); \ 1047 if (end > vm_map_max(map)) \ 1048 end = vm_map_max(map); \ 1049 if (start > end) \ 1050 start = end; \ 1051 } 1052 1053/* 1054 * vm_map_submap: [ kernel use only ] 1055 * 1056 * Mark the given range as handled by a subordinate map. 1057 * 1058 * This range must have been created with vm_map_find, 1059 * and no other operations may have been performed on this 1060 * range prior to calling vm_map_submap. 1061 * 1062 * Only a limited number of operations can be performed 1063 * within this rage after calling vm_map_submap: 1064 * vm_fault 1065 * [Don't try vm_map_copy!] 1066 * 1067 * To remove a submapping, one must first remove the 1068 * range from the superior map, and then destroy the 1069 * submap (if desired). [Better yet, don't try it.] 
1070 */ 1071int 1072vm_map_submap( 1073 vm_map_t map, 1074 vm_offset_t start, 1075 vm_offset_t end, 1076 vm_map_t submap) 1077{ 1078 vm_map_entry_t entry; 1079 int result = KERN_INVALID_ARGUMENT; 1080 1081 GIANT_REQUIRED; 1082 1083 vm_map_lock(map); 1084 1085 VM_MAP_RANGE_CHECK(map, start, end); 1086 1087 if (vm_map_lookup_entry(map, start, &entry)) { 1088 vm_map_clip_start(map, entry, start); 1089 } else 1090 entry = entry->next; 1091 1092 vm_map_clip_end(map, entry, end); 1093 1094 if ((entry->start == start) && (entry->end == end) && 1095 ((entry->eflags & MAP_ENTRY_COW) == 0) && 1096 (entry->object.vm_object == NULL)) { 1097 entry->object.sub_map = submap; 1098 entry->eflags |= MAP_ENTRY_IS_SUB_MAP; 1099 result = KERN_SUCCESS; 1100 } 1101 vm_map_unlock(map); 1102 1103 return (result); 1104} 1105 1106/* 1107 * vm_map_protect: 1108 * 1109 * Sets the protection of the specified address 1110 * region in the target map. If "set_max" is 1111 * specified, the maximum protection is to be set; 1112 * otherwise, only the current protection is affected. 1113 */ 1114int 1115vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, 1116 vm_prot_t new_prot, boolean_t set_max) 1117{ 1118 vm_map_entry_t current; 1119 vm_map_entry_t entry; 1120 1121 GIANT_REQUIRED; 1122 vm_map_lock(map); 1123 1124 VM_MAP_RANGE_CHECK(map, start, end); 1125 1126 if (vm_map_lookup_entry(map, start, &entry)) { 1127 vm_map_clip_start(map, entry, start); 1128 } else { 1129 entry = entry->next; 1130 } 1131 1132 /* 1133 * Make a first pass to check for protection violations. 1134 */ 1135 current = entry; 1136 while ((current != &map->header) && (current->start < end)) { 1137 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 1138 vm_map_unlock(map); 1139 return (KERN_INVALID_ARGUMENT); 1140 } 1141 if ((new_prot & current->max_protection) != new_prot) { 1142 vm_map_unlock(map); 1143 return (KERN_PROTECTION_FAILURE); 1144 } 1145 current = current->next; 1146 } 1147 1148 /* 1149 * Go back and fix up protections. [Note that clipping is not 1150 * necessary the second time.] 1151 */ 1152 current = entry; 1153 while ((current != &map->header) && (current->start < end)) { 1154 vm_prot_t old_prot; 1155 1156 vm_map_clip_end(map, current, end); 1157 1158 old_prot = current->protection; 1159 if (set_max) 1160 current->protection = 1161 (current->max_protection = new_prot) & 1162 old_prot; 1163 else 1164 current->protection = new_prot; 1165 1166 /* 1167 * Update physical map if necessary. Worry about copy-on-write 1168 * here -- CHECK THIS XXX 1169 */ 1170 if (current->protection != old_prot) { 1171#define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \ 1172 VM_PROT_ALL) 1173 pmap_protect(map->pmap, current->start, 1174 current->end, 1175 current->protection & MASK(current)); 1176#undef MASK 1177 } 1178 vm_map_simplify_entry(map, current); 1179 current = current->next; 1180 } 1181 vm_map_unlock(map); 1182 return (KERN_SUCCESS); 1183} 1184 1185/* 1186 * vm_map_madvise: 1187 * 1188 * This routine traverses a processes map handling the madvise 1189 * system call. Advisories are classified as either those effecting 1190 * the vm_map_entry structure, or those effecting the underlying 1191 * objects. 
1192 */ 1193int 1194vm_map_madvise( 1195 vm_map_t map, 1196 vm_offset_t start, 1197 vm_offset_t end, 1198 int behav) 1199{ 1200 vm_map_entry_t current, entry; 1201 int modify_map = 0; 1202 1203 GIANT_REQUIRED; 1204 1205 /* 1206 * Some madvise calls directly modify the vm_map_entry, in which case 1207 * we need to use an exclusive lock on the map and we need to perform 1208 * various clipping operations. Otherwise we only need a read-lock 1209 * on the map. 1210 */ 1211 switch(behav) { 1212 case MADV_NORMAL: 1213 case MADV_SEQUENTIAL: 1214 case MADV_RANDOM: 1215 case MADV_NOSYNC: 1216 case MADV_AUTOSYNC: 1217 case MADV_NOCORE: 1218 case MADV_CORE: 1219 modify_map = 1; 1220 vm_map_lock(map); 1221 break; 1222 case MADV_WILLNEED: 1223 case MADV_DONTNEED: 1224 case MADV_FREE: 1225 vm_map_lock_read(map); 1226 break; 1227 default: 1228 return (KERN_INVALID_ARGUMENT); 1229 } 1230 1231 /* 1232 * Locate starting entry and clip if necessary. 1233 */ 1234 VM_MAP_RANGE_CHECK(map, start, end); 1235 1236 if (vm_map_lookup_entry(map, start, &entry)) { 1237 if (modify_map) 1238 vm_map_clip_start(map, entry, start); 1239 } else { 1240 entry = entry->next; 1241 } 1242 1243 if (modify_map) { 1244 /* 1245 * madvise behaviors that are implemented in the vm_map_entry. 1246 * 1247 * We clip the vm_map_entry so that behavioral changes are 1248 * limited to the specified address range. 1249 */ 1250 for (current = entry; 1251 (current != &map->header) && (current->start < end); 1252 current = current->next 1253 ) { 1254 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 1255 continue; 1256 1257 vm_map_clip_end(map, current, end); 1258 1259 switch (behav) { 1260 case MADV_NORMAL: 1261 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL); 1262 break; 1263 case MADV_SEQUENTIAL: 1264 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL); 1265 break; 1266 case MADV_RANDOM: 1267 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM); 1268 break; 1269 case MADV_NOSYNC: 1270 current->eflags |= MAP_ENTRY_NOSYNC; 1271 break; 1272 case MADV_AUTOSYNC: 1273 current->eflags &= ~MAP_ENTRY_NOSYNC; 1274 break; 1275 case MADV_NOCORE: 1276 current->eflags |= MAP_ENTRY_NOCOREDUMP; 1277 break; 1278 case MADV_CORE: 1279 current->eflags &= ~MAP_ENTRY_NOCOREDUMP; 1280 break; 1281 default: 1282 break; 1283 } 1284 vm_map_simplify_entry(map, current); 1285 } 1286 vm_map_unlock(map); 1287 } else { 1288 vm_pindex_t pindex; 1289 int count; 1290 1291 /* 1292 * madvise behaviors that are implemented in the underlying 1293 * vm_object. 1294 * 1295 * Since we don't clip the vm_map_entry, we have to clip 1296 * the vm_object pindex and count. 
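		 *
		 * (E.g., when the requested start falls inside the entry,
		 * pindex is advanced by atop(start - current->start) and
		 * count is reduced by the same amount, as done below.)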
1297 */ 1298 for (current = entry; 1299 (current != &map->header) && (current->start < end); 1300 current = current->next 1301 ) { 1302 vm_offset_t useStart; 1303 1304 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 1305 continue; 1306 1307 pindex = OFF_TO_IDX(current->offset); 1308 count = atop(current->end - current->start); 1309 useStart = current->start; 1310 1311 if (current->start < start) { 1312 pindex += atop(start - current->start); 1313 count -= atop(start - current->start); 1314 useStart = start; 1315 } 1316 if (current->end > end) 1317 count -= atop(current->end - end); 1318 1319 if (count <= 0) 1320 continue; 1321 1322 vm_object_madvise(current->object.vm_object, 1323 pindex, count, behav); 1324 if (behav == MADV_WILLNEED) { 1325 pmap_object_init_pt( 1326 map->pmap, 1327 useStart, 1328 current->object.vm_object, 1329 pindex, 1330 (count << PAGE_SHIFT), 1331 MAP_PREFAULT_MADVISE 1332 ); 1333 } 1334 } 1335 vm_map_unlock_read(map); 1336 } 1337 return (0); 1338} 1339 1340 1341/* 1342 * vm_map_inherit: 1343 * 1344 * Sets the inheritance of the specified address 1345 * range in the target map. Inheritance 1346 * affects how the map will be shared with 1347 * child maps at the time of vm_map_fork. 1348 */ 1349int 1350vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, 1351 vm_inherit_t new_inheritance) 1352{ 1353 vm_map_entry_t entry; 1354 vm_map_entry_t temp_entry; 1355 1356 GIANT_REQUIRED; 1357 1358 switch (new_inheritance) { 1359 case VM_INHERIT_NONE: 1360 case VM_INHERIT_COPY: 1361 case VM_INHERIT_SHARE: 1362 break; 1363 default: 1364 return (KERN_INVALID_ARGUMENT); 1365 } 1366 1367 vm_map_lock(map); 1368 1369 VM_MAP_RANGE_CHECK(map, start, end); 1370 1371 if (vm_map_lookup_entry(map, start, &temp_entry)) { 1372 entry = temp_entry; 1373 vm_map_clip_start(map, entry, start); 1374 } else 1375 entry = temp_entry->next; 1376 1377 while ((entry != &map->header) && (entry->start < end)) { 1378 vm_map_clip_end(map, entry, end); 1379 1380 entry->inheritance = new_inheritance; 1381 1382 vm_map_simplify_entry(map, entry); 1383 1384 entry = entry->next; 1385 } 1386 1387 vm_map_unlock(map); 1388 return (KERN_SUCCESS); 1389} 1390 1391/* 1392 * Implement the semantics of mlock 1393 */ 1394int 1395vm_map_user_pageable( 1396 vm_map_t map, 1397 vm_offset_t start, 1398 vm_offset_t end, 1399 boolean_t new_pageable) 1400{ 1401 vm_map_entry_t entry; 1402 vm_map_entry_t start_entry; 1403 vm_offset_t estart; 1404 vm_offset_t eend; 1405 int rv; 1406 1407 vm_map_lock(map); 1408 VM_MAP_RANGE_CHECK(map, start, end); 1409 1410 if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) { 1411 vm_map_unlock(map); 1412 return (KERN_INVALID_ADDRESS); 1413 } 1414 1415 if (new_pageable) { 1416 1417 entry = start_entry; 1418 vm_map_clip_start(map, entry, start); 1419 1420 /* 1421 * Now decrement the wiring count for each region. If a region 1422 * becomes completely unwired, unwire its physical pages and 1423 * mappings. 
1424 */ 1425 while ((entry != &map->header) && (entry->start < end)) { 1426 if (entry->eflags & MAP_ENTRY_USER_WIRED) { 1427 vm_map_clip_end(map, entry, end); 1428 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 1429 entry->wired_count--; 1430 if (entry->wired_count == 0) 1431 vm_fault_unwire(map, entry->start, entry->end); 1432 } 1433 vm_map_simplify_entry(map,entry); 1434 entry = entry->next; 1435 } 1436 } else { 1437 1438 entry = start_entry; 1439 1440 while ((entry != &map->header) && (entry->start < end)) { 1441 1442 if (entry->eflags & MAP_ENTRY_USER_WIRED) { 1443 entry = entry->next; 1444 continue; 1445 } 1446 1447 if (entry->wired_count != 0) { 1448 entry->wired_count++; 1449 entry->eflags |= MAP_ENTRY_USER_WIRED; 1450 entry = entry->next; 1451 continue; 1452 } 1453 1454 /* Here on entry being newly wired */ 1455 1456 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 1457 int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY; 1458 if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) { 1459 1460 vm_object_shadow(&entry->object.vm_object, 1461 &entry->offset, 1462 atop(entry->end - entry->start)); 1463 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 1464 1465 } else if (entry->object.vm_object == NULL && 1466 !map->system_map) { 1467 1468 entry->object.vm_object = 1469 vm_object_allocate(OBJT_DEFAULT, 1470 atop(entry->end - entry->start)); 1471 entry->offset = (vm_offset_t) 0; 1472 1473 } 1474 } 1475 1476 vm_map_clip_start(map, entry, start); 1477 vm_map_clip_end(map, entry, end); 1478 1479 entry->wired_count++; 1480 entry->eflags |= MAP_ENTRY_USER_WIRED; 1481 estart = entry->start; 1482 eend = entry->end; 1483 1484 /* First we need to allow map modifications */
			vm_map_set_recursive(map);
			vm_map_lock_downgrade(map);
			map->timestamp++;

			rv = vm_fault_user_wire(map, entry->start, entry->end);
			if (rv) {
				entry->wired_count--;
				entry->eflags &= ~MAP_ENTRY_USER_WIRED;

				vm_map_clear_recursive(map);
				vm_map_unlock(map);

				/*
				 * At this point, the map is unlocked, and
				 * entry might no longer be valid.  Use copy
				 * of entry start value obtained while entry
				 * was valid.
				 */
				(void) vm_map_user_pageable(map, start, estart,
				    TRUE);
				return rv;
			}
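			/*
			 * The wiring succeeded.  Clear the recursion setting
			 * from above and try to upgrade back to an exclusive
			 * lock; if the upgrade fails the lock was lost, so
			 * retake it and look the entry up again at estart,
			 * which may no longer be valid.
			 */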
			vm_map_clear_recursive(map);
			if (vm_map_lock_upgrade(map)) {
| 1511 vm_map_lock(map); 1512 if (vm_map_lookup_entry(map, estart, &entry) 1513 == FALSE) { 1514 vm_map_unlock(map); 1515 /* 1516 * vm_fault_user_wire succeded, thus 1517 * the area between start and eend 1518 * is wired and has to be unwired 1519 * here as part of the cleanup. 1520 */ 1521 (void) vm_map_user_pageable(map, 1522 start, 1523 eend, 1524 TRUE); 1525 return (KERN_INVALID_ADDRESS); 1526 } 1527 } 1528 vm_map_simplify_entry(map,entry); 1529 } 1530 } 1531 map->timestamp++; 1532 vm_map_unlock(map); 1533 return KERN_SUCCESS; 1534} 1535 1536/* 1537 * vm_map_pageable: 1538 * 1539 * Sets the pageability of the specified address 1540 * range in the target map. Regions specified 1541 * as not pageable require locked-down physical 1542 * memory and physical page maps. 1543 * 1544 * The map must not be locked, but a reference 1545 * must remain to the map throughout the call. 1546 */ 1547int 1548vm_map_pageable( 1549 vm_map_t map, 1550 vm_offset_t start, 1551 vm_offset_t end, 1552 boolean_t new_pageable) 1553{ 1554 vm_map_entry_t entry; 1555 vm_map_entry_t start_entry; 1556 vm_offset_t failed = 0; 1557 int rv; 1558 1559 GIANT_REQUIRED; 1560 1561 vm_map_lock(map); 1562 1563 VM_MAP_RANGE_CHECK(map, start, end); 1564 1565 /* 1566 * Only one pageability change may take place at one time, since 1567 * vm_fault assumes it will be called only once for each 1568 * wiring/unwiring. Therefore, we have to make sure we're actually 1569 * changing the pageability for the entire region. We do so before 1570 * making any changes. 1571 */ 1572 if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) { 1573 vm_map_unlock(map); 1574 return (KERN_INVALID_ADDRESS); 1575 } 1576 entry = start_entry; 1577 1578 /* 1579 * Actions are rather different for wiring and unwiring, so we have 1580 * two separate cases. 1581 */ 1582 if (new_pageable) { 1583 vm_map_clip_start(map, entry, start); 1584 1585 /* 1586 * Unwiring. First ensure that the range to be unwired is 1587 * really wired down and that there are no holes. 1588 */ 1589 while ((entry != &map->header) && (entry->start < end)) { 1590 if (entry->wired_count == 0 || 1591 (entry->end < end && 1592 (entry->next == &map->header || 1593 entry->next->start > entry->end))) { 1594 vm_map_unlock(map); 1595 return (KERN_INVALID_ARGUMENT); 1596 } 1597 entry = entry->next; 1598 } 1599 1600 /* 1601 * Now decrement the wiring count for each region. If a region 1602 * becomes completely unwired, unwire its physical pages and 1603 * mappings. 1604 */ 1605 entry = start_entry; 1606 while ((entry != &map->header) && (entry->start < end)) { 1607 vm_map_clip_end(map, entry, end); 1608 1609 entry->wired_count--; 1610 if (entry->wired_count == 0) 1611 vm_fault_unwire(map, entry->start, entry->end); 1612 1613 vm_map_simplify_entry(map, entry); 1614 1615 entry = entry->next; 1616 } 1617 } else { 1618 /* 1619 * Wiring. We must do this in two passes: 1620 * 1621 * 1. Holding the write lock, we create any shadow or zero-fill 1622 * objects that need to be created. Then we clip each map 1623 * entry to the region to be wired and increment its wiring 1624 * count. We create objects before clipping the map entries 1625 * to avoid object proliferation. 1626 * 1627 * 2. We downgrade to a read lock, and call vm_fault_wire to 1628 * fault in the pages for any newly wired area (wired_count is 1629 * 1). 
1630 * 1631 * Downgrading to a read lock for vm_fault_wire avoids a possible 1632 * deadlock with another process that may have faulted on one 1633 * of the pages to be wired (it would mark the page busy, 1634 * blocking us, then in turn block on the map lock that we 1635 * hold). Because of problems in the recursive lock package, 1636 * we cannot upgrade to a write lock in vm_map_lookup. Thus, 1637 * any actions that require the write lock must be done 1638 * beforehand. Because we keep the read lock on the map, the 1639 * copy-on-write status of the entries we modify here cannot 1640 * change. 1641 */ 1642 1643 /* 1644 * Pass 1. 1645 */ 1646 while ((entry != &map->header) && (entry->start < end)) { 1647 if (entry->wired_count == 0) { 1648 1649 /* 1650 * Perform actions of vm_map_lookup that need 1651 * the write lock on the map: create a shadow 1652 * object for a copy-on-write region, or an 1653 * object for a zero-fill region. 1654 * 1655 * We don't have to do this for entries that 1656 * point to sub maps, because we won't 1657 * hold the lock on the sub map. 1658 */ 1659 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 1660 int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY; 1661 if (copyflag && 1662 ((entry->protection & VM_PROT_WRITE) != 0)) { 1663 1664 vm_object_shadow(&entry->object.vm_object, 1665 &entry->offset, 1666 atop(entry->end - entry->start)); 1667 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 1668 } else if (entry->object.vm_object == NULL && 1669 !map->system_map) { 1670 entry->object.vm_object = 1671 vm_object_allocate(OBJT_DEFAULT, 1672 atop(entry->end - entry->start)); 1673 entry->offset = (vm_offset_t) 0; 1674 } 1675 } 1676 } 1677 vm_map_clip_start(map, entry, start); 1678 vm_map_clip_end(map, entry, end); 1679 entry->wired_count++; 1680 1681 /* 1682 * Check for holes 1683 */ 1684 if (entry->end < end && 1685 (entry->next == &map->header || 1686 entry->next->start > entry->end)) { 1687 /* 1688 * Found one. Object creation actions do not 1689 * need to be undone, but the wired counts 1690 * need to be restored. 1691 */ 1692 while (entry != &map->header && entry->end > start) { 1693 entry->wired_count--; 1694 entry = entry->prev; 1695 } 1696 vm_map_unlock(map); 1697 return (KERN_INVALID_ARGUMENT); 1698 } 1699 entry = entry->next; 1700 } 1701 1702 /* 1703 * Pass 2. 1704 */ 1705 1706 /* 1707 * HACK HACK HACK HACK 1708 * 1709 * If we are wiring in the kernel map or a submap of it, 1710 * unlock the map to avoid deadlocks. We trust that the 1711 * kernel is well-behaved, and therefore will not do 1712 * anything destructive to this region of the map while 1713 * we have it unlocked. We cannot trust user processes 1714 * to do the same. 1715 * 1716 * HACK HACK HACK HACK 1717 */ 1718 if (vm_map_pmap(map) == kernel_pmap) { 1719 vm_map_unlock(map); /* trust me ... */ 1720 } else { 1721 vm_map_lock_downgrade(map); 1722 } 1723 1724 rv = 0; 1725 entry = start_entry; 1726 while (entry != &map->header && entry->start < end) { 1727 /* 1728 * If vm_fault_wire fails for any page we need to undo 1729 * what has been done. We decrement the wiring count 1730 * for those pages which have not yet been wired (now) 1731 * and unwire those that have (later). 1732 * 1733 * XXX this violates the locking protocol on the map, 1734 * needs to be fixed. 
1735 */ 1736 if (rv) 1737 entry->wired_count--; 1738 else if (entry->wired_count == 1) { 1739 rv = vm_fault_wire(map, entry->start, entry->end); 1740 if (rv) { 1741 failed = entry->start; 1742 entry->wired_count--; 1743 } 1744 } 1745 entry = entry->next; 1746 } 1747
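		/*
		 * Relock and clean up.  If any vm_fault_wire() call failed,
		 * everything wired so far (from start up to `failed') is
		 * unwound below with a recursive vm_map_pageable() call.
		 */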
		if (vm_map_pmap(map) == kernel_pmap) {
			vm_map_lock(map);
		}
		if (rv) {
			vm_map_unlock(map);
			(void) vm_map_pageable(map, start, failed, TRUE);
			return (rv);
		}
		/*
		 * An exclusive lock on the map is needed in order to call
		 * vm_map_simplify_entry().  If the current lock on the map
		 * is only a shared lock, an upgrade is needed.
		 */
		if (vm_map_pmap(map) != kernel_pmap &&
		    vm_map_lock_upgrade(map)) {
1754 vm_map_lock(map); 1755 if (vm_map_lookup_entry(map, start, &start_entry) == 1756 FALSE) { 1757 vm_map_unlock(map); 1758 return KERN_SUCCESS; 1759 } 1760 } 1761 vm_map_simplify_entry(map, start_entry); 1762 } 1763 1764 vm_map_unlock(map); 1765 1766 return (KERN_SUCCESS); 1767} 1768 1769/* 1770 * vm_map_clean 1771 * 1772 * Push any dirty cached pages in the address range to their pager. 1773 * If syncio is TRUE, dirty pages are written synchronously. 1774 * If invalidate is TRUE, any cached pages are freed as well. 1775 * 1776 * Returns an error if any part of the specified range is not mapped. 1777 */ 1778int 1779vm_map_clean( 1780 vm_map_t map, 1781 vm_offset_t start, 1782 vm_offset_t end, 1783 boolean_t syncio, 1784 boolean_t invalidate) 1785{ 1786 vm_map_entry_t current; 1787 vm_map_entry_t entry; 1788 vm_size_t size; 1789 vm_object_t object; 1790 vm_ooffset_t offset; 1791 1792 GIANT_REQUIRED; 1793 1794 vm_map_lock_read(map); 1795 VM_MAP_RANGE_CHECK(map, start, end); 1796 if (!vm_map_lookup_entry(map, start, &entry)) { 1797 vm_map_unlock_read(map); 1798 return (KERN_INVALID_ADDRESS); 1799 } 1800 /* 1801 * Make a first pass to check for holes. 1802 */ 1803 for (current = entry; current->start < end; current = current->next) { 1804 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 1805 vm_map_unlock_read(map); 1806 return (KERN_INVALID_ARGUMENT); 1807 } 1808 if (end > current->end && 1809 (current->next == &map->header || 1810 current->end != current->next->start)) { 1811 vm_map_unlock_read(map); 1812 return (KERN_INVALID_ADDRESS); 1813 } 1814 } 1815 1816 if (invalidate) 1817 pmap_remove(vm_map_pmap(map), start, end); 1818 /* 1819 * Make a second pass, cleaning/uncaching pages from the indicated 1820 * objects as we go. 1821 */ 1822 for (current = entry; current->start < end; current = current->next) { 1823 offset = current->offset + (start - current->start); 1824 size = (end <= current->end ? end : current->end) - start; 1825 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 1826 vm_map_t smap; 1827 vm_map_entry_t tentry; 1828 vm_size_t tsize; 1829 1830 smap = current->object.sub_map; 1831 vm_map_lock_read(smap); 1832 (void) vm_map_lookup_entry(smap, offset, &tentry); 1833 tsize = tentry->end - offset; 1834 if (tsize < size) 1835 size = tsize; 1836 object = tentry->object.vm_object; 1837 offset = tentry->offset + (offset - tentry->start); 1838 vm_map_unlock_read(smap); 1839 } else { 1840 object = current->object.vm_object; 1841 } 1842 /* 1843 * Note that there is absolutely no sense in writing out 1844 * anonymous objects, so we track down the vnode object 1845 * to write out. 1846 * We invalidate (remove) all pages from the address space 1847 * anyway, for semantic correctness. 1848 * 1849 * note: certain anonymous maps, such as MAP_NOSYNC maps, 1850 * may start out with a NULL object. 1851 */ 1852 while (object && object->backing_object) { 1853 object = object->backing_object; 1854 offset += object->backing_object_offset; 1855 if (object->size < OFF_TO_IDX(offset + size)) 1856 size = IDX_TO_OFF(object->size) - offset; 1857 } 1858 if (object && (object->type == OBJT_VNODE) && 1859 (current->protection & VM_PROT_WRITE)) { 1860 /* 1861 * Flush pages if writing is allowed, invalidate them 1862 * if invalidation requested. Pages undergoing I/O 1863 * will be ignored by vm_object_page_remove(). 1864 * 1865 * We cannot lock the vnode and then wait for paging 1866 * to complete without deadlocking against vm_fault. 
1867 * Instead we simply call vm_object_page_remove() and 1868 * allow it to block internally on a page-by-page 1869 * basis when it encounters pages undergoing async 1870 * I/O. 1871 */ 1872 int flags; 1873 1874 vm_object_reference(object); 1875 vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curthread); 1876 flags = (syncio || invalidate) ? OBJPC_SYNC : 0; 1877 flags |= invalidate ? OBJPC_INVAL : 0; 1878 vm_object_page_clean(object, 1879 OFF_TO_IDX(offset), 1880 OFF_TO_IDX(offset + size + PAGE_MASK), 1881 flags); 1882 if (invalidate) { 1883 /*vm_object_pip_wait(object, "objmcl");*/ 1884 vm_object_page_remove(object, 1885 OFF_TO_IDX(offset), 1886 OFF_TO_IDX(offset + size + PAGE_MASK), 1887 FALSE); 1888 } 1889 VOP_UNLOCK(object->handle, 0, curthread); 1890 vm_object_deallocate(object); 1891 } 1892 start += size; 1893 } 1894 1895 vm_map_unlock_read(map); 1896 return (KERN_SUCCESS); 1897} 1898 1899/* 1900 * vm_map_entry_unwire: [ internal use only ] 1901 * 1902 * Make the region specified by this entry pageable. 1903 * 1904 * The map in question should be locked. 1905 * [This is the reason for this routine's existence.] 1906 */ 1907static void 1908vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry) 1909{ 1910 vm_fault_unwire(map, entry->start, entry->end); 1911 entry->wired_count = 0; 1912} 1913 1914/* 1915 * vm_map_entry_delete: [ internal use only ] 1916 * 1917 * Deallocate the given entry from the target map. 1918 */ 1919static void 1920vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry) 1921{ 1922 vm_map_entry_unlink(map, entry); 1923 map->size -= entry->end - entry->start; 1924 1925 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 1926 vm_object_deallocate(entry->object.vm_object); 1927 } 1928 1929 vm_map_entry_dispose(map, entry); 1930} 1931 1932/* 1933 * vm_map_delete: [ internal use only ] 1934 * 1935 * Deallocates the given address range from the target 1936 * map. 1937 */ 1938int 1939vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end) 1940{ 1941 vm_object_t object; 1942 vm_map_entry_t entry; 1943 vm_map_entry_t first_entry; 1944 1945 GIANT_REQUIRED; 1946 1947 /* 1948 * Find the start of the region, and clip it 1949 */ 1950 if (!vm_map_lookup_entry(map, start, &first_entry)) 1951 entry = first_entry->next; 1952 else { 1953 entry = first_entry; 1954 vm_map_clip_start(map, entry, start); 1955 /* 1956 * Fix the lookup hint now, rather than each time though the 1957 * loop. 1958 */ 1959 SAVE_HINT(map, entry->prev); 1960 } 1961 1962 /* 1963 * Save the free space hint 1964 */ 1965 if (entry == &map->header) { 1966 map->first_free = &map->header; 1967 } else if (map->first_free->start >= start) { 1968 map->first_free = entry->prev; 1969 } 1970 1971 /* 1972 * Step through all entries in this region 1973 */ 1974 while ((entry != &map->header) && (entry->start < end)) { 1975 vm_map_entry_t next; 1976 vm_offset_t s, e; 1977 vm_pindex_t offidxstart, offidxend, count; 1978 1979 vm_map_clip_end(map, entry, end); 1980 1981 s = entry->start; 1982 e = entry->end; 1983 next = entry->next; 1984 1985 offidxstart = OFF_TO_IDX(entry->offset); 1986 count = OFF_TO_IDX(e - s); 1987 object = entry->object.vm_object; 1988 1989 /* 1990 * Unwire before removing addresses from the pmap; otherwise, 1991 * unwiring will put the entries back in the pmap. 
1992 */ 1993 if (entry->wired_count != 0) { 1994 vm_map_entry_unwire(map, entry); 1995 } 1996 1997 offidxend = offidxstart + count; 1998 1999 if ((object == kernel_object) || (object == kmem_object)) { 2000 vm_object_page_remove(object, offidxstart, offidxend, FALSE); 2001 } else { 2002 pmap_remove(map->pmap, s, e); 2003 if (object != NULL && 2004 object->ref_count != 1 && 2005 (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING && 2006 (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) { 2007 vm_object_collapse(object); 2008 vm_object_page_remove(object, offidxstart, offidxend, FALSE); 2009 if (object->type == OBJT_SWAP) { 2010 swap_pager_freespace(object, offidxstart, count); 2011 } 2012 if (offidxend >= object->size && 2013 offidxstart < object->size) { 2014 object->size = offidxstart; 2015 } 2016 } 2017 } 2018 2019 /* 2020 * Delete the entry (which may delete the object) only after 2021 * removing all pmap entries pointing to its pages. 2022 * (Otherwise, its page frames may be reallocated, and any 2023 * modify bits will be set in the wrong object!) 2024 */ 2025 vm_map_entry_delete(map, entry); 2026 entry = next; 2027 } 2028 return (KERN_SUCCESS); 2029} 2030 2031/* 2032 * vm_map_remove: 2033 * 2034 * Remove the given address range from the target map. 2035 * This is the exported form of vm_map_delete. 2036 */ 2037int 2038vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end) 2039{ 2040 int result, s = 0; 2041 2042 GIANT_REQUIRED; 2043 2044 if (map == kmem_map) 2045 s = splvm(); 2046 2047 vm_map_lock(map); 2048 VM_MAP_RANGE_CHECK(map, start, end); 2049 result = vm_map_delete(map, start, end); 2050 vm_map_unlock(map); 2051 2052 if (map == kmem_map) 2053 splx(s); 2054 2055 return (result); 2056} 2057 2058/* 2059 * vm_map_check_protection: 2060 * 2061 * Assert that the target map allows the specified 2062 * privilege on the entire address region given. 2063 * The entire region must be allocated. 2064 */ 2065boolean_t 2066vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end, 2067 vm_prot_t protection) 2068{ 2069 vm_map_entry_t entry; 2070 vm_map_entry_t tmp_entry; 2071 2072 GIANT_REQUIRED; 2073 2074 vm_map_lock_read(map); 2075 if (!vm_map_lookup_entry(map, start, &tmp_entry)) { 2076 vm_map_unlock_read(map); 2077 return (FALSE); 2078 } 2079 entry = tmp_entry; 2080 2081 while (start < end) { 2082 if (entry == &map->header) { 2083 vm_map_unlock_read(map); 2084 return (FALSE); 2085 } 2086 /* 2087 * No holes allowed! 2088 */ 2089 if (start < entry->start) { 2090 vm_map_unlock_read(map); 2091 return (FALSE); 2092 } 2093 /* 2094 * Check protection associated with entry. 2095 */ 2096 if ((entry->protection & protection) != protection) { 2097 vm_map_unlock_read(map); 2098 return (FALSE); 2099 } 2100 /* go to next entry */ 2101 start = entry->end; 2102 entry = entry->next; 2103 } 2104 vm_map_unlock_read(map); 2105 return (TRUE); 2106} 2107 2108/* 2109 * Split the pages in a map entry into a new object. This affords 2110 * easier removal of unused pages, and keeps object inheritance from 2111 * being a negative impact on memory usage. 
2112 */ 2113static void 2114vm_map_split(vm_map_entry_t entry) 2115{ 2116 vm_page_t m; 2117 vm_object_t orig_object, new_object, source; 2118 vm_offset_t s, e; 2119 vm_pindex_t offidxstart, offidxend, idx; 2120 vm_size_t size; 2121 vm_ooffset_t offset; 2122 2123 GIANT_REQUIRED; 2124 2125 orig_object = entry->object.vm_object; 2126 if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP) 2127 return; 2128 if (orig_object->ref_count <= 1) 2129 return; 2130 2131 offset = entry->offset; 2132 s = entry->start; 2133 e = entry->end; 2134 2135 offidxstart = OFF_TO_IDX(offset); 2136 offidxend = offidxstart + OFF_TO_IDX(e - s); 2137 size = offidxend - offidxstart; 2138 2139 new_object = vm_pager_allocate(orig_object->type, 2140 NULL, IDX_TO_OFF(size), VM_PROT_ALL, 0LL); 2141 if (new_object == NULL) 2142 return; 2143 2144 source = orig_object->backing_object; 2145 if (source != NULL) { 2146 vm_object_reference(source); /* Referenced by new_object */ 2147 TAILQ_INSERT_TAIL(&source->shadow_head, 2148 new_object, shadow_list); 2149 vm_object_clear_flag(source, OBJ_ONEMAPPING); 2150 new_object->backing_object_offset = 2151 orig_object->backing_object_offset + IDX_TO_OFF(offidxstart); 2152 new_object->backing_object = source; 2153 source->shadow_count++; 2154 source->generation++; 2155 } 2156 2157 for (idx = 0; idx < size; idx++) { 2158 vm_page_t m; 2159 2160 retry: 2161 m = vm_page_lookup(orig_object, offidxstart + idx); 2162 if (m == NULL) 2163 continue; 2164 2165 /* 2166 * We must wait for pending I/O to complete before we can 2167 * rename the page. 2168 * 2169 * We do not have to VM_PROT_NONE the page as mappings should 2170 * not be changed by this operation. 2171 */ 2172 if (vm_page_sleep_busy(m, TRUE, "spltwt")) 2173 goto retry; 2174 2175 vm_page_busy(m); 2176 vm_page_rename(m, new_object, idx); 2177 /* page automatically made dirty by rename and cache handled */ 2178 vm_page_busy(m); 2179 } 2180 2181 if (orig_object->type == OBJT_SWAP) { 2182 vm_object_pip_add(orig_object, 1); 2183 /* 2184 * copy orig_object pages into new_object 2185 * and destroy unneeded pages in 2186 * shadow object. 2187 */ 2188 swap_pager_copy(orig_object, new_object, offidxstart, 0); 2189 vm_object_pip_wakeup(orig_object); 2190 } 2191 2192 for (idx = 0; idx < size; idx++) { 2193 m = vm_page_lookup(new_object, idx); 2194 if (m) { 2195 vm_page_wakeup(m); 2196 } 2197 } 2198 2199 entry->object.vm_object = new_object; 2200 entry->offset = 0LL; 2201 vm_object_deallocate(orig_object); 2202} 2203 2204/* 2205 * vm_map_copy_entry: 2206 * 2207 * Copies the contents of the source entry to the destination 2208 * entry. The entries *must* be aligned properly. 2209 */ 2210static void 2211vm_map_copy_entry( 2212 vm_map_t src_map, 2213 vm_map_t dst_map, 2214 vm_map_entry_t src_entry, 2215 vm_map_entry_t dst_entry) 2216{ 2217 vm_object_t src_object; 2218 2219 if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP) 2220 return; 2221 2222 if (src_entry->wired_count == 0) { 2223 2224 /* 2225 * If the source entry is marked needs_copy, it is already 2226 * write-protected. 2227 */ 2228 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) { 2229 pmap_protect(src_map->pmap, 2230 src_entry->start, 2231 src_entry->end, 2232 src_entry->protection & ~VM_PROT_WRITE); 2233 } 2234 2235 /* 2236 * Make a copy of the object. 
2237 */ 2238 if ((src_object = src_entry->object.vm_object) != NULL) { 2239 2240 if ((src_object->handle == NULL) && 2241 (src_object->type == OBJT_DEFAULT || 2242 src_object->type == OBJT_SWAP)) { 2243 vm_object_collapse(src_object); 2244 if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) { 2245 vm_map_split(src_entry); 2246 src_object = src_entry->object.vm_object; 2247 } 2248 } 2249 2250 vm_object_reference(src_object); 2251 vm_object_clear_flag(src_object, OBJ_ONEMAPPING); 2252 dst_entry->object.vm_object = src_object; 2253 src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY); 2254 dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY); 2255 dst_entry->offset = src_entry->offset; 2256 } else { 2257 dst_entry->object.vm_object = NULL; 2258 dst_entry->offset = 0; 2259 } 2260 2261 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start, 2262 dst_entry->end - dst_entry->start, src_entry->start); 2263 } else { 2264 /* 2265 * Of course, wired down pages can't be set copy-on-write. 2266 * Cause wired pages to be copied into the new map by 2267 * simulating faults (the new pages are pageable) 2268 */ 2269 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry); 2270 } 2271} 2272 2273/* 2274 * vmspace_fork: 2275 * Create a new process vmspace structure and vm_map 2276 * based on those of an existing process. The new map 2277 * is based on the old map, according to the inheritance 2278 * values on the regions in that map. 2279 * 2280 * The source map must not be locked. 2281 */ 2282struct vmspace * 2283vmspace_fork(struct vmspace *vm1) 2284{ 2285 struct vmspace *vm2; 2286 vm_map_t old_map = &vm1->vm_map; 2287 vm_map_t new_map; 2288 vm_map_entry_t old_entry; 2289 vm_map_entry_t new_entry; 2290 vm_object_t object; 2291 2292 GIANT_REQUIRED; 2293 2294 vm_map_lock(old_map); 2295 old_map->infork = 1; 2296 2297 vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset); 2298 bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy, 2299 (caddr_t) &vm1->vm_endcopy - (caddr_t) &vm1->vm_startcopy); 2300 new_map = &vm2->vm_map; /* XXX */ 2301 new_map->timestamp = 1; 2302 2303 old_entry = old_map->header.next; 2304 2305 while (old_entry != &old_map->header) { 2306 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) 2307 panic("vm_map_fork: encountered a submap"); 2308 2309 switch (old_entry->inheritance) { 2310 case VM_INHERIT_NONE: 2311 break; 2312 2313 case VM_INHERIT_SHARE: 2314 /* 2315 * Clone the entry, creating the shared object if necessary. 2316 */ 2317 object = old_entry->object.vm_object; 2318 if (object == NULL) { 2319 object = vm_object_allocate(OBJT_DEFAULT, 2320 atop(old_entry->end - old_entry->start)); 2321 old_entry->object.vm_object = object; 2322 old_entry->offset = (vm_offset_t) 0; 2323 } 2324 2325 /* 2326 * Add the reference before calling vm_object_shadow 2327 * to insure that a shadow object is created. 2328 */ 2329 vm_object_reference(object); 2330 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) { 2331 vm_object_shadow(&old_entry->object.vm_object, 2332 &old_entry->offset, 2333 atop(old_entry->end - old_entry->start)); 2334 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 2335 /* Transfer the second reference too. */ 2336 vm_object_reference( 2337 old_entry->object.vm_object); 2338 vm_object_deallocate(object); 2339 object = old_entry->object.vm_object; 2340 } 2341 vm_object_clear_flag(object, OBJ_ONEMAPPING); 2342 2343 /* 2344 * Clone the entry, referencing the shared object. 
2345 */ 2346 new_entry = vm_map_entry_create(new_map); 2347 *new_entry = *old_entry; 2348 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2349 new_entry->wired_count = 0; 2350 2351 /* 2352 * Insert the entry into the new map -- we know we're 2353 * inserting at the end of the new map. 2354 */ 2355 vm_map_entry_link(new_map, new_map->header.prev, 2356 new_entry); 2357 2358 /* 2359 * Update the physical map 2360 */ 2361 pmap_copy(new_map->pmap, old_map->pmap, 2362 new_entry->start, 2363 (old_entry->end - old_entry->start), 2364 old_entry->start); 2365 break; 2366 2367 case VM_INHERIT_COPY: 2368 /* 2369 * Clone the entry and link into the map. 2370 */ 2371 new_entry = vm_map_entry_create(new_map); 2372 *new_entry = *old_entry; 2373 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2374 new_entry->wired_count = 0; 2375 new_entry->object.vm_object = NULL; 2376 vm_map_entry_link(new_map, new_map->header.prev, 2377 new_entry); 2378 vm_map_copy_entry(old_map, new_map, old_entry, 2379 new_entry); 2380 break; 2381 } 2382 old_entry = old_entry->next; 2383 } 2384 2385 new_map->size = old_map->size; 2386 old_map->infork = 0; 2387 vm_map_unlock(old_map); 2388 2389 return (vm2); 2390} 2391 2392int 2393vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 2394 vm_prot_t prot, vm_prot_t max, int cow) 2395{ 2396 vm_map_entry_t prev_entry; 2397 vm_map_entry_t new_stack_entry; 2398 vm_size_t init_ssize; 2399 int rv; 2400 2401 GIANT_REQUIRED; 2402 2403 if (VM_MIN_ADDRESS > 0 && addrbos < VM_MIN_ADDRESS) 2404 return (KERN_NO_SPACE); 2405 2406 if (max_ssize < sgrowsiz) 2407 init_ssize = max_ssize; 2408 else 2409 init_ssize = sgrowsiz; 2410 2411 vm_map_lock(map); 2412 2413 /* If addr is already mapped, no go */ 2414 if (vm_map_lookup_entry(map, addrbos, &prev_entry)) { 2415 vm_map_unlock(map); 2416 return (KERN_NO_SPACE); 2417 } 2418 2419 /* If we can't accomodate max_ssize in the current mapping, 2420 * no go. However, we need to be aware that subsequent user 2421 * mappings might map into the space we have reserved for 2422 * stack, and currently this space is not protected. 2423 * 2424 * Hopefully we will at least detect this condition 2425 * when we try to grow the stack. 2426 */ 2427 if ((prev_entry->next != &map->header) && 2428 (prev_entry->next->start < addrbos + max_ssize)) { 2429 vm_map_unlock(map); 2430 return (KERN_NO_SPACE); 2431 } 2432 2433 /* We initially map a stack of only init_ssize. We will 2434 * grow as needed later. Since this is to be a grow 2435 * down stack, we map at the top of the range. 2436 * 2437 * Note: we would normally expect prot and max to be 2438 * VM_PROT_ALL, and cow to be 0. Possibly we should 2439 * eliminate these as input parameters, and just 2440 * pass these values here in the insert call. 2441 */ 2442 rv = vm_map_insert(map, NULL, 0, addrbos + max_ssize - init_ssize, 2443 addrbos + max_ssize, prot, max, cow); 2444 2445 /* Now set the avail_ssize amount */ 2446 if (rv == KERN_SUCCESS){ 2447 if (prev_entry != &map->header) 2448 vm_map_clip_end(map, prev_entry, addrbos + max_ssize - init_ssize); 2449 new_stack_entry = prev_entry->next; 2450 if (new_stack_entry->end != addrbos + max_ssize || 2451 new_stack_entry->start != addrbos + max_ssize - init_ssize) 2452 panic ("Bad entry start/end for new stack entry"); 2453 else 2454 new_stack_entry->avail_ssize = max_ssize - init_ssize; 2455 } 2456 2457 vm_map_unlock(map); 2458 return (rv); 2459} 2460 2461/* Attempts to grow a vm stack entry. 
Returns KERN_SUCCESS if the 2462 * desired address is already mapped, or if we successfully grow 2463 * the stack. Also returns KERN_SUCCESS if addr is outside the 2464 * stack range (this is strange, but preserves compatibility with 2465 * the grow function in vm_machdep.c). 2466 */ 2467int 2468vm_map_growstack (struct proc *p, vm_offset_t addr) 2469{ 2470 vm_map_entry_t prev_entry; 2471 vm_map_entry_t stack_entry; 2472 vm_map_entry_t new_stack_entry; 2473 struct vmspace *vm = p->p_vmspace; 2474 vm_map_t map = &vm->vm_map; 2475 vm_offset_t end; 2476 int grow_amount; 2477 int rv; 2478 int is_procstack; 2479 2480 GIANT_REQUIRED; 2481 2482Retry: 2483 vm_map_lock_read(map); 2484 2485 /* If addr is already in the entry range, no need to grow.*/ 2486 if (vm_map_lookup_entry(map, addr, &prev_entry)) { 2487 vm_map_unlock_read(map); 2488 return (KERN_SUCCESS); 2489 } 2490 2491 if ((stack_entry = prev_entry->next) == &map->header) { 2492 vm_map_unlock_read(map); 2493 return (KERN_SUCCESS); 2494 } 2495 if (prev_entry == &map->header) 2496 end = stack_entry->start - stack_entry->avail_ssize; 2497 else 2498 end = prev_entry->end; 2499 2500 /* This next test mimics the old grow function in vm_machdep.c. 2501 * It really doesn't quite make sense, but we do it anyway 2502 * for compatibility. 2503 * 2504 * If not growable stack, return success. This signals the 2505 * caller to proceed as he would normally with normal vm. 2506 */ 2507 if (stack_entry->avail_ssize < 1 || 2508 addr >= stack_entry->start || 2509 addr < stack_entry->start - stack_entry->avail_ssize) { 2510 vm_map_unlock_read(map); 2511 return (KERN_SUCCESS); 2512 } 2513 2514 /* Find the minimum grow amount */ 2515 grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE); 2516 if (grow_amount > stack_entry->avail_ssize) { 2517 vm_map_unlock_read(map); 2518 return (KERN_NO_SPACE); 2519 } 2520 2521 /* If there is no longer enough space between the entries 2522 * nogo, and adjust the available space. Note: this 2523 * should only happen if the user has mapped into the 2524 * stack area after the stack was created, and is 2525 * probably an error. 2526 * 2527 * This also effectively destroys any guard page the user 2528 * might have intended by limiting the stack size. 2529 */ 2530 if (grow_amount > stack_entry->start - end) {
2531 if (vm_map_lock_upgrade(map)) { 2532 vm_map_unlock_read(map);
| 2540 if (vm_map_lock_upgrade(map))
2533 goto Retry;
| 2541 goto Retry;
2534 }
2535 2536 stack_entry->avail_ssize = stack_entry->start - end; 2537 2538 vm_map_unlock(map); 2539 return (KERN_NO_SPACE); 2540 } 2541 2542 is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr; 2543 2544 /* If this is the main process stack, see if we're over the 2545 * stack limit. 2546 */ 2547 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > 2548 p->p_rlimit[RLIMIT_STACK].rlim_cur)) { 2549 vm_map_unlock_read(map); 2550 return (KERN_NO_SPACE); 2551 } 2552 2553 /* Round up the grow amount modulo SGROWSIZ */ 2554 grow_amount = roundup (grow_amount, sgrowsiz); 2555 if (grow_amount > stack_entry->avail_ssize) { 2556 grow_amount = stack_entry->avail_ssize; 2557 } 2558 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > 2559 p->p_rlimit[RLIMIT_STACK].rlim_cur)) { 2560 grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur - 2561 ctob(vm->vm_ssize); 2562 } 2563
2564 if (vm_map_lock_upgrade(map)) { 2565 vm_map_unlock_read(map);
| 2571 if (vm_map_lock_upgrade(map))
2566 goto Retry;
| 2572 goto Retry;
2567 }
2568 2569 /* Get the preliminary new entry start value */ 2570 addr = stack_entry->start - grow_amount; 2571 2572 /* If this puts us into the previous entry, cut back our growth 2573 * to the available space. Also, see the note above. 2574 */ 2575 if (addr < end) { 2576 stack_entry->avail_ssize = stack_entry->start - end; 2577 addr = end; 2578 } 2579 2580 rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start, 2581 VM_PROT_ALL, 2582 VM_PROT_ALL, 2583 0); 2584 2585 /* Adjust the available stack space by the amount we grew. */ 2586 if (rv == KERN_SUCCESS) { 2587 if (prev_entry != &map->header) 2588 vm_map_clip_end(map, prev_entry, addr); 2589 new_stack_entry = prev_entry->next; 2590 if (new_stack_entry->end != stack_entry->start || 2591 new_stack_entry->start != addr) 2592 panic ("Bad stack grow start/end in new stack entry"); 2593 else { 2594 new_stack_entry->avail_ssize = stack_entry->avail_ssize - 2595 (new_stack_entry->end - 2596 new_stack_entry->start); 2597 if (is_procstack) 2598 vm->vm_ssize += btoc(new_stack_entry->end - 2599 new_stack_entry->start); 2600 } 2601 } 2602 2603 vm_map_unlock(map); 2604 return (rv); 2605} 2606 2607/* 2608 * Unshare the specified VM space for exec. If other processes are 2609 * mapped to it, then create a new one. The new vmspace is null. 2610 */ 2611void 2612vmspace_exec(struct proc *p) 2613{ 2614 struct vmspace *oldvmspace = p->p_vmspace; 2615 struct vmspace *newvmspace; 2616 vm_map_t map = &p->p_vmspace->vm_map; 2617 2618 GIANT_REQUIRED; 2619 newvmspace = vmspace_alloc(map->min_offset, map->max_offset); 2620 bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy, 2621 (caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy); 2622 /* 2623 * This code is written like this for prototype purposes. The 2624 * goal is to avoid running down the vmspace here, but let the 2625 * other process's that are still using the vmspace to finally 2626 * run it down. Even though there is little or no chance of blocking 2627 * here, it is a good idea to keep this form for future mods. 2628 */ 2629 p->p_vmspace = newvmspace; 2630 pmap_pinit2(vmspace_pmap(newvmspace)); 2631 vmspace_free(oldvmspace); 2632 if (p == curthread->td_proc) /* XXXKSE ? */ 2633 pmap_activate(curthread); 2634} 2635 2636/* 2637 * Unshare the specified VM space for forcing COW. This 2638 * is called by rfork, for the (RFMEM|RFPROC) == 0 case. 2639 */ 2640void 2641vmspace_unshare(struct proc *p) 2642{ 2643 struct vmspace *oldvmspace = p->p_vmspace; 2644 struct vmspace *newvmspace; 2645 2646 GIANT_REQUIRED; 2647 if (oldvmspace->vm_refcnt == 1) 2648 return; 2649 newvmspace = vmspace_fork(oldvmspace); 2650 p->p_vmspace = newvmspace; 2651 pmap_pinit2(vmspace_pmap(newvmspace)); 2652 vmspace_free(oldvmspace); 2653 if (p == curthread->td_proc) /* XXXKSE ? */ 2654 pmap_activate(curthread); 2655} 2656 2657/* 2658 * vm_map_lookup: 2659 * 2660 * Finds the VM object, offset, and 2661 * protection for a given virtual address in the 2662 * specified map, assuming a page fault of the 2663 * type specified. 2664 * 2665 * Leaves the map in question locked for read; return 2666 * values are guaranteed until a vm_map_lookup_done 2667 * call is performed. Note that the map argument 2668 * is in/out; the returned map must be used in 2669 * the call to vm_map_lookup_done. 2670 * 2671 * A handle (out_entry) is returned for use in 2672 * vm_map_lookup_done, to make that fast. 
2673 * 2674 * If a lookup is requested with "write protection" 2675 * specified, the map may be changed to perform virtual 2676 * copying operations, although the data referenced will 2677 * remain the same. 2678 */ 2679int 2680vm_map_lookup(vm_map_t *var_map, /* IN/OUT */ 2681 vm_offset_t vaddr, 2682 vm_prot_t fault_typea, 2683 vm_map_entry_t *out_entry, /* OUT */ 2684 vm_object_t *object, /* OUT */ 2685 vm_pindex_t *pindex, /* OUT */ 2686 vm_prot_t *out_prot, /* OUT */ 2687 boolean_t *wired) /* OUT */ 2688{ 2689 vm_map_entry_t entry; 2690 vm_map_t map = *var_map; 2691 vm_prot_t prot; 2692 vm_prot_t fault_type = fault_typea; 2693 2694 GIANT_REQUIRED; 2695RetryLookup:; 2696 /* 2697 * Lookup the faulting address. 2698 */ 2699 2700 vm_map_lock_read(map); 2701#define RETURN(why) \ 2702 { \ 2703 vm_map_unlock_read(map); \ 2704 return (why); \ 2705 } 2706 2707 /* 2708 * If the map has an interesting hint, try it before calling full 2709 * blown lookup routine. 2710 */ 2711 entry = map->hint; 2712 *out_entry = entry; 2713 if ((entry == &map->header) || 2714 (vaddr < entry->start) || (vaddr >= entry->end)) { 2715 vm_map_entry_t tmp_entry; 2716 2717 /* 2718 * Entry was either not a valid hint, or the vaddr was not 2719 * contained in the entry, so do a full lookup. 2720 */ 2721 if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) 2722 RETURN(KERN_INVALID_ADDRESS); 2723 2724 entry = tmp_entry; 2725 *out_entry = entry; 2726 } 2727 2728 /* 2729 * Handle submaps. 2730 */ 2731 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 2732 vm_map_t old_map = map; 2733 2734 *var_map = map = entry->object.sub_map; 2735 vm_map_unlock_read(old_map); 2736 goto RetryLookup; 2737 } 2738 2739 /* 2740 * Check whether this task is allowed to have this page. 2741 * Note the special case for MAP_ENTRY_COW 2742 * pages with an override. This is to implement a forced 2743 * COW for debuggers. 2744 */ 2745 if (fault_type & VM_PROT_OVERRIDE_WRITE) 2746 prot = entry->max_protection; 2747 else 2748 prot = entry->protection; 2749 fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); 2750 if ((fault_type & prot) != fault_type) { 2751 RETURN(KERN_PROTECTION_FAILURE); 2752 } 2753 if ((entry->eflags & MAP_ENTRY_USER_WIRED) && 2754 (entry->eflags & MAP_ENTRY_COW) && 2755 (fault_type & VM_PROT_WRITE) && 2756 (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) { 2757 RETURN(KERN_PROTECTION_FAILURE); 2758 } 2759 2760 /* 2761 * If this page is not pageable, we have to get it for all possible 2762 * accesses. 2763 */ 2764 *wired = (entry->wired_count != 0); 2765 if (*wired) 2766 prot = fault_type = entry->protection; 2767 2768 /* 2769 * If the entry was copy-on-write, we either ... 2770 */ 2771 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 2772 /* 2773 * If we want to write the page, we may as well handle that 2774 * now since we've got the map locked. 2775 * 2776 * If we don't need to write the page, we just demote the 2777 * permissions allowed. 2778 */ 2779 if (fault_type & VM_PROT_WRITE) { 2780 /* 2781 * Make a new object, and place it in the object 2782 * chain. Note that no new references have appeared 2783 * -- one just moved from the map to the new 2784 * object. 2785 */
2786 if (vm_map_lock_upgrade(map)) { 2787 vm_map_unlock_read(map);
| 2791 if (vm_map_lock_upgrade(map))
2788 goto RetryLookup;
| 2792 goto RetryLookup;
2789 }
2790 vm_object_shadow( 2791 &entry->object.vm_object, 2792 &entry->offset, 2793 atop(entry->end - entry->start)); 2794 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 2795 vm_map_lock_downgrade(map); 2796 } else { 2797 /* 2798 * We're attempting to read a copy-on-write page -- 2799 * don't allow writes. 2800 */ 2801 prot &= ~VM_PROT_WRITE; 2802 } 2803 } 2804 2805 /* 2806 * Create an object if necessary. 2807 */ 2808 if (entry->object.vm_object == NULL && 2809 !map->system_map) {
2810 if (vm_map_lock_upgrade(map)) { 2811 vm_map_unlock_read(map);
| 2813 if (vm_map_lock_upgrade(map))
2812 goto RetryLookup;
| 2814 goto RetryLookup;
2813 }
2814 entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT, 2815 atop(entry->end - entry->start)); 2816 entry->offset = 0; 2817 vm_map_lock_downgrade(map); 2818 } 2819 2820 /* 2821 * Return the object/offset from this entry. If the entry was 2822 * copy-on-write or empty, it has been fixed up. 2823 */ 2824 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); 2825 *object = entry->object.vm_object; 2826 2827 /* 2828 * Return whether this is the only map sharing this data. 2829 */ 2830 *out_prot = prot; 2831 return (KERN_SUCCESS); 2832 2833#undef RETURN 2834} 2835 2836/* 2837 * vm_map_lookup_done: 2838 * 2839 * Releases locks acquired by a vm_map_lookup 2840 * (according to the handle returned by that lookup). 2841 */ 2842void 2843vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry) 2844{ 2845 /* 2846 * Unlock the main-level map 2847 */ 2848 GIANT_REQUIRED; 2849 vm_map_unlock_read(map); 2850} 2851 2852/* 2853 * Implement uiomove with VM operations. This handles (and collateral changes) 2854 * support every combination of source object modification, and COW type 2855 * operations. 2856 */ 2857int 2858vm_uiomove( 2859 vm_map_t mapa, 2860 vm_object_t srcobject, 2861 off_t cp, 2862 int cnta, 2863 vm_offset_t uaddra, 2864 int *npages) 2865{ 2866 vm_map_t map; 2867 vm_object_t first_object, oldobject, object; 2868 vm_map_entry_t entry; 2869 vm_prot_t prot; 2870 boolean_t wired; 2871 int tcnt, rv; 2872 vm_offset_t uaddr, start, end, tend; 2873 vm_pindex_t first_pindex, osize, oindex; 2874 off_t ooffset; 2875 int cnt; 2876 2877 GIANT_REQUIRED; 2878 2879 if (npages) 2880 *npages = 0; 2881 2882 cnt = cnta; 2883 uaddr = uaddra; 2884 2885 while (cnt > 0) { 2886 map = mapa; 2887 2888 if ((vm_map_lookup(&map, uaddr, 2889 VM_PROT_READ, &entry, &first_object, 2890 &first_pindex, &prot, &wired)) != KERN_SUCCESS) { 2891 return EFAULT; 2892 } 2893 2894 vm_map_clip_start(map, entry, uaddr); 2895 2896 tcnt = cnt; 2897 tend = uaddr + tcnt; 2898 if (tend > entry->end) { 2899 tcnt = entry->end - uaddr; 2900 tend = entry->end; 2901 } 2902 2903 vm_map_clip_end(map, entry, tend); 2904 2905 start = entry->start; 2906 end = entry->end; 2907 2908 osize = atop(tcnt); 2909 2910 oindex = OFF_TO_IDX(cp); 2911 if (npages) { 2912 vm_pindex_t idx; 2913 for (idx = 0; idx < osize; idx++) { 2914 vm_page_t m; 2915 if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) { 2916 vm_map_lookup_done(map, entry); 2917 return 0; 2918 } 2919 /* 2920 * disallow busy or invalid pages, but allow 2921 * m->busy pages if they are entirely valid. 2922 */ 2923 if ((m->flags & PG_BUSY) || 2924 ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) { 2925 vm_map_lookup_done(map, entry); 2926 return 0; 2927 } 2928 } 2929 } 2930 2931/* 2932 * If we are changing an existing map entry, just redirect 2933 * the object, and change mappings. 
2934 */ 2935 if ((first_object->type == OBJT_VNODE) && 2936 ((oldobject = entry->object.vm_object) == first_object)) { 2937 2938 if ((entry->offset != cp) || (oldobject != srcobject)) { 2939 /* 2940 * Remove old window into the file 2941 */ 2942 pmap_remove (map->pmap, uaddr, tend); 2943 2944 /* 2945 * Force copy on write for mmaped regions 2946 */ 2947 vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize); 2948 2949 /* 2950 * Point the object appropriately 2951 */ 2952 if (oldobject != srcobject) { 2953 2954 /* 2955 * Set the object optimization hint flag 2956 */ 2957 vm_object_set_flag(srcobject, OBJ_OPT); 2958 vm_object_reference(srcobject); 2959 entry->object.vm_object = srcobject; 2960 2961 if (oldobject) { 2962 vm_object_deallocate(oldobject); 2963 } 2964 } 2965 2966 entry->offset = cp; 2967 map->timestamp++; 2968 } else { 2969 pmap_remove (map->pmap, uaddr, tend); 2970 } 2971 2972 } else if ((first_object->ref_count == 1) && 2973 (first_object->size == osize) && 2974 ((first_object->type == OBJT_DEFAULT) || 2975 (first_object->type == OBJT_SWAP)) ) { 2976 2977 oldobject = first_object->backing_object; 2978 2979 if ((first_object->backing_object_offset != cp) || 2980 (oldobject != srcobject)) { 2981 /* 2982 * Remove old window into the file 2983 */ 2984 pmap_remove (map->pmap, uaddr, tend); 2985 2986 /* 2987 * Remove unneeded old pages 2988 */ 2989 vm_object_page_remove(first_object, 0, 0, 0); 2990 2991 /* 2992 * Invalidate swap space 2993 */ 2994 if (first_object->type == OBJT_SWAP) { 2995 swap_pager_freespace(first_object, 2996 0, 2997 first_object->size); 2998 } 2999 3000 /* 3001 * Force copy on write for mmaped regions 3002 */ 3003 vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize); 3004 3005 /* 3006 * Point the object appropriately 3007 */ 3008 if (oldobject != srcobject) { 3009 /* 3010 * Set the object optimization hint flag 3011 */ 3012 vm_object_set_flag(srcobject, OBJ_OPT); 3013 vm_object_reference(srcobject); 3014 3015 if (oldobject) { 3016 TAILQ_REMOVE(&oldobject->shadow_head, 3017 first_object, shadow_list); 3018 oldobject->shadow_count--; 3019 /* XXX bump generation? */ 3020 vm_object_deallocate(oldobject); 3021 } 3022 3023 TAILQ_INSERT_TAIL(&srcobject->shadow_head, 3024 first_object, shadow_list); 3025 srcobject->shadow_count++; 3026 /* XXX bump generation? */ 3027 3028 first_object->backing_object = srcobject; 3029 } 3030 first_object->backing_object_offset = cp; 3031 map->timestamp++; 3032 } else { 3033 pmap_remove (map->pmap, uaddr, tend); 3034 } 3035/* 3036 * Otherwise, we have to do a logical mmap. 3037 */ 3038 } else { 3039 3040 vm_object_set_flag(srcobject, OBJ_OPT); 3041 vm_object_reference(srcobject); 3042 3043 pmap_remove (map->pmap, uaddr, tend); 3044 3045 vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
3046 if (vm_map_lock_upgrade(map)) { 3047 vm_map_unlock_read(map); 3048 vm_map_lock(map); 3049 }
| 3047 vm_map_lock_upgrade(map);
3050 3051 if (entry == &map->header) { 3052 map->first_free = &map->header; 3053 } else if (map->first_free->start >= start) { 3054 map->first_free = entry->prev; 3055 } 3056 3057 SAVE_HINT(map, entry->prev); 3058 vm_map_entry_delete(map, entry); 3059 3060 object = srcobject; 3061 ooffset = cp; 3062 3063 rv = vm_map_insert(map, object, ooffset, start, tend, 3064 VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE); 3065 3066 if (rv != KERN_SUCCESS) 3067 panic("vm_uiomove: could not insert new entry: %d", rv); 3068 } 3069 3070/* 3071 * Map the window directly, if it is already in memory 3072 */ 3073 pmap_object_init_pt(map->pmap, uaddr, 3074 srcobject, oindex, tcnt, 0); 3075 3076 map->timestamp++; 3077 vm_map_unlock(map); 3078 3079 cnt -= tcnt; 3080 uaddr += tcnt; 3081 cp += tcnt; 3082 if (npages) 3083 *npages += osize; 3084 } 3085 return 0; 3086} 3087 3088/* 3089 * Performs the copy_on_write operations necessary to allow the virtual copies 3090 * into user space to work. This has to be called for write(2) system calls 3091 * from other processes, file unlinking, and file size shrinkage. 3092 */ 3093void 3094vm_freeze_copyopts(vm_object_t object, vm_pindex_t froma, vm_pindex_t toa) 3095{ 3096 int rv; 3097 vm_object_t robject; 3098 vm_pindex_t idx; 3099 3100 GIANT_REQUIRED; 3101 if ((object == NULL) || 3102 ((object->flags & OBJ_OPT) == 0)) 3103 return; 3104 3105 if (object->shadow_count > object->ref_count) 3106 panic("vm_freeze_copyopts: sc > rc"); 3107 3108 while ((robject = TAILQ_FIRST(&object->shadow_head)) != NULL) { 3109 vm_pindex_t bo_pindex; 3110 vm_page_t m_in, m_out; 3111 3112 bo_pindex = OFF_TO_IDX(robject->backing_object_offset); 3113 3114 vm_object_reference(robject); 3115 3116 vm_object_pip_wait(robject, "objfrz"); 3117 3118 if (robject->ref_count == 1) { 3119 vm_object_deallocate(robject); 3120 continue; 3121 } 3122 3123 vm_object_pip_add(robject, 1); 3124 3125 for (idx = 0; idx < robject->size; idx++) { 3126 3127 m_out = vm_page_grab(robject, idx, 3128 VM_ALLOC_NORMAL | VM_ALLOC_RETRY); 3129 3130 if (m_out->valid == 0) { 3131 m_in = vm_page_grab(object, bo_pindex + idx, 3132 VM_ALLOC_NORMAL | VM_ALLOC_RETRY); 3133 if (m_in->valid == 0) { 3134 rv = vm_pager_get_pages(object, &m_in, 1, 0); 3135 if (rv != VM_PAGER_OK) { 3136 printf("vm_freeze_copyopts: cannot read page from file: %lx\n", (long)m_in->pindex); 3137 continue; 3138 } 3139 vm_page_deactivate(m_in); 3140 } 3141 3142 vm_page_protect(m_in, VM_PROT_NONE); 3143 pmap_copy_page(VM_PAGE_TO_PHYS(m_in), VM_PAGE_TO_PHYS(m_out)); 3144 m_out->valid = m_in->valid; 3145 vm_page_dirty(m_out); 3146 vm_page_activate(m_out); 3147 vm_page_wakeup(m_in); 3148 } 3149 vm_page_wakeup(m_out); 3150 } 3151 3152 object->shadow_count--; 3153 object->ref_count--; 3154 TAILQ_REMOVE(&object->shadow_head, robject, shadow_list); 3155 robject->backing_object = NULL; 3156 robject->backing_object_offset = 0; 3157 3158 vm_object_pip_wakeup(robject); 3159 vm_object_deallocate(robject); 3160 } 3161 3162 vm_object_clear_flag(object, OBJ_OPT); 3163} 3164 3165#include "opt_ddb.h" 3166#ifdef DDB 3167#include <sys/kernel.h> 3168 3169#include <ddb/ddb.h> 3170 3171/* 3172 * vm_map_print: [ debug ] 3173 */ 3174DB_SHOW_COMMAND(map, vm_map_print) 3175{ 3176 static int nlines; 3177 /* XXX convert args. 
*/ 3178 vm_map_t map = (vm_map_t)addr; 3179 boolean_t full = have_addr; 3180 3181 vm_map_entry_t entry; 3182 3183 db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n", 3184 (void *)map, 3185 (void *)map->pmap, map->nentries, map->timestamp); 3186 nlines++; 3187 3188 if (!full && db_indent) 3189 return; 3190 3191 db_indent += 2; 3192 for (entry = map->header.next; entry != &map->header; 3193 entry = entry->next) { 3194 db_iprintf("map entry %p: start=%p, end=%p\n", 3195 (void *)entry, (void *)entry->start, (void *)entry->end); 3196 nlines++; 3197 { 3198 static char *inheritance_name[4] = 3199 {"share", "copy", "none", "donate_copy"}; 3200 3201 db_iprintf(" prot=%x/%x/%s", 3202 entry->protection, 3203 entry->max_protection, 3204 inheritance_name[(int)(unsigned char)entry->inheritance]); 3205 if (entry->wired_count != 0) 3206 db_printf(", wired"); 3207 } 3208 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 3209 /* XXX no %qd in kernel. Truncate entry->offset. */ 3210 db_printf(", share=%p, offset=0x%lx\n", 3211 (void *)entry->object.sub_map, 3212 (long)entry->offset); 3213 nlines++; 3214 if ((entry->prev == &map->header) || 3215 (entry->prev->object.sub_map != 3216 entry->object.sub_map)) { 3217 db_indent += 2; 3218 vm_map_print((db_expr_t)(intptr_t) 3219 entry->object.sub_map, 3220 full, 0, (char *)0); 3221 db_indent -= 2; 3222 } 3223 } else { 3224 /* XXX no %qd in kernel. Truncate entry->offset. */ 3225 db_printf(", object=%p, offset=0x%lx", 3226 (void *)entry->object.vm_object, 3227 (long)entry->offset); 3228 if (entry->eflags & MAP_ENTRY_COW) 3229 db_printf(", copy (%s)", 3230 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done"); 3231 db_printf("\n"); 3232 nlines++; 3233 3234 if ((entry->prev == &map->header) || 3235 (entry->prev->object.vm_object != 3236 entry->object.vm_object)) { 3237 db_indent += 2; 3238 vm_object_print((db_expr_t)(intptr_t) 3239 entry->object.vm_object, 3240 full, 0, (char *)0); 3241 nlines += 4; 3242 db_indent -= 2; 3243 } 3244 } 3245 } 3246 db_indent -= 2; 3247 if (db_indent == 0) 3248 nlines = 0; 3249} 3250 3251 3252DB_SHOW_COMMAND(procvm, procvm) 3253{ 3254 struct proc *p; 3255 3256 if (have_addr) { 3257 p = (struct proc *) addr; 3258 } else { 3259 p = curproc; 3260 } 3261 3262 db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n", 3263 (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map, 3264 (void *)vmspace_pmap(p->p_vmspace)); 3265 3266 vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL); 3267} 3268 3269#endif /* DDB */