--- vm_map.c (92466)
+++ vm_map.c (92588)
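This is the vm_map.c diff from r92466 (alc, 2002-03-17) to r92588 (green, 2002-03-18). r92588 reverts the map lock from sx(9) back to lockmgr(9): the sx-era wrappers that carried file/line witness arguments (_vm_map_lock() and friends) become plain lockmgr wrappers again, vm_map_try_lock() is deleted, and the lockmgr-only recursion helpers vm_map_set_recursive() and vm_map_clear_recursive() are reinstated for vm_map_user_pageable().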
 /*
  * Copyright (c) 1991, 1993
  *	The Regents of the University of California. All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * The Mach Operating System project at Carnegie-Mellon University.
  *
  * Redistribution and use in source and binary forms, with or without

--- 47 unchanged lines hidden ---

  * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
  * School of Computer Science
  * Carnegie Mellon University
  * Pittsburgh PA 15213-3890
  *
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $FreeBSD: head/sys/vm/vm_map.c 92466 2002-03-17 03:19:31Z alc $
+ * $FreeBSD: head/sys/vm/vm_map.c 92588 2002-03-18 15:08:09Z green $
  */

 /*
  * Virtual memory mapping module.
  */

 #include <sys/param.h>
 #include <sys/systm.h>

--- 199 unchanged lines hidden ---

 void
 vm_map_entry_set_behavior(struct vm_map_entry *entry, u_char behavior)
 {
 	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
 	    (behavior & MAP_ENTRY_BEHAV_MASK);
 }

 void
-_vm_map_lock(vm_map_t map, const char *file, int line)
+vm_map_lock(vm_map_t map)
 {
 	vm_map_printf("locking map LK_EXCLUSIVE: %p\n", map);
-	_sx_xlock(&map->lock, file, line);
+	if (lockmgr(&map->lock, LK_EXCLUSIVE, NULL, curthread) != 0)
+		panic("vm_map_lock: failed to get lock");
 	map->timestamp++;
 }

-int
-_vm_map_try_lock(vm_map_t map, const char *file, int line)
-{
-	vm_map_printf("trying to lock map LK_EXCLUSIVE: %p\n", map);
-	if (_sx_try_xlock(&map->lock, file, line)) {
-		map->timestamp++;
-		return (0);
-	}
-	return (EWOULDBLOCK);
-}
-
 void
-_vm_map_unlock(vm_map_t map, const char *file, int line)
+vm_map_unlock(vm_map_t map)
 {
 	vm_map_printf("locking map LK_RELEASE: %p\n", map);
-	_sx_xunlock(&map->lock, file, line);
+	lockmgr(&(map)->lock, LK_RELEASE, NULL, curthread);
 }

 void
-_vm_map_lock_read(vm_map_t map, const char *file, int line)
+vm_map_lock_read(vm_map_t map)
 {
 	vm_map_printf("locking map LK_SHARED: %p\n", map);
-	_sx_slock(&map->lock, file, line);
+	lockmgr(&(map)->lock, LK_SHARED, NULL, curthread);
 }

 void
-_vm_map_unlock_read(vm_map_t map, const char *file, int line)
+vm_map_unlock_read(vm_map_t map)
 {
 	vm_map_printf("locking map LK_RELEASE: %p\n", map);
-	_sx_sunlock(&map->lock, file, line);
+	lockmgr(&(map)->lock, LK_RELEASE, NULL, curthread);
 }

-int
-_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
-{
+static __inline__ int
+_vm_map_lock_upgrade(vm_map_t map, struct thread *td) {
+	int error;
+
 	vm_map_printf("locking map LK_EXCLUPGRADE: %p\n", map);
-	if (_sx_try_upgrade(&map->lock, file, line)) {
+	error = lockmgr(&map->lock, LK_EXCLUPGRADE, NULL, td);
+	if (error == 0)
 		map->timestamp++;
-		return (0);
-	}
-	return (EWOULDBLOCK);
+	return error;
 }

+int
+vm_map_lock_upgrade(vm_map_t map)
+{
+	return (_vm_map_lock_upgrade(map, curthread));
+}
+
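A behavioral difference hides in vm_map_lock_upgrade(). The sx version fails without giving up the shared lock (_sx_try_upgrade() simply reports failure), so callers had to vm_map_unlock_read() themselves before retrying; lockmgr's LK_EXCLUPGRADE releases the shared lock itself when the upgrade fails. That is why every caller hunk below shrinks from "unlock read, then retry" to a bare retry. Condensed from those hunks (hypothetical framing, not a literal excerpt):

	/* r92466 (sx): a failed upgrade leaves the shared lock held. */
	if (vm_map_lock_upgrade(map)) {
		vm_map_unlock_read(map);
		goto Retry;
	}

	/* r92588 (lockmgr): a failed upgrade already dropped it. */
	if (vm_map_lock_upgrade(map))
		goto Retry;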
 void
-_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
+vm_map_lock_downgrade(vm_map_t map)
 {
 	vm_map_printf("locking map LK_DOWNGRADE: %p\n", map);
-	_sx_downgrade(&map->lock, file, line);
+	lockmgr(&map->lock, LK_DOWNGRADE, NULL, curthread);
 }

+void
+vm_map_set_recursive(vm_map_t map)
+{
+	mtx_lock((map)->lock.lk_interlock);
+	map->lock.lk_flags |= LK_CANRECURSE;
+	mtx_unlock((map)->lock.lk_interlock);
+}
+
+void
+vm_map_clear_recursive(vm_map_t map)
+{
+	mtx_lock((map)->lock.lk_interlock);
+	map->lock.lk_flags &= ~LK_CANRECURSE;
+	mtx_unlock((map)->lock.lk_interlock);
+}
+
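vm_map_set_recursive() and vm_map_clear_recursive() reach through the lockmgr interlock to toggle LK_CANRECURSE; sx(9) locks have no equivalent, which is why the sx version had to drop them (see the deleted XXX comment in vm_map_user_pageable() below). They bracket the user-wiring fault, roughly in this shape (a condensed sketch of the hunks further down, not a literal excerpt):

	vm_map_set_recursive(map);	/* let the fault path relock the map */
	vm_map_lock_downgrade(map);	/* exclusive -> shared across the fault */
	map->timestamp++;
	rv = vm_fault_user_wire(map, entry->start, entry->end);
	/* ... and on both the error and success paths, eventually: */
	vm_map_clear_recursive(map);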
 vm_offset_t
 vm_map_min(vm_map_t map)
 {
 	return (map->min_offset);
 }

 vm_offset_t
 vm_map_max(vm_map_t map)

--- 55 unchanged lines hidden ---

 	map->size = 0;
 	map->system_map = 0;
 	map->infork = 0;
 	map->min_offset = min;
 	map->max_offset = max;
 	map->first_free = &map->header;
 	map->hint = &map->header;
 	map->timestamp = 0;
-	sx_init(&map->lock, "thrd_sleep");
+	lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
 }

 void
 vm_map_destroy(map)
 	struct vm_map *map;
 {
 	GIANT_REQUIRED;
-	sx_destroy(&map->lock);
+	lockdestroy(&map->lock);
 }

 /*
  * vm_map_entry_dispose:	[ internal use only ]
  *
  * Inverse of vm_map_entry_create.
  */
 static void

--- 1040 unchanged lines hidden ---

 		vm_map_clip_end(map, entry, end);

 		entry->wired_count++;
 		entry->eflags |= MAP_ENTRY_USER_WIRED;
 		estart = entry->start;
 		eend = entry->end;

 		/* First we need to allow map modifications */
+		vm_map_set_recursive(map);
+		vm_map_lock_downgrade(map);
 		map->timestamp++;

 		rv = vm_fault_user_wire(map, entry->start, entry->end);
 		if (rv) {
-			vm_map_lock(map);
+
 			entry->wired_count--;
 			entry->eflags &= ~MAP_ENTRY_USER_WIRED;
+
+			vm_map_clear_recursive(map);
 			vm_map_unlock(map);

 			/*
 			 * At this point, the map is unlocked, and
 			 * entry might no longer be valid. Use copy
 			 * of entry start value obtained while entry
 			 * was valid.
 			 */
 			(void) vm_map_user_pageable(map, start, estart,
 			    TRUE);
 			return rv;
 		}

-		/*
-		 * XXX- This is only okay because we have the
-		 * Giant lock. If the VM system were to be
-		 * reentrant, we'd know that we really can't
-		 * do this. Still, this behavior is no worse
-		 * than the old recursion...
-		 */
-		if (vm_map_try_lock(map)) {
+		vm_map_clear_recursive(map);
+		if (vm_map_lock_upgrade(map)) {
 			vm_map_lock(map);
 			if (vm_map_lookup_entry(map, estart, &entry)
 			    == FALSE) {
 				vm_map_unlock(map);
 				/*
 				 * vm_fault_user_wire succeded, thus
 				 * the area between start and eend
 				 * is wired and has to be unwired

--- 221 unchanged lines hidden ---

 			if (rv) {
 				failed = entry->start;
 				entry->wired_count--;
 			}
 		}
 		entry = entry->next;
 	}

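The recovery strategy in the error path above is the substantive difference: the map was unlocked across vm_fault_user_wire(), so entry may be stale. When the lock cannot be reacquired in place, both versions fall back to a blocking vm_map_lock() and re-look up the entry by the saved estart. r92466 attempted the quick reacquire with the now-deleted vm_map_try_lock(), leaning on Giant (per its own XXX comment); r92588 instead clears recursion and upgrades the downgraded shared lock, taking the blocking path only when LK_EXCLUPGRADE fails and has already released it.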
+	if (vm_map_pmap(map) == kernel_pmap) {
+		vm_map_lock(map);
+	}
 	if (rv) {
-		if (vm_map_pmap(map) != kernel_pmap)
-			vm_map_unlock_read(map);
+		vm_map_unlock(map);
 		(void) vm_map_pageable(map, start, failed, TRUE);
 		return (rv);
-	} else if (vm_map_pmap(map) == kernel_pmap) {
-		vm_map_lock(map);
 	}
 	/*
 	 * An exclusive lock on the map is needed in order to call
 	 * vm_map_simplify_entry(). If the current lock on the map
 	 * is only a shared lock, an upgrade is needed.
 	 */
 	if (vm_map_pmap(map) != kernel_pmap &&
 	    vm_map_lock_upgrade(map)) {
-		vm_map_unlock_read(map);
 		vm_map_lock(map);
 		if (vm_map_lookup_entry(map, start, &start_entry) ==
 		    FALSE) {
 			vm_map_unlock(map);
 			return KERN_SUCCESS;
 		}
 	}
 	vm_map_simplify_entry(map, start_entry);

--- 761 unchanged lines hidden ---

 		 * should only happen if the user has mapped into the
 		 * stack area after the stack was created, and is
 		 * probably an error.
 		 *
 		 * This also effectively destroys any guard page the user
 		 * might have intended by limiting the stack size.
 		 */
 		if (grow_amount > stack_entry->start - end) {
-			if (vm_map_lock_upgrade(map)) {
-				vm_map_unlock_read(map);
+			if (vm_map_lock_upgrade(map))
 				goto Retry;
-			}

 			stack_entry->avail_ssize = stack_entry->start - end;

 			vm_map_unlock(map);
 			return (KERN_NO_SPACE);
 		}

 		is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;

--- 13 unchanged lines hidden ---

 			grow_amount = stack_entry->avail_ssize;
 		}
 		if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
 		    p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
 			grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
 			    ctob(vm->vm_ssize);
 		}

-		if (vm_map_lock_upgrade(map)) {
-			vm_map_unlock_read(map);
+		if (vm_map_lock_upgrade(map))
 			goto Retry;
-		}

 		/* Get the preliminary new entry start value */
 		addr = stack_entry->start - grow_amount;

 		/* If this puts us into the previous entry, cut back our growth
 		 * to the available space. Also, see the note above.
 		 */
 		if (addr < end) {

--- 202 unchanged lines hidden ---

 		 */
 		if (fault_type & VM_PROT_WRITE) {
 			/*
 			 * Make a new object, and place it in the object
 			 * chain. Note that no new references have appeared
 			 * -- one just moved from the map to the new
 			 * object.
 			 */
-			if (vm_map_lock_upgrade(map)) {
-				vm_map_unlock_read(map);
+			if (vm_map_lock_upgrade(map))
 				goto RetryLookup;
-			}
 			vm_object_shadow(
 			    &entry->object.vm_object,
 			    &entry->offset,
 			    atop(entry->end - entry->start));
 			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
 			vm_map_lock_downgrade(map);
 		} else {
 			/*

--- 4 unchanged lines hidden ---

 		}
 	}

 	/*
 	 * Create an object if necessary.
 	 */
 	if (entry->object.vm_object == NULL &&
 	    !map->system_map) {
-		if (vm_map_lock_upgrade(map)) {
-			vm_map_unlock_read(map);
+		if (vm_map_lock_upgrade(map))
 			goto RetryLookup;
-		}
 		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
 		    atop(entry->end - entry->start));
 		entry->offset = 0;
 		vm_map_lock_downgrade(map);
 	}

 	/*
 	 * Return the object/offset from this entry. If the entry was

--- 216 unchanged lines hidden ---

 	} else {

 		vm_object_set_flag(srcobject, OBJ_OPT);
 		vm_object_reference(srcobject);

 		pmap_remove (map->pmap, uaddr, tend);

 		vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
-		if (vm_map_lock_upgrade(map)) {
-			vm_map_unlock_read(map);
-			vm_map_lock(map);
-		}
+		vm_map_lock_upgrade(map);

 		if (entry == &map->header) {
 			map->first_free = &map->header;
 		} else if (map->first_free->start >= start) {
 			map->first_free = entry->prev;
 		}

 		SAVE_HINT(map, entry->prev);

--- 212 unchanged lines hidden ---