455 if (new_entry == NULL) 456 panic("vm_map_entry_create: kernel resources exhausted"); 457 return (new_entry); 458} 459 460/* 461 * vm_map_entry_{un,}link: 462 * 463 * Insert/remove entries from maps. 464 */ 465static __inline void 466vm_map_entry_link(vm_map_t map, 467 vm_map_entry_t after_where, 468 vm_map_entry_t entry) 469{ 470 471 CTR4(KTR_VM, 472 "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map, 473 map->nentries, entry, after_where); 474 map->nentries++; 475 entry->prev = after_where; 476 entry->next = after_where->next; 477 entry->next->prev = entry; 478 after_where->next = entry; 479} 480 481static __inline void 482vm_map_entry_unlink(vm_map_t map, 483 vm_map_entry_t entry) 484{ 485 vm_map_entry_t prev = entry->prev; 486 vm_map_entry_t next = entry->next; 487 488 next->prev = prev; 489 prev->next = next; 490 map->nentries--; 491 CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map, 492 map->nentries, entry); 493} 494 495/* 496 * SAVE_HINT: 497 * 498 * Saves the specified entry as the hint for 499 * future lookups. 500 */ 501#define SAVE_HINT(map,value) \ 502 (map)->hint = (value); 503 504/* 505 * vm_map_lookup_entry: [ internal use only ] 506 * 507 * Finds the map entry containing (or 508 * immediately preceding) the specified address 509 * in the given map; the entry is returned 510 * in the "entry" parameter. The boolean 511 * result indicates whether the address is 512 * actually contained in the map. 513 */ 514boolean_t 515vm_map_lookup_entry( 516 vm_map_t map, 517 vm_offset_t address, 518 vm_map_entry_t *entry) /* OUT */ 519{ 520 vm_map_entry_t cur; 521 vm_map_entry_t last; 522 523 GIANT_REQUIRED; 524 /* 525 * Start looking either from the head of the list, or from the hint. 526 */ 527 cur = map->hint; 528 529 if (cur == &map->header) 530 cur = cur->next; 531 532 if (address >= cur->start) { 533 /* 534 * Go from hint to end of list. 535 * 536 * But first, make a quick check to see if we are already looking 537 * at the entry we want (which is usually the case). Note also 538 * that we don't need to save the hint here... it is the same 539 * hint (unless we are at the header, in which case the hint 540 * didn't buy us anything anyway). 541 */ 542 last = &map->header; 543 if ((cur != last) && (cur->end > address)) { 544 *entry = cur; 545 return (TRUE); 546 } 547 } else { 548 /* 549 * Go from start to hint, *inclusively* 550 */ 551 last = cur->next; 552 cur = map->header.next; 553 } 554 555 /* 556 * Search linearly 557 */ 558 while (cur != last) { 559 if (cur->end > address) { 560 if (address >= cur->start) { 561 /* 562 * Save this lookup for future hints, and 563 * return 564 */ 565 *entry = cur; 566 SAVE_HINT(map, cur); 567 return (TRUE); 568 } 569 break; 570 } 571 cur = cur->next; 572 } 573 *entry = cur->prev; 574 SAVE_HINT(map, *entry); 575 return (FALSE); 576} 577 578/* 579 * vm_map_insert: 580 * 581 * Inserts the given whole VM object into the target 582 * map at the specified address range. The object's 583 * size should match that of the address range. 584 * 585 * Requires that the map be locked, and leaves it so. 586 * 587 * If object is non-NULL, ref count must be bumped by caller 588 * prior to making call to account for the new entry. 589 */ 590int 591vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 592 vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, 593 int cow) 594{ 595 vm_map_entry_t new_entry; 596 vm_map_entry_t prev_entry; 597 vm_map_entry_t temp_entry; 598 vm_eflags_t protoeflags; 599 600 GIANT_REQUIRED; 601 602 /* 603 * Check that the start and end points are not bogus. 604 */ 605 if ((start < map->min_offset) || (end > map->max_offset) || 606 (start >= end)) 607 return (KERN_INVALID_ADDRESS); 608 609 /* 610 * Find the entry prior to the proposed starting address; if it's part 611 * of an existing entry, this range is bogus. 612 */ 613 if (vm_map_lookup_entry(map, start, &temp_entry)) 614 return (KERN_NO_SPACE); 615 616 prev_entry = temp_entry; 617 618 /* 619 * Assert that the next entry doesn't overlap the end point. 620 */ 621 if ((prev_entry->next != &map->header) && 622 (prev_entry->next->start < end)) 623 return (KERN_NO_SPACE); 624 625 protoeflags = 0; 626 627 if (cow & MAP_COPY_ON_WRITE) 628 protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY; 629 630 if (cow & MAP_NOFAULT) { 631 protoeflags |= MAP_ENTRY_NOFAULT; 632 633 KASSERT(object == NULL, 634 ("vm_map_insert: paradoxical MAP_NOFAULT request")); 635 } 636 if (cow & MAP_DISABLE_SYNCER) 637 protoeflags |= MAP_ENTRY_NOSYNC; 638 if (cow & MAP_DISABLE_COREDUMP) 639 protoeflags |= MAP_ENTRY_NOCOREDUMP; 640 641 if (object) { 642 /* 643 * When object is non-NULL, it could be shared with another 644 * process. We have to set or clear OBJ_ONEMAPPING 645 * appropriately. 646 */ 647 if ((object->ref_count > 1) || (object->shadow_count != 0)) { 648 vm_object_clear_flag(object, OBJ_ONEMAPPING); 649 } 650 } 651 else if ((prev_entry != &map->header) && 652 (prev_entry->eflags == protoeflags) && 653 (prev_entry->end == start) && 654 (prev_entry->wired_count == 0) && 655 ((prev_entry->object.vm_object == NULL) || 656 vm_object_coalesce(prev_entry->object.vm_object, 657 OFF_TO_IDX(prev_entry->offset), 658 (vm_size_t)(prev_entry->end - prev_entry->start), 659 (vm_size_t)(end - prev_entry->end)))) { 660 /* 661 * We were able to extend the object. Determine if we 662 * can extend the previous map entry to include the 663 * new range as well. 664 */ 665 if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) && 666 (prev_entry->protection == prot) && 667 (prev_entry->max_protection == max)) { 668 map->size += (end - prev_entry->end); 669 prev_entry->end = end; 670 vm_map_simplify_entry(map, prev_entry); 671 return (KERN_SUCCESS); 672 } 673 674 /* 675 * If we can extend the object but cannot extend the 676 * map entry, we have to create a new map entry. We 677 * must bump the ref count on the extended object to 678 * account for it. object may be NULL. 679 */ 680 object = prev_entry->object.vm_object; 681 offset = prev_entry->offset + 682 (prev_entry->end - prev_entry->start); 683 vm_object_reference(object); 684 } 685 686 /* 687 * NOTE: if conditionals fail, object can be NULL here. This occurs 688 * in things like the buffer map where we manage kva but do not manage 689 * backing objects. 690 */ 691 692 /* 693 * Create a new entry 694 */ 695 new_entry = vm_map_entry_create(map); 696 new_entry->start = start; 697 new_entry->end = end; 698 699 new_entry->eflags = protoeflags; 700 new_entry->object.vm_object = object; 701 new_entry->offset = offset; 702 new_entry->avail_ssize = 0; 703 704 new_entry->inheritance = VM_INHERIT_DEFAULT; 705 new_entry->protection = prot; 706 new_entry->max_protection = max; 707 new_entry->wired_count = 0; 708 709 /* 710 * Insert the new entry into the list 711 */ 712 vm_map_entry_link(map, prev_entry, new_entry); 713 map->size += new_entry->end - new_entry->start; 714 715 /* 716 * Update the free space hint 717 */ 718 if ((map->first_free == prev_entry) && 719 (prev_entry->end >= new_entry->start)) { 720 map->first_free = new_entry; 721 } 722 723#if 0 724 /* 725 * Temporarily removed to avoid MAP_STACK panic, due to 726 * MAP_STACK being a huge hack. Will be added back in 727 * when MAP_STACK (and the user stack mapping) is fixed. 728 */ 729 /* 730 * It may be possible to simplify the entry 731 */ 732 vm_map_simplify_entry(map, new_entry); 733#endif 734 735 if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) { 736 pmap_object_init_pt(map->pmap, start, 737 object, OFF_TO_IDX(offset), end - start, 738 cow & MAP_PREFAULT_PARTIAL); 739 } 740 741 return (KERN_SUCCESS); 742} 743 744/* 745 * Find sufficient space for `length' bytes in the given map, starting at 746 * `start'. The map must be locked. Returns 0 on success, 1 on no space. 747 */ 748int 749vm_map_findspace( 750 vm_map_t map, 751 vm_offset_t start, 752 vm_size_t length, 753 vm_offset_t *addr) 754{ 755 vm_map_entry_t entry, next; 756 vm_offset_t end; 757 758 GIANT_REQUIRED; 759 if (start < map->min_offset) 760 start = map->min_offset; 761 if (start > map->max_offset) 762 return (1); 763 764 /* 765 * Look for the first possible address; if there's already something 766 * at this address, we have to start after it. 767 */ 768 if (start == map->min_offset) { 769 if ((entry = map->first_free) != &map->header) 770 start = entry->end; 771 } else { 772 vm_map_entry_t tmp; 773 774 if (vm_map_lookup_entry(map, start, &tmp)) 775 start = tmp->end; 776 entry = tmp; 777 } 778 779 /* 780 * Look through the rest of the map, trying to fit a new region in the 781 * gap between existing regions, or after the very last region. 782 */ 783 for (;; start = (entry = next)->end) { 784 /* 785 * Find the end of the proposed new region. Be sure we didn't 786 * go beyond the end of the map, or wrap around the address; 787 * if so, we lose. Otherwise, if this is the last entry, or 788 * if the proposed new region fits before the next entry, we 789 * win. 790 */ 791 end = start + length; 792 if (end > map->max_offset || end < start) 793 return (1); 794 next = entry->next; 795 if (next == &map->header || next->start >= end) 796 break; 797 } 798 SAVE_HINT(map, entry); 799 *addr = start; 800 if (map == kernel_map) { 801 vm_offset_t ksize; 802 if ((ksize = round_page(start + length)) > kernel_vm_end) { 803 pmap_growkernel(ksize); 804 } 805 } 806 return (0); 807} 808 809/* 810 * vm_map_find finds an unallocated region in the target address 811 * map with the given length. The search is defined to be 812 * first-fit from the specified address; the region found is 813 * returned in the same parameter. 814 * 815 * If object is non-NULL, ref count must be bumped by caller 816 * prior to making call to account for the new entry. 817 */ 818int 819vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 820 vm_offset_t *addr, /* IN/OUT */ 821 vm_size_t length, boolean_t find_space, vm_prot_t prot, 822 vm_prot_t max, int cow) 823{ 824 vm_offset_t start; 825 int result, s = 0; 826 827 GIANT_REQUIRED; 828 829 start = *addr; 830 831 if (map == kmem_map) 832 s = splvm(); 833 834 vm_map_lock(map); 835 if (find_space) { 836 if (vm_map_findspace(map, start, length, addr)) { 837 vm_map_unlock(map); 838 if (map == kmem_map) 839 splx(s); 840 return (KERN_NO_SPACE); 841 } 842 start = *addr; 843 } 844 result = vm_map_insert(map, object, offset, 845 start, start + length, prot, max, cow); 846 vm_map_unlock(map); 847 848 if (map == kmem_map) 849 splx(s); 850 851 return (result); 852} 853 854/* 855 * vm_map_simplify_entry: 856 * 857 * Simplify the given map entry by merging with either neighbor. This 858 * routine also has the ability to merge with both neighbors. 859 * 860 * The map must be locked. 861 * 862 * This routine guarentees that the passed entry remains valid (though 863 * possibly extended). When merging, this routine may delete one or 864 * both neighbors. 865 */ 866void 867vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry) 868{ 869 vm_map_entry_t next, prev; 870 vm_size_t prevsize, esize; 871 872 GIANT_REQUIRED; 873 874 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) 875 return; 876 877 prev = entry->prev; 878 if (prev != &map->header) { 879 prevsize = prev->end - prev->start; 880 if ( (prev->end == entry->start) && 881 (prev->object.vm_object == entry->object.vm_object) && 882 (!prev->object.vm_object || 883 (prev->offset + prevsize == entry->offset)) && 884 (prev->eflags == entry->eflags) && 885 (prev->protection == entry->protection) && 886 (prev->max_protection == entry->max_protection) && 887 (prev->inheritance == entry->inheritance) && 888 (prev->wired_count == entry->wired_count)) { 889 if (map->first_free == prev) 890 map->first_free = entry; 891 if (map->hint == prev) 892 map->hint = entry; 893 vm_map_entry_unlink(map, prev); 894 entry->start = prev->start; 895 entry->offset = prev->offset; 896 if (prev->object.vm_object) 897 vm_object_deallocate(prev->object.vm_object); 898 vm_map_entry_dispose(map, prev); 899 } 900 } 901 902 next = entry->next; 903 if (next != &map->header) { 904 esize = entry->end - entry->start; 905 if ((entry->end == next->start) && 906 (next->object.vm_object == entry->object.vm_object) && 907 (!entry->object.vm_object || 908 (entry->offset + esize == next->offset)) && 909 (next->eflags == entry->eflags) && 910 (next->protection == entry->protection) && 911 (next->max_protection == entry->max_protection) && 912 (next->inheritance == entry->inheritance) && 913 (next->wired_count == entry->wired_count)) { 914 if (map->first_free == next) 915 map->first_free = entry; 916 if (map->hint == next) 917 map->hint = entry; 918 vm_map_entry_unlink(map, next); 919 entry->end = next->end; 920 if (next->object.vm_object) 921 vm_object_deallocate(next->object.vm_object); 922 vm_map_entry_dispose(map, next); 923 } 924 } 925} 926/* 927 * vm_map_clip_start: [ internal use only ] 928 * 929 * Asserts that the given entry begins at or after 930 * the specified address; if necessary, 931 * it splits the entry into two. 932 */ 933#define vm_map_clip_start(map, entry, startaddr) \ 934{ \ 935 if (startaddr > entry->start) \ 936 _vm_map_clip_start(map, entry, startaddr); \ 937} 938 939/* 940 * This routine is called only when it is known that 941 * the entry must be split. 942 */ 943static void 944_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start) 945{ 946 vm_map_entry_t new_entry; 947 948 /* 949 * Split off the front portion -- note that we must insert the new 950 * entry BEFORE this one, so that this entry has the specified 951 * starting address. 952 */ 953 vm_map_simplify_entry(map, entry); 954 955 /* 956 * If there is no object backing this entry, we might as well create 957 * one now. If we defer it, an object can get created after the map 958 * is clipped, and individual objects will be created for the split-up 959 * map. This is a bit of a hack, but is also about the best place to 960 * put this improvement. 961 */ 962 if (entry->object.vm_object == NULL && !map->system_map) { 963 vm_object_t object; 964 object = vm_object_allocate(OBJT_DEFAULT, 965 atop(entry->end - entry->start)); 966 entry->object.vm_object = object; 967 entry->offset = 0; 968 } 969 970 new_entry = vm_map_entry_create(map); 971 *new_entry = *entry; 972 973 new_entry->end = start; 974 entry->offset += (start - entry->start); 975 entry->start = start; 976 977 vm_map_entry_link(map, entry->prev, new_entry); 978 979 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 980 vm_object_reference(new_entry->object.vm_object); 981 } 982} 983 984/* 985 * vm_map_clip_end: [ internal use only ] 986 * 987 * Asserts that the given entry ends at or before 988 * the specified address; if necessary, 989 * it splits the entry into two. 990 */ 991#define vm_map_clip_end(map, entry, endaddr) \ 992{ \ 993 if (endaddr < entry->end) \ 994 _vm_map_clip_end(map, entry, endaddr); \ 995} 996 997/* 998 * This routine is called only when it is known that 999 * the entry must be split. 1000 */ 1001static void 1002_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end) 1003{ 1004 vm_map_entry_t new_entry; 1005 1006 /* 1007 * If there is no object backing this entry, we might as well create 1008 * one now. If we defer it, an object can get created after the map 1009 * is clipped, and individual objects will be created for the split-up 1010 * map. This is a bit of a hack, but is also about the best place to 1011 * put this improvement. 1012 */ 1013 if (entry->object.vm_object == NULL && !map->system_map) { 1014 vm_object_t object; 1015 object = vm_object_allocate(OBJT_DEFAULT, 1016 atop(entry->end - entry->start)); 1017 entry->object.vm_object = object; 1018 entry->offset = 0; 1019 } 1020 1021 /* 1022 * Create a new entry and insert it AFTER the specified entry 1023 */ 1024 new_entry = vm_map_entry_create(map); 1025 *new_entry = *entry; 1026 1027 new_entry->start = entry->end = end; 1028 new_entry->offset += (end - entry->start); 1029 1030 vm_map_entry_link(map, entry, new_entry); 1031 1032 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 1033 vm_object_reference(new_entry->object.vm_object); 1034 } 1035} 1036 1037/* 1038 * VM_MAP_RANGE_CHECK: [ internal use only ] 1039 * 1040 * Asserts that the starting and ending region 1041 * addresses fall within the valid range of the map. 1042 */ 1043#define VM_MAP_RANGE_CHECK(map, start, end) \ 1044 { \ 1045 if (start < vm_map_min(map)) \ 1046 start = vm_map_min(map); \ 1047 if (end > vm_map_max(map)) \ 1048 end = vm_map_max(map); \ 1049 if (start > end) \ 1050 start = end; \ 1051 } 1052 1053/* 1054 * vm_map_submap: [ kernel use only ] 1055 * 1056 * Mark the given range as handled by a subordinate map. 1057 * 1058 * This range must have been created with vm_map_find, 1059 * and no other operations may have been performed on this 1060 * range prior to calling vm_map_submap. 1061 * 1062 * Only a limited number of operations can be performed 1063 * within this rage after calling vm_map_submap: 1064 * vm_fault 1065 * [Don't try vm_map_copy!] 1066 * 1067 * To remove a submapping, one must first remove the 1068 * range from the superior map, and then destroy the 1069 * submap (if desired). [Better yet, don't try it.] 1070 */ 1071int 1072vm_map_submap( 1073 vm_map_t map, 1074 vm_offset_t start, 1075 vm_offset_t end, 1076 vm_map_t submap) 1077{ 1078 vm_map_entry_t entry; 1079 int result = KERN_INVALID_ARGUMENT; 1080 1081 GIANT_REQUIRED; 1082 1083 vm_map_lock(map); 1084 1085 VM_MAP_RANGE_CHECK(map, start, end); 1086 1087 if (vm_map_lookup_entry(map, start, &entry)) { 1088 vm_map_clip_start(map, entry, start); 1089 } else 1090 entry = entry->next; 1091 1092 vm_map_clip_end(map, entry, end); 1093 1094 if ((entry->start == start) && (entry->end == end) && 1095 ((entry->eflags & MAP_ENTRY_COW) == 0) && 1096 (entry->object.vm_object == NULL)) { 1097 entry->object.sub_map = submap; 1098 entry->eflags |= MAP_ENTRY_IS_SUB_MAP; 1099 result = KERN_SUCCESS; 1100 } 1101 vm_map_unlock(map); 1102 1103 return (result); 1104} 1105 1106/* 1107 * vm_map_protect: 1108 * 1109 * Sets the protection of the specified address 1110 * region in the target map. If "set_max" is 1111 * specified, the maximum protection is to be set; 1112 * otherwise, only the current protection is affected. 1113 */ 1114int 1115vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, 1116 vm_prot_t new_prot, boolean_t set_max) 1117{ 1118 vm_map_entry_t current; 1119 vm_map_entry_t entry; 1120 1121 GIANT_REQUIRED; 1122 vm_map_lock(map); 1123 1124 VM_MAP_RANGE_CHECK(map, start, end); 1125 1126 if (vm_map_lookup_entry(map, start, &entry)) { 1127 vm_map_clip_start(map, entry, start); 1128 } else { 1129 entry = entry->next; 1130 } 1131 1132 /* 1133 * Make a first pass to check for protection violations. 1134 */ 1135 current = entry; 1136 while ((current != &map->header) && (current->start < end)) { 1137 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 1138 vm_map_unlock(map); 1139 return (KERN_INVALID_ARGUMENT); 1140 } 1141 if ((new_prot & current->max_protection) != new_prot) { 1142 vm_map_unlock(map); 1143 return (KERN_PROTECTION_FAILURE); 1144 } 1145 current = current->next; 1146 } 1147 1148 /* 1149 * Go back and fix up protections. [Note that clipping is not 1150 * necessary the second time.] 1151 */ 1152 current = entry; 1153 while ((current != &map->header) && (current->start < end)) { 1154 vm_prot_t old_prot; 1155 1156 vm_map_clip_end(map, current, end); 1157 1158 old_prot = current->protection; 1159 if (set_max) 1160 current->protection = 1161 (current->max_protection = new_prot) & 1162 old_prot; 1163 else 1164 current->protection = new_prot; 1165 1166 /* 1167 * Update physical map if necessary. Worry about copy-on-write 1168 * here -- CHECK THIS XXX 1169 */ 1170 if (current->protection != old_prot) { 1171#define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \ 1172 VM_PROT_ALL) 1173 pmap_protect(map->pmap, current->start, 1174 current->end, 1175 current->protection & MASK(current)); 1176#undef MASK 1177 } 1178 vm_map_simplify_entry(map, current); 1179 current = current->next; 1180 } 1181 vm_map_unlock(map); 1182 return (KERN_SUCCESS); 1183} 1184 1185/* 1186 * vm_map_madvise: 1187 * 1188 * This routine traverses a processes map handling the madvise 1189 * system call. Advisories are classified as either those effecting 1190 * the vm_map_entry structure, or those effecting the underlying 1191 * objects. 1192 */ 1193int 1194vm_map_madvise( 1195 vm_map_t map, 1196 vm_offset_t start, 1197 vm_offset_t end, 1198 int behav) 1199{ 1200 vm_map_entry_t current, entry; 1201 int modify_map = 0; 1202 1203 GIANT_REQUIRED; 1204 1205 /* 1206 * Some madvise calls directly modify the vm_map_entry, in which case 1207 * we need to use an exclusive lock on the map and we need to perform 1208 * various clipping operations. Otherwise we only need a read-lock 1209 * on the map. 1210 */ 1211 switch(behav) { 1212 case MADV_NORMAL: 1213 case MADV_SEQUENTIAL: 1214 case MADV_RANDOM: 1215 case MADV_NOSYNC: 1216 case MADV_AUTOSYNC: 1217 case MADV_NOCORE: 1218 case MADV_CORE: 1219 modify_map = 1; 1220 vm_map_lock(map); 1221 break; 1222 case MADV_WILLNEED: 1223 case MADV_DONTNEED: 1224 case MADV_FREE: 1225 vm_map_lock_read(map); 1226 break; 1227 default: 1228 return (KERN_INVALID_ARGUMENT); 1229 } 1230 1231 /* 1232 * Locate starting entry and clip if necessary. 1233 */ 1234 VM_MAP_RANGE_CHECK(map, start, end); 1235 1236 if (vm_map_lookup_entry(map, start, &entry)) { 1237 if (modify_map) 1238 vm_map_clip_start(map, entry, start); 1239 } else { 1240 entry = entry->next; 1241 } 1242 1243 if (modify_map) { 1244 /* 1245 * madvise behaviors that are implemented in the vm_map_entry. 1246 * 1247 * We clip the vm_map_entry so that behavioral changes are 1248 * limited to the specified address range. 1249 */ 1250 for (current = entry; 1251 (current != &map->header) && (current->start < end); 1252 current = current->next 1253 ) { 1254 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 1255 continue; 1256 1257 vm_map_clip_end(map, current, end); 1258 1259 switch (behav) { 1260 case MADV_NORMAL: 1261 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL); 1262 break; 1263 case MADV_SEQUENTIAL: 1264 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL); 1265 break; 1266 case MADV_RANDOM: 1267 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM); 1268 break; 1269 case MADV_NOSYNC: 1270 current->eflags |= MAP_ENTRY_NOSYNC; 1271 break; 1272 case MADV_AUTOSYNC: 1273 current->eflags &= ~MAP_ENTRY_NOSYNC; 1274 break; 1275 case MADV_NOCORE: 1276 current->eflags |= MAP_ENTRY_NOCOREDUMP; 1277 break; 1278 case MADV_CORE: 1279 current->eflags &= ~MAP_ENTRY_NOCOREDUMP; 1280 break; 1281 default: 1282 break; 1283 } 1284 vm_map_simplify_entry(map, current); 1285 } 1286 vm_map_unlock(map); 1287 } else { 1288 vm_pindex_t pindex; 1289 int count; 1290 1291 /* 1292 * madvise behaviors that are implemented in the underlying 1293 * vm_object. 1294 * 1295 * Since we don't clip the vm_map_entry, we have to clip 1296 * the vm_object pindex and count. 1297 */ 1298 for (current = entry; 1299 (current != &map->header) && (current->start < end); 1300 current = current->next 1301 ) { 1302 vm_offset_t useStart; 1303 1304 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 1305 continue; 1306 1307 pindex = OFF_TO_IDX(current->offset); 1308 count = atop(current->end - current->start); 1309 useStart = current->start; 1310 1311 if (current->start < start) { 1312 pindex += atop(start - current->start); 1313 count -= atop(start - current->start); 1314 useStart = start; 1315 } 1316 if (current->end > end) 1317 count -= atop(current->end - end); 1318 1319 if (count <= 0) 1320 continue; 1321 1322 vm_object_madvise(current->object.vm_object, 1323 pindex, count, behav); 1324 if (behav == MADV_WILLNEED) { 1325 pmap_object_init_pt( 1326 map->pmap, 1327 useStart, 1328 current->object.vm_object, 1329 pindex, 1330 (count << PAGE_SHIFT), 1331 MAP_PREFAULT_MADVISE 1332 ); 1333 } 1334 } 1335 vm_map_unlock_read(map); 1336 } 1337 return (0); 1338} 1339 1340 1341/* 1342 * vm_map_inherit: 1343 * 1344 * Sets the inheritance of the specified address 1345 * range in the target map. Inheritance 1346 * affects how the map will be shared with 1347 * child maps at the time of vm_map_fork. 1348 */ 1349int 1350vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, 1351 vm_inherit_t new_inheritance) 1352{ 1353 vm_map_entry_t entry; 1354 vm_map_entry_t temp_entry; 1355 1356 GIANT_REQUIRED; 1357 1358 switch (new_inheritance) { 1359 case VM_INHERIT_NONE: 1360 case VM_INHERIT_COPY: 1361 case VM_INHERIT_SHARE: 1362 break; 1363 default: 1364 return (KERN_INVALID_ARGUMENT); 1365 } 1366 1367 vm_map_lock(map); 1368 1369 VM_MAP_RANGE_CHECK(map, start, end); 1370 1371 if (vm_map_lookup_entry(map, start, &temp_entry)) { 1372 entry = temp_entry; 1373 vm_map_clip_start(map, entry, start); 1374 } else 1375 entry = temp_entry->next; 1376 1377 while ((entry != &map->header) && (entry->start < end)) { 1378 vm_map_clip_end(map, entry, end); 1379 1380 entry->inheritance = new_inheritance; 1381 1382 vm_map_simplify_entry(map, entry); 1383 1384 entry = entry->next; 1385 } 1386 1387 vm_map_unlock(map); 1388 return (KERN_SUCCESS); 1389} 1390 1391/* 1392 * Implement the semantics of mlock 1393 */ 1394int 1395vm_map_user_pageable( 1396 vm_map_t map, 1397 vm_offset_t start, 1398 vm_offset_t end, 1399 boolean_t new_pageable) 1400{ 1401 vm_map_entry_t entry; 1402 vm_map_entry_t start_entry; 1403 vm_offset_t estart; 1404 vm_offset_t eend; 1405 int rv; 1406 1407 vm_map_lock(map); 1408 VM_MAP_RANGE_CHECK(map, start, end); 1409 1410 if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) { 1411 vm_map_unlock(map); 1412 return (KERN_INVALID_ADDRESS); 1413 } 1414 1415 if (new_pageable) { 1416 1417 entry = start_entry; 1418 vm_map_clip_start(map, entry, start); 1419 1420 /* 1421 * Now decrement the wiring count for each region. If a region 1422 * becomes completely unwired, unwire its physical pages and 1423 * mappings. 1424 */ 1425 while ((entry != &map->header) && (entry->start < end)) { 1426 if (entry->eflags & MAP_ENTRY_USER_WIRED) { 1427 vm_map_clip_end(map, entry, end); 1428 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 1429 entry->wired_count--; 1430 if (entry->wired_count == 0) 1431 vm_fault_unwire(map, entry->start, entry->end); 1432 } 1433 vm_map_simplify_entry(map,entry); 1434 entry = entry->next; 1435 } 1436 } else { 1437 1438 entry = start_entry; 1439 1440 while ((entry != &map->header) && (entry->start < end)) { 1441 1442 if (entry->eflags & MAP_ENTRY_USER_WIRED) { 1443 entry = entry->next; 1444 continue; 1445 } 1446 1447 if (entry->wired_count != 0) { 1448 entry->wired_count++; 1449 entry->eflags |= MAP_ENTRY_USER_WIRED; 1450 entry = entry->next; 1451 continue; 1452 } 1453 1454 /* Here on entry being newly wired */ 1455 1456 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 1457 int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY; 1458 if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) { 1459 1460 vm_object_shadow(&entry->object.vm_object, 1461 &entry->offset, 1462 atop(entry->end - entry->start)); 1463 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 1464 1465 } else if (entry->object.vm_object == NULL && 1466 !map->system_map) { 1467 1468 entry->object.vm_object = 1469 vm_object_allocate(OBJT_DEFAULT, 1470 atop(entry->end - entry->start)); 1471 entry->offset = (vm_offset_t) 0; 1472 1473 } 1474 } 1475 1476 vm_map_clip_start(map, entry, start); 1477 vm_map_clip_end(map, entry, end); 1478 1479 entry->wired_count++; 1480 entry->eflags |= MAP_ENTRY_USER_WIRED; 1481 estart = entry->start; 1482 eend = entry->end; 1483 1484 /* First we need to allow map modifications */ 1485 vm_map_set_recursive(map); 1486 vm_map_lock_downgrade(map); 1487 map->timestamp++; 1488 1489 rv = vm_fault_user_wire(map, entry->start, entry->end); 1490 if (rv) { 1491 1492 entry->wired_count--; 1493 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 1494 1495 vm_map_clear_recursive(map); 1496 vm_map_unlock(map); 1497 1498 /* 1499 * At this point, the map is unlocked, and 1500 * entry might no longer be valid. Use copy 1501 * of entry start value obtained while entry 1502 * was valid. 1503 */ 1504 (void) vm_map_user_pageable(map, start, estart, 1505 TRUE); 1506 return rv; 1507 } 1508 1509 vm_map_clear_recursive(map); 1510 if (vm_map_lock_upgrade(map)) { 1511 vm_map_lock(map); 1512 if (vm_map_lookup_entry(map, estart, &entry) 1513 == FALSE) { 1514 vm_map_unlock(map); 1515 /* 1516 * vm_fault_user_wire succeded, thus 1517 * the area between start and eend 1518 * is wired and has to be unwired 1519 * here as part of the cleanup. 1520 */ 1521 (void) vm_map_user_pageable(map, 1522 start, 1523 eend, 1524 TRUE); 1525 return (KERN_INVALID_ADDRESS); 1526 } 1527 } 1528 vm_map_simplify_entry(map,entry); 1529 } 1530 } 1531 map->timestamp++; 1532 vm_map_unlock(map); 1533 return KERN_SUCCESS; 1534} 1535 1536/* 1537 * vm_map_pageable: 1538 * 1539 * Sets the pageability of the specified address 1540 * range in the target map. Regions specified 1541 * as not pageable require locked-down physical 1542 * memory and physical page maps. 1543 * 1544 * The map must not be locked, but a reference 1545 * must remain to the map throughout the call. 1546 */ 1547int 1548vm_map_pageable( 1549 vm_map_t map, 1550 vm_offset_t start, 1551 vm_offset_t end, 1552 boolean_t new_pageable) 1553{ 1554 vm_map_entry_t entry; 1555 vm_map_entry_t start_entry; 1556 vm_offset_t failed = 0; 1557 int rv; 1558 1559 GIANT_REQUIRED; 1560 1561 vm_map_lock(map); 1562 1563 VM_MAP_RANGE_CHECK(map, start, end); 1564 1565 /* 1566 * Only one pageability change may take place at one time, since 1567 * vm_fault assumes it will be called only once for each 1568 * wiring/unwiring. Therefore, we have to make sure we're actually 1569 * changing the pageability for the entire region. We do so before 1570 * making any changes. 1571 */ 1572 if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) { 1573 vm_map_unlock(map); 1574 return (KERN_INVALID_ADDRESS); 1575 } 1576 entry = start_entry; 1577 1578 /* 1579 * Actions are rather different for wiring and unwiring, so we have 1580 * two separate cases. 1581 */ 1582 if (new_pageable) { 1583 vm_map_clip_start(map, entry, start); 1584 1585 /* 1586 * Unwiring. First ensure that the range to be unwired is 1587 * really wired down and that there are no holes. 1588 */ 1589 while ((entry != &map->header) && (entry->start < end)) { 1590 if (entry->wired_count == 0 || 1591 (entry->end < end && 1592 (entry->next == &map->header || 1593 entry->next->start > entry->end))) { 1594 vm_map_unlock(map); 1595 return (KERN_INVALID_ARGUMENT); 1596 } 1597 entry = entry->next; 1598 } 1599 1600 /* 1601 * Now decrement the wiring count for each region. If a region 1602 * becomes completely unwired, unwire its physical pages and 1603 * mappings. 1604 */ 1605 entry = start_entry; 1606 while ((entry != &map->header) && (entry->start < end)) { 1607 vm_map_clip_end(map, entry, end); 1608 1609 entry->wired_count--; 1610 if (entry->wired_count == 0) 1611 vm_fault_unwire(map, entry->start, entry->end); 1612 1613 vm_map_simplify_entry(map, entry); 1614 1615 entry = entry->next; 1616 } 1617 } else { 1618 /* 1619 * Wiring. We must do this in two passes: 1620 * 1621 * 1. Holding the write lock, we create any shadow or zero-fill 1622 * objects that need to be created. Then we clip each map 1623 * entry to the region to be wired and increment its wiring 1624 * count. We create objects before clipping the map entries 1625 * to avoid object proliferation. 1626 * 1627 * 2. We downgrade to a read lock, and call vm_fault_wire to 1628 * fault in the pages for any newly wired area (wired_count is 1629 * 1). 1630 * 1631 * Downgrading to a read lock for vm_fault_wire avoids a possible 1632 * deadlock with another process that may have faulted on one 1633 * of the pages to be wired (it would mark the page busy, 1634 * blocking us, then in turn block on the map lock that we 1635 * hold). Because of problems in the recursive lock package, 1636 * we cannot upgrade to a write lock in vm_map_lookup. Thus, 1637 * any actions that require the write lock must be done 1638 * beforehand. Because we keep the read lock on the map, the 1639 * copy-on-write status of the entries we modify here cannot 1640 * change. 1641 */ 1642 1643 /* 1644 * Pass 1. 1645 */ 1646 while ((entry != &map->header) && (entry->start < end)) { 1647 if (entry->wired_count == 0) { 1648 1649 /* 1650 * Perform actions of vm_map_lookup that need 1651 * the write lock on the map: create a shadow 1652 * object for a copy-on-write region, or an 1653 * object for a zero-fill region. 1654 * 1655 * We don't have to do this for entries that 1656 * point to sub maps, because we won't 1657 * hold the lock on the sub map. 1658 */ 1659 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 1660 int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY; 1661 if (copyflag && 1662 ((entry->protection & VM_PROT_WRITE) != 0)) { 1663 1664 vm_object_shadow(&entry->object.vm_object, 1665 &entry->offset, 1666 atop(entry->end - entry->start)); 1667 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 1668 } else if (entry->object.vm_object == NULL && 1669 !map->system_map) { 1670 entry->object.vm_object = 1671 vm_object_allocate(OBJT_DEFAULT, 1672 atop(entry->end - entry->start)); 1673 entry->offset = (vm_offset_t) 0; 1674 } 1675 } 1676 } 1677 vm_map_clip_start(map, entry, start); 1678 vm_map_clip_end(map, entry, end); 1679 entry->wired_count++; 1680 1681 /* 1682 * Check for holes 1683 */ 1684 if (entry->end < end && 1685 (entry->next == &map->header || 1686 entry->next->start > entry->end)) { 1687 /* 1688 * Found one. Object creation actions do not 1689 * need to be undone, but the wired counts 1690 * need to be restored. 1691 */ 1692 while (entry != &map->header && entry->end > start) { 1693 entry->wired_count--; 1694 entry = entry->prev; 1695 } 1696 vm_map_unlock(map); 1697 return (KERN_INVALID_ARGUMENT); 1698 } 1699 entry = entry->next; 1700 } 1701 1702 /* 1703 * Pass 2. 1704 */ 1705 1706 /* 1707 * HACK HACK HACK HACK 1708 * 1709 * If we are wiring in the kernel map or a submap of it, 1710 * unlock the map to avoid deadlocks. We trust that the 1711 * kernel is well-behaved, and therefore will not do 1712 * anything destructive to this region of the map while 1713 * we have it unlocked. We cannot trust user processes 1714 * to do the same. 1715 * 1716 * HACK HACK HACK HACK 1717 */ 1718 if (vm_map_pmap(map) == kernel_pmap) { 1719 vm_map_unlock(map); /* trust me ... */ 1720 } else { 1721 vm_map_lock_downgrade(map); 1722 } 1723 1724 rv = 0; 1725 entry = start_entry; 1726 while (entry != &map->header && entry->start < end) { 1727 /* 1728 * If vm_fault_wire fails for any page we need to undo 1729 * what has been done. We decrement the wiring count 1730 * for those pages which have not yet been wired (now) 1731 * and unwire those that have (later). 1732 * 1733 * XXX this violates the locking protocol on the map, 1734 * needs to be fixed. 1735 */ 1736 if (rv) 1737 entry->wired_count--; 1738 else if (entry->wired_count == 1) { 1739 rv = vm_fault_wire(map, entry->start, entry->end); 1740 if (rv) { 1741 failed = entry->start; 1742 entry->wired_count--; 1743 } 1744 } 1745 entry = entry->next; 1746 } 1747 1748 if (vm_map_pmap(map) == kernel_pmap) { 1749 vm_map_lock(map); 1750 } 1751 if (rv) { 1752 vm_map_unlock(map); 1753 (void) vm_map_pageable(map, start, failed, TRUE); 1754 return (rv); 1755 } 1756 /* 1757 * An exclusive lock on the map is needed in order to call 1758 * vm_map_simplify_entry(). If the current lock on the map 1759 * is only a shared lock, an upgrade is needed. 1760 */ 1761 if (vm_map_pmap(map) != kernel_pmap && 1762 vm_map_lock_upgrade(map)) { 1763 vm_map_lock(map); 1764 if (vm_map_lookup_entry(map, start, &start_entry) == 1765 FALSE) { 1766 vm_map_unlock(map); 1767 return KERN_SUCCESS; 1768 } 1769 } 1770 vm_map_simplify_entry(map, start_entry); 1771 } 1772 1773 vm_map_unlock(map); 1774 1775 return (KERN_SUCCESS); 1776} 1777 1778/* 1779 * vm_map_clean 1780 * 1781 * Push any dirty cached pages in the address range to their pager. 1782 * If syncio is TRUE, dirty pages are written synchronously. 1783 * If invalidate is TRUE, any cached pages are freed as well. 1784 * 1785 * Returns an error if any part of the specified range is not mapped. 1786 */ 1787int 1788vm_map_clean( 1789 vm_map_t map, 1790 vm_offset_t start, 1791 vm_offset_t end, 1792 boolean_t syncio, 1793 boolean_t invalidate) 1794{ 1795 vm_map_entry_t current; 1796 vm_map_entry_t entry; 1797 vm_size_t size; 1798 vm_object_t object; 1799 vm_ooffset_t offset; 1800 1801 GIANT_REQUIRED; 1802 1803 vm_map_lock_read(map); 1804 VM_MAP_RANGE_CHECK(map, start, end); 1805 if (!vm_map_lookup_entry(map, start, &entry)) { 1806 vm_map_unlock_read(map); 1807 return (KERN_INVALID_ADDRESS); 1808 } 1809 /* 1810 * Make a first pass to check for holes. 1811 */ 1812 for (current = entry; current->start < end; current = current->next) { 1813 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 1814 vm_map_unlock_read(map); 1815 return (KERN_INVALID_ARGUMENT); 1816 } 1817 if (end > current->end && 1818 (current->next == &map->header || 1819 current->end != current->next->start)) { 1820 vm_map_unlock_read(map); 1821 return (KERN_INVALID_ADDRESS); 1822 } 1823 } 1824 1825 if (invalidate) 1826 pmap_remove(vm_map_pmap(map), start, end); 1827 /* 1828 * Make a second pass, cleaning/uncaching pages from the indicated 1829 * objects as we go. 1830 */ 1831 for (current = entry; current->start < end; current = current->next) { 1832 offset = current->offset + (start - current->start); 1833 size = (end <= current->end ? end : current->end) - start; 1834 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 1835 vm_map_t smap; 1836 vm_map_entry_t tentry; 1837 vm_size_t tsize; 1838 1839 smap = current->object.sub_map; 1840 vm_map_lock_read(smap); 1841 (void) vm_map_lookup_entry(smap, offset, &tentry); 1842 tsize = tentry->end - offset; 1843 if (tsize < size) 1844 size = tsize; 1845 object = tentry->object.vm_object; 1846 offset = tentry->offset + (offset - tentry->start); 1847 vm_map_unlock_read(smap); 1848 } else { 1849 object = current->object.vm_object; 1850 } 1851 /* 1852 * Note that there is absolutely no sense in writing out 1853 * anonymous objects, so we track down the vnode object 1854 * to write out. 1855 * We invalidate (remove) all pages from the address space 1856 * anyway, for semantic correctness. 1857 * 1858 * note: certain anonymous maps, such as MAP_NOSYNC maps, 1859 * may start out with a NULL object. 1860 */ 1861 while (object && object->backing_object) { 1862 object = object->backing_object; 1863 offset += object->backing_object_offset; 1864 if (object->size < OFF_TO_IDX(offset + size)) 1865 size = IDX_TO_OFF(object->size) - offset; 1866 } 1867 if (object && (object->type == OBJT_VNODE) && 1868 (current->protection & VM_PROT_WRITE)) { 1869 /* 1870 * Flush pages if writing is allowed, invalidate them 1871 * if invalidation requested. Pages undergoing I/O 1872 * will be ignored by vm_object_page_remove(). 1873 * 1874 * We cannot lock the vnode and then wait for paging 1875 * to complete without deadlocking against vm_fault. 1876 * Instead we simply call vm_object_page_remove() and 1877 * allow it to block internally on a page-by-page 1878 * basis when it encounters pages undergoing async 1879 * I/O. 1880 */ 1881 int flags; 1882 1883 vm_object_reference(object); 1884 vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curthread); 1885 flags = (syncio || invalidate) ? OBJPC_SYNC : 0; 1886 flags |= invalidate ? OBJPC_INVAL : 0; 1887 vm_object_page_clean(object, 1888 OFF_TO_IDX(offset), 1889 OFF_TO_IDX(offset + size + PAGE_MASK), 1890 flags); 1891 if (invalidate) { 1892 /*vm_object_pip_wait(object, "objmcl");*/ 1893 vm_object_page_remove(object, 1894 OFF_TO_IDX(offset), 1895 OFF_TO_IDX(offset + size + PAGE_MASK), 1896 FALSE); 1897 } 1898 VOP_UNLOCK(object->handle, 0, curthread); 1899 vm_object_deallocate(object); 1900 } 1901 start += size; 1902 } 1903 1904 vm_map_unlock_read(map); 1905 return (KERN_SUCCESS); 1906} 1907 1908/* 1909 * vm_map_entry_unwire: [ internal use only ] 1910 * 1911 * Make the region specified by this entry pageable. 1912 * 1913 * The map in question should be locked. 1914 * [This is the reason for this routine's existence.] 1915 */ 1916static void 1917vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry) 1918{ 1919 vm_fault_unwire(map, entry->start, entry->end); 1920 entry->wired_count = 0; 1921} 1922 1923/* 1924 * vm_map_entry_delete: [ internal use only ] 1925 * 1926 * Deallocate the given entry from the target map. 1927 */ 1928static void 1929vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry) 1930{ 1931 vm_map_entry_unlink(map, entry); 1932 map->size -= entry->end - entry->start; 1933 1934 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 1935 vm_object_deallocate(entry->object.vm_object); 1936 } 1937 1938 vm_map_entry_dispose(map, entry); 1939} 1940 1941/* 1942 * vm_map_delete: [ internal use only ] 1943 * 1944 * Deallocates the given address range from the target 1945 * map. 1946 */ 1947int 1948vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end) 1949{ 1950 vm_object_t object; 1951 vm_map_entry_t entry; 1952 vm_map_entry_t first_entry; 1953 1954 GIANT_REQUIRED; 1955 1956 /* 1957 * Find the start of the region, and clip it 1958 */ 1959 if (!vm_map_lookup_entry(map, start, &first_entry)) 1960 entry = first_entry->next; 1961 else { 1962 entry = first_entry; 1963 vm_map_clip_start(map, entry, start); 1964 /* 1965 * Fix the lookup hint now, rather than each time though the 1966 * loop. 1967 */ 1968 SAVE_HINT(map, entry->prev); 1969 } 1970 1971 /* 1972 * Save the free space hint 1973 */ 1974 if (entry == &map->header) { 1975 map->first_free = &map->header; 1976 } else if (map->first_free->start >= start) { 1977 map->first_free = entry->prev; 1978 } 1979 1980 /* 1981 * Step through all entries in this region 1982 */ 1983 while ((entry != &map->header) && (entry->start < end)) { 1984 vm_map_entry_t next; 1985 vm_offset_t s, e; 1986 vm_pindex_t offidxstart, offidxend, count; 1987 1988 vm_map_clip_end(map, entry, end); 1989 1990 s = entry->start; 1991 e = entry->end; 1992 next = entry->next; 1993 1994 offidxstart = OFF_TO_IDX(entry->offset); 1995 count = OFF_TO_IDX(e - s); 1996 object = entry->object.vm_object; 1997 1998 /* 1999 * Unwire before removing addresses from the pmap; otherwise, 2000 * unwiring will put the entries back in the pmap. 2001 */ 2002 if (entry->wired_count != 0) { 2003 vm_map_entry_unwire(map, entry); 2004 } 2005 2006 offidxend = offidxstart + count; 2007 2008 if ((object == kernel_object) || (object == kmem_object)) { 2009 vm_object_page_remove(object, offidxstart, offidxend, FALSE); 2010 } else { 2011 pmap_remove(map->pmap, s, e); 2012 if (object != NULL && 2013 object->ref_count != 1 && 2014 (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING && 2015 (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) { 2016 vm_object_collapse(object); 2017 vm_object_page_remove(object, offidxstart, offidxend, FALSE); 2018 if (object->type == OBJT_SWAP) { 2019 swap_pager_freespace(object, offidxstart, count); 2020 } 2021 if (offidxend >= object->size && 2022 offidxstart < object->size) { 2023 object->size = offidxstart; 2024 } 2025 } 2026 } 2027 2028 /* 2029 * Delete the entry (which may delete the object) only after 2030 * removing all pmap entries pointing to its pages. 2031 * (Otherwise, its page frames may be reallocated, and any 2032 * modify bits will be set in the wrong object!) 2033 */ 2034 vm_map_entry_delete(map, entry); 2035 entry = next; 2036 } 2037 return (KERN_SUCCESS); 2038} 2039 2040/* 2041 * vm_map_remove: 2042 * 2043 * Remove the given address range from the target map. 2044 * This is the exported form of vm_map_delete. 2045 */ 2046int 2047vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end) 2048{ 2049 int result, s = 0; 2050 2051 GIANT_REQUIRED; 2052 2053 if (map == kmem_map) 2054 s = splvm(); 2055 2056 vm_map_lock(map); 2057 VM_MAP_RANGE_CHECK(map, start, end); 2058 result = vm_map_delete(map, start, end); 2059 vm_map_unlock(map); 2060 2061 if (map == kmem_map) 2062 splx(s); 2063 2064 return (result); 2065} 2066 2067/* 2068 * vm_map_check_protection: 2069 * 2070 * Assert that the target map allows the specified 2071 * privilege on the entire address region given. 2072 * The entire region must be allocated. 2073 */ 2074boolean_t 2075vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end, 2076 vm_prot_t protection) 2077{ 2078 vm_map_entry_t entry; 2079 vm_map_entry_t tmp_entry; 2080 2081 GIANT_REQUIRED; 2082 2083 vm_map_lock_read(map); 2084 if (!vm_map_lookup_entry(map, start, &tmp_entry)) { 2085 vm_map_unlock_read(map); 2086 return (FALSE); 2087 } 2088 entry = tmp_entry; 2089 2090 while (start < end) { 2091 if (entry == &map->header) { 2092 vm_map_unlock_read(map); 2093 return (FALSE); 2094 } 2095 /* 2096 * No holes allowed! 2097 */ 2098 if (start < entry->start) { 2099 vm_map_unlock_read(map); 2100 return (FALSE); 2101 } 2102 /* 2103 * Check protection associated with entry. 2104 */ 2105 if ((entry->protection & protection) != protection) { 2106 vm_map_unlock_read(map); 2107 return (FALSE); 2108 } 2109 /* go to next entry */ 2110 start = entry->end; 2111 entry = entry->next; 2112 } 2113 vm_map_unlock_read(map); 2114 return (TRUE); 2115} 2116 2117/* 2118 * Split the pages in a map entry into a new object. This affords 2119 * easier removal of unused pages, and keeps object inheritance from 2120 * being a negative impact on memory usage. 2121 */ 2122static void 2123vm_map_split(vm_map_entry_t entry) 2124{ 2125 vm_page_t m; 2126 vm_object_t orig_object, new_object, source; 2127 vm_offset_t s, e; 2128 vm_pindex_t offidxstart, offidxend, idx; 2129 vm_size_t size; 2130 vm_ooffset_t offset; 2131 2132 GIANT_REQUIRED; 2133 2134 orig_object = entry->object.vm_object; 2135 if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP) 2136 return; 2137 if (orig_object->ref_count <= 1) 2138 return; 2139 2140 offset = entry->offset; 2141 s = entry->start; 2142 e = entry->end; 2143 2144 offidxstart = OFF_TO_IDX(offset); 2145 offidxend = offidxstart + OFF_TO_IDX(e - s); 2146 size = offidxend - offidxstart; 2147 2148 new_object = vm_pager_allocate(orig_object->type, 2149 NULL, IDX_TO_OFF(size), VM_PROT_ALL, 0LL); 2150 if (new_object == NULL) 2151 return; 2152 2153 source = orig_object->backing_object; 2154 if (source != NULL) { 2155 vm_object_reference(source); /* Referenced by new_object */ 2156 TAILQ_INSERT_TAIL(&source->shadow_head, 2157 new_object, shadow_list); 2158 vm_object_clear_flag(source, OBJ_ONEMAPPING); 2159 new_object->backing_object_offset = 2160 orig_object->backing_object_offset + IDX_TO_OFF(offidxstart); 2161 new_object->backing_object = source; 2162 source->shadow_count++; 2163 source->generation++; 2164 } 2165 2166 for (idx = 0; idx < size; idx++) { 2167 vm_page_t m; 2168 2169 retry: 2170 m = vm_page_lookup(orig_object, offidxstart + idx); 2171 if (m == NULL) 2172 continue; 2173 2174 /* 2175 * We must wait for pending I/O to complete before we can 2176 * rename the page. 2177 * 2178 * We do not have to VM_PROT_NONE the page as mappings should 2179 * not be changed by this operation. 2180 */ 2181 if (vm_page_sleep_busy(m, TRUE, "spltwt")) 2182 goto retry; 2183 2184 vm_page_busy(m); 2185 vm_page_rename(m, new_object, idx); 2186 /* page automatically made dirty by rename and cache handled */ 2187 vm_page_busy(m); 2188 } 2189 2190 if (orig_object->type == OBJT_SWAP) { 2191 vm_object_pip_add(orig_object, 1); 2192 /* 2193 * copy orig_object pages into new_object 2194 * and destroy unneeded pages in 2195 * shadow object. 2196 */ 2197 swap_pager_copy(orig_object, new_object, offidxstart, 0); 2198 vm_object_pip_wakeup(orig_object); 2199 } 2200 2201 for (idx = 0; idx < size; idx++) { 2202 m = vm_page_lookup(new_object, idx); 2203 if (m) { 2204 vm_page_wakeup(m); 2205 } 2206 } 2207 2208 entry->object.vm_object = new_object; 2209 entry->offset = 0LL; 2210 vm_object_deallocate(orig_object); 2211} 2212 2213/* 2214 * vm_map_copy_entry: 2215 * 2216 * Copies the contents of the source entry to the destination 2217 * entry. The entries *must* be aligned properly. 2218 */ 2219static void 2220vm_map_copy_entry( 2221 vm_map_t src_map, 2222 vm_map_t dst_map, 2223 vm_map_entry_t src_entry, 2224 vm_map_entry_t dst_entry) 2225{ 2226 vm_object_t src_object; 2227 2228 if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP) 2229 return; 2230 2231 if (src_entry->wired_count == 0) { 2232 2233 /* 2234 * If the source entry is marked needs_copy, it is already 2235 * write-protected. 2236 */ 2237 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) { 2238 pmap_protect(src_map->pmap, 2239 src_entry->start, 2240 src_entry->end, 2241 src_entry->protection & ~VM_PROT_WRITE); 2242 } 2243 2244 /* 2245 * Make a copy of the object. 2246 */ 2247 if ((src_object = src_entry->object.vm_object) != NULL) { 2248 2249 if ((src_object->handle == NULL) && 2250 (src_object->type == OBJT_DEFAULT || 2251 src_object->type == OBJT_SWAP)) { 2252 vm_object_collapse(src_object); 2253 if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) { 2254 vm_map_split(src_entry); 2255 src_object = src_entry->object.vm_object; 2256 } 2257 } 2258 2259 vm_object_reference(src_object); 2260 vm_object_clear_flag(src_object, OBJ_ONEMAPPING); 2261 dst_entry->object.vm_object = src_object; 2262 src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY); 2263 dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY); 2264 dst_entry->offset = src_entry->offset; 2265 } else { 2266 dst_entry->object.vm_object = NULL; 2267 dst_entry->offset = 0; 2268 } 2269 2270 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start, 2271 dst_entry->end - dst_entry->start, src_entry->start); 2272 } else { 2273 /* 2274 * Of course, wired down pages can't be set copy-on-write. 2275 * Cause wired pages to be copied into the new map by 2276 * simulating faults (the new pages are pageable) 2277 */ 2278 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry); 2279 } 2280} 2281 2282/* 2283 * vmspace_fork: 2284 * Create a new process vmspace structure and vm_map 2285 * based on those of an existing process. The new map 2286 * is based on the old map, according to the inheritance 2287 * values on the regions in that map. 2288 * 2289 * The source map must not be locked. 2290 */ 2291struct vmspace * 2292vmspace_fork(struct vmspace *vm1) 2293{ 2294 struct vmspace *vm2; 2295 vm_map_t old_map = &vm1->vm_map; 2296 vm_map_t new_map; 2297 vm_map_entry_t old_entry; 2298 vm_map_entry_t new_entry; 2299 vm_object_t object; 2300 2301 GIANT_REQUIRED; 2302 2303 vm_map_lock(old_map); 2304 old_map->infork = 1; 2305 2306 vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset); 2307 bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy, 2308 (caddr_t) &vm1->vm_endcopy - (caddr_t) &vm1->vm_startcopy); 2309 new_map = &vm2->vm_map; /* XXX */ 2310 new_map->timestamp = 1; 2311 2312 old_entry = old_map->header.next; 2313 2314 while (old_entry != &old_map->header) { 2315 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) 2316 panic("vm_map_fork: encountered a submap"); 2317 2318 switch (old_entry->inheritance) { 2319 case VM_INHERIT_NONE: 2320 break; 2321 2322 case VM_INHERIT_SHARE: 2323 /* 2324 * Clone the entry, creating the shared object if necessary. 2325 */ 2326 object = old_entry->object.vm_object; 2327 if (object == NULL) { 2328 object = vm_object_allocate(OBJT_DEFAULT, 2329 atop(old_entry->end - old_entry->start)); 2330 old_entry->object.vm_object = object; 2331 old_entry->offset = (vm_offset_t) 0; 2332 } 2333 2334 /* 2335 * Add the reference before calling vm_object_shadow 2336 * to insure that a shadow object is created. 2337 */ 2338 vm_object_reference(object); 2339 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) { 2340 vm_object_shadow(&old_entry->object.vm_object, 2341 &old_entry->offset, 2342 atop(old_entry->end - old_entry->start)); 2343 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 2344 /* Transfer the second reference too. */ 2345 vm_object_reference( 2346 old_entry->object.vm_object); 2347 vm_object_deallocate(object); 2348 object = old_entry->object.vm_object; 2349 } 2350 vm_object_clear_flag(object, OBJ_ONEMAPPING); 2351 2352 /* 2353 * Clone the entry, referencing the shared object. 2354 */ 2355 new_entry = vm_map_entry_create(new_map); 2356 *new_entry = *old_entry; 2357 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2358 new_entry->wired_count = 0; 2359 2360 /* 2361 * Insert the entry into the new map -- we know we're 2362 * inserting at the end of the new map. 2363 */ 2364 vm_map_entry_link(new_map, new_map->header.prev, 2365 new_entry); 2366 2367 /* 2368 * Update the physical map 2369 */ 2370 pmap_copy(new_map->pmap, old_map->pmap, 2371 new_entry->start, 2372 (old_entry->end - old_entry->start), 2373 old_entry->start); 2374 break; 2375 2376 case VM_INHERIT_COPY: 2377 /* 2378 * Clone the entry and link into the map. 2379 */ 2380 new_entry = vm_map_entry_create(new_map); 2381 *new_entry = *old_entry; 2382 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2383 new_entry->wired_count = 0; 2384 new_entry->object.vm_object = NULL; 2385 vm_map_entry_link(new_map, new_map->header.prev, 2386 new_entry); 2387 vm_map_copy_entry(old_map, new_map, old_entry, 2388 new_entry); 2389 break; 2390 } 2391 old_entry = old_entry->next; 2392 } 2393 2394 new_map->size = old_map->size; 2395 old_map->infork = 0; 2396 vm_map_unlock(old_map); 2397 2398 return (vm2); 2399} 2400 2401int 2402vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 2403 vm_prot_t prot, vm_prot_t max, int cow) 2404{ 2405 vm_map_entry_t prev_entry; 2406 vm_map_entry_t new_stack_entry; 2407 vm_size_t init_ssize; 2408 int rv; 2409 2410 GIANT_REQUIRED; 2411 2412 if (VM_MIN_ADDRESS > 0 && addrbos < VM_MIN_ADDRESS) 2413 return (KERN_NO_SPACE); 2414 2415 if (max_ssize < sgrowsiz) 2416 init_ssize = max_ssize; 2417 else 2418 init_ssize = sgrowsiz; 2419 2420 vm_map_lock(map); 2421 2422 /* If addr is already mapped, no go */ 2423 if (vm_map_lookup_entry(map, addrbos, &prev_entry)) { 2424 vm_map_unlock(map); 2425 return (KERN_NO_SPACE); 2426 } 2427 2428 /* If we can't accomodate max_ssize in the current mapping, 2429 * no go. However, we need to be aware that subsequent user 2430 * mappings might map into the space we have reserved for 2431 * stack, and currently this space is not protected. 2432 * 2433 * Hopefully we will at least detect this condition 2434 * when we try to grow the stack. 2435 */ 2436 if ((prev_entry->next != &map->header) && 2437 (prev_entry->next->start < addrbos + max_ssize)) { 2438 vm_map_unlock(map); 2439 return (KERN_NO_SPACE); 2440 } 2441 2442 /* We initially map a stack of only init_ssize. We will 2443 * grow as needed later. Since this is to be a grow 2444 * down stack, we map at the top of the range. 2445 * 2446 * Note: we would normally expect prot and max to be 2447 * VM_PROT_ALL, and cow to be 0. Possibly we should 2448 * eliminate these as input parameters, and just 2449 * pass these values here in the insert call. 2450 */ 2451 rv = vm_map_insert(map, NULL, 0, addrbos + max_ssize - init_ssize, 2452 addrbos + max_ssize, prot, max, cow); 2453 2454 /* Now set the avail_ssize amount */ 2455 if (rv == KERN_SUCCESS){ 2456 if (prev_entry != &map->header) 2457 vm_map_clip_end(map, prev_entry, addrbos + max_ssize - init_ssize); 2458 new_stack_entry = prev_entry->next; 2459 if (new_stack_entry->end != addrbos + max_ssize || 2460 new_stack_entry->start != addrbos + max_ssize - init_ssize) 2461 panic ("Bad entry start/end for new stack entry"); 2462 else 2463 new_stack_entry->avail_ssize = max_ssize - init_ssize; 2464 } 2465 2466 vm_map_unlock(map); 2467 return (rv); 2468} 2469 2470/* Attempts to grow a vm stack entry. Returns KERN_SUCCESS if the 2471 * desired address is already mapped, or if we successfully grow 2472 * the stack. Also returns KERN_SUCCESS if addr is outside the 2473 * stack range (this is strange, but preserves compatibility with 2474 * the grow function in vm_machdep.c). 2475 */ 2476int 2477vm_map_growstack (struct proc *p, vm_offset_t addr) 2478{ 2479 vm_map_entry_t prev_entry; 2480 vm_map_entry_t stack_entry; 2481 vm_map_entry_t new_stack_entry; 2482 struct vmspace *vm = p->p_vmspace; 2483 vm_map_t map = &vm->vm_map; 2484 vm_offset_t end; 2485 int grow_amount; 2486 int rv; 2487 int is_procstack; 2488 2489 GIANT_REQUIRED; 2490 2491Retry: 2492 vm_map_lock_read(map); 2493 2494 /* If addr is already in the entry range, no need to grow.*/ 2495 if (vm_map_lookup_entry(map, addr, &prev_entry)) { 2496 vm_map_unlock_read(map); 2497 return (KERN_SUCCESS); 2498 } 2499 2500 if ((stack_entry = prev_entry->next) == &map->header) { 2501 vm_map_unlock_read(map); 2502 return (KERN_SUCCESS); 2503 } 2504 if (prev_entry == &map->header) 2505 end = stack_entry->start - stack_entry->avail_ssize; 2506 else 2507 end = prev_entry->end; 2508 2509 /* This next test mimics the old grow function in vm_machdep.c. 2510 * It really doesn't quite make sense, but we do it anyway 2511 * for compatibility. 2512 * 2513 * If not growable stack, return success. This signals the 2514 * caller to proceed as he would normally with normal vm. 2515 */ 2516 if (stack_entry->avail_ssize < 1 || 2517 addr >= stack_entry->start || 2518 addr < stack_entry->start - stack_entry->avail_ssize) { 2519 vm_map_unlock_read(map); 2520 return (KERN_SUCCESS); 2521 } 2522 2523 /* Find the minimum grow amount */ 2524 grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE); 2525 if (grow_amount > stack_entry->avail_ssize) { 2526 vm_map_unlock_read(map); 2527 return (KERN_NO_SPACE); 2528 } 2529 2530 /* If there is no longer enough space between the entries 2531 * nogo, and adjust the available space. Note: this 2532 * should only happen if the user has mapped into the 2533 * stack area after the stack was created, and is 2534 * probably an error. 2535 * 2536 * This also effectively destroys any guard page the user 2537 * might have intended by limiting the stack size. 2538 */ 2539 if (grow_amount > stack_entry->start - end) { 2540 if (vm_map_lock_upgrade(map)) 2541 goto Retry; 2542 2543 stack_entry->avail_ssize = stack_entry->start - end; 2544 2545 vm_map_unlock(map); 2546 return (KERN_NO_SPACE); 2547 } 2548 2549 is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr; 2550 2551 /* If this is the main process stack, see if we're over the 2552 * stack limit. 2553 */ 2554 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > 2555 p->p_rlimit[RLIMIT_STACK].rlim_cur)) { 2556 vm_map_unlock_read(map); 2557 return (KERN_NO_SPACE); 2558 } 2559 2560 /* Round up the grow amount modulo SGROWSIZ */ 2561 grow_amount = roundup (grow_amount, sgrowsiz); 2562 if (grow_amount > stack_entry->avail_ssize) { 2563 grow_amount = stack_entry->avail_ssize; 2564 } 2565 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > 2566 p->p_rlimit[RLIMIT_STACK].rlim_cur)) { 2567 grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur - 2568 ctob(vm->vm_ssize); 2569 } 2570 2571 if (vm_map_lock_upgrade(map)) 2572 goto Retry; 2573 2574 /* Get the preliminary new entry start value */ 2575 addr = stack_entry->start - grow_amount; 2576 2577 /* If this puts us into the previous entry, cut back our growth 2578 * to the available space. Also, see the note above. 2579 */ 2580 if (addr < end) { 2581 stack_entry->avail_ssize = stack_entry->start - end; 2582 addr = end; 2583 } 2584 2585 rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start, 2586 VM_PROT_ALL, 2587 VM_PROT_ALL, 2588 0); 2589 2590 /* Adjust the available stack space by the amount we grew. */ 2591 if (rv == KERN_SUCCESS) { 2592 if (prev_entry != &map->header) 2593 vm_map_clip_end(map, prev_entry, addr); 2594 new_stack_entry = prev_entry->next; 2595 if (new_stack_entry->end != stack_entry->start || 2596 new_stack_entry->start != addr) 2597 panic ("Bad stack grow start/end in new stack entry"); 2598 else { 2599 new_stack_entry->avail_ssize = stack_entry->avail_ssize - 2600 (new_stack_entry->end - 2601 new_stack_entry->start); 2602 if (is_procstack) 2603 vm->vm_ssize += btoc(new_stack_entry->end - 2604 new_stack_entry->start); 2605 } 2606 } 2607 2608 vm_map_unlock(map); 2609 return (rv); 2610} 2611 2612/* 2613 * Unshare the specified VM space for exec. If other processes are 2614 * mapped to it, then create a new one. The new vmspace is null. 2615 */ 2616void 2617vmspace_exec(struct proc *p) 2618{ 2619 struct vmspace *oldvmspace = p->p_vmspace; 2620 struct vmspace *newvmspace; 2621 vm_map_t map = &p->p_vmspace->vm_map; 2622 2623 GIANT_REQUIRED; 2624 newvmspace = vmspace_alloc(map->min_offset, map->max_offset); 2625 bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy, 2626 (caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy); 2627 /* 2628 * This code is written like this for prototype purposes. The 2629 * goal is to avoid running down the vmspace here, but let the 2630 * other process's that are still using the vmspace to finally 2631 * run it down. Even though there is little or no chance of blocking 2632 * here, it is a good idea to keep this form for future mods. 2633 */ 2634 p->p_vmspace = newvmspace; 2635 pmap_pinit2(vmspace_pmap(newvmspace)); 2636 vmspace_free(oldvmspace); 2637 if (p == curthread->td_proc) /* XXXKSE ? */ 2638 pmap_activate(curthread); 2639} 2640 2641/* 2642 * Unshare the specified VM space for forcing COW. This 2643 * is called by rfork, for the (RFMEM|RFPROC) == 0 case. 2644 */ 2645void 2646vmspace_unshare(struct proc *p) 2647{ 2648 struct vmspace *oldvmspace = p->p_vmspace; 2649 struct vmspace *newvmspace; 2650 2651 GIANT_REQUIRED; 2652 if (oldvmspace->vm_refcnt == 1) 2653 return; 2654 newvmspace = vmspace_fork(oldvmspace); 2655 p->p_vmspace = newvmspace; 2656 pmap_pinit2(vmspace_pmap(newvmspace)); 2657 vmspace_free(oldvmspace); 2658 if (p == curthread->td_proc) /* XXXKSE ? */ 2659 pmap_activate(curthread); 2660} 2661 2662/* 2663 * vm_map_lookup: 2664 * 2665 * Finds the VM object, offset, and 2666 * protection for a given virtual address in the 2667 * specified map, assuming a page fault of the 2668 * type specified. 2669 * 2670 * Leaves the map in question locked for read; return 2671 * values are guaranteed until a vm_map_lookup_done 2672 * call is performed. Note that the map argument 2673 * is in/out; the returned map must be used in 2674 * the call to vm_map_lookup_done. 2675 * 2676 * A handle (out_entry) is returned for use in 2677 * vm_map_lookup_done, to make that fast. 2678 * 2679 * If a lookup is requested with "write protection" 2680 * specified, the map may be changed to perform virtual 2681 * copying operations, although the data referenced will 2682 * remain the same. 2683 */ 2684int 2685vm_map_lookup(vm_map_t *var_map, /* IN/OUT */ 2686 vm_offset_t vaddr, 2687 vm_prot_t fault_typea, 2688 vm_map_entry_t *out_entry, /* OUT */ 2689 vm_object_t *object, /* OUT */ 2690 vm_pindex_t *pindex, /* OUT */ 2691 vm_prot_t *out_prot, /* OUT */ 2692 boolean_t *wired) /* OUT */ 2693{ 2694 vm_map_entry_t entry; 2695 vm_map_t map = *var_map; 2696 vm_prot_t prot; 2697 vm_prot_t fault_type = fault_typea; 2698 2699 GIANT_REQUIRED; 2700RetryLookup:; 2701 /* 2702 * Lookup the faulting address. 2703 */ 2704 2705 vm_map_lock_read(map); 2706#define RETURN(why) \ 2707 { \ 2708 vm_map_unlock_read(map); \ 2709 return (why); \ 2710 } 2711 2712 /* 2713 * If the map has an interesting hint, try it before calling full 2714 * blown lookup routine. 2715 */ 2716 entry = map->hint; 2717 *out_entry = entry; 2718 if ((entry == &map->header) || 2719 (vaddr < entry->start) || (vaddr >= entry->end)) { 2720 vm_map_entry_t tmp_entry; 2721 2722 /* 2723 * Entry was either not a valid hint, or the vaddr was not 2724 * contained in the entry, so do a full lookup. 2725 */ 2726 if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) 2727 RETURN(KERN_INVALID_ADDRESS); 2728 2729 entry = tmp_entry; 2730 *out_entry = entry; 2731 } 2732 2733 /* 2734 * Handle submaps. 2735 */ 2736 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 2737 vm_map_t old_map = map; 2738 2739 *var_map = map = entry->object.sub_map; 2740 vm_map_unlock_read(old_map); 2741 goto RetryLookup; 2742 } 2743 2744 /* 2745 * Check whether this task is allowed to have this page. 2746 * Note the special case for MAP_ENTRY_COW 2747 * pages with an override. This is to implement a forced 2748 * COW for debuggers. 2749 */ 2750 if (fault_type & VM_PROT_OVERRIDE_WRITE) 2751 prot = entry->max_protection; 2752 else 2753 prot = entry->protection; 2754 fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); 2755 if ((fault_type & prot) != fault_type) { 2756 RETURN(KERN_PROTECTION_FAILURE); 2757 } 2758 if ((entry->eflags & MAP_ENTRY_USER_WIRED) && 2759 (entry->eflags & MAP_ENTRY_COW) && 2760 (fault_type & VM_PROT_WRITE) && 2761 (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) { 2762 RETURN(KERN_PROTECTION_FAILURE); 2763 } 2764 2765 /* 2766 * If this page is not pageable, we have to get it for all possible 2767 * accesses. 2768 */ 2769 *wired = (entry->wired_count != 0); 2770 if (*wired) 2771 prot = fault_type = entry->protection; 2772 2773 /* 2774 * If the entry was copy-on-write, we either ... 2775 */ 2776 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 2777 /* 2778 * If we want to write the page, we may as well handle that 2779 * now since we've got the map locked. 2780 * 2781 * If we don't need to write the page, we just demote the 2782 * permissions allowed. 2783 */ 2784 if (fault_type & VM_PROT_WRITE) { 2785 /* 2786 * Make a new object, and place it in the object 2787 * chain. Note that no new references have appeared 2788 * -- one just moved from the map to the new 2789 * object. 2790 */ 2791 if (vm_map_lock_upgrade(map)) 2792 goto RetryLookup; 2793 vm_object_shadow( 2794 &entry->object.vm_object, 2795 &entry->offset, 2796 atop(entry->end - entry->start)); 2797 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 2798 vm_map_lock_downgrade(map); 2799 } else { 2800 /* 2801 * We're attempting to read a copy-on-write page -- 2802 * don't allow writes. 2803 */ 2804 prot &= ~VM_PROT_WRITE; 2805 } 2806 } 2807 2808 /* 2809 * Create an object if necessary. 2810 */ 2811 if (entry->object.vm_object == NULL && 2812 !map->system_map) { 2813 if (vm_map_lock_upgrade(map)) 2814 goto RetryLookup; 2815 entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT, 2816 atop(entry->end - entry->start)); 2817 entry->offset = 0; 2818 vm_map_lock_downgrade(map); 2819 } 2820 2821 /* 2822 * Return the object/offset from this entry. If the entry was 2823 * copy-on-write or empty, it has been fixed up. 2824 */ 2825 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); 2826 *object = entry->object.vm_object; 2827 2828 /* 2829 * Return whether this is the only map sharing this data. 2830 */ 2831 *out_prot = prot; 2832 return (KERN_SUCCESS); 2833 2834#undef RETURN 2835} 2836 2837/* 2838 * vm_map_lookup_done: 2839 * 2840 * Releases locks acquired by a vm_map_lookup 2841 * (according to the handle returned by that lookup). 2842 */ 2843void 2844vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry) 2845{ 2846 /* 2847 * Unlock the main-level map 2848 */ 2849 GIANT_REQUIRED; 2850 vm_map_unlock_read(map); 2851} 2852 2853/* 2854 * Implement uiomove with VM operations. This handles (and collateral changes) 2855 * support every combination of source object modification, and COW type 2856 * operations. 2857 */ 2858int 2859vm_uiomove( 2860 vm_map_t mapa, 2861 vm_object_t srcobject, 2862 off_t cp, 2863 int cnta, 2864 vm_offset_t uaddra, 2865 int *npages) 2866{ 2867 vm_map_t map; 2868 vm_object_t first_object, oldobject, object; 2869 vm_map_entry_t entry; 2870 vm_prot_t prot; 2871 boolean_t wired; 2872 int tcnt, rv; 2873 vm_offset_t uaddr, start, end, tend; 2874 vm_pindex_t first_pindex, osize, oindex; 2875 off_t ooffset; 2876 int cnt; 2877 2878 GIANT_REQUIRED; 2879 2880 if (npages) 2881 *npages = 0; 2882 2883 cnt = cnta; 2884 uaddr = uaddra; 2885 2886 while (cnt > 0) { 2887 map = mapa; 2888 2889 if ((vm_map_lookup(&map, uaddr, 2890 VM_PROT_READ, &entry, &first_object, 2891 &first_pindex, &prot, &wired)) != KERN_SUCCESS) { 2892 return EFAULT; 2893 } 2894 2895 vm_map_clip_start(map, entry, uaddr); 2896 2897 tcnt = cnt; 2898 tend = uaddr + tcnt; 2899 if (tend > entry->end) { 2900 tcnt = entry->end - uaddr; 2901 tend = entry->end; 2902 } 2903 2904 vm_map_clip_end(map, entry, tend); 2905 2906 start = entry->start; 2907 end = entry->end; 2908 2909 osize = atop(tcnt); 2910 2911 oindex = OFF_TO_IDX(cp); 2912 if (npages) { 2913 vm_pindex_t idx; 2914 for (idx = 0; idx < osize; idx++) { 2915 vm_page_t m; 2916 if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) { 2917 vm_map_lookup_done(map, entry); 2918 return 0; 2919 } 2920 /* 2921 * disallow busy or invalid pages, but allow 2922 * m->busy pages if they are entirely valid. 2923 */ 2924 if ((m->flags & PG_BUSY) || 2925 ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) { 2926 vm_map_lookup_done(map, entry); 2927 return 0; 2928 } 2929 } 2930 } 2931 2932/* 2933 * If we are changing an existing map entry, just redirect 2934 * the object, and change mappings. 2935 */ 2936 if ((first_object->type == OBJT_VNODE) && 2937 ((oldobject = entry->object.vm_object) == first_object)) { 2938 2939 if ((entry->offset != cp) || (oldobject != srcobject)) { 2940 /* 2941 * Remove old window into the file 2942 */ 2943 pmap_remove (map->pmap, uaddr, tend); 2944 2945 /* 2946 * Force copy on write for mmaped regions 2947 */ 2948 vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize); 2949 2950 /* 2951 * Point the object appropriately 2952 */ 2953 if (oldobject != srcobject) { 2954 2955 /* 2956 * Set the object optimization hint flag 2957 */ 2958 vm_object_set_flag(srcobject, OBJ_OPT); 2959 vm_object_reference(srcobject); 2960 entry->object.vm_object = srcobject; 2961 2962 if (oldobject) { 2963 vm_object_deallocate(oldobject); 2964 } 2965 } 2966 2967 entry->offset = cp; 2968 map->timestamp++; 2969 } else { 2970 pmap_remove (map->pmap, uaddr, tend); 2971 } 2972 2973 } else if ((first_object->ref_count == 1) && 2974 (first_object->size == osize) && 2975 ((first_object->type == OBJT_DEFAULT) || 2976 (first_object->type == OBJT_SWAP)) ) { 2977 2978 oldobject = first_object->backing_object; 2979 2980 if ((first_object->backing_object_offset != cp) || 2981 (oldobject != srcobject)) { 2982 /* 2983 * Remove old window into the file 2984 */ 2985 pmap_remove (map->pmap, uaddr, tend); 2986 2987 /* 2988 * Remove unneeded old pages 2989 */ 2990 vm_object_page_remove(first_object, 0, 0, 0); 2991 2992 /* 2993 * Invalidate swap space 2994 */ 2995 if (first_object->type == OBJT_SWAP) { 2996 swap_pager_freespace(first_object, 2997 0, 2998 first_object->size); 2999 } 3000 3001 /* 3002 * Force copy on write for mmaped regions 3003 */ 3004 vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize); 3005 3006 /* 3007 * Point the object appropriately 3008 */ 3009 if (oldobject != srcobject) { 3010 /* 3011 * Set the object optimization hint flag 3012 */ 3013 vm_object_set_flag(srcobject, OBJ_OPT); 3014 vm_object_reference(srcobject); 3015 3016 if (oldobject) { 3017 TAILQ_REMOVE(&oldobject->shadow_head, 3018 first_object, shadow_list); 3019 oldobject->shadow_count--; 3020 /* XXX bump generation? */ 3021 vm_object_deallocate(oldobject); 3022 } 3023 3024 TAILQ_INSERT_TAIL(&srcobject->shadow_head, 3025 first_object, shadow_list); 3026 srcobject->shadow_count++; 3027 /* XXX bump generation? */ 3028 3029 first_object->backing_object = srcobject; 3030 } 3031 first_object->backing_object_offset = cp; 3032 map->timestamp++; 3033 } else { 3034 pmap_remove (map->pmap, uaddr, tend); 3035 } 3036/* 3037 * Otherwise, we have to do a logical mmap. 3038 */ 3039 } else { 3040 3041 vm_object_set_flag(srcobject, OBJ_OPT); 3042 vm_object_reference(srcobject); 3043 3044 pmap_remove (map->pmap, uaddr, tend); 3045 3046 vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize); 3047 vm_map_lock_upgrade(map); 3048 3049 if (entry == &map->header) { 3050 map->first_free = &map->header; 3051 } else if (map->first_free->start >= start) { 3052 map->first_free = entry->prev; 3053 } 3054 3055 SAVE_HINT(map, entry->prev); 3056 vm_map_entry_delete(map, entry); 3057 3058 object = srcobject; 3059 ooffset = cp; 3060 3061 rv = vm_map_insert(map, object, ooffset, start, tend, 3062 VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE); 3063 3064 if (rv != KERN_SUCCESS) 3065 panic("vm_uiomove: could not insert new entry: %d", rv); 3066 } 3067 3068/* 3069 * Map the window directly, if it is already in memory 3070 */ 3071 pmap_object_init_pt(map->pmap, uaddr, 3072 srcobject, oindex, tcnt, 0); 3073 3074 map->timestamp++; 3075 vm_map_unlock(map); 3076 3077 cnt -= tcnt; 3078 uaddr += tcnt; 3079 cp += tcnt; 3080 if (npages) 3081 *npages += osize; 3082 } 3083 return 0; 3084} 3085 3086/* 3087 * Performs the copy_on_write operations necessary to allow the virtual copies 3088 * into user space to work. This has to be called for write(2) system calls 3089 * from other processes, file unlinking, and file size shrinkage. 3090 */ 3091void 3092vm_freeze_copyopts(vm_object_t object, vm_pindex_t froma, vm_pindex_t toa) 3093{ 3094 int rv; 3095 vm_object_t robject; 3096 vm_pindex_t idx; 3097 3098 GIANT_REQUIRED; 3099 if ((object == NULL) || 3100 ((object->flags & OBJ_OPT) == 0)) 3101 return; 3102 3103 if (object->shadow_count > object->ref_count) 3104 panic("vm_freeze_copyopts: sc > rc"); 3105 3106 while ((robject = TAILQ_FIRST(&object->shadow_head)) != NULL) { 3107 vm_pindex_t bo_pindex; 3108 vm_page_t m_in, m_out; 3109 3110 bo_pindex = OFF_TO_IDX(robject->backing_object_offset); 3111 3112 vm_object_reference(robject); 3113 3114 vm_object_pip_wait(robject, "objfrz"); 3115 3116 if (robject->ref_count == 1) { 3117 vm_object_deallocate(robject); 3118 continue; 3119 } 3120 3121 vm_object_pip_add(robject, 1); 3122 3123 for (idx = 0; idx < robject->size; idx++) { 3124 3125 m_out = vm_page_grab(robject, idx, 3126 VM_ALLOC_NORMAL | VM_ALLOC_RETRY); 3127 3128 if (m_out->valid == 0) { 3129 m_in = vm_page_grab(object, bo_pindex + idx, 3130 VM_ALLOC_NORMAL | VM_ALLOC_RETRY); 3131 if (m_in->valid == 0) { 3132 rv = vm_pager_get_pages(object, &m_in, 1, 0); 3133 if (rv != VM_PAGER_OK) { 3134 printf("vm_freeze_copyopts: cannot read page from file: %lx\n", (long)m_in->pindex); 3135 continue; 3136 } 3137 vm_page_deactivate(m_in); 3138 } 3139 3140 vm_page_protect(m_in, VM_PROT_NONE); 3141 pmap_copy_page(VM_PAGE_TO_PHYS(m_in), VM_PAGE_TO_PHYS(m_out)); 3142 m_out->valid = m_in->valid; 3143 vm_page_dirty(m_out); 3144 vm_page_activate(m_out); 3145 vm_page_wakeup(m_in); 3146 } 3147 vm_page_wakeup(m_out); 3148 } 3149 3150 object->shadow_count--; 3151 object->ref_count--; 3152 TAILQ_REMOVE(&object->shadow_head, robject, shadow_list); 3153 robject->backing_object = NULL; 3154 robject->backing_object_offset = 0; 3155 3156 vm_object_pip_wakeup(robject); 3157 vm_object_deallocate(robject); 3158 } 3159 3160 vm_object_clear_flag(object, OBJ_OPT); 3161} 3162 3163#include "opt_ddb.h" 3164#ifdef DDB 3165#include <sys/kernel.h> 3166 3167#include <ddb/ddb.h> 3168 3169/* 3170 * vm_map_print: [ debug ] 3171 */ 3172DB_SHOW_COMMAND(map, vm_map_print) 3173{ 3174 static int nlines; 3175 /* XXX convert args. */ 3176 vm_map_t map = (vm_map_t)addr; 3177 boolean_t full = have_addr; 3178 3179 vm_map_entry_t entry; 3180 3181 db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n", 3182 (void *)map, 3183 (void *)map->pmap, map->nentries, map->timestamp); 3184 nlines++; 3185 3186 if (!full && db_indent) 3187 return; 3188 3189 db_indent += 2; 3190 for (entry = map->header.next; entry != &map->header; 3191 entry = entry->next) { 3192 db_iprintf("map entry %p: start=%p, end=%p\n", 3193 (void *)entry, (void *)entry->start, (void *)entry->end); 3194 nlines++; 3195 { 3196 static char *inheritance_name[4] = 3197 {"share", "copy", "none", "donate_copy"}; 3198 3199 db_iprintf(" prot=%x/%x/%s", 3200 entry->protection, 3201 entry->max_protection, 3202 inheritance_name[(int)(unsigned char)entry->inheritance]); 3203 if (entry->wired_count != 0) 3204 db_printf(", wired"); 3205 } 3206 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 3207 /* XXX no %qd in kernel. Truncate entry->offset. */ 3208 db_printf(", share=%p, offset=0x%lx\n", 3209 (void *)entry->object.sub_map, 3210 (long)entry->offset); 3211 nlines++; 3212 if ((entry->prev == &map->header) || 3213 (entry->prev->object.sub_map != 3214 entry->object.sub_map)) { 3215 db_indent += 2; 3216 vm_map_print((db_expr_t)(intptr_t) 3217 entry->object.sub_map, 3218 full, 0, (char *)0); 3219 db_indent -= 2; 3220 } 3221 } else { 3222 /* XXX no %qd in kernel. Truncate entry->offset. */ 3223 db_printf(", object=%p, offset=0x%lx", 3224 (void *)entry->object.vm_object, 3225 (long)entry->offset); 3226 if (entry->eflags & MAP_ENTRY_COW) 3227 db_printf(", copy (%s)", 3228 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done"); 3229 db_printf("\n"); 3230 nlines++; 3231 3232 if ((entry->prev == &map->header) || 3233 (entry->prev->object.vm_object != 3234 entry->object.vm_object)) { 3235 db_indent += 2; 3236 vm_object_print((db_expr_t)(intptr_t) 3237 entry->object.vm_object, 3238 full, 0, (char *)0); 3239 nlines += 4; 3240 db_indent -= 2; 3241 } 3242 } 3243 } 3244 db_indent -= 2; 3245 if (db_indent == 0) 3246 nlines = 0; 3247} 3248 3249 3250DB_SHOW_COMMAND(procvm, procvm) 3251{ 3252 struct proc *p; 3253 3254 if (have_addr) { 3255 p = (struct proc *) addr; 3256 } else { 3257 p = curproc; 3258 } 3259 3260 db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n", 3261 (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map, 3262 (void *)vmspace_pmap(p->p_vmspace)); 3263 3264 vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL); 3265} 3266 3267#endif /* DDB */
| 534 if (new_entry == NULL) 535 panic("vm_map_entry_create: kernel resources exhausted"); 536 return (new_entry); 537} 538 539/* 540 * vm_map_entry_{un,}link: 541 * 542 * Insert/remove entries from maps. 543 */ 544static __inline void 545vm_map_entry_link(vm_map_t map, 546 vm_map_entry_t after_where, 547 vm_map_entry_t entry) 548{ 549 550 CTR4(KTR_VM, 551 "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map, 552 map->nentries, entry, after_where); 553 map->nentries++; 554 entry->prev = after_where; 555 entry->next = after_where->next; 556 entry->next->prev = entry; 557 after_where->next = entry; 558} 559 560static __inline void 561vm_map_entry_unlink(vm_map_t map, 562 vm_map_entry_t entry) 563{ 564 vm_map_entry_t prev = entry->prev; 565 vm_map_entry_t next = entry->next; 566 567 next->prev = prev; 568 prev->next = next; 569 map->nentries--; 570 CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map, 571 map->nentries, entry); 572} 573 574/* 575 * SAVE_HINT: 576 * 577 * Saves the specified entry as the hint for 578 * future lookups. 579 */ 580#define SAVE_HINT(map,value) \ 581 (map)->hint = (value); 582 583/* 584 * vm_map_lookup_entry: [ internal use only ] 585 * 586 * Finds the map entry containing (or 587 * immediately preceding) the specified address 588 * in the given map; the entry is returned 589 * in the "entry" parameter. The boolean 590 * result indicates whether the address is 591 * actually contained in the map. 592 */ 593boolean_t 594vm_map_lookup_entry( 595 vm_map_t map, 596 vm_offset_t address, 597 vm_map_entry_t *entry) /* OUT */ 598{ 599 vm_map_entry_t cur; 600 vm_map_entry_t last; 601 602 GIANT_REQUIRED; 603 /* 604 * Start looking either from the head of the list, or from the hint. 605 */ 606 cur = map->hint; 607 608 if (cur == &map->header) 609 cur = cur->next; 610 611 if (address >= cur->start) { 612 /* 613 * Go from hint to end of list. 614 * 615 * But first, make a quick check to see if we are already looking 616 * at the entry we want (which is usually the case). Note also 617 * that we don't need to save the hint here... it is the same 618 * hint (unless we are at the header, in which case the hint 619 * didn't buy us anything anyway). 620 */ 621 last = &map->header; 622 if ((cur != last) && (cur->end > address)) { 623 *entry = cur; 624 return (TRUE); 625 } 626 } else { 627 /* 628 * Go from start to hint, *inclusively* 629 */ 630 last = cur->next; 631 cur = map->header.next; 632 } 633 634 /* 635 * Search linearly 636 */ 637 while (cur != last) { 638 if (cur->end > address) { 639 if (address >= cur->start) { 640 /* 641 * Save this lookup for future hints, and 642 * return 643 */ 644 *entry = cur; 645 SAVE_HINT(map, cur); 646 return (TRUE); 647 } 648 break; 649 } 650 cur = cur->next; 651 } 652 *entry = cur->prev; 653 SAVE_HINT(map, *entry); 654 return (FALSE); 655} 656 657/* 658 * vm_map_insert: 659 * 660 * Inserts the given whole VM object into the target 661 * map at the specified address range. The object's 662 * size should match that of the address range. 663 * 664 * Requires that the map be locked, and leaves it so. 665 * 666 * If object is non-NULL, ref count must be bumped by caller 667 * prior to making call to account for the new entry. 668 */ 669int 670vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 671 vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, 672 int cow) 673{ 674 vm_map_entry_t new_entry; 675 vm_map_entry_t prev_entry; 676 vm_map_entry_t temp_entry; 677 vm_eflags_t protoeflags; 678 679 GIANT_REQUIRED; 680 681 /* 682 * Check that the start and end points are not bogus. 683 */ 684 if ((start < map->min_offset) || (end > map->max_offset) || 685 (start >= end)) 686 return (KERN_INVALID_ADDRESS); 687 688 /* 689 * Find the entry prior to the proposed starting address; if it's part 690 * of an existing entry, this range is bogus. 691 */ 692 if (vm_map_lookup_entry(map, start, &temp_entry)) 693 return (KERN_NO_SPACE); 694 695 prev_entry = temp_entry; 696 697 /* 698 * Assert that the next entry doesn't overlap the end point. 699 */ 700 if ((prev_entry->next != &map->header) && 701 (prev_entry->next->start < end)) 702 return (KERN_NO_SPACE); 703 704 protoeflags = 0; 705 706 if (cow & MAP_COPY_ON_WRITE) 707 protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY; 708 709 if (cow & MAP_NOFAULT) { 710 protoeflags |= MAP_ENTRY_NOFAULT; 711 712 KASSERT(object == NULL, 713 ("vm_map_insert: paradoxical MAP_NOFAULT request")); 714 } 715 if (cow & MAP_DISABLE_SYNCER) 716 protoeflags |= MAP_ENTRY_NOSYNC; 717 if (cow & MAP_DISABLE_COREDUMP) 718 protoeflags |= MAP_ENTRY_NOCOREDUMP; 719 720 if (object) { 721 /* 722 * When object is non-NULL, it could be shared with another 723 * process. We have to set or clear OBJ_ONEMAPPING 724 * appropriately. 725 */ 726 if ((object->ref_count > 1) || (object->shadow_count != 0)) { 727 vm_object_clear_flag(object, OBJ_ONEMAPPING); 728 } 729 } 730 else if ((prev_entry != &map->header) && 731 (prev_entry->eflags == protoeflags) && 732 (prev_entry->end == start) && 733 (prev_entry->wired_count == 0) && 734 ((prev_entry->object.vm_object == NULL) || 735 vm_object_coalesce(prev_entry->object.vm_object, 736 OFF_TO_IDX(prev_entry->offset), 737 (vm_size_t)(prev_entry->end - prev_entry->start), 738 (vm_size_t)(end - prev_entry->end)))) { 739 /* 740 * We were able to extend the object. Determine if we 741 * can extend the previous map entry to include the 742 * new range as well. 743 */ 744 if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) && 745 (prev_entry->protection == prot) && 746 (prev_entry->max_protection == max)) { 747 map->size += (end - prev_entry->end); 748 prev_entry->end = end; 749 vm_map_simplify_entry(map, prev_entry); 750 return (KERN_SUCCESS); 751 } 752 753 /* 754 * If we can extend the object but cannot extend the 755 * map entry, we have to create a new map entry. We 756 * must bump the ref count on the extended object to 757 * account for it. object may be NULL. 758 */ 759 object = prev_entry->object.vm_object; 760 offset = prev_entry->offset + 761 (prev_entry->end - prev_entry->start); 762 vm_object_reference(object); 763 } 764 765 /* 766 * NOTE: if conditionals fail, object can be NULL here. This occurs 767 * in things like the buffer map where we manage kva but do not manage 768 * backing objects. 769 */ 770 771 /* 772 * Create a new entry 773 */ 774 new_entry = vm_map_entry_create(map); 775 new_entry->start = start; 776 new_entry->end = end; 777 778 new_entry->eflags = protoeflags; 779 new_entry->object.vm_object = object; 780 new_entry->offset = offset; 781 new_entry->avail_ssize = 0; 782 783 new_entry->inheritance = VM_INHERIT_DEFAULT; 784 new_entry->protection = prot; 785 new_entry->max_protection = max; 786 new_entry->wired_count = 0; 787 788 /* 789 * Insert the new entry into the list 790 */ 791 vm_map_entry_link(map, prev_entry, new_entry); 792 map->size += new_entry->end - new_entry->start; 793 794 /* 795 * Update the free space hint 796 */ 797 if ((map->first_free == prev_entry) && 798 (prev_entry->end >= new_entry->start)) { 799 map->first_free = new_entry; 800 } 801 802#if 0 803 /* 804 * Temporarily removed to avoid MAP_STACK panic, due to 805 * MAP_STACK being a huge hack. Will be added back in 806 * when MAP_STACK (and the user stack mapping) is fixed. 807 */ 808 /* 809 * It may be possible to simplify the entry 810 */ 811 vm_map_simplify_entry(map, new_entry); 812#endif 813 814 if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) { 815 pmap_object_init_pt(map->pmap, start, 816 object, OFF_TO_IDX(offset), end - start, 817 cow & MAP_PREFAULT_PARTIAL); 818 } 819 820 return (KERN_SUCCESS); 821} 822 823/* 824 * Find sufficient space for `length' bytes in the given map, starting at 825 * `start'. The map must be locked. Returns 0 on success, 1 on no space. 826 */ 827int 828vm_map_findspace( 829 vm_map_t map, 830 vm_offset_t start, 831 vm_size_t length, 832 vm_offset_t *addr) 833{ 834 vm_map_entry_t entry, next; 835 vm_offset_t end; 836 837 GIANT_REQUIRED; 838 if (start < map->min_offset) 839 start = map->min_offset; 840 if (start > map->max_offset) 841 return (1); 842 843 /* 844 * Look for the first possible address; if there's already something 845 * at this address, we have to start after it. 846 */ 847 if (start == map->min_offset) { 848 if ((entry = map->first_free) != &map->header) 849 start = entry->end; 850 } else { 851 vm_map_entry_t tmp; 852 853 if (vm_map_lookup_entry(map, start, &tmp)) 854 start = tmp->end; 855 entry = tmp; 856 } 857 858 /* 859 * Look through the rest of the map, trying to fit a new region in the 860 * gap between existing regions, or after the very last region. 861 */ 862 for (;; start = (entry = next)->end) { 863 /* 864 * Find the end of the proposed new region. Be sure we didn't 865 * go beyond the end of the map, or wrap around the address; 866 * if so, we lose. Otherwise, if this is the last entry, or 867 * if the proposed new region fits before the next entry, we 868 * win. 869 */ 870 end = start + length; 871 if (end > map->max_offset || end < start) 872 return (1); 873 next = entry->next; 874 if (next == &map->header || next->start >= end) 875 break; 876 } 877 SAVE_HINT(map, entry); 878 *addr = start; 879 if (map == kernel_map) { 880 vm_offset_t ksize; 881 if ((ksize = round_page(start + length)) > kernel_vm_end) { 882 pmap_growkernel(ksize); 883 } 884 } 885 return (0); 886} 887 888/* 889 * vm_map_find finds an unallocated region in the target address 890 * map with the given length. The search is defined to be 891 * first-fit from the specified address; the region found is 892 * returned in the same parameter. 893 * 894 * If object is non-NULL, ref count must be bumped by caller 895 * prior to making call to account for the new entry. 896 */ 897int 898vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 899 vm_offset_t *addr, /* IN/OUT */ 900 vm_size_t length, boolean_t find_space, vm_prot_t prot, 901 vm_prot_t max, int cow) 902{ 903 vm_offset_t start; 904 int result, s = 0; 905 906 GIANT_REQUIRED; 907 908 start = *addr; 909 910 if (map == kmem_map) 911 s = splvm(); 912 913 vm_map_lock(map); 914 if (find_space) { 915 if (vm_map_findspace(map, start, length, addr)) { 916 vm_map_unlock(map); 917 if (map == kmem_map) 918 splx(s); 919 return (KERN_NO_SPACE); 920 } 921 start = *addr; 922 } 923 result = vm_map_insert(map, object, offset, 924 start, start + length, prot, max, cow); 925 vm_map_unlock(map); 926 927 if (map == kmem_map) 928 splx(s); 929 930 return (result); 931} 932 933/* 934 * vm_map_simplify_entry: 935 * 936 * Simplify the given map entry by merging with either neighbor. This 937 * routine also has the ability to merge with both neighbors. 938 * 939 * The map must be locked. 940 * 941 * This routine guarentees that the passed entry remains valid (though 942 * possibly extended). When merging, this routine may delete one or 943 * both neighbors. 944 */ 945void 946vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry) 947{ 948 vm_map_entry_t next, prev; 949 vm_size_t prevsize, esize; 950 951 GIANT_REQUIRED; 952 953 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) 954 return; 955 956 prev = entry->prev; 957 if (prev != &map->header) { 958 prevsize = prev->end - prev->start; 959 if ( (prev->end == entry->start) && 960 (prev->object.vm_object == entry->object.vm_object) && 961 (!prev->object.vm_object || 962 (prev->offset + prevsize == entry->offset)) && 963 (prev->eflags == entry->eflags) && 964 (prev->protection == entry->protection) && 965 (prev->max_protection == entry->max_protection) && 966 (prev->inheritance == entry->inheritance) && 967 (prev->wired_count == entry->wired_count)) { 968 if (map->first_free == prev) 969 map->first_free = entry; 970 if (map->hint == prev) 971 map->hint = entry; 972 vm_map_entry_unlink(map, prev); 973 entry->start = prev->start; 974 entry->offset = prev->offset; 975 if (prev->object.vm_object) 976 vm_object_deallocate(prev->object.vm_object); 977 vm_map_entry_dispose(map, prev); 978 } 979 } 980 981 next = entry->next; 982 if (next != &map->header) { 983 esize = entry->end - entry->start; 984 if ((entry->end == next->start) && 985 (next->object.vm_object == entry->object.vm_object) && 986 (!entry->object.vm_object || 987 (entry->offset + esize == next->offset)) && 988 (next->eflags == entry->eflags) && 989 (next->protection == entry->protection) && 990 (next->max_protection == entry->max_protection) && 991 (next->inheritance == entry->inheritance) && 992 (next->wired_count == entry->wired_count)) { 993 if (map->first_free == next) 994 map->first_free = entry; 995 if (map->hint == next) 996 map->hint = entry; 997 vm_map_entry_unlink(map, next); 998 entry->end = next->end; 999 if (next->object.vm_object) 1000 vm_object_deallocate(next->object.vm_object); 1001 vm_map_entry_dispose(map, next); 1002 } 1003 } 1004} 1005/* 1006 * vm_map_clip_start: [ internal use only ] 1007 * 1008 * Asserts that the given entry begins at or after 1009 * the specified address; if necessary, 1010 * it splits the entry into two. 1011 */ 1012#define vm_map_clip_start(map, entry, startaddr) \ 1013{ \ 1014 if (startaddr > entry->start) \ 1015 _vm_map_clip_start(map, entry, startaddr); \ 1016} 1017 1018/* 1019 * This routine is called only when it is known that 1020 * the entry must be split. 1021 */ 1022static void 1023_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start) 1024{ 1025 vm_map_entry_t new_entry; 1026 1027 /* 1028 * Split off the front portion -- note that we must insert the new 1029 * entry BEFORE this one, so that this entry has the specified 1030 * starting address. 1031 */ 1032 vm_map_simplify_entry(map, entry); 1033 1034 /* 1035 * If there is no object backing this entry, we might as well create 1036 * one now. If we defer it, an object can get created after the map 1037 * is clipped, and individual objects will be created for the split-up 1038 * map. This is a bit of a hack, but is also about the best place to 1039 * put this improvement. 1040 */ 1041 if (entry->object.vm_object == NULL && !map->system_map) { 1042 vm_object_t object; 1043 object = vm_object_allocate(OBJT_DEFAULT, 1044 atop(entry->end - entry->start)); 1045 entry->object.vm_object = object; 1046 entry->offset = 0; 1047 } 1048 1049 new_entry = vm_map_entry_create(map); 1050 *new_entry = *entry; 1051 1052 new_entry->end = start; 1053 entry->offset += (start - entry->start); 1054 entry->start = start; 1055 1056 vm_map_entry_link(map, entry->prev, new_entry); 1057 1058 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 1059 vm_object_reference(new_entry->object.vm_object); 1060 } 1061} 1062 1063/* 1064 * vm_map_clip_end: [ internal use only ] 1065 * 1066 * Asserts that the given entry ends at or before 1067 * the specified address; if necessary, 1068 * it splits the entry into two. 1069 */ 1070#define vm_map_clip_end(map, entry, endaddr) \ 1071{ \ 1072 if (endaddr < entry->end) \ 1073 _vm_map_clip_end(map, entry, endaddr); \ 1074} 1075 1076/* 1077 * This routine is called only when it is known that 1078 * the entry must be split. 1079 */ 1080static void 1081_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end) 1082{ 1083 vm_map_entry_t new_entry; 1084 1085 /* 1086 * If there is no object backing this entry, we might as well create 1087 * one now. If we defer it, an object can get created after the map 1088 * is clipped, and individual objects will be created for the split-up 1089 * map. This is a bit of a hack, but is also about the best place to 1090 * put this improvement. 1091 */ 1092 if (entry->object.vm_object == NULL && !map->system_map) { 1093 vm_object_t object; 1094 object = vm_object_allocate(OBJT_DEFAULT, 1095 atop(entry->end - entry->start)); 1096 entry->object.vm_object = object; 1097 entry->offset = 0; 1098 } 1099 1100 /* 1101 * Create a new entry and insert it AFTER the specified entry 1102 */ 1103 new_entry = vm_map_entry_create(map); 1104 *new_entry = *entry; 1105 1106 new_entry->start = entry->end = end; 1107 new_entry->offset += (end - entry->start); 1108 1109 vm_map_entry_link(map, entry, new_entry); 1110 1111 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 1112 vm_object_reference(new_entry->object.vm_object); 1113 } 1114} 1115 1116/* 1117 * VM_MAP_RANGE_CHECK: [ internal use only ] 1118 * 1119 * Asserts that the starting and ending region 1120 * addresses fall within the valid range of the map. 1121 */ 1122#define VM_MAP_RANGE_CHECK(map, start, end) \ 1123 { \ 1124 if (start < vm_map_min(map)) \ 1125 start = vm_map_min(map); \ 1126 if (end > vm_map_max(map)) \ 1127 end = vm_map_max(map); \ 1128 if (start > end) \ 1129 start = end; \ 1130 } 1131 1132/* 1133 * vm_map_submap: [ kernel use only ] 1134 * 1135 * Mark the given range as handled by a subordinate map. 1136 * 1137 * This range must have been created with vm_map_find, 1138 * and no other operations may have been performed on this 1139 * range prior to calling vm_map_submap. 1140 * 1141 * Only a limited number of operations can be performed 1142 * within this rage after calling vm_map_submap: 1143 * vm_fault 1144 * [Don't try vm_map_copy!] 1145 * 1146 * To remove a submapping, one must first remove the 1147 * range from the superior map, and then destroy the 1148 * submap (if desired). [Better yet, don't try it.] 1149 */ 1150int 1151vm_map_submap( 1152 vm_map_t map, 1153 vm_offset_t start, 1154 vm_offset_t end, 1155 vm_map_t submap) 1156{ 1157 vm_map_entry_t entry; 1158 int result = KERN_INVALID_ARGUMENT; 1159 1160 GIANT_REQUIRED; 1161 1162 vm_map_lock(map); 1163 1164 VM_MAP_RANGE_CHECK(map, start, end); 1165 1166 if (vm_map_lookup_entry(map, start, &entry)) { 1167 vm_map_clip_start(map, entry, start); 1168 } else 1169 entry = entry->next; 1170 1171 vm_map_clip_end(map, entry, end); 1172 1173 if ((entry->start == start) && (entry->end == end) && 1174 ((entry->eflags & MAP_ENTRY_COW) == 0) && 1175 (entry->object.vm_object == NULL)) { 1176 entry->object.sub_map = submap; 1177 entry->eflags |= MAP_ENTRY_IS_SUB_MAP; 1178 result = KERN_SUCCESS; 1179 } 1180 vm_map_unlock(map); 1181 1182 return (result); 1183} 1184 1185/* 1186 * vm_map_protect: 1187 * 1188 * Sets the protection of the specified address 1189 * region in the target map. If "set_max" is 1190 * specified, the maximum protection is to be set; 1191 * otherwise, only the current protection is affected. 1192 */ 1193int 1194vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, 1195 vm_prot_t new_prot, boolean_t set_max) 1196{ 1197 vm_map_entry_t current; 1198 vm_map_entry_t entry; 1199 1200 GIANT_REQUIRED; 1201 vm_map_lock(map); 1202 1203 VM_MAP_RANGE_CHECK(map, start, end); 1204 1205 if (vm_map_lookup_entry(map, start, &entry)) { 1206 vm_map_clip_start(map, entry, start); 1207 } else { 1208 entry = entry->next; 1209 } 1210 1211 /* 1212 * Make a first pass to check for protection violations. 1213 */ 1214 current = entry; 1215 while ((current != &map->header) && (current->start < end)) { 1216 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 1217 vm_map_unlock(map); 1218 return (KERN_INVALID_ARGUMENT); 1219 } 1220 if ((new_prot & current->max_protection) != new_prot) { 1221 vm_map_unlock(map); 1222 return (KERN_PROTECTION_FAILURE); 1223 } 1224 current = current->next; 1225 } 1226 1227 /* 1228 * Go back and fix up protections. [Note that clipping is not 1229 * necessary the second time.] 1230 */ 1231 current = entry; 1232 while ((current != &map->header) && (current->start < end)) { 1233 vm_prot_t old_prot; 1234 1235 vm_map_clip_end(map, current, end); 1236 1237 old_prot = current->protection; 1238 if (set_max) 1239 current->protection = 1240 (current->max_protection = new_prot) & 1241 old_prot; 1242 else 1243 current->protection = new_prot; 1244 1245 /* 1246 * Update physical map if necessary. Worry about copy-on-write 1247 * here -- CHECK THIS XXX 1248 */ 1249 if (current->protection != old_prot) { 1250#define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \ 1251 VM_PROT_ALL) 1252 pmap_protect(map->pmap, current->start, 1253 current->end, 1254 current->protection & MASK(current)); 1255#undef MASK 1256 } 1257 vm_map_simplify_entry(map, current); 1258 current = current->next; 1259 } 1260 vm_map_unlock(map); 1261 return (KERN_SUCCESS); 1262} 1263 1264/* 1265 * vm_map_madvise: 1266 * 1267 * This routine traverses a processes map handling the madvise 1268 * system call. Advisories are classified as either those effecting 1269 * the vm_map_entry structure, or those effecting the underlying 1270 * objects. 1271 */ 1272int 1273vm_map_madvise( 1274 vm_map_t map, 1275 vm_offset_t start, 1276 vm_offset_t end, 1277 int behav) 1278{ 1279 vm_map_entry_t current, entry; 1280 int modify_map = 0; 1281 1282 GIANT_REQUIRED; 1283 1284 /* 1285 * Some madvise calls directly modify the vm_map_entry, in which case 1286 * we need to use an exclusive lock on the map and we need to perform 1287 * various clipping operations. Otherwise we only need a read-lock 1288 * on the map. 1289 */ 1290 switch(behav) { 1291 case MADV_NORMAL: 1292 case MADV_SEQUENTIAL: 1293 case MADV_RANDOM: 1294 case MADV_NOSYNC: 1295 case MADV_AUTOSYNC: 1296 case MADV_NOCORE: 1297 case MADV_CORE: 1298 modify_map = 1; 1299 vm_map_lock(map); 1300 break; 1301 case MADV_WILLNEED: 1302 case MADV_DONTNEED: 1303 case MADV_FREE: 1304 vm_map_lock_read(map); 1305 break; 1306 default: 1307 return (KERN_INVALID_ARGUMENT); 1308 } 1309 1310 /* 1311 * Locate starting entry and clip if necessary. 1312 */ 1313 VM_MAP_RANGE_CHECK(map, start, end); 1314 1315 if (vm_map_lookup_entry(map, start, &entry)) { 1316 if (modify_map) 1317 vm_map_clip_start(map, entry, start); 1318 } else { 1319 entry = entry->next; 1320 } 1321 1322 if (modify_map) { 1323 /* 1324 * madvise behaviors that are implemented in the vm_map_entry. 1325 * 1326 * We clip the vm_map_entry so that behavioral changes are 1327 * limited to the specified address range. 1328 */ 1329 for (current = entry; 1330 (current != &map->header) && (current->start < end); 1331 current = current->next 1332 ) { 1333 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 1334 continue; 1335 1336 vm_map_clip_end(map, current, end); 1337 1338 switch (behav) { 1339 case MADV_NORMAL: 1340 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL); 1341 break; 1342 case MADV_SEQUENTIAL: 1343 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL); 1344 break; 1345 case MADV_RANDOM: 1346 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM); 1347 break; 1348 case MADV_NOSYNC: 1349 current->eflags |= MAP_ENTRY_NOSYNC; 1350 break; 1351 case MADV_AUTOSYNC: 1352 current->eflags &= ~MAP_ENTRY_NOSYNC; 1353 break; 1354 case MADV_NOCORE: 1355 current->eflags |= MAP_ENTRY_NOCOREDUMP; 1356 break; 1357 case MADV_CORE: 1358 current->eflags &= ~MAP_ENTRY_NOCOREDUMP; 1359 break; 1360 default: 1361 break; 1362 } 1363 vm_map_simplify_entry(map, current); 1364 } 1365 vm_map_unlock(map); 1366 } else { 1367 vm_pindex_t pindex; 1368 int count; 1369 1370 /* 1371 * madvise behaviors that are implemented in the underlying 1372 * vm_object. 1373 * 1374 * Since we don't clip the vm_map_entry, we have to clip 1375 * the vm_object pindex and count. 1376 */ 1377 for (current = entry; 1378 (current != &map->header) && (current->start < end); 1379 current = current->next 1380 ) { 1381 vm_offset_t useStart; 1382 1383 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 1384 continue; 1385 1386 pindex = OFF_TO_IDX(current->offset); 1387 count = atop(current->end - current->start); 1388 useStart = current->start; 1389 1390 if (current->start < start) { 1391 pindex += atop(start - current->start); 1392 count -= atop(start - current->start); 1393 useStart = start; 1394 } 1395 if (current->end > end) 1396 count -= atop(current->end - end); 1397 1398 if (count <= 0) 1399 continue; 1400 1401 vm_object_madvise(current->object.vm_object, 1402 pindex, count, behav); 1403 if (behav == MADV_WILLNEED) { 1404 pmap_object_init_pt( 1405 map->pmap, 1406 useStart, 1407 current->object.vm_object, 1408 pindex, 1409 (count << PAGE_SHIFT), 1410 MAP_PREFAULT_MADVISE 1411 ); 1412 } 1413 } 1414 vm_map_unlock_read(map); 1415 } 1416 return (0); 1417} 1418 1419 1420/* 1421 * vm_map_inherit: 1422 * 1423 * Sets the inheritance of the specified address 1424 * range in the target map. Inheritance 1425 * affects how the map will be shared with 1426 * child maps at the time of vm_map_fork. 1427 */ 1428int 1429vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, 1430 vm_inherit_t new_inheritance) 1431{ 1432 vm_map_entry_t entry; 1433 vm_map_entry_t temp_entry; 1434 1435 GIANT_REQUIRED; 1436 1437 switch (new_inheritance) { 1438 case VM_INHERIT_NONE: 1439 case VM_INHERIT_COPY: 1440 case VM_INHERIT_SHARE: 1441 break; 1442 default: 1443 return (KERN_INVALID_ARGUMENT); 1444 } 1445 1446 vm_map_lock(map); 1447 1448 VM_MAP_RANGE_CHECK(map, start, end); 1449 1450 if (vm_map_lookup_entry(map, start, &temp_entry)) { 1451 entry = temp_entry; 1452 vm_map_clip_start(map, entry, start); 1453 } else 1454 entry = temp_entry->next; 1455 1456 while ((entry != &map->header) && (entry->start < end)) { 1457 vm_map_clip_end(map, entry, end); 1458 1459 entry->inheritance = new_inheritance; 1460 1461 vm_map_simplify_entry(map, entry); 1462 1463 entry = entry->next; 1464 } 1465 1466 vm_map_unlock(map); 1467 return (KERN_SUCCESS); 1468} 1469 1470/* 1471 * Implement the semantics of mlock 1472 */ 1473int 1474vm_map_user_pageable( 1475 vm_map_t map, 1476 vm_offset_t start, 1477 vm_offset_t end, 1478 boolean_t new_pageable) 1479{ 1480 vm_map_entry_t entry; 1481 vm_map_entry_t start_entry; 1482 vm_offset_t estart; 1483 vm_offset_t eend; 1484 int rv; 1485 1486 vm_map_lock(map); 1487 VM_MAP_RANGE_CHECK(map, start, end); 1488 1489 if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) { 1490 vm_map_unlock(map); 1491 return (KERN_INVALID_ADDRESS); 1492 } 1493 1494 if (new_pageable) { 1495 1496 entry = start_entry; 1497 vm_map_clip_start(map, entry, start); 1498 1499 /* 1500 * Now decrement the wiring count for each region. If a region 1501 * becomes completely unwired, unwire its physical pages and 1502 * mappings. 1503 */ 1504 while ((entry != &map->header) && (entry->start < end)) { 1505 if (entry->eflags & MAP_ENTRY_USER_WIRED) { 1506 vm_map_clip_end(map, entry, end); 1507 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 1508 entry->wired_count--; 1509 if (entry->wired_count == 0) 1510 vm_fault_unwire(map, entry->start, entry->end); 1511 } 1512 vm_map_simplify_entry(map,entry); 1513 entry = entry->next; 1514 } 1515 } else { 1516 1517 entry = start_entry; 1518 1519 while ((entry != &map->header) && (entry->start < end)) { 1520 1521 if (entry->eflags & MAP_ENTRY_USER_WIRED) { 1522 entry = entry->next; 1523 continue; 1524 } 1525 1526 if (entry->wired_count != 0) { 1527 entry->wired_count++; 1528 entry->eflags |= MAP_ENTRY_USER_WIRED; 1529 entry = entry->next; 1530 continue; 1531 } 1532 1533 /* Here on entry being newly wired */ 1534 1535 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 1536 int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY; 1537 if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) { 1538 1539 vm_object_shadow(&entry->object.vm_object, 1540 &entry->offset, 1541 atop(entry->end - entry->start)); 1542 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 1543 1544 } else if (entry->object.vm_object == NULL && 1545 !map->system_map) { 1546 1547 entry->object.vm_object = 1548 vm_object_allocate(OBJT_DEFAULT, 1549 atop(entry->end - entry->start)); 1550 entry->offset = (vm_offset_t) 0; 1551 1552 } 1553 } 1554 1555 vm_map_clip_start(map, entry, start); 1556 vm_map_clip_end(map, entry, end); 1557 1558 entry->wired_count++; 1559 entry->eflags |= MAP_ENTRY_USER_WIRED; 1560 estart = entry->start; 1561 eend = entry->end; 1562 1563 /* First we need to allow map modifications */ 1564 vm_map_set_recursive(map); 1565 vm_map_lock_downgrade(map); 1566 map->timestamp++; 1567 1568 rv = vm_fault_user_wire(map, entry->start, entry->end); 1569 if (rv) { 1570 1571 entry->wired_count--; 1572 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 1573 1574 vm_map_clear_recursive(map); 1575 vm_map_unlock(map); 1576 1577 /* 1578 * At this point, the map is unlocked, and 1579 * entry might no longer be valid. Use copy 1580 * of entry start value obtained while entry 1581 * was valid. 1582 */ 1583 (void) vm_map_user_pageable(map, start, estart, 1584 TRUE); 1585 return rv; 1586 } 1587 1588 vm_map_clear_recursive(map); 1589 if (vm_map_lock_upgrade(map)) { 1590 vm_map_lock(map); 1591 if (vm_map_lookup_entry(map, estart, &entry) 1592 == FALSE) { 1593 vm_map_unlock(map); 1594 /* 1595 * vm_fault_user_wire succeded, thus 1596 * the area between start and eend 1597 * is wired and has to be unwired 1598 * here as part of the cleanup. 1599 */ 1600 (void) vm_map_user_pageable(map, 1601 start, 1602 eend, 1603 TRUE); 1604 return (KERN_INVALID_ADDRESS); 1605 } 1606 } 1607 vm_map_simplify_entry(map,entry); 1608 } 1609 } 1610 map->timestamp++; 1611 vm_map_unlock(map); 1612 return KERN_SUCCESS; 1613} 1614 1615/* 1616 * vm_map_pageable: 1617 * 1618 * Sets the pageability of the specified address 1619 * range in the target map. Regions specified 1620 * as not pageable require locked-down physical 1621 * memory and physical page maps. 1622 * 1623 * The map must not be locked, but a reference 1624 * must remain to the map throughout the call. 1625 */ 1626int 1627vm_map_pageable( 1628 vm_map_t map, 1629 vm_offset_t start, 1630 vm_offset_t end, 1631 boolean_t new_pageable) 1632{ 1633 vm_map_entry_t entry; 1634 vm_map_entry_t start_entry; 1635 vm_offset_t failed = 0; 1636 int rv; 1637 1638 GIANT_REQUIRED; 1639 1640 vm_map_lock(map); 1641 1642 VM_MAP_RANGE_CHECK(map, start, end); 1643 1644 /* 1645 * Only one pageability change may take place at one time, since 1646 * vm_fault assumes it will be called only once for each 1647 * wiring/unwiring. Therefore, we have to make sure we're actually 1648 * changing the pageability for the entire region. We do so before 1649 * making any changes. 1650 */ 1651 if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) { 1652 vm_map_unlock(map); 1653 return (KERN_INVALID_ADDRESS); 1654 } 1655 entry = start_entry; 1656 1657 /* 1658 * Actions are rather different for wiring and unwiring, so we have 1659 * two separate cases. 1660 */ 1661 if (new_pageable) { 1662 vm_map_clip_start(map, entry, start); 1663 1664 /* 1665 * Unwiring. First ensure that the range to be unwired is 1666 * really wired down and that there are no holes. 1667 */ 1668 while ((entry != &map->header) && (entry->start < end)) { 1669 if (entry->wired_count == 0 || 1670 (entry->end < end && 1671 (entry->next == &map->header || 1672 entry->next->start > entry->end))) { 1673 vm_map_unlock(map); 1674 return (KERN_INVALID_ARGUMENT); 1675 } 1676 entry = entry->next; 1677 } 1678 1679 /* 1680 * Now decrement the wiring count for each region. If a region 1681 * becomes completely unwired, unwire its physical pages and 1682 * mappings. 1683 */ 1684 entry = start_entry; 1685 while ((entry != &map->header) && (entry->start < end)) { 1686 vm_map_clip_end(map, entry, end); 1687 1688 entry->wired_count--; 1689 if (entry->wired_count == 0) 1690 vm_fault_unwire(map, entry->start, entry->end); 1691 1692 vm_map_simplify_entry(map, entry); 1693 1694 entry = entry->next; 1695 } 1696 } else { 1697 /* 1698 * Wiring. We must do this in two passes: 1699 * 1700 * 1. Holding the write lock, we create any shadow or zero-fill 1701 * objects that need to be created. Then we clip each map 1702 * entry to the region to be wired and increment its wiring 1703 * count. We create objects before clipping the map entries 1704 * to avoid object proliferation. 1705 * 1706 * 2. We downgrade to a read lock, and call vm_fault_wire to 1707 * fault in the pages for any newly wired area (wired_count is 1708 * 1). 1709 * 1710 * Downgrading to a read lock for vm_fault_wire avoids a possible 1711 * deadlock with another process that may have faulted on one 1712 * of the pages to be wired (it would mark the page busy, 1713 * blocking us, then in turn block on the map lock that we 1714 * hold). Because of problems in the recursive lock package, 1715 * we cannot upgrade to a write lock in vm_map_lookup. Thus, 1716 * any actions that require the write lock must be done 1717 * beforehand. Because we keep the read lock on the map, the 1718 * copy-on-write status of the entries we modify here cannot 1719 * change. 1720 */ 1721 1722 /* 1723 * Pass 1. 1724 */ 1725 while ((entry != &map->header) && (entry->start < end)) { 1726 if (entry->wired_count == 0) { 1727 1728 /* 1729 * Perform actions of vm_map_lookup that need 1730 * the write lock on the map: create a shadow 1731 * object for a copy-on-write region, or an 1732 * object for a zero-fill region. 1733 * 1734 * We don't have to do this for entries that 1735 * point to sub maps, because we won't 1736 * hold the lock on the sub map. 1737 */ 1738 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 1739 int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY; 1740 if (copyflag && 1741 ((entry->protection & VM_PROT_WRITE) != 0)) { 1742 1743 vm_object_shadow(&entry->object.vm_object, 1744 &entry->offset, 1745 atop(entry->end - entry->start)); 1746 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 1747 } else if (entry->object.vm_object == NULL && 1748 !map->system_map) { 1749 entry->object.vm_object = 1750 vm_object_allocate(OBJT_DEFAULT, 1751 atop(entry->end - entry->start)); 1752 entry->offset = (vm_offset_t) 0; 1753 } 1754 } 1755 } 1756 vm_map_clip_start(map, entry, start); 1757 vm_map_clip_end(map, entry, end); 1758 entry->wired_count++; 1759 1760 /* 1761 * Check for holes 1762 */ 1763 if (entry->end < end && 1764 (entry->next == &map->header || 1765 entry->next->start > entry->end)) { 1766 /* 1767 * Found one. Object creation actions do not 1768 * need to be undone, but the wired counts 1769 * need to be restored. 1770 */ 1771 while (entry != &map->header && entry->end > start) { 1772 entry->wired_count--; 1773 entry = entry->prev; 1774 } 1775 vm_map_unlock(map); 1776 return (KERN_INVALID_ARGUMENT); 1777 } 1778 entry = entry->next; 1779 } 1780 1781 /* 1782 * Pass 2. 1783 */ 1784 1785 /* 1786 * HACK HACK HACK HACK 1787 * 1788 * If we are wiring in the kernel map or a submap of it, 1789 * unlock the map to avoid deadlocks. We trust that the 1790 * kernel is well-behaved, and therefore will not do 1791 * anything destructive to this region of the map while 1792 * we have it unlocked. We cannot trust user processes 1793 * to do the same. 1794 * 1795 * HACK HACK HACK HACK 1796 */ 1797 if (vm_map_pmap(map) == kernel_pmap) { 1798 vm_map_unlock(map); /* trust me ... */ 1799 } else { 1800 vm_map_lock_downgrade(map); 1801 } 1802 1803 rv = 0; 1804 entry = start_entry; 1805 while (entry != &map->header && entry->start < end) { 1806 /* 1807 * If vm_fault_wire fails for any page we need to undo 1808 * what has been done. We decrement the wiring count 1809 * for those pages which have not yet been wired (now) 1810 * and unwire those that have (later). 1811 * 1812 * XXX this violates the locking protocol on the map, 1813 * needs to be fixed. 1814 */ 1815 if (rv) 1816 entry->wired_count--; 1817 else if (entry->wired_count == 1) { 1818 rv = vm_fault_wire(map, entry->start, entry->end); 1819 if (rv) { 1820 failed = entry->start; 1821 entry->wired_count--; 1822 } 1823 } 1824 entry = entry->next; 1825 } 1826 1827 if (vm_map_pmap(map) == kernel_pmap) { 1828 vm_map_lock(map); 1829 } 1830 if (rv) { 1831 vm_map_unlock(map); 1832 (void) vm_map_pageable(map, start, failed, TRUE); 1833 return (rv); 1834 } 1835 /* 1836 * An exclusive lock on the map is needed in order to call 1837 * vm_map_simplify_entry(). If the current lock on the map 1838 * is only a shared lock, an upgrade is needed. 1839 */ 1840 if (vm_map_pmap(map) != kernel_pmap && 1841 vm_map_lock_upgrade(map)) { 1842 vm_map_lock(map); 1843 if (vm_map_lookup_entry(map, start, &start_entry) == 1844 FALSE) { 1845 vm_map_unlock(map); 1846 return KERN_SUCCESS; 1847 } 1848 } 1849 vm_map_simplify_entry(map, start_entry); 1850 } 1851 1852 vm_map_unlock(map); 1853 1854 return (KERN_SUCCESS); 1855} 1856 1857/* 1858 * vm_map_clean 1859 * 1860 * Push any dirty cached pages in the address range to their pager. 1861 * If syncio is TRUE, dirty pages are written synchronously. 1862 * If invalidate is TRUE, any cached pages are freed as well. 1863 * 1864 * Returns an error if any part of the specified range is not mapped. 1865 */ 1866int 1867vm_map_clean( 1868 vm_map_t map, 1869 vm_offset_t start, 1870 vm_offset_t end, 1871 boolean_t syncio, 1872 boolean_t invalidate) 1873{ 1874 vm_map_entry_t current; 1875 vm_map_entry_t entry; 1876 vm_size_t size; 1877 vm_object_t object; 1878 vm_ooffset_t offset; 1879 1880 GIANT_REQUIRED; 1881 1882 vm_map_lock_read(map); 1883 VM_MAP_RANGE_CHECK(map, start, end); 1884 if (!vm_map_lookup_entry(map, start, &entry)) { 1885 vm_map_unlock_read(map); 1886 return (KERN_INVALID_ADDRESS); 1887 } 1888 /* 1889 * Make a first pass to check for holes. 1890 */ 1891 for (current = entry; current->start < end; current = current->next) { 1892 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 1893 vm_map_unlock_read(map); 1894 return (KERN_INVALID_ARGUMENT); 1895 } 1896 if (end > current->end && 1897 (current->next == &map->header || 1898 current->end != current->next->start)) { 1899 vm_map_unlock_read(map); 1900 return (KERN_INVALID_ADDRESS); 1901 } 1902 } 1903 1904 if (invalidate) 1905 pmap_remove(vm_map_pmap(map), start, end); 1906 /* 1907 * Make a second pass, cleaning/uncaching pages from the indicated 1908 * objects as we go. 1909 */ 1910 for (current = entry; current->start < end; current = current->next) { 1911 offset = current->offset + (start - current->start); 1912 size = (end <= current->end ? end : current->end) - start; 1913 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 1914 vm_map_t smap; 1915 vm_map_entry_t tentry; 1916 vm_size_t tsize; 1917 1918 smap = current->object.sub_map; 1919 vm_map_lock_read(smap); 1920 (void) vm_map_lookup_entry(smap, offset, &tentry); 1921 tsize = tentry->end - offset; 1922 if (tsize < size) 1923 size = tsize; 1924 object = tentry->object.vm_object; 1925 offset = tentry->offset + (offset - tentry->start); 1926 vm_map_unlock_read(smap); 1927 } else { 1928 object = current->object.vm_object; 1929 } 1930 /* 1931 * Note that there is absolutely no sense in writing out 1932 * anonymous objects, so we track down the vnode object 1933 * to write out. 1934 * We invalidate (remove) all pages from the address space 1935 * anyway, for semantic correctness. 1936 * 1937 * note: certain anonymous maps, such as MAP_NOSYNC maps, 1938 * may start out with a NULL object. 1939 */ 1940 while (object && object->backing_object) { 1941 object = object->backing_object; 1942 offset += object->backing_object_offset; 1943 if (object->size < OFF_TO_IDX(offset + size)) 1944 size = IDX_TO_OFF(object->size) - offset; 1945 } 1946 if (object && (object->type == OBJT_VNODE) && 1947 (current->protection & VM_PROT_WRITE)) { 1948 /* 1949 * Flush pages if writing is allowed, invalidate them 1950 * if invalidation requested. Pages undergoing I/O 1951 * will be ignored by vm_object_page_remove(). 1952 * 1953 * We cannot lock the vnode and then wait for paging 1954 * to complete without deadlocking against vm_fault. 1955 * Instead we simply call vm_object_page_remove() and 1956 * allow it to block internally on a page-by-page 1957 * basis when it encounters pages undergoing async 1958 * I/O. 1959 */ 1960 int flags; 1961 1962 vm_object_reference(object); 1963 vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curthread); 1964 flags = (syncio || invalidate) ? OBJPC_SYNC : 0; 1965 flags |= invalidate ? OBJPC_INVAL : 0; 1966 vm_object_page_clean(object, 1967 OFF_TO_IDX(offset), 1968 OFF_TO_IDX(offset + size + PAGE_MASK), 1969 flags); 1970 if (invalidate) { 1971 /*vm_object_pip_wait(object, "objmcl");*/ 1972 vm_object_page_remove(object, 1973 OFF_TO_IDX(offset), 1974 OFF_TO_IDX(offset + size + PAGE_MASK), 1975 FALSE); 1976 } 1977 VOP_UNLOCK(object->handle, 0, curthread); 1978 vm_object_deallocate(object); 1979 } 1980 start += size; 1981 } 1982 1983 vm_map_unlock_read(map); 1984 return (KERN_SUCCESS); 1985} 1986 1987/* 1988 * vm_map_entry_unwire: [ internal use only ] 1989 * 1990 * Make the region specified by this entry pageable. 1991 * 1992 * The map in question should be locked. 1993 * [This is the reason for this routine's existence.] 1994 */ 1995static void 1996vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry) 1997{ 1998 vm_fault_unwire(map, entry->start, entry->end); 1999 entry->wired_count = 0; 2000} 2001 2002/* 2003 * vm_map_entry_delete: [ internal use only ] 2004 * 2005 * Deallocate the given entry from the target map. 2006 */ 2007static void 2008vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry) 2009{ 2010 vm_map_entry_unlink(map, entry); 2011 map->size -= entry->end - entry->start; 2012 2013 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 2014 vm_object_deallocate(entry->object.vm_object); 2015 } 2016 2017 vm_map_entry_dispose(map, entry); 2018} 2019 2020/* 2021 * vm_map_delete: [ internal use only ] 2022 * 2023 * Deallocates the given address range from the target 2024 * map. 2025 */ 2026int 2027vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end) 2028{ 2029 vm_object_t object; 2030 vm_map_entry_t entry; 2031 vm_map_entry_t first_entry; 2032 2033 GIANT_REQUIRED; 2034 2035 /* 2036 * Find the start of the region, and clip it 2037 */ 2038 if (!vm_map_lookup_entry(map, start, &first_entry)) 2039 entry = first_entry->next; 2040 else { 2041 entry = first_entry; 2042 vm_map_clip_start(map, entry, start); 2043 /* 2044 * Fix the lookup hint now, rather than each time though the 2045 * loop. 2046 */ 2047 SAVE_HINT(map, entry->prev); 2048 } 2049 2050 /* 2051 * Save the free space hint 2052 */ 2053 if (entry == &map->header) { 2054 map->first_free = &map->header; 2055 } else if (map->first_free->start >= start) { 2056 map->first_free = entry->prev; 2057 } 2058 2059 /* 2060 * Step through all entries in this region 2061 */ 2062 while ((entry != &map->header) && (entry->start < end)) { 2063 vm_map_entry_t next; 2064 vm_offset_t s, e; 2065 vm_pindex_t offidxstart, offidxend, count; 2066 2067 vm_map_clip_end(map, entry, end); 2068 2069 s = entry->start; 2070 e = entry->end; 2071 next = entry->next; 2072 2073 offidxstart = OFF_TO_IDX(entry->offset); 2074 count = OFF_TO_IDX(e - s); 2075 object = entry->object.vm_object; 2076 2077 /* 2078 * Unwire before removing addresses from the pmap; otherwise, 2079 * unwiring will put the entries back in the pmap. 2080 */ 2081 if (entry->wired_count != 0) { 2082 vm_map_entry_unwire(map, entry); 2083 } 2084 2085 offidxend = offidxstart + count; 2086 2087 if ((object == kernel_object) || (object == kmem_object)) { 2088 vm_object_page_remove(object, offidxstart, offidxend, FALSE); 2089 } else { 2090 pmap_remove(map->pmap, s, e); 2091 if (object != NULL && 2092 object->ref_count != 1 && 2093 (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING && 2094 (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) { 2095 vm_object_collapse(object); 2096 vm_object_page_remove(object, offidxstart, offidxend, FALSE); 2097 if (object->type == OBJT_SWAP) { 2098 swap_pager_freespace(object, offidxstart, count); 2099 } 2100 if (offidxend >= object->size && 2101 offidxstart < object->size) { 2102 object->size = offidxstart; 2103 } 2104 } 2105 } 2106 2107 /* 2108 * Delete the entry (which may delete the object) only after 2109 * removing all pmap entries pointing to its pages. 2110 * (Otherwise, its page frames may be reallocated, and any 2111 * modify bits will be set in the wrong object!) 2112 */ 2113 vm_map_entry_delete(map, entry); 2114 entry = next; 2115 } 2116 return (KERN_SUCCESS); 2117} 2118 2119/* 2120 * vm_map_remove: 2121 * 2122 * Remove the given address range from the target map. 2123 * This is the exported form of vm_map_delete. 2124 */ 2125int 2126vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end) 2127{ 2128 int result, s = 0; 2129 2130 GIANT_REQUIRED; 2131 2132 if (map == kmem_map) 2133 s = splvm(); 2134 2135 vm_map_lock(map); 2136 VM_MAP_RANGE_CHECK(map, start, end); 2137 result = vm_map_delete(map, start, end); 2138 vm_map_unlock(map); 2139 2140 if (map == kmem_map) 2141 splx(s); 2142 2143 return (result); 2144} 2145 2146/* 2147 * vm_map_check_protection: 2148 * 2149 * Assert that the target map allows the specified 2150 * privilege on the entire address region given. 2151 * The entire region must be allocated. 2152 */ 2153boolean_t 2154vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end, 2155 vm_prot_t protection) 2156{ 2157 vm_map_entry_t entry; 2158 vm_map_entry_t tmp_entry; 2159 2160 GIANT_REQUIRED; 2161 2162 vm_map_lock_read(map); 2163 if (!vm_map_lookup_entry(map, start, &tmp_entry)) { 2164 vm_map_unlock_read(map); 2165 return (FALSE); 2166 } 2167 entry = tmp_entry; 2168 2169 while (start < end) { 2170 if (entry == &map->header) { 2171 vm_map_unlock_read(map); 2172 return (FALSE); 2173 } 2174 /* 2175 * No holes allowed! 2176 */ 2177 if (start < entry->start) { 2178 vm_map_unlock_read(map); 2179 return (FALSE); 2180 } 2181 /* 2182 * Check protection associated with entry. 2183 */ 2184 if ((entry->protection & protection) != protection) { 2185 vm_map_unlock_read(map); 2186 return (FALSE); 2187 } 2188 /* go to next entry */ 2189 start = entry->end; 2190 entry = entry->next; 2191 } 2192 vm_map_unlock_read(map); 2193 return (TRUE); 2194} 2195 2196/* 2197 * Split the pages in a map entry into a new object. This affords 2198 * easier removal of unused pages, and keeps object inheritance from 2199 * being a negative impact on memory usage. 2200 */ 2201static void 2202vm_map_split(vm_map_entry_t entry) 2203{ 2204 vm_page_t m; 2205 vm_object_t orig_object, new_object, source; 2206 vm_offset_t s, e; 2207 vm_pindex_t offidxstart, offidxend, idx; 2208 vm_size_t size; 2209 vm_ooffset_t offset; 2210 2211 GIANT_REQUIRED; 2212 2213 orig_object = entry->object.vm_object; 2214 if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP) 2215 return; 2216 if (orig_object->ref_count <= 1) 2217 return; 2218 2219 offset = entry->offset; 2220 s = entry->start; 2221 e = entry->end; 2222 2223 offidxstart = OFF_TO_IDX(offset); 2224 offidxend = offidxstart + OFF_TO_IDX(e - s); 2225 size = offidxend - offidxstart; 2226 2227 new_object = vm_pager_allocate(orig_object->type, 2228 NULL, IDX_TO_OFF(size), VM_PROT_ALL, 0LL); 2229 if (new_object == NULL) 2230 return; 2231 2232 source = orig_object->backing_object; 2233 if (source != NULL) { 2234 vm_object_reference(source); /* Referenced by new_object */ 2235 TAILQ_INSERT_TAIL(&source->shadow_head, 2236 new_object, shadow_list); 2237 vm_object_clear_flag(source, OBJ_ONEMAPPING); 2238 new_object->backing_object_offset = 2239 orig_object->backing_object_offset + IDX_TO_OFF(offidxstart); 2240 new_object->backing_object = source; 2241 source->shadow_count++; 2242 source->generation++; 2243 } 2244 2245 for (idx = 0; idx < size; idx++) { 2246 vm_page_t m; 2247 2248 retry: 2249 m = vm_page_lookup(orig_object, offidxstart + idx); 2250 if (m == NULL) 2251 continue; 2252 2253 /* 2254 * We must wait for pending I/O to complete before we can 2255 * rename the page. 2256 * 2257 * We do not have to VM_PROT_NONE the page as mappings should 2258 * not be changed by this operation. 2259 */ 2260 if (vm_page_sleep_busy(m, TRUE, "spltwt")) 2261 goto retry; 2262 2263 vm_page_busy(m); 2264 vm_page_rename(m, new_object, idx); 2265 /* page automatically made dirty by rename and cache handled */ 2266 vm_page_busy(m); 2267 } 2268 2269 if (orig_object->type == OBJT_SWAP) { 2270 vm_object_pip_add(orig_object, 1); 2271 /* 2272 * copy orig_object pages into new_object 2273 * and destroy unneeded pages in 2274 * shadow object. 2275 */ 2276 swap_pager_copy(orig_object, new_object, offidxstart, 0); 2277 vm_object_pip_wakeup(orig_object); 2278 } 2279 2280 for (idx = 0; idx < size; idx++) { 2281 m = vm_page_lookup(new_object, idx); 2282 if (m) { 2283 vm_page_wakeup(m); 2284 } 2285 } 2286 2287 entry->object.vm_object = new_object; 2288 entry->offset = 0LL; 2289 vm_object_deallocate(orig_object); 2290} 2291 2292/* 2293 * vm_map_copy_entry: 2294 * 2295 * Copies the contents of the source entry to the destination 2296 * entry. The entries *must* be aligned properly. 2297 */ 2298static void 2299vm_map_copy_entry( 2300 vm_map_t src_map, 2301 vm_map_t dst_map, 2302 vm_map_entry_t src_entry, 2303 vm_map_entry_t dst_entry) 2304{ 2305 vm_object_t src_object; 2306 2307 if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP) 2308 return; 2309 2310 if (src_entry->wired_count == 0) { 2311 2312 /* 2313 * If the source entry is marked needs_copy, it is already 2314 * write-protected. 2315 */ 2316 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) { 2317 pmap_protect(src_map->pmap, 2318 src_entry->start, 2319 src_entry->end, 2320 src_entry->protection & ~VM_PROT_WRITE); 2321 } 2322 2323 /* 2324 * Make a copy of the object. 2325 */ 2326 if ((src_object = src_entry->object.vm_object) != NULL) { 2327 2328 if ((src_object->handle == NULL) && 2329 (src_object->type == OBJT_DEFAULT || 2330 src_object->type == OBJT_SWAP)) { 2331 vm_object_collapse(src_object); 2332 if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) { 2333 vm_map_split(src_entry); 2334 src_object = src_entry->object.vm_object; 2335 } 2336 } 2337 2338 vm_object_reference(src_object); 2339 vm_object_clear_flag(src_object, OBJ_ONEMAPPING); 2340 dst_entry->object.vm_object = src_object; 2341 src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY); 2342 dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY); 2343 dst_entry->offset = src_entry->offset; 2344 } else { 2345 dst_entry->object.vm_object = NULL; 2346 dst_entry->offset = 0; 2347 } 2348 2349 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start, 2350 dst_entry->end - dst_entry->start, src_entry->start); 2351 } else { 2352 /* 2353 * Of course, wired down pages can't be set copy-on-write. 2354 * Cause wired pages to be copied into the new map by 2355 * simulating faults (the new pages are pageable) 2356 */ 2357 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry); 2358 } 2359} 2360 2361/* 2362 * vmspace_fork: 2363 * Create a new process vmspace structure and vm_map 2364 * based on those of an existing process. The new map 2365 * is based on the old map, according to the inheritance 2366 * values on the regions in that map. 2367 * 2368 * The source map must not be locked. 2369 */ 2370struct vmspace * 2371vmspace_fork(struct vmspace *vm1) 2372{ 2373 struct vmspace *vm2; 2374 vm_map_t old_map = &vm1->vm_map; 2375 vm_map_t new_map; 2376 vm_map_entry_t old_entry; 2377 vm_map_entry_t new_entry; 2378 vm_object_t object; 2379 2380 GIANT_REQUIRED; 2381 2382 vm_map_lock(old_map); 2383 old_map->infork = 1; 2384 2385 vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset); 2386 bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy, 2387 (caddr_t) &vm1->vm_endcopy - (caddr_t) &vm1->vm_startcopy); 2388 new_map = &vm2->vm_map; /* XXX */ 2389 new_map->timestamp = 1; 2390 2391 old_entry = old_map->header.next; 2392 2393 while (old_entry != &old_map->header) { 2394 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) 2395 panic("vm_map_fork: encountered a submap"); 2396 2397 switch (old_entry->inheritance) { 2398 case VM_INHERIT_NONE: 2399 break; 2400 2401 case VM_INHERIT_SHARE: 2402 /* 2403 * Clone the entry, creating the shared object if necessary. 2404 */ 2405 object = old_entry->object.vm_object; 2406 if (object == NULL) { 2407 object = vm_object_allocate(OBJT_DEFAULT, 2408 atop(old_entry->end - old_entry->start)); 2409 old_entry->object.vm_object = object; 2410 old_entry->offset = (vm_offset_t) 0; 2411 } 2412 2413 /* 2414 * Add the reference before calling vm_object_shadow 2415 * to insure that a shadow object is created. 2416 */ 2417 vm_object_reference(object); 2418 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) { 2419 vm_object_shadow(&old_entry->object.vm_object, 2420 &old_entry->offset, 2421 atop(old_entry->end - old_entry->start)); 2422 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 2423 /* Transfer the second reference too. */ 2424 vm_object_reference( 2425 old_entry->object.vm_object); 2426 vm_object_deallocate(object); 2427 object = old_entry->object.vm_object; 2428 } 2429 vm_object_clear_flag(object, OBJ_ONEMAPPING); 2430 2431 /* 2432 * Clone the entry, referencing the shared object. 2433 */ 2434 new_entry = vm_map_entry_create(new_map); 2435 *new_entry = *old_entry; 2436 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2437 new_entry->wired_count = 0; 2438 2439 /* 2440 * Insert the entry into the new map -- we know we're 2441 * inserting at the end of the new map. 2442 */ 2443 vm_map_entry_link(new_map, new_map->header.prev, 2444 new_entry); 2445 2446 /* 2447 * Update the physical map 2448 */ 2449 pmap_copy(new_map->pmap, old_map->pmap, 2450 new_entry->start, 2451 (old_entry->end - old_entry->start), 2452 old_entry->start); 2453 break; 2454 2455 case VM_INHERIT_COPY: 2456 /* 2457 * Clone the entry and link into the map. 2458 */ 2459 new_entry = vm_map_entry_create(new_map); 2460 *new_entry = *old_entry; 2461 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2462 new_entry->wired_count = 0; 2463 new_entry->object.vm_object = NULL; 2464 vm_map_entry_link(new_map, new_map->header.prev, 2465 new_entry); 2466 vm_map_copy_entry(old_map, new_map, old_entry, 2467 new_entry); 2468 break; 2469 } 2470 old_entry = old_entry->next; 2471 } 2472 2473 new_map->size = old_map->size; 2474 old_map->infork = 0; 2475 vm_map_unlock(old_map); 2476 2477 return (vm2); 2478} 2479 2480int 2481vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 2482 vm_prot_t prot, vm_prot_t max, int cow) 2483{ 2484 vm_map_entry_t prev_entry; 2485 vm_map_entry_t new_stack_entry; 2486 vm_size_t init_ssize; 2487 int rv; 2488 2489 GIANT_REQUIRED; 2490 2491 if (VM_MIN_ADDRESS > 0 && addrbos < VM_MIN_ADDRESS) 2492 return (KERN_NO_SPACE); 2493 2494 if (max_ssize < sgrowsiz) 2495 init_ssize = max_ssize; 2496 else 2497 init_ssize = sgrowsiz; 2498 2499 vm_map_lock(map); 2500 2501 /* If addr is already mapped, no go */ 2502 if (vm_map_lookup_entry(map, addrbos, &prev_entry)) { 2503 vm_map_unlock(map); 2504 return (KERN_NO_SPACE); 2505 } 2506 2507 /* If we can't accomodate max_ssize in the current mapping, 2508 * no go. However, we need to be aware that subsequent user 2509 * mappings might map into the space we have reserved for 2510 * stack, and currently this space is not protected. 2511 * 2512 * Hopefully we will at least detect this condition 2513 * when we try to grow the stack. 2514 */ 2515 if ((prev_entry->next != &map->header) && 2516 (prev_entry->next->start < addrbos + max_ssize)) { 2517 vm_map_unlock(map); 2518 return (KERN_NO_SPACE); 2519 } 2520 2521 /* We initially map a stack of only init_ssize. We will 2522 * grow as needed later. Since this is to be a grow 2523 * down stack, we map at the top of the range. 2524 * 2525 * Note: we would normally expect prot and max to be 2526 * VM_PROT_ALL, and cow to be 0. Possibly we should 2527 * eliminate these as input parameters, and just 2528 * pass these values here in the insert call. 2529 */ 2530 rv = vm_map_insert(map, NULL, 0, addrbos + max_ssize - init_ssize, 2531 addrbos + max_ssize, prot, max, cow); 2532 2533 /* Now set the avail_ssize amount */ 2534 if (rv == KERN_SUCCESS){ 2535 if (prev_entry != &map->header) 2536 vm_map_clip_end(map, prev_entry, addrbos + max_ssize - init_ssize); 2537 new_stack_entry = prev_entry->next; 2538 if (new_stack_entry->end != addrbos + max_ssize || 2539 new_stack_entry->start != addrbos + max_ssize - init_ssize) 2540 panic ("Bad entry start/end for new stack entry"); 2541 else 2542 new_stack_entry->avail_ssize = max_ssize - init_ssize; 2543 } 2544 2545 vm_map_unlock(map); 2546 return (rv); 2547} 2548 2549/* Attempts to grow a vm stack entry. Returns KERN_SUCCESS if the 2550 * desired address is already mapped, or if we successfully grow 2551 * the stack. Also returns KERN_SUCCESS if addr is outside the 2552 * stack range (this is strange, but preserves compatibility with 2553 * the grow function in vm_machdep.c). 2554 */ 2555int 2556vm_map_growstack (struct proc *p, vm_offset_t addr) 2557{ 2558 vm_map_entry_t prev_entry; 2559 vm_map_entry_t stack_entry; 2560 vm_map_entry_t new_stack_entry; 2561 struct vmspace *vm = p->p_vmspace; 2562 vm_map_t map = &vm->vm_map; 2563 vm_offset_t end; 2564 int grow_amount; 2565 int rv; 2566 int is_procstack; 2567 2568 GIANT_REQUIRED; 2569 2570Retry: 2571 vm_map_lock_read(map); 2572 2573 /* If addr is already in the entry range, no need to grow.*/ 2574 if (vm_map_lookup_entry(map, addr, &prev_entry)) { 2575 vm_map_unlock_read(map); 2576 return (KERN_SUCCESS); 2577 } 2578 2579 if ((stack_entry = prev_entry->next) == &map->header) { 2580 vm_map_unlock_read(map); 2581 return (KERN_SUCCESS); 2582 } 2583 if (prev_entry == &map->header) 2584 end = stack_entry->start - stack_entry->avail_ssize; 2585 else 2586 end = prev_entry->end; 2587 2588 /* This next test mimics the old grow function in vm_machdep.c. 2589 * It really doesn't quite make sense, but we do it anyway 2590 * for compatibility. 2591 * 2592 * If not growable stack, return success. This signals the 2593 * caller to proceed as he would normally with normal vm. 2594 */ 2595 if (stack_entry->avail_ssize < 1 || 2596 addr >= stack_entry->start || 2597 addr < stack_entry->start - stack_entry->avail_ssize) { 2598 vm_map_unlock_read(map); 2599 return (KERN_SUCCESS); 2600 } 2601 2602 /* Find the minimum grow amount */ 2603 grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE); 2604 if (grow_amount > stack_entry->avail_ssize) { 2605 vm_map_unlock_read(map); 2606 return (KERN_NO_SPACE); 2607 } 2608 2609 /* If there is no longer enough space between the entries 2610 * nogo, and adjust the available space. Note: this 2611 * should only happen if the user has mapped into the 2612 * stack area after the stack was created, and is 2613 * probably an error. 2614 * 2615 * This also effectively destroys any guard page the user 2616 * might have intended by limiting the stack size. 2617 */ 2618 if (grow_amount > stack_entry->start - end) { 2619 if (vm_map_lock_upgrade(map)) 2620 goto Retry; 2621 2622 stack_entry->avail_ssize = stack_entry->start - end; 2623 2624 vm_map_unlock(map); 2625 return (KERN_NO_SPACE); 2626 } 2627 2628 is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr; 2629 2630 /* If this is the main process stack, see if we're over the 2631 * stack limit. 2632 */ 2633 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > 2634 p->p_rlimit[RLIMIT_STACK].rlim_cur)) { 2635 vm_map_unlock_read(map); 2636 return (KERN_NO_SPACE); 2637 } 2638 2639 /* Round up the grow amount modulo SGROWSIZ */ 2640 grow_amount = roundup (grow_amount, sgrowsiz); 2641 if (grow_amount > stack_entry->avail_ssize) { 2642 grow_amount = stack_entry->avail_ssize; 2643 } 2644 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > 2645 p->p_rlimit[RLIMIT_STACK].rlim_cur)) { 2646 grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur - 2647 ctob(vm->vm_ssize); 2648 } 2649 2650 if (vm_map_lock_upgrade(map)) 2651 goto Retry; 2652 2653 /* Get the preliminary new entry start value */ 2654 addr = stack_entry->start - grow_amount; 2655 2656 /* If this puts us into the previous entry, cut back our growth 2657 * to the available space. Also, see the note above. 2658 */ 2659 if (addr < end) { 2660 stack_entry->avail_ssize = stack_entry->start - end; 2661 addr = end; 2662 } 2663 2664 rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start, 2665 VM_PROT_ALL, 2666 VM_PROT_ALL, 2667 0); 2668 2669 /* Adjust the available stack space by the amount we grew. */ 2670 if (rv == KERN_SUCCESS) { 2671 if (prev_entry != &map->header) 2672 vm_map_clip_end(map, prev_entry, addr); 2673 new_stack_entry = prev_entry->next; 2674 if (new_stack_entry->end != stack_entry->start || 2675 new_stack_entry->start != addr) 2676 panic ("Bad stack grow start/end in new stack entry"); 2677 else { 2678 new_stack_entry->avail_ssize = stack_entry->avail_ssize - 2679 (new_stack_entry->end - 2680 new_stack_entry->start); 2681 if (is_procstack) 2682 vm->vm_ssize += btoc(new_stack_entry->end - 2683 new_stack_entry->start); 2684 } 2685 } 2686 2687 vm_map_unlock(map); 2688 return (rv); 2689} 2690 2691/* 2692 * Unshare the specified VM space for exec. If other processes are 2693 * mapped to it, then create a new one. The new vmspace is null. 2694 */ 2695void 2696vmspace_exec(struct proc *p) 2697{ 2698 struct vmspace *oldvmspace = p->p_vmspace; 2699 struct vmspace *newvmspace; 2700 vm_map_t map = &p->p_vmspace->vm_map; 2701 2702 GIANT_REQUIRED; 2703 newvmspace = vmspace_alloc(map->min_offset, map->max_offset); 2704 bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy, 2705 (caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy); 2706 /* 2707 * This code is written like this for prototype purposes. The 2708 * goal is to avoid running down the vmspace here, but let the 2709 * other process's that are still using the vmspace to finally 2710 * run it down. Even though there is little or no chance of blocking 2711 * here, it is a good idea to keep this form for future mods. 2712 */ 2713 p->p_vmspace = newvmspace; 2714 pmap_pinit2(vmspace_pmap(newvmspace)); 2715 vmspace_free(oldvmspace); 2716 if (p == curthread->td_proc) /* XXXKSE ? */ 2717 pmap_activate(curthread); 2718} 2719 2720/* 2721 * Unshare the specified VM space for forcing COW. This 2722 * is called by rfork, for the (RFMEM|RFPROC) == 0 case. 2723 */ 2724void 2725vmspace_unshare(struct proc *p) 2726{ 2727 struct vmspace *oldvmspace = p->p_vmspace; 2728 struct vmspace *newvmspace; 2729 2730 GIANT_REQUIRED; 2731 if (oldvmspace->vm_refcnt == 1) 2732 return; 2733 newvmspace = vmspace_fork(oldvmspace); 2734 p->p_vmspace = newvmspace; 2735 pmap_pinit2(vmspace_pmap(newvmspace)); 2736 vmspace_free(oldvmspace); 2737 if (p == curthread->td_proc) /* XXXKSE ? */ 2738 pmap_activate(curthread); 2739} 2740 2741/* 2742 * vm_map_lookup: 2743 * 2744 * Finds the VM object, offset, and 2745 * protection for a given virtual address in the 2746 * specified map, assuming a page fault of the 2747 * type specified. 2748 * 2749 * Leaves the map in question locked for read; return 2750 * values are guaranteed until a vm_map_lookup_done 2751 * call is performed. Note that the map argument 2752 * is in/out; the returned map must be used in 2753 * the call to vm_map_lookup_done. 2754 * 2755 * A handle (out_entry) is returned for use in 2756 * vm_map_lookup_done, to make that fast. 2757 * 2758 * If a lookup is requested with "write protection" 2759 * specified, the map may be changed to perform virtual 2760 * copying operations, although the data referenced will 2761 * remain the same. 2762 */ 2763int 2764vm_map_lookup(vm_map_t *var_map, /* IN/OUT */ 2765 vm_offset_t vaddr, 2766 vm_prot_t fault_typea, 2767 vm_map_entry_t *out_entry, /* OUT */ 2768 vm_object_t *object, /* OUT */ 2769 vm_pindex_t *pindex, /* OUT */ 2770 vm_prot_t *out_prot, /* OUT */ 2771 boolean_t *wired) /* OUT */ 2772{ 2773 vm_map_entry_t entry; 2774 vm_map_t map = *var_map; 2775 vm_prot_t prot; 2776 vm_prot_t fault_type = fault_typea; 2777 2778 GIANT_REQUIRED; 2779RetryLookup:; 2780 /* 2781 * Lookup the faulting address. 2782 */ 2783 2784 vm_map_lock_read(map); 2785#define RETURN(why) \ 2786 { \ 2787 vm_map_unlock_read(map); \ 2788 return (why); \ 2789 } 2790 2791 /* 2792 * If the map has an interesting hint, try it before calling full 2793 * blown lookup routine. 2794 */ 2795 entry = map->hint; 2796 *out_entry = entry; 2797 if ((entry == &map->header) || 2798 (vaddr < entry->start) || (vaddr >= entry->end)) { 2799 vm_map_entry_t tmp_entry; 2800 2801 /* 2802 * Entry was either not a valid hint, or the vaddr was not 2803 * contained in the entry, so do a full lookup. 2804 */ 2805 if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) 2806 RETURN(KERN_INVALID_ADDRESS); 2807 2808 entry = tmp_entry; 2809 *out_entry = entry; 2810 } 2811 2812 /* 2813 * Handle submaps. 2814 */ 2815 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 2816 vm_map_t old_map = map; 2817 2818 *var_map = map = entry->object.sub_map; 2819 vm_map_unlock_read(old_map); 2820 goto RetryLookup; 2821 } 2822 2823 /* 2824 * Check whether this task is allowed to have this page. 2825 * Note the special case for MAP_ENTRY_COW 2826 * pages with an override. This is to implement a forced 2827 * COW for debuggers. 2828 */ 2829 if (fault_type & VM_PROT_OVERRIDE_WRITE) 2830 prot = entry->max_protection; 2831 else 2832 prot = entry->protection; 2833 fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); 2834 if ((fault_type & prot) != fault_type) { 2835 RETURN(KERN_PROTECTION_FAILURE); 2836 } 2837 if ((entry->eflags & MAP_ENTRY_USER_WIRED) && 2838 (entry->eflags & MAP_ENTRY_COW) && 2839 (fault_type & VM_PROT_WRITE) && 2840 (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) { 2841 RETURN(KERN_PROTECTION_FAILURE); 2842 } 2843 2844 /* 2845 * If this page is not pageable, we have to get it for all possible 2846 * accesses. 2847 */ 2848 *wired = (entry->wired_count != 0); 2849 if (*wired) 2850 prot = fault_type = entry->protection; 2851 2852 /* 2853 * If the entry was copy-on-write, we either ... 2854 */ 2855 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 2856 /* 2857 * If we want to write the page, we may as well handle that 2858 * now since we've got the map locked. 2859 * 2860 * If we don't need to write the page, we just demote the 2861 * permissions allowed. 2862 */ 2863 if (fault_type & VM_PROT_WRITE) { 2864 /* 2865 * Make a new object, and place it in the object 2866 * chain. Note that no new references have appeared 2867 * -- one just moved from the map to the new 2868 * object. 2869 */ 2870 if (vm_map_lock_upgrade(map)) 2871 goto RetryLookup; 2872 vm_object_shadow( 2873 &entry->object.vm_object, 2874 &entry->offset, 2875 atop(entry->end - entry->start)); 2876 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 2877 vm_map_lock_downgrade(map); 2878 } else { 2879 /* 2880 * We're attempting to read a copy-on-write page -- 2881 * don't allow writes. 2882 */ 2883 prot &= ~VM_PROT_WRITE; 2884 } 2885 } 2886 2887 /* 2888 * Create an object if necessary. 2889 */ 2890 if (entry->object.vm_object == NULL && 2891 !map->system_map) { 2892 if (vm_map_lock_upgrade(map)) 2893 goto RetryLookup; 2894 entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT, 2895 atop(entry->end - entry->start)); 2896 entry->offset = 0; 2897 vm_map_lock_downgrade(map); 2898 } 2899 2900 /* 2901 * Return the object/offset from this entry. If the entry was 2902 * copy-on-write or empty, it has been fixed up. 2903 */ 2904 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); 2905 *object = entry->object.vm_object; 2906 2907 /* 2908 * Return whether this is the only map sharing this data. 2909 */ 2910 *out_prot = prot; 2911 return (KERN_SUCCESS); 2912 2913#undef RETURN 2914} 2915 2916/* 2917 * vm_map_lookup_done: 2918 * 2919 * Releases locks acquired by a vm_map_lookup 2920 * (according to the handle returned by that lookup). 2921 */ 2922void 2923vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry) 2924{ 2925 /* 2926 * Unlock the main-level map 2927 */ 2928 GIANT_REQUIRED; 2929 vm_map_unlock_read(map); 2930} 2931 2932/* 2933 * Implement uiomove with VM operations. This handles (and collateral changes) 2934 * support every combination of source object modification, and COW type 2935 * operations. 2936 */ 2937int 2938vm_uiomove( 2939 vm_map_t mapa, 2940 vm_object_t srcobject, 2941 off_t cp, 2942 int cnta, 2943 vm_offset_t uaddra, 2944 int *npages) 2945{ 2946 vm_map_t map; 2947 vm_object_t first_object, oldobject, object; 2948 vm_map_entry_t entry; 2949 vm_prot_t prot; 2950 boolean_t wired; 2951 int tcnt, rv; 2952 vm_offset_t uaddr, start, end, tend; 2953 vm_pindex_t first_pindex, osize, oindex; 2954 off_t ooffset; 2955 int cnt; 2956 2957 GIANT_REQUIRED; 2958 2959 if (npages) 2960 *npages = 0; 2961 2962 cnt = cnta; 2963 uaddr = uaddra; 2964 2965 while (cnt > 0) { 2966 map = mapa; 2967 2968 if ((vm_map_lookup(&map, uaddr, 2969 VM_PROT_READ, &entry, &first_object, 2970 &first_pindex, &prot, &wired)) != KERN_SUCCESS) { 2971 return EFAULT; 2972 } 2973 2974 vm_map_clip_start(map, entry, uaddr); 2975 2976 tcnt = cnt; 2977 tend = uaddr + tcnt; 2978 if (tend > entry->end) { 2979 tcnt = entry->end - uaddr; 2980 tend = entry->end; 2981 } 2982 2983 vm_map_clip_end(map, entry, tend); 2984 2985 start = entry->start; 2986 end = entry->end; 2987 2988 osize = atop(tcnt); 2989 2990 oindex = OFF_TO_IDX(cp); 2991 if (npages) { 2992 vm_pindex_t idx; 2993 for (idx = 0; idx < osize; idx++) { 2994 vm_page_t m; 2995 if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) { 2996 vm_map_lookup_done(map, entry); 2997 return 0; 2998 } 2999 /* 3000 * disallow busy or invalid pages, but allow 3001 * m->busy pages if they are entirely valid. 3002 */ 3003 if ((m->flags & PG_BUSY) || 3004 ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) { 3005 vm_map_lookup_done(map, entry); 3006 return 0; 3007 } 3008 } 3009 } 3010 3011/* 3012 * If we are changing an existing map entry, just redirect 3013 * the object, and change mappings. 3014 */ 3015 if ((first_object->type == OBJT_VNODE) && 3016 ((oldobject = entry->object.vm_object) == first_object)) { 3017 3018 if ((entry->offset != cp) || (oldobject != srcobject)) { 3019 /* 3020 * Remove old window into the file 3021 */ 3022 pmap_remove (map->pmap, uaddr, tend); 3023 3024 /* 3025 * Force copy on write for mmaped regions 3026 */ 3027 vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize); 3028 3029 /* 3030 * Point the object appropriately 3031 */ 3032 if (oldobject != srcobject) { 3033 3034 /* 3035 * Set the object optimization hint flag 3036 */ 3037 vm_object_set_flag(srcobject, OBJ_OPT); 3038 vm_object_reference(srcobject); 3039 entry->object.vm_object = srcobject; 3040 3041 if (oldobject) { 3042 vm_object_deallocate(oldobject); 3043 } 3044 } 3045 3046 entry->offset = cp; 3047 map->timestamp++; 3048 } else { 3049 pmap_remove (map->pmap, uaddr, tend); 3050 } 3051 3052 } else if ((first_object->ref_count == 1) && 3053 (first_object->size == osize) && 3054 ((first_object->type == OBJT_DEFAULT) || 3055 (first_object->type == OBJT_SWAP)) ) { 3056 3057 oldobject = first_object->backing_object; 3058 3059 if ((first_object->backing_object_offset != cp) || 3060 (oldobject != srcobject)) { 3061 /* 3062 * Remove old window into the file 3063 */ 3064 pmap_remove (map->pmap, uaddr, tend); 3065 3066 /* 3067 * Remove unneeded old pages 3068 */ 3069 vm_object_page_remove(first_object, 0, 0, 0); 3070 3071 /* 3072 * Invalidate swap space 3073 */ 3074 if (first_object->type == OBJT_SWAP) { 3075 swap_pager_freespace(first_object, 3076 0, 3077 first_object->size); 3078 } 3079 3080 /* 3081 * Force copy on write for mmaped regions 3082 */ 3083 vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize); 3084 3085 /* 3086 * Point the object appropriately 3087 */ 3088 if (oldobject != srcobject) { 3089 /* 3090 * Set the object optimization hint flag 3091 */ 3092 vm_object_set_flag(srcobject, OBJ_OPT); 3093 vm_object_reference(srcobject); 3094 3095 if (oldobject) { 3096 TAILQ_REMOVE(&oldobject->shadow_head, 3097 first_object, shadow_list); 3098 oldobject->shadow_count--; 3099 /* XXX bump generation? */ 3100 vm_object_deallocate(oldobject); 3101 } 3102 3103 TAILQ_INSERT_TAIL(&srcobject->shadow_head, 3104 first_object, shadow_list); 3105 srcobject->shadow_count++; 3106 /* XXX bump generation? */ 3107 3108 first_object->backing_object = srcobject; 3109 } 3110 first_object->backing_object_offset = cp; 3111 map->timestamp++; 3112 } else { 3113 pmap_remove (map->pmap, uaddr, tend); 3114 } 3115/* 3116 * Otherwise, we have to do a logical mmap. 3117 */ 3118 } else { 3119 3120 vm_object_set_flag(srcobject, OBJ_OPT); 3121 vm_object_reference(srcobject); 3122 3123 pmap_remove (map->pmap, uaddr, tend); 3124 3125 vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize); 3126 vm_map_lock_upgrade(map); 3127 3128 if (entry == &map->header) { 3129 map->first_free = &map->header; 3130 } else if (map->first_free->start >= start) { 3131 map->first_free = entry->prev; 3132 } 3133 3134 SAVE_HINT(map, entry->prev); 3135 vm_map_entry_delete(map, entry); 3136 3137 object = srcobject; 3138 ooffset = cp; 3139 3140 rv = vm_map_insert(map, object, ooffset, start, tend, 3141 VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE); 3142 3143 if (rv != KERN_SUCCESS) 3144 panic("vm_uiomove: could not insert new entry: %d", rv); 3145 } 3146 3147/* 3148 * Map the window directly, if it is already in memory 3149 */ 3150 pmap_object_init_pt(map->pmap, uaddr, 3151 srcobject, oindex, tcnt, 0); 3152 3153 map->timestamp++; 3154 vm_map_unlock(map); 3155 3156 cnt -= tcnt; 3157 uaddr += tcnt; 3158 cp += tcnt; 3159 if (npages) 3160 *npages += osize; 3161 } 3162 return 0; 3163} 3164 3165/* 3166 * Performs the copy_on_write operations necessary to allow the virtual copies 3167 * into user space to work. This has to be called for write(2) system calls 3168 * from other processes, file unlinking, and file size shrinkage. 3169 */ 3170void 3171vm_freeze_copyopts(vm_object_t object, vm_pindex_t froma, vm_pindex_t toa) 3172{ 3173 int rv; 3174 vm_object_t robject; 3175 vm_pindex_t idx; 3176 3177 GIANT_REQUIRED; 3178 if ((object == NULL) || 3179 ((object->flags & OBJ_OPT) == 0)) 3180 return; 3181 3182 if (object->shadow_count > object->ref_count) 3183 panic("vm_freeze_copyopts: sc > rc"); 3184 3185 while ((robject = TAILQ_FIRST(&object->shadow_head)) != NULL) { 3186 vm_pindex_t bo_pindex; 3187 vm_page_t m_in, m_out; 3188 3189 bo_pindex = OFF_TO_IDX(robject->backing_object_offset); 3190 3191 vm_object_reference(robject); 3192 3193 vm_object_pip_wait(robject, "objfrz"); 3194 3195 if (robject->ref_count == 1) { 3196 vm_object_deallocate(robject); 3197 continue; 3198 } 3199 3200 vm_object_pip_add(robject, 1); 3201 3202 for (idx = 0; idx < robject->size; idx++) { 3203 3204 m_out = vm_page_grab(robject, idx, 3205 VM_ALLOC_NORMAL | VM_ALLOC_RETRY); 3206 3207 if (m_out->valid == 0) { 3208 m_in = vm_page_grab(object, bo_pindex + idx, 3209 VM_ALLOC_NORMAL | VM_ALLOC_RETRY); 3210 if (m_in->valid == 0) { 3211 rv = vm_pager_get_pages(object, &m_in, 1, 0); 3212 if (rv != VM_PAGER_OK) { 3213 printf("vm_freeze_copyopts: cannot read page from file: %lx\n", (long)m_in->pindex); 3214 continue; 3215 } 3216 vm_page_deactivate(m_in); 3217 } 3218 3219 vm_page_protect(m_in, VM_PROT_NONE); 3220 pmap_copy_page(VM_PAGE_TO_PHYS(m_in), VM_PAGE_TO_PHYS(m_out)); 3221 m_out->valid = m_in->valid; 3222 vm_page_dirty(m_out); 3223 vm_page_activate(m_out); 3224 vm_page_wakeup(m_in); 3225 } 3226 vm_page_wakeup(m_out); 3227 } 3228 3229 object->shadow_count--; 3230 object->ref_count--; 3231 TAILQ_REMOVE(&object->shadow_head, robject, shadow_list); 3232 robject->backing_object = NULL; 3233 robject->backing_object_offset = 0; 3234 3235 vm_object_pip_wakeup(robject); 3236 vm_object_deallocate(robject); 3237 } 3238 3239 vm_object_clear_flag(object, OBJ_OPT); 3240} 3241 3242#include "opt_ddb.h" 3243#ifdef DDB 3244#include <sys/kernel.h> 3245 3246#include <ddb/ddb.h> 3247 3248/* 3249 * vm_map_print: [ debug ] 3250 */ 3251DB_SHOW_COMMAND(map, vm_map_print) 3252{ 3253 static int nlines; 3254 /* XXX convert args. */ 3255 vm_map_t map = (vm_map_t)addr; 3256 boolean_t full = have_addr; 3257 3258 vm_map_entry_t entry; 3259 3260 db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n", 3261 (void *)map, 3262 (void *)map->pmap, map->nentries, map->timestamp); 3263 nlines++; 3264 3265 if (!full && db_indent) 3266 return; 3267 3268 db_indent += 2; 3269 for (entry = map->header.next; entry != &map->header; 3270 entry = entry->next) { 3271 db_iprintf("map entry %p: start=%p, end=%p\n", 3272 (void *)entry, (void *)entry->start, (void *)entry->end); 3273 nlines++; 3274 { 3275 static char *inheritance_name[4] = 3276 {"share", "copy", "none", "donate_copy"}; 3277 3278 db_iprintf(" prot=%x/%x/%s", 3279 entry->protection, 3280 entry->max_protection, 3281 inheritance_name[(int)(unsigned char)entry->inheritance]); 3282 if (entry->wired_count != 0) 3283 db_printf(", wired"); 3284 } 3285 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 3286 /* XXX no %qd in kernel. Truncate entry->offset. */ 3287 db_printf(", share=%p, offset=0x%lx\n", 3288 (void *)entry->object.sub_map, 3289 (long)entry->offset); 3290 nlines++; 3291 if ((entry->prev == &map->header) || 3292 (entry->prev->object.sub_map != 3293 entry->object.sub_map)) { 3294 db_indent += 2; 3295 vm_map_print((db_expr_t)(intptr_t) 3296 entry->object.sub_map, 3297 full, 0, (char *)0); 3298 db_indent -= 2; 3299 } 3300 } else { 3301 /* XXX no %qd in kernel. Truncate entry->offset. */ 3302 db_printf(", object=%p, offset=0x%lx", 3303 (void *)entry->object.vm_object, 3304 (long)entry->offset); 3305 if (entry->eflags & MAP_ENTRY_COW) 3306 db_printf(", copy (%s)", 3307 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done"); 3308 db_printf("\n"); 3309 nlines++; 3310 3311 if ((entry->prev == &map->header) || 3312 (entry->prev->object.vm_object != 3313 entry->object.vm_object)) { 3314 db_indent += 2; 3315 vm_object_print((db_expr_t)(intptr_t) 3316 entry->object.vm_object, 3317 full, 0, (char *)0); 3318 nlines += 4; 3319 db_indent -= 2; 3320 } 3321 } 3322 } 3323 db_indent -= 2; 3324 if (db_indent == 0) 3325 nlines = 0; 3326} 3327 3328 3329DB_SHOW_COMMAND(procvm, procvm) 3330{ 3331 struct proc *p; 3332 3333 if (have_addr) { 3334 p = (struct proc *) addr; 3335 } else { 3336 p = curproc; 3337 } 3338 3339 db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n", 3340 (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map, 3341 (void *)vmspace_pmap(p->p_vmspace)); 3342 3343 vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL); 3344} 3345 3346#endif /* DDB */
|