Lines Matching defs:map

109 	vm_map_t		map,
126 if (map == VM_MAP_NULL || (flags & ~(KMA_KOBJECT | KMA_LOMEM | KMA_NOPAGEWAIT)))
130 VM_MAP_PAGE_MASK(map));
141 * will be donating to the map entry. We must do this before
142 * locking the map, or risk deadlock with the default pager.
151 kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0, &entry);
161 /* Take an extra object ref in case the map entry gets deleted */
163 vm_map_unlock(map);
168 vm_map_remove(map,
170 VM_MAP_PAGE_MASK(map)),
172 VM_MAP_PAGE_MASK(map)),
189 kr = vm_map_wire(map,
191 VM_MAP_PAGE_MASK(map)),
193 VM_MAP_PAGE_MASK(map)),
202 vm_map_remove(map,
204 VM_MAP_PAGE_MASK(map)),
206 VM_MAP_PAGE_MASK(map)),
214 vm_map_simplify(map, map_addr);
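
The matches above walk kernel_memory_allocate(): the flags are checked against KMA_KOBJECT | KMA_LOMEM | KMA_NOPAGEWAIT, vm_map_find_space() reserves a range, vm_map_wire() wires it down (the range is torn back out with vm_map_remove() if wiring fails), and vm_map_simplify() coalesces the entry on success. A minimal caller sketch, assuming the five-argument shape visible in the kmem_alloc() wrapper below (map, out address, size, alignment mask, KMA_* flags); the variable names are illustrative:

	vm_offset_t	addr;
	kern_return_t	kr;

	/* Sketch only: one wired page from kernel_map, failing rather
	 * than waiting if no pages are available. */
	kr = kernel_memory_allocate(kernel_map, &addr, PAGE_SIZE,
				    0,			/* no alignment mask */
				    KMA_NOPAGEWAIT);
	if (kr != KERN_SUCCESS)
		return kr;
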
225 * map : map to allocate into
240 register vm_map_t map,
268 VM_MAP_PAGE_MASK(map));
382 * locking the map, or risk deadlock with the default pager.
394 kr = vm_map_find_space(map, &map_addr,
416 vm_map_unlock(map);
500 vm_map_simplify(map, map_addr);
522 vm_map_t map,
624 * lock map;
628 * unlock map;
632 map, (uint64_t) addr, (uint64_t) size, flags);
692 vm_map_t map,
719 * lock map;
722 * unlock map;
726 map, (uint64_t) addr, (uint64_t) size, flags);
760 * Allocate wired-down memory in the kernel's address map
766 vm_map_t map,
770 kern_return_t kr = kernel_memory_allocate(map, addrp, size, 0, 0);
778 * Reallocate wired-down memory in the kernel's address map
787 vm_map_t map,
806 VM_MAP_PAGE_MASK(map));
808 VM_MAP_PAGE_MASK(map));
811 VM_MAP_PAGE_MASK(map));
818 vm_map_lock(map);
820 if (!vm_map_lookup_entry(map, oldmapmin, &oldentry))
830 /* by grabbing the object lock before unlocking the map */
834 vm_map_unlock(map);
849 kr = vm_map_find_space(map, &newmapaddr, newmapsize,
872 vm_map_unlock(map);
874 kr = vm_map_wire(map, newmapaddr, newmapaddr + newmapsize, VM_PROT_DEFAULT, FALSE);
876 vm_map_remove(map, newmapaddr, newmapaddr + newmapsize, 0);
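
kmem_realloc() does not grow the range in place: it looks up the old entry, grows its backing object, finds space for a new, larger range, and wires that range, removing it again if the wire fails. A sketch, assuming the (map, old address, old size, new address out, new size) argument order suggested by the old/new variables above; old_buf and old_size stand in for an earlier kmem_alloc() result:

	vm_offset_t	old_buf = 0;		/* illustrative: from a prior kmem_alloc() */
	vm_size_t	old_size = 4 * PAGE_SIZE;
	vm_offset_t	new_buf;
	kern_return_t	kr;

	kr = kmem_realloc(kernel_map, old_buf, old_size,
			  &new_buf, old_size + PAGE_SIZE);
	if (kr != KERN_SUCCESS)
		return kr;
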
897 * Allocate wired-down memory in the kernel's address map
907 vm_map_t map,
911 return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT);
923 vm_map_t map,
929 return kernel_memory_allocate(map, addrp, size, size - 1, KMA_KOBJECT);
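
The two wrappers above differ only in the alignment mask handed to kernel_memory_allocate(): kmem_alloc_kobject() passes 0, while kmem_alloc_aligned() passes size - 1, which only describes a valid alignment when size is a power of two. A usage sketch (buffer name and size are illustrative):

	vm_offset_t	buf;
	kern_return_t	kr;

	/* Sketch: size must be a power of two, since the mask is size - 1. */
	kr = kmem_alloc_aligned(kernel_map, &buf, 16 * PAGE_SIZE);
	if (kr != KERN_SUCCESS)
		return kr;
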
935 * Allocate pageable memory in the kernel's address map.
940 vm_map_t map,
949 map_addr = (vm_map_min(map)) + PAGE_SIZE;
951 map_addr = vm_map_min(map);
954 VM_MAP_PAGE_MASK(map));
956 kr = vm_map_enter(map, &map_addr, map_size,
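
Unlike the wired variants, kmem_alloc_pageable() only enters a pageable range with vm_map_enter() and never wires it, so pages are faulted in on first touch. A sketch, assuming the same (map, out address, size) shape as kmem_alloc(); the size is illustrative:

	vm_offset_t	pbuf;
	kern_return_t	kr;

	kr = kmem_alloc_pageable(kernel_map, &pbuf, 8 * PAGE_SIZE);
	if (kr != KERN_SUCCESS)
		return kr;
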
978 vm_map_t map,
990 printf("kmem_free called with size==0 for map: %p with addr: 0x%llx\n",map,(uint64_t)addr);
995 kr = vm_map_remove(map,
997 VM_MAP_PAGE_MASK(map)),
999 VM_MAP_PAGE_MASK(map)),
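
kmem_free() removes the page-aligned range with vm_map_remove() and expects the same map, address, and size the allocation returned; a zero size is only logged by the printf above. A paired sketch (len is an illustrative size):

	vm_size_t	len = 3 * PAGE_SIZE;	/* illustrative */
	vm_offset_t	scratch;

	if (kmem_alloc(kernel_map, &scratch, len) == KERN_SUCCESS) {
		/* ... use the wired buffer ... */
		kmem_free(kernel_map, scratch, len);
	}
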
1043 * The object is assumed to be mapped into the kernel map or
1113 * Allocates a map to manage a subrange
1122 * anywhere		Can region be located anywhere in map
1134 vm_map_t map;
1164 map = vm_map_create(vm_map_pmap(parent), map_addr, map_addr + map_size, pageable);
1165 if (map == VM_MAP_NULL)
1167 /* inherit the parent map's page size */
1168 vm_map_set_page_shift(map, VM_MAP_PAGE_SHIFT(parent));
1170 kr = vm_map_submap(parent, map_addr, map_addr + map_size, map, map_addr, FALSE);
1176 vm_map_deallocate(map); /* also removes ref to pmap */
1181 *new_map = map;
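
kmem_suballoc() carves a child map out of the parent: vm_map_create() builds it on the parent's pmap, the parent's page shift is inherited, and vm_map_submap() installs it over the reserved range, deallocating the new map if that fails. A caller sketch, assuming the historical argument order (parent, in/out address, size, pageable, VM_FLAGS_*, out map); the submap name and size are illustrative:

	vm_map_t	my_submap;		/* illustrative */
	vm_offset_t	base = 0;
	kern_return_t	kr;

	kr = kmem_suballoc(kernel_map, &base, 64 * 1024 * 1024,
			   FALSE,		/* not pageable */
			   VM_FLAGS_ANYWHERE,	/* let the parent pick the range */
			   &my_submap);
	if (kr != KERN_SUCCESS)
		return kr;
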
1188 * Initialize the kernel's virtual memory map, taking
1252 * in the specified VM map. This implementation
1253 * is incomplete; it handles the current user map
1254 * and the kernel map/submaps.
1258 vm_map_t map,
1266 if (vm_map_pmap(map) == pmap_kernel())
1271 else if (current_map() == map)
1278 vm_map_reference(map);
1279 oldmap = vm_map_switch(map);
1283 vm_map_deallocate(map);
1292 * in the specified VM map. This implementation
1293 * is incomplete; it handles the current user map
1294 * and the kernel map/submaps.
1298 vm_map_t map,
1303 if (vm_map_pmap(map) == pmap_kernel()) {
1309 if (current_map() != map)
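
As their comments note, copyinmap() and copyoutmap() above are incomplete: they handle the kernel map (and its submaps) with a direct memory copy, and the current user map through the normal copyin()/copyout() paths. A hedged sketch of the copy-in direction, assuming a (map, source address, destination buffer, length) parameter order; user_va and hdr are illustrative:

	vm_map_offset_t	user_va = 0;	/* illustrative: a user address obtained elsewhere */
	char		hdr[64];	/* illustrative destination buffer */
	kern_return_t	kr;

	kr = copyinmap(current_map(), user_va, hdr, sizeof(hdr));
	if (kr != KERN_SUCCESS)
		return kr;
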
1321 vm_map_t map,
1336 base_map = map;
1340 vm_map_lock(map);
1341 while(vm_map_lookup_entry(map, off, &entry)) {
1345 vm_map_unlock(map);
1351 old_map = map;
1353 map = entry->object.sub_map;
1367 vm_map_unlock(map);
1380 * the target map and the file offset start in the same page
1383 vm_map_unlock(map);
1390 * within the encompass of the target map
1392 vm_map_unlock(map);
1399 * the target map's encompass
1401 vm_map_unlock(map);
1406 vm_map_unlock(map);
1412 vm_map_unlock(map);
1421 if(map != base_map) {
1422 vm_map_unlock(map);
1424 map = base_map;
1428 vm_map_unlock(map);