
Lines Matching refs:map

223 	 * ld.so sometimes issues anonymous map requests with non-zero
474 vm_map_t map;
491 map = &td->td_proc->p_vmspace->vm_map;
496 rv = vm_map_sync(map, addr, addr + size, (flags & MS_ASYNC) == 0,
532 vm_map_t map;
549 map = &td->td_proc->p_vmspace->vm_map;
550 if (addr < vm_map_min(map) || addr + size > vm_map_max(map))
552 vm_map_lock(map);
559 if (vm_map_lookup_entry(map, addr, &entry)) {
561 entry != &map->header && entry->start < addr + size;
563 if (vm_map_check_protection(map, entry->start,
572 vm_map_delete(map, addr, addr + size);
576 vm_map_lock_downgrade(map);
579 vm_map_unlock_read(map);
581 vm_map_unlock(map);
686 vm_map_t map;
708 map = &td->td_proc->p_vmspace->vm_map;
709 if ((vm_offset_t)uap->addr < vm_map_min(map) ||
710 (vm_offset_t)uap->addr + uap->len > vm_map_max(map))
722 if (vm_map_madvise(map, start, end, uap->behav))
746 vm_map_t map;
766 map = &td->td_proc->p_vmspace->vm_map;
767 if (end > vm_map_max(map) || end < addr)
777 vm_map_lock_read(map);
779 timestamp = map->timestamp;
781 if (!vm_map_lookup_entry(map, addr, &entry)) {
782 vm_map_unlock_read(map);
787 * Do this on a map entry basis so that if the pages are not
793 (current != &map->header) && (current->start < end);
800 (entry->next == &map->header ||
802 vm_map_unlock_read(map);
814 * limit this scan to the current map entry and the
913 * the map, we release the lock.
915 vm_map_unlock_read(map);
923 * If we have skipped map entries, we need to make sure that
945 * If the map has changed, due to the subyte, the previous
948 vm_map_lock_read(map);
949 if (timestamp != map->timestamp)
959 * the map, we release the lock.
961 vm_map_unlock_read(map);
977 * If the map has changed, due to the subyte, the previous
980 vm_map_lock_read(map);
981 if (timestamp != map->timestamp)
983 vm_map_unlock_read(map);
1011 vm_map_t map;
1028 map = &proc->p_vmspace->vm_map;
1030 nsize = ptoa(npages + pmap_wired_count(map->pmap));
1047 error = vm_map_wire(map, start, end,
1053 ptoa(pmap_wired_count(map->pmap)));
1074 vm_map_t map;
1077 map = &td->td_proc->p_vmspace->vm_map;
1091 if (map->size > lim_cur(td, RLIMIT_MEMLOCK)) {
1100 error = racct_set(td->td_proc, RACCT_MEMLOCK, map->size);
1108 vm_map_lock(map);
1109 vm_map_modflags(map, MAP_WIREFUTURE, 0);
1110 vm_map_unlock(map);
1121 error = vm_map_wire(map, vm_map_min(map), vm_map_max(map),
1129 ptoa(pmap_wired_count(map->pmap)));
1151 vm_map_t map;
1154 map = &td->td_proc->p_vmspace->vm_map;
1160 vm_map_lock(map);
1161 vm_map_modflags(map, 0, MAP_WIREFUTURE);
1162 vm_map_unlock(map);
1165 error = vm_map_unwire(map, vm_map_min(map), vm_map_max(map),
1195 vm_map_t map;
1214 map = &td->td_proc->p_vmspace->vm_map;
1216 ptoa(pmap_wired_count(map->pmap)));
1406 vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
1458 error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
1474 * map. Called by mmap for MAP_ANON, vm_mmap, shm_mmap, and vn_mmap.
1477 vm_mmap_object(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
1484 if (map == &td->td_proc->p_vmspace->vm_map) {
1486 if (map->size + size > lim_cur_proc(td->td_proc, RLIMIT_VMEM)) {
1490 if (racct_set(td->td_proc, RACCT_VMEM, map->size + size)) {
1494 if (!old_mlock && map->flags & MAP_WIREFUTURE) {
1495 if (ptoa(pmap_wired_count(map->pmap)) + size >
1498 map->size);
1503 ptoa(pmap_wired_count(map->pmap)) + size);
1506 map->size);
1570 rv = vm_map_find(map, object, foff, addr, size,
1576 rv = vm_map_fixed(map, object, foff, *addr, size,
1585 if (map->flags & MAP_WIREFUTURE) {
1586 vm_map_wire(map, *addr, *addr + size,
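
The matched lines above appear to come from FreeBSD-derived vm_mmap code: the groups correspond to the kernel paths behind msync (vm_map_sync), munmap (vm_map_delete), madvise (vm_map_madvise), mincore (the read-locked map walk with the timestamp re-check), mlock/mlockall/munlockall (vm_map_wire, vm_map_unwire, and the MAP_WIREFUTURE flag), and the vm_mmap/vm_mmap_object helpers used by mmap. As a rough, hedged illustration of the user-visible behaviour those paths implement, the following user-space sketch (not taken from this file; names, flags, and prototypes are the standard POSIX/FreeBSD ones) exercises the same system calls:

/*
 * Hedged user-space sketch, not part of the listing above.  It drives
 * the system calls whose kernel-side paths the matched lines appear to
 * implement.  Prototypes follow the FreeBSD man pages; on Linux,
 * mincore()'s vector is unsigned char *, hence the (void *) cast.
 */
#include <sys/mman.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
	size_t len = 4 * pagesz;
	char vec[4];		/* one mincore status byte per page */

	/* Anonymous private mapping; the listing routes this through vm_mmap_object(). */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return (1);
	}
	memset(p, 0xa5, len);		/* fault the pages in */

	/* mincore(): the kernel side walks the map entries under a read lock. */
	if (mincore(p, len, (void *)vec) == 0)
		printf("resident pages: %c%c%c%c\n",
		    vec[0] & 1 ? '1' : '0', vec[1] & 1 ? '1' : '0',
		    vec[2] & 1 ? '1' : '0', vec[3] & 1 ? '1' : '0');

	/* msync() and madvise() map to vm_map_sync() and vm_map_madvise(). */
	(void)msync(p, len, MS_ASYNC);
	(void)madvise(p, len, MADV_DONTNEED);

	/*
	 * mlockall(MCL_FUTURE) is the user-visible face of MAP_WIREFUTURE in
	 * the listing; it can fail against RLIMIT_MEMLOCK, so just report.
	 */
	if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0)
		perror("mlockall (RLIMIT_MEMLOCK?)");
	else
		(void)munlockall();

	/* munmap(): the listed path tears the range down via vm_map_delete(). */
	if (munmap(p, len) != 0) {
		perror("munmap");
		return (1);
	}
	return (0);
}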