Lines Matching refs:map

25 	a 1-bit first level (a 2-entry top-level table) that would map kernel
119 #define IS_KERNEL_MAP(map) ((map)->arch_data->rtdir_phys == sKernelPhysicalPageRoot)
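
Nearly every match below dereferences map->arch_data, so a hedged reconstruction of that structure helps in reading the fragments. All fields are inferred from usage in the matched lines only; the real definition in the full source may differ:

    /* Reconstruction only, not the authoritative definition.
       rtdir_virt/rtdir_phys: the page root table's virtual and physical
       addresses (see lines 132, 340, 1195); the remaining two fields
       form the deferred TLB-invalidation queue drained by flush_tmap. */
    typedef struct vm_translation_map_arch_info {
        page_root_entry *rtdir_virt;
        page_root_entry *rtdir_phys;
        int num_invalidate_pages;
        addr_t pages_to_invalidate[PAGE_INVALIDATE_CACHE_SIZE];
    } vm_translation_map_arch_info;
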
125 static void flush_tmap(vm_translation_map *map);
130 _m68k_translation_map_get_pgdir(vm_translation_map *map)
132 return map->arch_data->rtdir_phys;
270 /*! Acquires the map's recursive lock, and resets the invalidate pages counter
274 lock_tmap(vm_translation_map *map)
276 TRACE(("lock_tmap: map %p\n", map));
278 recursive_lock_lock(&map->lock);
279 if (recursive_lock_get_recursion(&map->lock) == 1) {
282 map->arch_data->num_invalidate_pages = 0;
289 /*! Unlocks the map and, if we're actually losing the recursive lock,
290 	flushes all pending changes of this map (i.e. flushes TLB caches as
294 unlock_tmap(vm_translation_map *map)
296 TRACE(("unlock_tmap: map %p\n", map));
298 if (recursive_lock_get_recursion(&map->lock) == 1) {
300 flush_tmap(map);
303 recursive_lock_unlock(&map->lock);
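
Read together, the lock_tmap/unlock_tmap fragments (lines 274-303) implement a deferred-flush locking protocol: the invalidation queue is reset when the outermost lock is taken and drained just before the outermost unlock. A minimal sketch, using only the calls visible in the matches (the _sketch suffix is mine):

    /* Deferred-flush protocol around the map's recursive lock.
       A recursion depth of 1 inside lock_tmap means this is the
       outermost acquisition; inside unlock_tmap it means the lock
       is about to be released for real. */
    static void lock_tmap_sketch(vm_translation_map *map)
    {
        recursive_lock_lock(&map->lock);
        if (recursive_lock_get_recursion(&map->lock) == 1)
            map->arch_data->num_invalidate_pages = 0;  /* fresh queue */
    }

    static void unlock_tmap_sketch(vm_translation_map *map)
    {
        if (recursive_lock_get_recursion(&map->lock) == 1)
            flush_tmap(map);  /* drain pending TLB invalidations */
        recursive_lock_unlock(&map->lock);
    }
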
309 destroy_tmap(vm_translation_map *map)
316 if (map == NULL)
325 if (entry == map) {
340 if (map->arch_data->rtdir_virt != NULL) {
350 if (map->arch_data->rtdir_virt[i].type == DT_INVALID)
352 if (map->arch_data->rtdir_virt[i].type != DT_ROOT) {
357 pgdir_pn = PRE_TO_PN(map->arch_data->rtdir_virt[i]);
387 free(map->arch_data->rtdir_virt);
390 free(map->arch_data);
391 recursive_lock_destroy(&map->lock);
488 map_max_pages_need(vm_translation_map */*map*/, addr_t start, addr_t end)
517 map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
536 pr = map->arch_data->rtdir_virt;
572 map->map_count++;
615 map->map_count++;
629 IS_KERNEL_MAP(map));
634 if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE)
635 map->arch_data->pages_to_invalidate[map->arch_data->num_invalidate_pages] = va;
637 map->arch_data->num_invalidate_pages++;
639 map->map_count++;
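
Lines 634-637 are the producer side of the invalidation queue: a virtual address is stored only while the fixed-size cache has room, but the counter always advances, so flush_tmap can detect overflow later. A hedged sketch of that step (the helper name queue_invalidate is mine, not the source's); the same queue-or-count block recurs in unmap_tmap (708-711), protect_tmap (934-937), and clear_flags_tmap (1020-1023):

    /* Record a page for deferred TLB invalidation. Entries past
       PAGE_INVALIDATE_CACHE_SIZE are not stored, only counted;
       flush_tmap treats the overflowed count as "flush everything". */
    static void queue_invalidate(vm_translation_map *map, addr_t va)
    {
        vm_translation_map_arch_info *info = map->arch_data;
        if (info->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE)
            info->pages_to_invalidate[info->num_invalidate_pages] = va;
        info->num_invalidate_pages++;
    }
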
646 unmap_tmap(vm_translation_map *map, addr_t start, addr_t end)
650 page_root_entry *pr = map->arch_data->rtdir_virt;
706 map->map_count--;
708 if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE)
709 map->arch_data->pages_to_invalidate[map->arch_data->num_invalidate_pages] = start;
711 map->arch_data->num_invalidate_pages++;
722 query_tmap_interrupt(vm_translation_map *map, addr_t va, addr_t *_physical,
725 page_root_entry *pr = map->arch_data->rtdir_virt;
778 query_tmap(vm_translation_map *map, addr_t va, addr_t *_physical, uint32 *_flags)
783 page_root_entry *pr = map->arch_data->rtdir_virt;
861 get_mapped_size_tmap(vm_translation_map *map)
863 return map->map_count;
868 protect_tmap(vm_translation_map *map, addr_t start, addr_t end, uint32 attributes)
872 page_root_entry *pr = map->arch_data->rtdir_virt;
934 if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE)
935 map->arch_data->pages_to_invalidate[map->arch_data->num_invalidate_pages] = start;
937 map->arch_data->num_invalidate_pages++;
948 clear_flags_tmap(vm_translation_map *map, addr_t va, uint32 flags)
953 page_root_entry *pr = map->arch_data->rtdir_virt;
1020 if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE)
1021 map->arch_data->pages_to_invalidate[map->arch_data->num_invalidate_pages] = va;
1023 map->arch_data->num_invalidate_pages++;
1031 flush_tmap(vm_translation_map *map)
1035 if (map->arch_data->num_invalidate_pages <= 0)
1040 if (map->arch_data->num_invalidate_pages > PAGE_INVALIDATE_CACHE_SIZE) {
1043 map->arch_data->num_invalidate_pages));
1045 if (IS_KERNEL_MAP(map)) {
1052 map->arch_data->num_invalidate_pages));
1054 arch_cpu_invalidate_TLB_list(map->arch_data->pages_to_invalidate,
1055 map->arch_data->num_invalidate_pages);
1057 map->arch_data->num_invalidate_pages = 0;
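
Assembled from the fragments above (lines 1035-1057), the flush logic reads as follows. The two whole-TLB flush calls are not visible in the matched lines; arch_cpu_global_TLB_invalidate() and arch_cpu_user_TLB_invalidate() are assumptions modeled on the same arch_cpu family as arch_cpu_invalidate_TLB_list(), so treat them as placeholders:

    /* Sketch of flush_tmap(). If the fixed-size queue overflowed, some
       addresses were dropped (only counted), so the whole TLB must be
       invalidated; otherwise the queued pages are invalidated one by one. */
    static void flush_tmap_sketch(vm_translation_map *map)
    {
        if (map->arch_data->num_invalidate_pages <= 0)
            return;  /* nothing pending */

        if (map->arch_data->num_invalidate_pages > PAGE_INVALIDATE_CACHE_SIZE) {
            /* overflow: the queue is incomplete, flush everything */
            if (IS_KERNEL_MAP(map))
                arch_cpu_global_TLB_invalidate();   /* assumed name */
            else
                arch_cpu_user_TLB_invalidate();     /* assumed name */
        } else {
            arch_cpu_invalidate_TLB_list(map->arch_data->pages_to_invalidate,
                map->arch_data->num_invalidate_pages);
        }
        map->arch_data->num_invalidate_pages = 0;
    }
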
1163 m68k_vm_translation_map_init_map(vm_translation_map *map, bool kernel)
1165 if (map == NULL)
1171 map->ops = &tmap_ops;
1172 map->map_count = 0;
1174 recursive_lock_init(&map->lock, "translation map");
1176 map->arch_data = (vm_translation_map_arch_info *)malloc(sizeof(vm_translation_map_arch_info));
1177 if (map->arch_data == NULL) {
1178 recursive_lock_destroy(&map->lock);
1182 map->arch_data->num_invalidate_pages = 0;
1187 map->arch_data->rtdir_virt = (page_root_entry *)memalign(
1189 if (map->arch_data->rtdir_virt == NULL) {
1190 free(map->arch_data);
1191 recursive_lock_destroy(&map->lock);
1195 (addr_t)map->arch_data->rtdir_virt, (addr_t *)&map->arch_data->rtdir_phys);
1199 map->arch_data->rtdir_virt = sKernelVirtualPageRoot;
1200 map->arch_data->rtdir_phys = sKernelPhysicalPageRoot;
1204 memset(map->arch_data->rtdir_virt + FIRST_USER_PGROOT_ENT, 0,
1207 // insert this new map into the map list
1213 memcpy(map->arch_data->rtdir_virt + FIRST_KERNEL_PGROOT_ENT,
1217 map->next = tmap_list;
1218 tmap_list = map;
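
Note the corrected NULL check after the malloc at 1176/1177: it must test map->arch_data, not map (map was already checked at 1165). A sketch of the allocation sequence the fragments imply, with cleanup in reverse order of acquisition; B_NO_MEMORY and the SIZ_ROOTTBL size/alignment are assumptions, since they fall outside the matched lines:

    /* Sketch of init_map's setup path (lines 1176-1218). */
    map->arch_data = (vm_translation_map_arch_info *)
        malloc(sizeof(vm_translation_map_arch_info));
    if (map->arch_data == NULL) {          /* test the allocation, not map */
        recursive_lock_destroy(&map->lock);
        return B_NO_MEMORY;                /* error code assumed */
    }
    map->arch_data->num_invalidate_pages = 0;

    if (!kernel) {
        /* user maps get a private page root table */
        map->arch_data->rtdir_virt = (page_root_entry *)memalign(
            SIZ_ROOTTBL, SIZ_ROOTTBL);     /* size/alignment assumed */
        if (map->arch_data->rtdir_virt == NULL) {
            free(map->arch_data);
            recursive_lock_destroy(&map->lock);
            return B_NO_MEMORY;
        }
        /* the physical address of rtdir_virt is looked up here (line 1195)
           and stored in map->arch_data->rtdir_phys */
    } else {
        /* the kernel map reuses the global page root (lines 1199-1200) */
        map->arch_data->rtdir_virt = sKernelVirtualPageRoot;
        map->arch_data->rtdir_phys = sKernelPhysicalPageRoot;
    }
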
1229 m68k_vm_translation_map_init_kernel_map_post_sem(vm_translation_map *map)
1370 // insert the indirect descriptor in the tree so we can map the page we want from it.
1438 // XXX horrible back door to map a page quickly regardless of translation map object, etc.