Lines Matching defs:va

830  * All vmap_area objects in this tree are sorted by va->va_start
971 va_size(struct vmap_area *va)
973 return (va->va_end - va->va_start);
979 struct vmap_area *va;
981 va = rb_entry_safe(node, struct vmap_area, rb_node);
982 return va ? va->subtree_max_size : 0;
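
As a rough illustration of the two helpers above, here is a minimal userspace sketch; the demo_* types and names are simplified stand-ins, not the kernel's real vmap_area. It only shows that va_size() is the length of the half-open [va_start, va_end) range and that a missing rb-node contributes 0 to the cached subtree maximum.

    #include <stdio.h>

    /* Simplified stand-ins for the kernel types (illustration only). */
    struct demo_node {
        struct demo_node *left, *right;
        unsigned long subtree_max_size;   /* cached max free size below */
    };

    struct demo_area {
        unsigned long va_start, va_end;   /* half-open [va_start, va_end) */
        struct demo_node rb;
    };

    static unsigned long demo_va_size(const struct demo_area *va)
    {
        return va->va_end - va->va_start;
    }

    static unsigned long demo_subtree_max(const struct demo_node *n)
    {
        return n ? n->subtree_max_size : 0;   /* empty subtree counts as 0 */
    }

    int main(void)
    {
        struct demo_area a = { .va_start = 0x1000, .va_end = 0x5000 };

        printf("size = %#lx\n", demo_va_size(&a));            /* 0x4000 */
        printf("empty max = %lu\n", demo_subtree_max(NULL));  /* 0 */
        return 0;
    }
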
1007 struct vmap_area *va;
1009 va = rb_entry(n, struct vmap_area, rb_node);
1010 if (addr < va->va_start)
1012 else if (addr >= va->va_end)
1015 return va;
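
The lookup above walks a tree ordered by va_start: an address below va_start descends left, one at or past va_end descends right, anything else is a hit. A hedged sketch of the same comparison rule over a plain sorted array (not the kernel's rb-tree):

    #include <stdio.h>

    struct range { unsigned long start, end; };   /* [start, end) */

    /* Binary search: addr below start -> go left, at/above end -> go right. */
    static const struct range *find_range(const struct range *r, int n,
                                          unsigned long addr)
    {
        int lo = 0, hi = n - 1;

        while (lo <= hi) {
            int mid = (lo + hi) / 2;

            if (addr < r[mid].start)
                hi = mid - 1;
            else if (addr >= r[mid].end)
                lo = mid + 1;
            else
                return &r[mid];
        }
        return NULL;
    }

    int main(void)
    {
        const struct range areas[] = { { 0x1000, 0x2000 }, { 0x3000, 0x6000 } };
        const struct range *hit = find_range(areas, 2, 0x3500);

        printf("%s\n", hit ? "found" : "not found");
        return 0;
    }
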
1025 struct vmap_area *va = NULL;
1035 va = tmp;
1044 return va;
1055 find_vmap_area_exceed_addr_lock(unsigned long addr, struct vmap_area **va)
1066 *va = __find_vmap_area_exceed_addr(addr, &vn->busy.root);
1068 if (*va)
1069 if (!va_start_lowest || (*va)->va_start < va_start_lowest)
1070 va_start_lowest = (*va)->va_start;
1083 *va = __find_vmap_area(va_start_lowest, &vn->busy.root);
1085 if (*va)
1104 find_va_links(struct vmap_area *va,
1124 * its link, where the new va->rb_node will be attached to.
1134 if (va->va_end <= tmp_va->va_start)
1136 else if (va->va_start >= tmp_va->va_end)
1140 va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);
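
find_va_links() chooses the insertion point with a strict non-overlap rule: the new area may only go left of a node it ends at or before, or right of a node it starts at or after; any intersection triggers the warning above. A small self-contained sketch of that decision, with hypothetical helper names:

    #include <stdio.h>

    enum side { SIDE_LEFT, SIDE_RIGHT, SIDE_OVERLAP };

    /* Decide which child to descend to when inserting [new_s, new_e). */
    static enum side pick_side(unsigned long new_s, unsigned long new_e,
                               unsigned long node_s, unsigned long node_e)
    {
        if (new_e <= node_s)
            return SIDE_LEFT;      /* entirely below the node */
        if (new_s >= node_e)
            return SIDE_RIGHT;     /* entirely above the node */
        return SIDE_OVERLAP;       /* ranges intersect: caller must warn */
    }

    int main(void)
    {
        printf("%d\n", pick_side(0x1000, 0x2000, 0x2000, 0x3000)); /* SIDE_LEFT */
        printf("%d\n", pick_side(0x2800, 0x3800, 0x2000, 0x3000)); /* SIDE_OVERLAP */
        return 0;
    }
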
1169 __link_va(struct vmap_area *va, struct rb_root *root,
1184 rb_link_node(&va->rb_node, parent, link);
1188 * to the tree. We do not set va->subtree_max_size to
1197 rb_insert_augmented(&va->rb_node,
1199 va->subtree_max_size = 0;
1201 rb_insert_color(&va->rb_node, root);
1205 list_add(&va->list, head);
1209 link_va(struct vmap_area *va, struct rb_root *root,
1213 __link_va(va, root, parent, link, head, false);
1217 link_va_augment(struct vmap_area *va, struct rb_root *root,
1221 __link_va(va, root, parent, link, head, true);
1225 __unlink_va(struct vmap_area *va, struct rb_root *root, bool augment)
1227 if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
1231 rb_erase_augmented(&va->rb_node,
1234 rb_erase(&va->rb_node, root);
1236 list_del_init(&va->list);
1237 RB_CLEAR_NODE(&va->rb_node);
1241 unlink_va(struct vmap_area *va, struct rb_root *root)
1243 __unlink_va(va, root, false);
1247 unlink_va_augment(struct vmap_area *va, struct rb_root *root)
1249 __unlink_va(va, root, true);
1257 compute_subtree_max_size(struct vmap_area *va)
1259 return max3(va_size(va),
1260 get_subtree_max_size(va->rb_node.rb_left),
1261 get_subtree_max_size(va->rb_node.rb_right));
1267 struct vmap_area *va;
1270 list_for_each_entry(va, &free_vmap_area_list, list) {
1271 computed_size = compute_subtree_max_size(va);
1272 if (computed_size != va->subtree_max_size)
1274 va_size(va), va->subtree_max_size);
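
compute_subtree_max_size(), together with the debug walk that re-checks it, maintains the augmented-tree invariant: every node caches the largest free-area size present anywhere in its subtree. A self-contained sketch of that bottom-up recomputation on a simplified node type:

    #include <stdio.h>

    struct node {
        struct node *left, *right;
        unsigned long size;          /* size of this free area */
        unsigned long subtree_max;   /* cached maximum in this subtree */
    };

    static unsigned long max3(unsigned long a, unsigned long b, unsigned long c)
    {
        unsigned long m = a > b ? a : b;
        return m > c ? m : c;
    }

    /* Recompute bottom-up; this is the invariant the debug check verifies. */
    static unsigned long recompute(struct node *n)
    {
        if (!n)
            return 0;
        n->subtree_max = max3(n->size, recompute(n->left), recompute(n->right));
        return n->subtree_max;
    }

    int main(void)
    {
        struct node l = { .size = 0x2000 }, r = { .size = 0x8000 };
        struct node root = { .left = &l, .right = &r, .size = 0x1000 };

        printf("subtree max = %#lx\n", recompute(&root));   /* 0x8000 */
        return 0;
    }
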
1307 augment_tree_propagate_from(struct vmap_area *va)
1314 free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);
1322 insert_vmap_area(struct vmap_area *va,
1328 link = find_va_links(va, root, NULL, &parent);
1330 link_va(va, root, parent, link, head);
1334 insert_vmap_area_augment(struct vmap_area *va,
1342 link = find_va_links(va, NULL, from, &parent);
1344 link = find_va_links(va, root, NULL, &parent);
1347 link_va_augment(va, root, parent, link, head);
1348 augment_tree_propagate_from(va);
1364 __merge_or_add_vmap_area(struct vmap_area *va,
1377 link = find_va_links(va, root, NULL, &parent);
1397 if (sibling->va_start == va->va_end) {
1398 sibling->va_start = va->va_start;
1401 kmem_cache_free(vmap_area_cachep, va);
1404 va = sibling;
1418 if (sibling->va_end == va->va_start) {
1427 __unlink_va(va, root, augment);
1429 sibling->va_end = va->va_end;
1432 kmem_cache_free(vmap_area_cachep, va);
1435 va = sibling;
1442 __link_va(va, root, parent, link, head, augment);
1444 return va;
1448 merge_or_add_vmap_area(struct vmap_area *va,
1451 return __merge_or_add_vmap_area(va, root, head, false);
1455 merge_or_add_vmap_area_augment(struct vmap_area *va,
1458 va = __merge_or_add_vmap_area(va, root, head, true);
1459 if (va)
1460 augment_tree_propagate_from(va);
1462 return va;
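
The merge helpers above coalesce a returned area with free neighbours whose edges touch it exactly, first the next area and then the previous one. A minimal sketch of the arithmetic on plain ranges (simplified types, no rb-tree or list maintenance):

    #include <stdio.h>

    struct range { unsigned long start, end; };   /* [start, end) */

    /* Absorb 'freed' into 'next' when freed ends exactly where next starts. */
    static int merge_next(struct range *next, const struct range *freed)
    {
        if (freed->end != next->start)
            return 0;
        next->start = freed->start;
        return 1;
    }

    /* Absorb 'freed' into 'prev' when prev ends exactly where freed starts. */
    static int merge_prev(struct range *prev, const struct range *freed)
    {
        if (prev->end != freed->start)
            return 0;
        prev->end = freed->end;
        return 1;
    }

    int main(void)
    {
        struct range prev = { 0x1000, 0x3000 }, next = { 0x4000, 0x6000 };
        struct range freed = { 0x3000, 0x4000 };

        /* Mirror the kernel's order: fold into the next area first... */
        if (merge_next(&next, &freed))
            freed = next;   /* the grown next now plays the role of 'va' */

        /* ...then try to fold that result into the previous area. */
        if (merge_prev(&prev, &freed))
            printf("final: [%#lx, %#lx)\n", prev.start, prev.end); /* [0x1000, 0x6000) */
        return 0;
    }
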
1466 is_within_this_va(struct vmap_area *va, unsigned long size,
1471 if (va->va_start > vstart)
1472 nva_start_addr = ALIGN(va->va_start, align);
1481 return (nva_start_addr + size <= va->va_end);
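
is_within_this_va() answers whether an aligned request fits into a candidate free area: the start is the higher of va_start and vstart, rounded up to the alignment, and the request must still end at or before va_end. A sketch under the assumption that align is a power of two (ALIGN_UP and fits are illustrative names):

    #include <stdio.h>

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))  /* a: power of two */

    /* Does an aligned request of 'size' bytes fit inside [va_start, va_end)? */
    static int fits(unsigned long va_start, unsigned long va_end,
                    unsigned long size, unsigned long align, unsigned long vstart)
    {
        unsigned long addr = va_start > vstart ? ALIGN_UP(va_start, align)
                                               : ALIGN_UP(vstart, align);

        return addr + size <= va_end;
    }

    int main(void)
    {
        printf("%d\n", fits(0x1010, 0x3000, 0x1000, 0x1000, 0x1000)); /* 1: fits at 0x2000 */
        printf("%d\n", fits(0x2010, 0x3000, 0x1000, 0x1000, 0x1000)); /* 0: no room after align */
        return 0;
    }
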
1495 struct vmap_area *va;
1506 va = rb_entry(node, struct vmap_area, rb_node);
1509 vstart < va->va_start) {
1512 if (is_within_this_va(va, size, align, vstart))
1513 return va;
1532 va = rb_entry(node, struct vmap_area, rb_node);
1533 if (is_within_this_va(va, size, align, vstart))
1534 return va;
1537 vstart <= va->va_start) {
1544 vstart = va->va_start + 1;
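
find_vmap_lowest_match() uses the cached subtree_max_size to prune whole subtrees: the left child is only worth visiting if its largest hole can hold the request and the search window starts below the current node. The core predicate, as a trivial hedged sketch:

    #include <stdio.h>

    /* Sketch of the descent test; the parameter names are illustrative. */
    static int descend_left(unsigned long left_subtree_max, unsigned long length,
                            unsigned long vstart, unsigned long node_va_start)
    {
        return left_subtree_max >= length && vstart < node_va_start;
    }

    int main(void)
    {
        printf("%d\n", descend_left(0x8000, 0x1000, 0x1000, 0x4000)); /* 1: worth a look */
        printf("%d\n", descend_left(0x0800, 0x1000, 0x1000, 0x4000)); /* 0: prune the subtree */
        return 0;
    }
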
1562 struct vmap_area *va;
1564 list_for_each_entry(va, head, list) {
1565 if (!is_within_this_va(va, size, align, vstart))
1568 return va;
1603 classify_va_fit_type(struct vmap_area *va,
1609 if (nva_start_addr < va->va_start ||
1610 nva_start_addr + size > va->va_end)
1614 if (va->va_start == nva_start_addr) {
1615 if (va->va_end == nva_start_addr + size)
1619 } else if (va->va_end == nva_start_addr + size) {
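
classify_va_fit_type() distinguishes whether the request covers the whole free area, hugs its left or right edge, or sits strictly inside it (which forces a split). A sketch with shortened enum names rather than the kernel's *_FIT_TYPE spellings:

    #include <stdio.h>

    enum fit { NOTHING_FIT, FL_FIT, LE_FIT, RE_FIT, NE_FIT };

    /* Classify where [addr, addr + size) sits inside the free area [s, e). */
    static enum fit classify(unsigned long s, unsigned long e,
                             unsigned long addr, unsigned long size)
    {
        if (addr < s || addr + size > e)
            return NOTHING_FIT;             /* request escapes the area */
        if (addr == s)
            return addr + size == e ? FL_FIT : LE_FIT;
        if (addr + size == e)
            return RE_FIT;
        return NE_FIT;                      /* strictly inside: area must be split */
    }

    int main(void)
    {
        printf("%d\n", classify(0x1000, 0x4000, 0x2000, 0x1000));   /* NE_FIT */
        return 0;
    }
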
1630 struct vmap_area *va, unsigned long nva_start_addr,
1634 enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);
1644 unlink_va_augment(va, root);
1645 kmem_cache_free(vmap_area_cachep, va);
1654 va->va_start += size;
1663 va->va_end = nva_start_addr;
1707 lva->va_start = va->va_start;
1713 va->va_start = nva_start_addr + size;
1719 augment_tree_propagate_from(va);
1722 insert_vmap_area_augment(lva, &va->rb_node, root, head);
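
For the 'no edge' case, the free area is split: the original node keeps the right remainder while a new node (lva) takes the left remainder, as the assignments above show. A sketch of just that arithmetic on plain ranges:

    #include <stdio.h>

    struct range { unsigned long start, end; };   /* [start, end) */

    /*
     * NE-style clip: carve [addr, addr + size) out of the middle of the
     * free area, leaving the right remainder in place and returning the
     * left remainder as a new range.
     */
    static struct range clip_middle(struct range *free_area,
                                    unsigned long addr, unsigned long size)
    {
        struct range left = { free_area->start, addr };

        free_area->start = addr + size;   /* right remainder stays in place */
        return left;                      /* left remainder becomes a new area */
    }

    int main(void)
    {
        struct range r = { 0x1000, 0x8000 };
        struct range left = clip_middle(&r, 0x3000, 0x1000);

        printf("left  [%#lx, %#lx)\n", left.start, left.end);   /* [0x1000, 0x3000) */
        printf("right [%#lx, %#lx)\n", r.start, r.end);         /* [0x4000, 0x8000) */
        return 0;
    }
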
1729 va_alloc(struct vmap_area *va,
1737 if (va->va_start > vstart)
1738 nva_start_addr = ALIGN(va->va_start, align);
1747 ret = va_clip(root, head, va, nva_start_addr, size);
1765 struct vmap_area *va;
1779 va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
1780 if (unlikely(!va))
1783 nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend);
1797 static void free_vmap_area(struct vmap_area *va)
1799 struct vmap_node *vn = addr_to_node(va->va_start);
1805 unlink_va(va, &vn->busy.root);
1812 merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
1819 struct vmap_area *va = NULL;
1831 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1835 if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va))
1836 kmem_cache_free(vmap_area_cachep, va);
1851 node_pool_add_va(struct vmap_node *n, struct vmap_area *va)
1855 vp = size_to_va_pool(n, va_size(va));
1860 list_add(&va->list, &vp->head);
1872 struct vmap_area *va = NULL;
1882 va = list_first_entry(&vp->head, struct vmap_area, list);
1884 if (IS_ALIGNED(va->va_start, align)) {
1889 err |= (va_size(va) != size);
1890 err |= (va->va_start < vstart);
1891 err |= (va->va_end > vend);
1894 list_del_init(&va->list);
1897 va = NULL;
1900 list_move_tail(&va->list, &vp->head);
1901 va = NULL;
1906 return va;
1914 struct vmap_area *va;
1928 va = node_pool_del_va(id_to_node(*vn_id), size, align, vstart, vend);
1931 if (va)
1932 *addr = va->va_start;
1934 return va;
1938 struct vmap_area *va, unsigned long flags, const void *caller)
1941 vm->addr = (void *)va->va_start;
1942 vm->size = va->va_end - va->va_start;
1944 va->vm = vm;
1958 struct vmap_area *va;
1981 va = node_alloc(size, align, vstart, vend, &addr, &vn_id);
1982 if (!va) {
1985 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1986 if (unlikely(!va))
1993 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
2013 va->va_start = addr;
2014 va->va_end = addr + size;
2015 va->vm = NULL;
2016 va->flags = (va_flags | vn_id);
2019 vm->addr = (void *)va->va_start;
2020 vm->size = va->va_end - va->va_start;
2021 va->vm = vm;
2024 vn = addr_to_node(va->va_start);
2027 insert_vmap_area(va, &vn->busy.root, &vn->busy.head);
2030 BUG_ON(!IS_ALIGNED(va->va_start, align));
2031 BUG_ON(va->va_start < vstart);
2032 BUG_ON(va->va_end > vend);
2036 free_vmap_area(va);
2040 return va;
2061 kmem_cache_free(vmap_area_cachep, va);
2118 struct vmap_area *va, *n;
2124 list_for_each_entry_safe(va, n, head, list)
2125 merge_or_add_vmap_area_augment(va,
2133 struct vmap_area *va, *nva;
2161 list_for_each_entry_safe(va, nva, &tmp_list, list) {
2162 list_del_init(&va->list);
2163 merge_or_add_vmap_area(va, &decay_root, &decay_list);
2193 struct vmap_area *va, *n_va;
2198 list_for_each_entry_safe(va, n_va, &vn->purge_list, list) {
2199 unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
2200 unsigned long orig_start = va->va_start;
2201 unsigned long orig_end = va->va_end;
2202 unsigned int vn_id = decode_vn_id(va->flags);
2204 list_del_init(&va->list);
2208 va->va_start, va->va_end);
2214 if (node_pool_add_va(vn, va))
2218 list_add(&va->list, &local_list);
2332 static void free_vmap_area_noflush(struct vmap_area *va)
2335 unsigned long va_start = va->va_start;
2336 unsigned int vn_id = decode_vn_id(va->flags);
2340 if (WARN_ON_ONCE(!list_empty(&va->list)))
2343 nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
2351 id_to_node(vn_id):addr_to_node(va->va_start);
2354 insert_vmap_area(va, &vn->lazy.root, &vn->lazy.head);
2359 /* After this point, we may free va at any time */
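
The lazy-free counter above is accounted in pages rather than bytes. A tiny arithmetic sketch; the PAGE_SHIFT of 12 (4 KiB pages) is an assumption of the example, not something the listing states:

    #include <stdio.h>

    #define PAGE_SHIFT  12   /* assumed 4 KiB pages for the sketch */

    int main(void)
    {
        unsigned long va_start = 0x100000, va_end = 0x10c000;

        /* Lazy-free accounting is kept in pages, not bytes. */
        printf("pages deferred = %lu\n", (va_end - va_start) >> PAGE_SHIFT); /* 12 */
        return 0;
    }
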
2367 static void free_unmap_vmap_area(struct vmap_area *va)
2369 flush_cache_vunmap(va->va_start, va->va_end);
2370 vunmap_range_noflush(va->va_start, va->va_end);
2372 flush_tlb_kernel_range(va->va_start, va->va_end);
2374 free_vmap_area_noflush(va);
2380 struct vmap_area *va;
2389 * addr is not the same as va->va_start, which is not common, we
2392 * <----va---->
2404 va = __find_vmap_area(addr, &vn->busy.root);
2407 if (va)
2408 return va;
2417 struct vmap_area *va;
2428 va = __find_vmap_area(addr, &vn->busy.root);
2429 if (va)
2430 unlink_va(va, &vn->busy.root);
2433 if (va)
2434 return va;
2494 struct vmap_area *va;
2585 struct vmap_area *va;
2598 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
2602 if (IS_ERR(va)) {
2604 return ERR_CAST(va);
2607 vaddr = vmap_block_vaddr(va->va_start, 0);
2609 vb->va = va;
2620 xa = addr_to_vb_xa(va->va_start);
2621 vb_idx = addr_to_vb_idx(va->va_start);
2625 free_vmap_area(va);
2643 xa = addr_to_vb_xa(vb->va->va_start);
2644 tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start));
2647 vn = addr_to_node(vb->va->va_start);
2649 unlink_va(vb->va, &vn->busy.root);
2652 free_vmap_area_noflush(vb->va);
2756 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
2846 unsigned long va_start = vb->va->va_start;
2903 struct vmap_area *va;
2919 va = find_unlink_vmap_area(addr);
2920 if (WARN_ON_ONCE(!va))
2923 debug_check_no_locks_freed((void *)va->va_start,
2924 (va->va_end - va->va_start));
2925 free_unmap_vmap_area(va);
2955 struct vmap_area *va;
2956 va = alloc_vmap_area(size, PAGE_SIZE,
2960 if (IS_ERR(va))
2963 addr = va->va_start;
3078 struct vmap_area *va;
3101 va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area);
3102 if (IS_ERR(va)) {
3169 struct vmap_area *va;
3171 va = find_vmap_area((unsigned long)addr);
3172 if (!va)
3175 return va->vm;
3190 struct vmap_area *va;
3199 va = find_unlink_vmap_area((unsigned long)addr);
3200 if (!va || !va->vm)
3202 vm = va->vm;
3209 free_unmap_vmap_area(va);
4183 start = vmap_block_vaddr(vb->va->va_start, rs);
4249 struct vmap_area *va;
4263 vn = find_vmap_area_exceed_addr_lock((unsigned long) addr, &va);
4268 if ((unsigned long)addr + remains <= va->va_start)
4277 vm = va->vm;
4278 flags = va->flags & VMAP_FLAGS_MASK;
4294 vaddr = (char *) va->va_start;
4295 size = vm ? get_vm_area_size(vm) : va_size(va);
4329 next = va->va_end;
4331 } while ((vn = find_vmap_area_exceed_addr_lock(next, &va)));
4454 * i.e. va->va_start < addr && va->va_end < addr, or NULL
4460 struct vmap_area *va, *tmp;
4464 va = NULL;
4469 va = tmp;
4479 return va;
4485 * @va:
4493 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
4498 if (likely(*va)) {
4499 list_for_each_entry_from_reverse((*va),
4501 addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
4502 if ((*va)->va_start < addr)
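
pvm_determine_end_from_reverse() clamps an area's end down to the requested alignment with a simple mask, which assumes a power-of-two align. The core operation as a one-line sketch:

    #include <stdio.h>

    int main(void)
    {
        unsigned long va_end = 0x12345, align = 0x1000;

        /* Highest align-aligned address not above va_end (align: power of two). */
        printf("%#lx\n", va_end & ~(align - 1));   /* 0x12000 */
        return 0;
    }
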
4540 struct vmap_area **vas, *va;
4593 va = pvm_find_va_enclose_addr(vmalloc_end);
4594 base = pvm_determine_end_from_reverse(&va, align) - end;
4607 if (va == NULL)
4614 if (base + end > va->va_end) {
4615 base = pvm_determine_end_from_reverse(&va, align) - end;
4623 if (base + start < va->va_start) {
4624 va = node_to_va(rb_prev(&va->rb_node));
4625 base = pvm_determine_end_from_reverse(&va, align) - end;
4640 va = pvm_find_va_enclose_addr(base + end);
4643 /* we've found a fitting base, insert all va's */
4650 va = pvm_find_va_enclose_addr(start);
4651 if (WARN_ON_ONCE(va == NULL))
4656 &free_vmap_area_list, va, start, size);
4662 va = vas[area];
4663 va->va_start = start;
4664 va->va_end = start + size;
4709 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
4711 if (va)
4713 va->va_start, va->va_end);
4759 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
4761 if (va)
4763 va->va_start, va->va_end);
4795 struct vmap_area *va;
4806 va = __find_vmap_area(addr, &vn->busy.root);
4807 if (!va || !va->vm) {
4812 vm = va->vm;
4853 struct vmap_area *va;
4860 list_for_each_entry(va, &vn->lazy.head, list) {
4862 (void *)va->va_start, (void *)va->va_end,
4863 va->va_end - va->va_start);
4872 struct vmap_area *va;
4880 list_for_each_entry(va, &vn->busy.head, list) {
4881 if (!va->vm) {
4882 if (va->flags & VMAP_RAM)
4884 (void *)va->va_start, (void *)va->va_end,
4885 va->va_end - va->va_start);
4890 v = va->vm;
5081 struct vmap_area *va;
5111 va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
5112 if (WARN_ON_ONCE(!va))
5115 va->va_start = (unsigned long)tmp->addr;
5116 va->va_end = va->va_start + tmp->size;
5117 va->vm = tmp;
5119 vn = addr_to_node(va->va_start);
5120 insert_vmap_area(va, &vn->busy.root, &vn->busy.head);