Lines Matching refs:vma

802 struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
803 if (vma) {
804 vma->addr = addr;
805 vma->size = size;
806 vma->page = NVKM_VMA_PAGE_NONE;
807 vma->refd = NVKM_VMA_PAGE_NONE;
809 return vma;
813 nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
817 BUG_ON(vma->size == tail);
819 if (!(new = nvkm_vma_new(vma->addr + (vma->size - tail), tail)))
821 vma->size -= tail;
823 new->mapref = vma->mapref;
824 new->sparse = vma->sparse;
825 new->page = vma->page;
826 new->refd = vma->refd;
827 new->used = vma->used;
828 new->part = vma->part;
829 new->busy = vma->busy;
830 new->mapped = vma->mapped;
831 list_add(&new->head, &vma->head);
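
The nvkm_vma_tail() lines above (813-831) carve the last 'tail' bytes of an existing region into a new node that inherits the parent's state and is linked right after it. Below is a minimal userspace sketch of that split, using a made-up struct vma with only addr/size and a plain next pointer instead of the kernel's nvkm_vma/list_head; it illustrates the technique, it is not the driver code.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical, simplified stand-in for struct nvkm_vma: just a range. */
struct vma {
	unsigned long long addr;
	unsigned long long size;
	struct vma *next;          /* stands in for the kernel list_head */
};

/* Split 'tail' bytes off the end of 'v' into a new node placed after it,
 * mirroring the shape of nvkm_vma_tail(): shrink the original, allocate
 * the remainder, link it in.  Returns NULL on failure or a degenerate
 * split (the kernel BUG_ON()s tail == size). */
static struct vma *vma_tail(struct vma *v, unsigned long long tail)
{
	struct vma *new;

	if (tail == 0 || tail >= v->size)
		return NULL;

	new = malloc(sizeof(*new));
	if (!new)
		return NULL;

	new->addr = v->addr + (v->size - tail);
	new->size = tail;
	v->size -= tail;

	new->next = v->next;
	v->next = new;
	return new;
}

int main(void)
{
	struct vma v = { .addr = 0x1000, .size = 0x4000, .next = NULL };
	struct vma *t = vma_tail(&v, 0x1000);

	printf("head: %#llx+%#llx\n", v.addr, v.size);
	if (t)
		printf("tail: %#llx+%#llx\n", t->addr, t->size);
	free(t);
	return 0;
}
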
836 nvkm_vmm_free_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
838 rb_erase(&vma->tree, &vmm->free);
842 nvkm_vmm_free_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
844 nvkm_vmm_free_remove(vmm, vma);
845 list_del(&vma->head);
846 kfree(vma);
850 nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
858 if (vma->size < this->size)
861 if (vma->size > this->size)
864 if (vma->addr < this->addr)
867 if (vma->addr > this->addr)
873 rb_link_node(&vma->tree, parent, ptr);
874 rb_insert_color(&vma->tree, &vmm->free);
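
The comparisons in nvkm_vmm_free_insert() (858-867) order the free tree by size first and address second, so allocation can walk it best-fit style. A hedged sketch of just that ordering as a standalone comparator over a hypothetical struct free_vma (plain qsort() here, not the kernel rbtree API):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical, simplified node: only the fields the comparison uses. */
struct free_vma {
	unsigned long long addr;
	unsigned long long size;
};

/* Ordering used when descending the free tree: smaller regions sort
 * first, equal-sized regions fall back to address. */
static int free_vma_cmp(const struct free_vma *a, const struct free_vma *b)
{
	if (a->size < b->size)
		return -1;
	if (a->size > b->size)
		return 1;
	if (a->addr < b->addr)
		return -1;
	if (a->addr > b->addr)
		return 1;
	return 0;
}

static int cmp(const void *pa, const void *pb)
{
	return free_vma_cmp((const struct free_vma *)pa,
			    (const struct free_vma *)pb);
}

int main(void)
{
	struct free_vma v[] = {
		{ 0x3000, 0x2000 }, { 0x1000, 0x1000 }, { 0x8000, 0x1000 },
	};

	qsort(v, 3, sizeof(v[0]), cmp);
	for (int i = 0; i < 3; i++)
		printf("%#llx+%#llx\n", v[i].addr, v[i].size);
	return 0;
}
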
878 nvkm_vmm_node_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
880 rb_erase(&vma->tree, &vmm->root);
884 nvkm_vmm_node_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
886 nvkm_vmm_node_remove(vmm, vma);
887 list_del(&vma->head);
888 kfree(vma);
892 nvkm_vmm_node_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
900 if (vma->addr < this->addr)
903 if (vma->addr > this->addr)
909 rb_link_node(&vma->tree, parent, ptr);
910 rb_insert_color(&vma->tree, &vmm->root);
918 struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
919 if (addr < vma->addr)
922 if (addr >= vma->addr + vma->size)
925 return vma;
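
nvkm_vmm_node_search() (918-925) walks the root tree by address and counts a node as a hit when addr lies in [vma->addr, vma->addr + vma->size). The same containment test over a sorted array, as a self-contained sketch with a hypothetical struct node_vma:

#include <stdio.h>

struct node_vma {                       /* hypothetical, not the kernel type */
	unsigned long long addr;
	unsigned long long size;
};

/* Binary search for the region containing 'addr', mirroring the descent
 * in nvkm_vmm_node_search(): go left if addr is below the region, right
 * if it is at or past the end, otherwise it's a hit. */
static struct node_vma *node_search(struct node_vma *v, int n,
				    unsigned long long addr)
{
	int lo = 0, hi = n - 1;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;

		if (addr < v[mid].addr)
			hi = mid - 1;
		else if (addr >= v[mid].addr + v[mid].size)
			lo = mid + 1;
		else
			return &v[mid];
	}
	return NULL;
}

int main(void)
{
	struct node_vma v[] = {
		{ 0x0, 0x1000 }, { 0x1000, 0x3000 }, { 0x4000, 0x1000 },
	};
	struct node_vma *hit = node_search(v, 3, 0x2000);

	printf("%s\n", hit ? "found" : "not found");
	return 0;
}
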
935 struct nvkm_vma *vma, struct nvkm_vma *next, u64 size)
938 if (vma->size == size) {
939 vma->size += next->size;
942 prev->size += vma->size;
943 nvkm_vmm_node_delete(vmm, vma);
946 return vma;
951 vma->size -= size;
959 if (vma->size != size) {
960 nvkm_vmm_node_remove(vmm, vma);
962 vma->addr += size;
963 vma->size -= size;
964 nvkm_vmm_node_insert(vmm, vma);
966 prev->size += vma->size;
967 nvkm_vmm_node_delete(vmm, vma);
972 return vma;
977 struct nvkm_vma *vma, u64 addr, u64 size)
981 if (vma->addr != addr) {
982 prev = vma;
983 if (!(vma = nvkm_vma_tail(vma, vma->size + vma->addr - addr)))
985 vma->part = true;
986 nvkm_vmm_node_insert(vmm, vma);
989 if (vma->size != size) {
991 if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
992 nvkm_vmm_node_merge(vmm, prev, vma, NULL, vma->size);
999 return vma;
1003 nvkm_vma_dump(struct nvkm_vma *vma)
1006 vma->addr, (u64)vma->size,
1007 vma->used ? '-' : 'F',
1008 vma->mapref ? 'R' : '-',
1009 vma->sparse ? 'S' : '-',
1010 vma->page != NVKM_VMA_PAGE_NONE ? '0' + vma->page : '-',
1011 vma->refd != NVKM_VMA_PAGE_NONE ? '0' + vma->refd : '-',
1012 vma->part ? 'P' : '-',
1013 vma->busy ? 'B' : '-',
1014 vma->mapped ? 'M' : '-',
1015 vma->memory);
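
nvkm_vma_dump() (1003-1015) prints one line per region: address, size, then single-character flags for used/free, mapref, sparse, page, refd, part, busy and mapped (plus the backing memory pointer, omitted below). A rough userspace equivalent, with a hypothetical struct and a PAGE_NONE stand-in for NVKM_VMA_PAGE_NONE:

#include <stdio.h>
#include <stdbool.h>

#define PAGE_NONE 0xff                  /* stands in for NVKM_VMA_PAGE_NONE */

struct dump_vma {                       /* hypothetical, simplified */
	unsigned long long addr, size;
	bool used, mapref, sparse, part, busy, mapped;
	unsigned char page, refd;
};

/* One dump line in the same spirit as nvkm_vma_dump(): address, size,
 * then a column of single-character flags ('-' when a flag is clear). */
static void vma_dump(const struct dump_vma *v)
{
	printf("%016llx %016llx %c%c%c%c%c%c%c%c\n",
	       v->addr, v->size,
	       v->used   ? '-' : 'F',
	       v->mapref ? 'R' : '-',
	       v->sparse ? 'S' : '-',
	       v->page != PAGE_NONE ? '0' + v->page : '-',
	       v->refd != PAGE_NONE ? '0' + v->refd : '-',
	       v->part   ? 'P' : '-',
	       v->busy   ? 'B' : '-',
	       v->mapped ? 'M' : '-');
}

int main(void)
{
	struct dump_vma v = { .addr = 0x1000, .size = 0x2000,
			      .used = true, .page = PAGE_NONE, .refd = PAGE_NONE };

	vma_dump(&v);
	return 0;
}
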
1021 struct nvkm_vma *vma;
1022 list_for_each_entry(vma, &vmm->list, head) {
1023 nvkm_vma_dump(vma);
1030 struct nvkm_vma *vma;
1044 struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
1045 nvkm_vmm_put(vmm, &vma);
1059 vma = list_first_entry(&vmm->list, typeof(*vma), head);
1060 list_del(&vma->head);
1061 kfree(vma);
1078 struct nvkm_vma *vma;
1079 if (!(vma = nvkm_vma_new(addr, size)))
1081 vma->mapref = true;
1082 vma->sparse = false;
1083 vma->used = true;
1084 nvkm_vmm_node_insert(vmm, vma);
1085 list_add_tail(&vma->head, &vmm->list);
1098 struct nvkm_vma *vma;
1170 if (!(vma = nvkm_vma_new(addr, size)))
1172 nvkm_vmm_free_insert(vmm, vma);
1173 list_add_tail(&vma->head, &vmm->list);
1193 if (!(vma = nvkm_vma_new(vmm->start, vmm->limit - vmm->start)))
1196 nvkm_vmm_free_insert(vmm, vma);
1197 list_add(&vma->head, &vmm->list);
1215 nvkm_vmm_pfn_split_merge(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
1221 if (vma->addr == addr && vma->part && (prev = node(vma, prev))) {
1226 if (vma->addr + vma->size == addr + size && (next = node(vma, next))) {
1233 return nvkm_vmm_node_merge(vmm, prev, vma, next, size);
1234 return nvkm_vmm_node_split(vmm, vma, addr, size);
1240 struct nvkm_vma *vma = nvkm_vmm_node_search(vmm, addr);
1245 if (!vma)
1249 if (!vma->mapped || vma->memory)
1252 size = min(limit - start, vma->size - (start - vma->addr));
1254 nvkm_vmm_ptes_unmap_put(vmm, &vmm->func->page[vma->refd],
1257 next = nvkm_vmm_pfn_split_merge(vmm, vma, start, size, 0, false);
1259 vma = next;
1260 vma->refd = NVKM_VMA_PAGE_NONE;
1261 vma->mapped = false;
1263 } while ((vma = node(vma, next)) && (start = vma->addr) < limit);
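
The unmap loop ending at 1263 clamps each step with size = min(limit - start, vma->size - (start - vma->addr)), i.e. it stops at whichever ends first, the caller's range or the current region. A tiny worked sketch of that clamp with made-up numbers:

#include <stdio.h>

static unsigned long long min_u64(unsigned long long a, unsigned long long b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Hypothetical request [start, limit) overlapping a region [addr, addr+size). */
	unsigned long long start = 0x3000, limit = 0x9000;
	unsigned long long vma_addr = 0x2000, vma_size = 0x4000;

	/* Same clamp as the loop above: stop at whichever ends first,
	 * the caller's limit or the current region. */
	unsigned long long step = min_u64(limit - start,
					  vma_size - (start - vma_addr));

	printf("unmap %#llx bytes starting at %#llx\n", step, start);  /* 0x3000 at 0x3000 */
	return 0;
}
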
1277 struct nvkm_vma *vma, *tmp;
1298 if (!(vma = nvkm_vmm_node_search(vmm, addr)))
1303 bool mapped = vma->mapped;
1316 size = min_t(u64, size, vma->size + vma->addr - addr);
1321 if (!vma->mapref || vma->memory) {
1338 tmp = nvkm_vmm_pfn_split_merge(vmm, vma, addr, size,
1350 vma = tmp;
1376 if (vma->addr + vma->size == addr + size)
1377 vma = node(vma, next);
1391 } while (vma && start < limit);
1397 nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1402 nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
1403 nvkm_memory_unref(&vma->memory);
1404 vma->mapped = false;
1406 if (vma->part && (prev = node(vma, prev)) && prev->mapped)
1408 if ((next = node(vma, next)) && (!next->part || next->mapped))
1410 nvkm_vmm_node_merge(vmm, prev, vma, next, vma->size);
1414 nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, bool pfn)
1416 const struct nvkm_vmm_page *page = &vmm->func->page[vma->refd];
1418 if (vma->mapref) {
1419 nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
1420 vma->refd = NVKM_VMA_PAGE_NONE;
1422 nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
1425 nvkm_vmm_unmap_region(vmm, vma);
1429 nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1431 if (vma->memory) {
1433 nvkm_vmm_unmap_locked(vmm, vma, false);
1439 nvkm_vmm_map_valid(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
1461 if (!IS_ALIGNED( vma->addr, 1ULL << map->page->shift) ||
1462 !IS_ALIGNED((u64)vma->size, 1ULL << map->page->shift) ||
1466 vma->addr, (u64)vma->size, map->offset, map->page->shift,
1475 nvkm_vmm_map_choose(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
1480 if (!nvkm_vmm_map_valid(vmm, vma, argv, argc, map))
1487 nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
1493 map->no_comp = vma->no_comp;
1496 if (unlikely(nvkm_memory_size(map->memory) < map->offset + vma->size)) {
1499 map->offset, (u64)vma->size);
1504 if (vma->page == NVKM_VMA_PAGE_NONE &&
1505 vma->refd == NVKM_VMA_PAGE_NONE) {
1509 ret = nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
1513 nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
1518 if (vma->refd != NVKM_VMA_PAGE_NONE)
1519 map->page = &vmm->func->page[vma->refd];
1521 map->page = &vmm->func->page[vma->page];
1523 ret = nvkm_vmm_map_valid(vmm, vma, argv, argc, map);
1556 if (vma->refd == NVKM_VMA_PAGE_NONE) {
1557 ret = nvkm_vmm_ptes_get_map(vmm, map->page, vma->addr, vma->size, map, func);
1561 vma->refd = map->page - vmm->func->page;
1563 nvkm_vmm_ptes_map(vmm, map->page, vma->addr, vma->size, map, func);
1566 nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
1567 nvkm_memory_unref(&vma->memory);
1568 vma->memory = nvkm_memory_ref(map->memory);
1569 vma->mapped = true;
1570 vma->tags = map->tags;
1575 nvkm_vmm_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc,
1580 if (nvkm_vmm_in_managed_range(vmm, vma->addr, vma->size) &&
1582 return nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
1585 ret = nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
1586 vma->busy = false;
1592 nvkm_vmm_put_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1596 if ((prev = node(vma, prev)) && !prev->used) {
1597 vma->addr = prev->addr;
1598 vma->size += prev->size;
1602 if ((next = node(vma, next)) && !next->used) {
1603 vma->size += next->size;
1607 nvkm_vmm_free_insert(vmm, vma);
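
nvkm_vmm_put_region() (1596-1607) coalesces the freed range with any unused neighbours before it goes back into the free tree: prev is absorbed into the front, next into the back. A hypothetical doubly-linked-list sketch of that coalescing step, not the driver's list_head/rbtree code:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct region {                         /* hypothetical, simplified */
	unsigned long long addr, size;
	bool used;
	struct region *prev, *next;
};

/* Coalesce 'v' with unused neighbours, in the same order as
 * nvkm_vmm_put_region(): absorb an unused prev into the front,
 * then an unused next into the back, unlinking what was absorbed. */
static void put_region(struct region *v)
{
	struct region *p = v->prev, *n = v->next;

	if (p && !p->used) {
		v->addr = p->addr;
		v->size += p->size;
		v->prev = p->prev;
		if (p->prev)
			p->prev->next = v;
		free(p);
	}

	if (n && !n->used) {
		v->size += n->size;
		v->next = n->next;
		if (n->next)
			n->next->prev = v;
		free(n);
	}

	v->used = false;
	/* The real code then re-inserts the region into vmm->free. */
}

int main(void)
{
	struct region *a = calloc(1, sizeof(*a));
	struct region *b = calloc(1, sizeof(*b));

	a->addr = 0x1000; a->size = 0x1000; a->used = false;
	b->addr = 0x2000; b->size = 0x2000; b->used = true;
	a->next = b; b->prev = a;

	put_region(b);                  /* b absorbs a: 0x1000+0x3000 */
	printf("%#llx+%#llx used=%d\n", b->addr, b->size, b->used);
	free(b);
	return 0;
}
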
1611 nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1614 struct nvkm_vma *next = vma;
1616 BUG_ON(vma->part);
1618 if (vma->mapref || !vma->sparse) {
1639 size, vma->sparse,
1653 next = vma;
1657 } while ((next = node(vma, next)) && next->part);
1659 if (vma->sparse && !vma->mapref) {
1668 nvkm_vmm_ptes_sparse_put(vmm, &page[vma->refd], vma->addr, vma->size);
1670 if (vma->sparse) {
1679 nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, false);
1683 nvkm_vmm_node_remove(vmm, vma);
1686 vma->page = NVKM_VMA_PAGE_NONE;
1687 vma->refd = NVKM_VMA_PAGE_NONE;
1688 vma->used = false;
1689 nvkm_vmm_put_region(vmm, vma);
1695 struct nvkm_vma *vma = *pvma;
1696 if (vma) {
1698 nvkm_vmm_put_locked(vmm, vma);
1710 struct nvkm_vma *vma = NULL, *tmp;
1790 vma = this;
1795 if (unlikely(!vma))
1801 if (addr != vma->addr) {
1802 if (!(tmp = nvkm_vma_tail(vma, vma->size + vma->addr - addr))) {
1803 nvkm_vmm_put_region(vmm, vma);
1806 nvkm_vmm_free_insert(vmm, vma);
1807 vma = tmp;
1810 if (size != vma->size) {
1811 if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
1812 nvkm_vmm_put_region(vmm, vma);
1820 ret = nvkm_vmm_ptes_sparse_get(vmm, page, vma->addr, vma->size);
1822 ret = nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, true);
1824 ret = nvkm_vmm_ptes_get(vmm, page, vma->addr, vma->size);
1828 nvkm_vmm_put_region(vmm, vma);
1832 vma->mapref = mapref && !getref;
1833 vma->sparse = sparse;
1834 vma->page = page - vmm->func->page;
1835 vma->refd = getref ? vma->page : NVKM_VMA_PAGE_NONE;
1836 vma->used = true;
1837 nvkm_vmm_node_insert(vmm, vma);
1838 *pvma = vma;
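
The tail of nvkm_vmm_get_locked() (1801-1838) carves the requested [addr, addr + size) window out of the chosen free region, splitting off a head piece below addr and a remainder past addr + size (the kernel does both with nvkm_vma_tail(), returning the leftovers to the free tree). A self-contained sketch of the resulting three ranges, with hypothetical types and names:

#include <stdio.h>

struct range { unsigned long long addr, size; };   /* hypothetical */

/* Carve [addr, addr + size) out of the free range 'v':
 *  - anything below 'addr' stays in *head (may end up empty),
 *  - anything past 'addr + size' goes to *tail (may end up empty),
 *  - the requested middle piece is returned in *out. */
static int range_carve(struct range v, unsigned long long addr,
		       unsigned long long size,
		       struct range *head, struct range *out, struct range *tail)
{
	if (addr < v.addr || addr + size > v.addr + v.size)
		return -1;

	head->addr = v.addr;
	head->size = addr - v.addr;

	out->addr = addr;
	out->size = size;

	tail->addr = addr + size;
	tail->size = (v.addr + v.size) - (addr + size);
	return 0;
}

int main(void)
{
	struct range v = { 0x1000, 0x8000 }, head, out, tail;

	if (!range_carve(v, 0x2000, 0x1000, &head, &out, &tail))
		printf("head %#llx+%#llx, out %#llx+%#llx, tail %#llx+%#llx\n",
		       head.addr, head.size, out.addr, out.size,
		       tail.addr, tail.size);
	return 0;
}
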