Lines Matching defs:vmm

23 #include "vmm.h"
75 struct nvkm_vmm *vmm;
113 VMM_TRACE(_it->vmm, "%s "f, _buf, ##a); \
129 if (it->vmm->func->flush) {
131 it->vmm->func->flush(it->vmm, it->flush);
145 struct nvkm_vmm *vmm = it->vmm;
159 func->sparse(vmm, pgd->pt[0], pdei, 1);
162 func->unmap(vmm, pgd->pt[0], pdei, 1);
170 func->pde(vmm, pgd, pdei);
177 func->pde(vmm, pgd, pdei);
190 nvkm_mmu_ptc_put(vmm->mmu, vmm->bootstrapped, &pt);
203 struct nvkm_vmm *vmm = it->vmm;
244 pair->func->sparse(vmm, pgt->pt[0], pteb, ptes);
252 pair->func->invalid(vmm, pgt->pt[0], pteb, ptes);
267 dma = desc->func->pfn_clear(it->vmm, pgt->pt[type], ptei, ptes);
272 desc->func->pfn_unmap(it->vmm, pgt->pt[type], ptei, ptes);
302 struct nvkm_vmm *vmm = it->vmm;
348 desc->func->sparse(vmm, pgt->pt[1], spti, sptc);
351 pair->func->unmap(vmm, pgt->pt[0], pteb, ptes);
358 pair->func->unmap(vmm, pgt->pt[0], pteb, ptes);
419 struct nvkm_vmm *vmm = it->vmm;
420 struct nvkm_mmu *mmu = vmm->mmu;
457 desc->func->sparse(vmm, pt, pteb, ptes);
459 desc->func->invalid(vmm, pt, pteb, ptes);
462 desc->func->unmap(vmm, pt, pteb, ptes);
470 desc->func->sparse(vmm, pt, 0, pten);
472 desc->func->invalid(vmm, pt, 0, pten);
478 it->desc[it->lvl].func->pde(it->vmm, pgd, pdei);
501 nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
513 it.vmm = vmm;
523 it.pt[it.max] = vmm->pd;
567 MAP_PTES(vmm, pt, ptei, ptes, map);
569 CLR_PTES(vmm, pt, ptei, ptes);
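
The nvkm_vmm_iter() entries above (lines 501-569) are the single range walker that the map, unmap, ref, sparse and boot paths all reuse by passing different REF_PTES/MAP_PTES/CLR_PTES callbacks. Below is a minimal user-space sketch of that callback-driven walk, with invented names (walk_range, pte_op) and a fixed 512-entry page table; it illustrates the pattern only and is not the nvkm implementation.

#include <stdint.h>
#include <stdbool.h>

/* Hypothetical callback: operate on 'ptes' entries starting at index 'ptei'. */
typedef bool (*pte_op)(void *ctx, uint32_t ptei, uint32_t ptes);

/*
 * Walk [addr, addr + size) in pages of (1 << shift) bytes, clamping each
 * step at the end of the current 512-entry page table, and hand every
 * contiguous run of PTEs to 'op'.  On failure the address where 'op' gave
 * up is returned so the caller can roll back [addr, fail), as the callers
 * at lines 616-621 and 728-732 do; ~0ULL is used here as the success
 * sentinel (an assumption of this sketch).
 */
static uint64_t
walk_range(void *ctx, uint64_t addr, uint64_t size, uint8_t shift, pte_op op)
{
        const uint32_t ptes_per_pt = 512;
        uint64_t left = size >> shift;

        while (left) {
                uint32_t ptei = (uint32_t)(addr >> shift) & (ptes_per_pt - 1);
                uint32_t ptes = ptes_per_pt - ptei;

                if (ptes > left)
                        ptes = (uint32_t)left;
                if (!op(ctx, ptei, ptes))
                        return addr;

                addr += (uint64_t)ptes << shift;
                left -= ptes;
        }
        return ~0ULL;
}
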
602 nvkm_vmm_ptes_sparse_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
605 nvkm_vmm_iter(vmm, page, addr, size, "sparse unref", false, false,
612 nvkm_vmm_ptes_sparse_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
616 u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "sparse ref",
621 nvkm_vmm_ptes_sparse_put(vmm, page, addr, size);
630 nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
632 const struct nvkm_vmm_page *page = vmm->func->page;
662 int ret = nvkm_vmm_ptes_sparse_get(vmm, &page[i], addr, block);
665 nvkm_vmm_ptes_sparse(vmm, start, size, false);
669 nvkm_vmm_ptes_sparse_put(vmm, &page[i], addr, block);
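
nvkm_vmm_ptes_sparse() (lines 630-669) splits the requested range into blocks, referencing each with the largest page size whose alignment and remaining length fit, and unwinds the already-handled blocks if a later one fails. The helper below sketches only the page-size selection step, with made-up shift values rather than vmm->func->page, and assumes the caller passes sizes aligned to the smallest page.

#include <stdint.h>

/* Illustrative page shifts, largest first -- not the nvkm page array. */
static const uint8_t shifts[] = { 29, 21, 16, 12 };

/* Pick the largest page size to which 'addr' is aligned and from which at
 * least one whole block fits inside 'size'. */
static uint8_t
pick_shift(uint64_t addr, uint64_t size)
{
        for (unsigned int i = 0; i < sizeof(shifts) / sizeof(shifts[0]); i++) {
                uint64_t block = 1ULL << shifts[i];

                if (!(addr & (block - 1)) && size >= block)
                        return shifts[i];
        }
        return shifts[sizeof(shifts) / sizeof(shifts[0]) - 1];
}
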
680 nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
685 mutex_lock(&vmm->mutex.map);
686 nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, pfn,
690 mutex_unlock(&vmm->mutex.map);
694 nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
698 mutex_lock(&vmm->mutex.map);
699 nvkm_vmm_iter(vmm, page, addr, size, "map", false, false,
701 mutex_unlock(&vmm->mutex.map);
705 nvkm_vmm_ptes_put_locked(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
708 nvkm_vmm_iter(vmm, page, addr, size, "unref", false, false,
713 nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
716 mutex_lock(&vmm->mutex.ref);
717 nvkm_vmm_ptes_put_locked(vmm, page, addr, size);
718 mutex_unlock(&vmm->mutex.ref);
722 nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
727 mutex_lock(&vmm->mutex.ref);
728 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true, false,
732 nvkm_vmm_ptes_put_locked(vmm, page, addr, fail - addr);
733 mutex_unlock(&vmm->mutex.ref);
736 mutex_unlock(&vmm->mutex.ref);
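
nvkm_vmm_ptes_get()/..._put() (lines 705-736) take vmm->mutex.ref around the walker and, when a reference pass fails partway through, release only the sub-range that had already been referenced before returning -ENOMEM. A self-contained illustration of that rollback-on-partial-failure shape, with toy stand-ins (grab_range, drop_range) in place of the real iterator:

#include <stdint.h>
#include <errno.h>
#include <pthread.h>

static pthread_mutex_t ref_lock = PTHREAD_MUTEX_INITIALIZER;
static const uint64_t ref_limit = 1ULL << 20;  /* pretend refs above 1MiB fail */

/* Toy reference pass: returns ~0ULL on full success, otherwise the first
 * address that could not be referenced. */
static uint64_t grab_range(uint64_t addr, uint64_t size)
{
        if (addr + size <= ref_limit)
                return ~0ULL;
        return addr < ref_limit ? ref_limit : addr;
}

static void drop_range(uint64_t addr, uint64_t size)
{
        (void)addr; (void)size;                /* toy unreference pass */
}

static int get_range(uint64_t addr, uint64_t size)
{
        uint64_t fail;
        int ret = 0;

        pthread_mutex_lock(&ref_lock);
        fail = grab_range(addr, size);
        if (fail != ~0ULL) {
                /* Undo only the part that succeeded: [addr, fail). */
                if (fail != addr)
                        drop_range(addr, fail - addr);
                ret = -ENOMEM;
        }
        pthread_mutex_unlock(&ref_lock);
        return ret;
}
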
741 __nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
746 nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref",
753 nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
756 if (vmm->managed.raw) {
757 nvkm_vmm_ptes_unmap(vmm, page, addr, size, sparse, pfn);
758 nvkm_vmm_ptes_put(vmm, page, addr, size);
760 __nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, sparse, pfn);
765 __nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
769 u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true,
773 nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false, false);
780 nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
786 if (vmm->managed.raw) {
787 ret = nvkm_vmm_ptes_get(vmm, page, addr, size);
791 nvkm_vmm_ptes_map(vmm, page, addr, size, map, func);
795 return __nvkm_vmm_ptes_get_map(vmm, page, addr, size, map, func);
836 nvkm_vmm_free_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
838 rb_erase(&vma->tree, &vmm->free);
842 nvkm_vmm_free_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
844 nvkm_vmm_free_remove(vmm, vma);
850 nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
852 struct rb_node **ptr = &vmm->free.rb_node;
874 rb_insert_color(&vma->tree, &vmm->free);
878 nvkm_vmm_node_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
880 rb_erase(&vma->tree, &vmm->root);
884 nvkm_vmm_node_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
886 nvkm_vmm_node_remove(vmm, vma);
892 nvkm_vmm_node_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
894 struct rb_node **ptr = &vmm->root.rb_node;
910 rb_insert_color(&vma->tree, &vmm->root);
914 nvkm_vmm_node_search(struct nvkm_vmm *vmm, u64 addr)
916 struct rb_node *node = vmm->root.rb_node;
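
nvkm_vmm_node_insert()/..._search() (lines 892-916) keep live VMAs in the vmm->root rb-tree keyed by start address, and a lookup matches any address that falls inside [vma->addr, vma->addr + vma->size). The sketch below shows the same keying with a plain, unbalanced binary search tree so it compiles outside the kernel; the real code uses <linux/rbtree.h> with rb_insert_color() for balancing.

#include <stdint.h>
#include <stddef.h>

struct region {
        uint64_t addr, size;
        struct region *left, *right;
};

/* Insert 'r' keyed by start address (duplicates go right). */
static void
region_insert(struct region **root, struct region *r)
{
        while (*root)
                root = (r->addr < (*root)->addr) ? &(*root)->left
                                                 : &(*root)->right;
        r->left = r->right = NULL;
        *root = r;
}

/* Find the region containing 'addr', or NULL. */
static struct region *
region_search(struct region *root, uint64_t addr)
{
        while (root) {
                if (addr < root->addr)
                        root = root->left;
                else if (addr >= root->addr + root->size)
                        root = root->right;
                else
                        return root;
        }
        return NULL;
}
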
930 #define node(root, dir) (((root)->head.dir == &vmm->list) ? NULL : \
934 nvkm_vmm_node_merge(struct nvkm_vmm *vmm, struct nvkm_vma *prev,
940 nvkm_vmm_node_delete(vmm, next);
943 nvkm_vmm_node_delete(vmm, vma);
950 nvkm_vmm_node_remove(vmm, next);
954 nvkm_vmm_node_insert(vmm, next);
960 nvkm_vmm_node_remove(vmm, vma);
964 nvkm_vmm_node_insert(vmm, vma);
967 nvkm_vmm_node_delete(vmm, vma);
976 nvkm_vmm_node_split(struct nvkm_vmm *vmm,
986 nvkm_vmm_node_insert(vmm, vma);
992 nvkm_vmm_node_merge(vmm, prev, vma, NULL, vma->size);
996 nvkm_vmm_node_insert(vmm, tmp);
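
nvkm_vmm_node_split() (lines 976-996) carves exactly [addr, addr + size) out of an existing VMA, creating up to two leftover nodes that get re-inserted, while nvkm_vmm_node_merge() (lines 934-967) folds compatible neighbours back together. A stand-alone sketch of the split step only, using a hypothetical region_new() allocator:

#include <stdint.h>
#include <stdlib.h>

struct region { uint64_t addr, size; };

static struct region *region_new(uint64_t addr, uint64_t size)
{
        struct region *r = malloc(sizeof(*r));

        if (r) {
                r->addr = addr;
                r->size = size;
        }
        return r;
}

/* Shrink 'r' so it covers exactly [addr, addr + size), handing back the
 * head and tail leftovers (NULL when empty) for the caller to re-insert,
 * much as the real split re-inserts its pieces with nvkm_vmm_node_insert(). */
static int
region_split(struct region *r, uint64_t addr, uint64_t size,
             struct region **head, struct region **tail)
{
        *head = *tail = NULL;

        if (addr > r->addr) {
                *head = region_new(r->addr, addr - r->addr);
                if (!*head)
                        return -1;
        }
        if (addr + size < r->addr + r->size) {
                *tail = region_new(addr + size,
                                   r->addr + r->size - (addr + size));
                if (!*tail) {
                        free(*head);
                        *head = NULL;
                        return -1;
                }
        }
        r->addr = addr;
        r->size = size;
        return 0;
}
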
1019 nvkm_vmm_dump(struct nvkm_vmm *vmm)
1022 list_for_each_entry(vma, &vmm->list, head) {
1028 nvkm_vmm_dtor(struct nvkm_vmm *vmm)
1033 if (vmm->rm.client.gsp) {
1034 nvkm_gsp_rm_free(&vmm->rm.object);
1035 nvkm_gsp_device_dtor(&vmm->rm.device);
1036 nvkm_gsp_client_dtor(&vmm->rm.client);
1037 nvkm_vmm_put(vmm, &vmm->rm.rsvd);
1041 nvkm_vmm_dump(vmm);
1043 while ((node = rb_first(&vmm->root))) {
1045 nvkm_vmm_put(vmm, &vma);
1048 if (vmm->bootstrapped) {
1049 const struct nvkm_vmm_page *page = vmm->func->page;
1050 const u64 limit = vmm->limit - vmm->start;
1055 nvkm_mmu_ptc_dump(vmm->mmu);
1056 nvkm_vmm_ptes_put(vmm, page, vmm->start, limit);
1059 vma = list_first_entry(&vmm->list, typeof(*vma), head);
1062 WARN_ON(!list_empty(&vmm->list));
1064 if (vmm->nullp) {
1065 dma_free_coherent(vmm->mmu->subdev.device->dev, 16 * 1024,
1066 vmm->nullp, vmm->null);
1069 if (vmm->pd) {
1070 nvkm_mmu_ptc_put(vmm->mmu, true, &vmm->pd->pt[0]);
1071 nvkm_vmm_pt_del(&vmm->pd);
1076 nvkm_vmm_ctor_managed(struct nvkm_vmm *vmm, u64 addr, u64 size)
1084 nvkm_vmm_node_insert(vmm, vma);
1085 list_add_tail(&vma->head, &vmm->list);
1093 struct nvkm_vmm *vmm)
1101 vmm->func = func;
1102 vmm->mmu = mmu;
1103 vmm->name = name;
1104 vmm->debug = mmu->subdev.debug;
1105 kref_init(&vmm->kref);
1107 __mutex_init(&vmm->mutex.vmm, "&vmm->mutex.vmm", key ? key : &_key);
1108 mutex_init(&vmm->mutex.ref);
1109 mutex_init(&vmm->mutex.map);
1130 vmm->pd = nvkm_vmm_pt_new(desc, false, NULL);
1131 if (!vmm->pd)
1133 vmm->pd->refs[0] = 1;
1134 INIT_LIST_HEAD(&vmm->join);
1141 vmm->pd->pt[0] = nvkm_mmu_ptc_get(mmu, size, desc->align, true);
1142 if (!vmm->pd->pt[0])
1147 INIT_LIST_HEAD(&vmm->list);
1148 vmm->free = RB_ROOT;
1149 vmm->root = RB_ROOT;
1156 vmm->start = 0;
1157 vmm->limit = 1ULL << bits;
1158 if (addr + size < addr || addr + size > vmm->limit)
1162 if (addr && (ret = nvkm_vmm_ctor_managed(vmm, 0, addr)))
1165 vmm->managed.p.addr = 0;
1166 vmm->managed.p.size = addr;
1172 nvkm_vmm_free_insert(vmm, vma);
1173 list_add_tail(&vma->head, &vmm->list);
1178 size = vmm->limit - addr;
1179 if (size && (ret = nvkm_vmm_ctor_managed(vmm, addr, size)))
1182 vmm->managed.n.addr = addr;
1183 vmm->managed.n.size = size;
1188 vmm->start = addr;
1189 vmm->limit = size ? (addr + size) : (1ULL << bits);
1190 if (vmm->start > vmm->limit || vmm->limit > (1ULL << bits))
1193 if (!(vma = nvkm_vma_new(vmm->start, vmm->limit - vmm->start)))
1196 nvkm_vmm_free_insert(vmm, vma);
1197 list_add(&vma->head, &vmm->list);
1215 nvkm_vmm_pfn_split_merge(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
1233 return nvkm_vmm_node_merge(vmm, prev, vma, next, size);
1234 return nvkm_vmm_node_split(vmm, vma, addr, size);
1238 nvkm_vmm_pfn_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size)
1240 struct nvkm_vma *vma = nvkm_vmm_node_search(vmm, addr);
1254 nvkm_vmm_ptes_unmap_put(vmm, &vmm->func->page[vma->refd],
1257 next = nvkm_vmm_pfn_split_merge(vmm, vma, start, size, 0, false);
1274 nvkm_vmm_pfn_map(struct nvkm_vmm *vmm, u8 shift, u64 addr, u64 size, u64 *pfn)
1276 const struct nvkm_vmm_page *page = vmm->func->page;
1292 addr + size < addr || addr + size > vmm->limit) {
1293 VMM_DEBUG(vmm, "paged map %d %d %016llx %016llx\n",
1298 if (!(vma = nvkm_vmm_node_search(vmm, addr)))
1338 tmp = nvkm_vmm_pfn_split_merge(vmm, vma, addr, size,
1340 vmm->func->page, map);
1347 tmp->refd = page - vmm->func->page;
1360 ret = nvkm_vmm_ptes_get_map(vmm, page, addr,
1364 nvkm_vmm_ptes_map(vmm, page, addr, size, &args,
1369 nvkm_vmm_ptes_unmap_put(vmm, page, addr, size,
1397 nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1402 nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
1410 nvkm_vmm_node_merge(vmm, prev, vma, next, vma->size);
1414 nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, bool pfn)
1416 const struct nvkm_vmm_page *page = &vmm->func->page[vma->refd];
1419 nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
1422 nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
1425 nvkm_vmm_unmap_region(vmm, vma);
1429 nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1432 mutex_lock(&vmm->mutex.vmm);
1433 nvkm_vmm_unmap_locked(vmm, vma, false);
1434 mutex_unlock(&vmm->mutex.vmm);
1439 nvkm_vmm_map_valid(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
1445 VMM_DEBUG(vmm, "%d !VRAM", map->page->shift);
1452 VMM_DEBUG(vmm, "%d !HOST", map->page->shift);
1465 VMM_DEBUG(vmm, "alignment %016llx %016llx %016llx %d %d",
1471 return vmm->func->valid(vmm, argv, argc, map);
1475 nvkm_vmm_map_choose(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
1478 for (map->page = vmm->func->page; map->page->shift; map->page++) {
1479 VMM_DEBUG(vmm, "trying %d", map->page->shift);
1480 if (!nvkm_vmm_map_valid(vmm, vma, argv, argc, map))
1487 nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
1497 VMM_DEBUG(vmm, "overrun %016llx %016llx %016llx",
1507 const u32 debug = vmm->debug;
1508 vmm->debug = 0;
1509 ret = nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
1510 vmm->debug = debug;
1512 VMM_DEBUG(vmm, "invalid at any page size");
1513 nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
1519 map->page = &vmm->func->page[vma->refd];
1521 map->page = &vmm->func->page[vma->page];
1523 ret = nvkm_vmm_map_valid(vmm, vma, argv, argc, map);
1525 VMM_DEBUG(vmm, "invalid %d\n", ret);
1557 ret = nvkm_vmm_ptes_get_map(vmm, map->page, vma->addr, vma->size, map, func);
1561 vma->refd = map->page - vmm->func->page;
1563 nvkm_vmm_ptes_map(vmm, map->page, vma->addr, vma->size, map, func);
1566 nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
1575 nvkm_vmm_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc,
1580 if (nvkm_vmm_in_managed_range(vmm, vma->addr, vma->size) &&
1581 vmm->managed.raw)
1582 return nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
1584 mutex_lock(&vmm->mutex.vmm);
1585 ret = nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
1587 mutex_unlock(&vmm->mutex.vmm);
1592 nvkm_vmm_put_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1599 nvkm_vmm_free_delete(vmm, prev);
1604 nvkm_vmm_free_delete(vmm, next);
1607 nvkm_vmm_free_insert(vmm, vma);
1611 nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1613 const struct nvkm_vmm_page *page = vmm->func->page;
1638 nvkm_vmm_ptes_unmap_put(vmm, &page[refd], addr,
1644 nvkm_vmm_ptes_put(vmm, &page[refd], addr, size);
1656 nvkm_vmm_unmap_region(vmm, next);
1668 nvkm_vmm_ptes_sparse_put(vmm, &page[vma->refd], vma->addr, vma->size);
1679 nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, false);
1683 nvkm_vmm_node_remove(vmm, vma);
1689 nvkm_vmm_put_region(vmm, vma);
1693 nvkm_vmm_put(struct nvkm_vmm *vmm, struct nvkm_vma **pvma)
1697 mutex_lock(&vmm->mutex.vmm);
1698 nvkm_vmm_put_locked(vmm, vma);
1699 mutex_unlock(&vmm->mutex.vmm);
1705 nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse,
1708 const struct nvkm_vmm_page *page = &vmm->func->page[NVKM_VMA_PAGE_NONE];
1714 VMM_TRACE(vmm, "getref %d mapref %d sparse %d "
1720 VMM_DEBUG(vmm, "args %016llx %d %d %d",
1731 if (unlikely((getref || vmm->func->page_block) && !shift)) {
1732 VMM_DEBUG(vmm, "page size required: %d %016llx",
1733 getref, vmm->func->page_block);
1741 for (page = vmm->func->page; page->shift; page++) {
1747 VMM_DEBUG(vmm, "page %d %016llx", shift, size);
1756 temp = vmm->free.rb_node;
1777 const int p = page - vmm->func->page;
1780 if (vmm->func->page_block && prev && prev->page != p)
1781 addr = ALIGN(addr, vmm->func->page_block);
1785 if (vmm->func->page_block && next && next->page != p)
1786 tail = ALIGN_DOWN(tail, vmm->func->page_block);
1789 nvkm_vmm_free_remove(vmm, this);
1803 nvkm_vmm_put_region(vmm, vma);
1806 nvkm_vmm_free_insert(vmm, vma);
1812 nvkm_vmm_put_region(vmm, vma);
1815 nvkm_vmm_free_insert(vmm, tmp);
1820 ret = nvkm_vmm_ptes_sparse_get(vmm, page, vma->addr, vma->size);
1822 ret = nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, true);
1824 ret = nvkm_vmm_ptes_get(vmm, page, vma->addr, vma->size);
1828 nvkm_vmm_put_region(vmm, vma);
1834 vma->page = page - vmm->func->page;
1837 nvkm_vmm_node_insert(vmm, vma);
1843 nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
1846 mutex_lock(&vmm->mutex.vmm);
1847 ret = nvkm_vmm_get_locked(vmm, false, true, false, page, 0, size, pvma);
1848 mutex_unlock(&vmm->mutex.vmm);
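
Taken together with nvkm_vmm_put() (lines 1693-1699), nvkm_vmm_map() (lines 1575-1587) and nvkm_vmm_unmap() (lines 1429-1434), this is the external allocate/map/unmap/free surface. A hedged usage sketch, kernel context assumed; only the signatures visible in this listing are relied on, and the page shift value 12 and the elided map arguments are placeholders:

/* Sketch only: allocate 'size' bytes of GPU virtual address space, then
 * release it again.  Mapping is elided; nvkm_vmm_map() would take the
 * memory-specific argv/argc and a struct nvkm_vmm_map describing it. */
static int
example_vma_cycle(struct nvkm_vmm *vmm, u64 size)
{
        struct nvkm_vma *vma = NULL;
        int ret;

        ret = nvkm_vmm_get(vmm, 12, size, &vma);  /* 12: assumed page shift */
        if (ret)
                return ret;

        /* ... nvkm_vmm_map(vmm, vma, argv, argc, &map) ... */

        nvkm_vmm_put(vmm, &vma);  /* put_locked also tears down mappings
                                   * still on the node (lines 1638-1644) */
        return 0;
}
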
1853 nvkm_vmm_raw_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size,
1856 const struct nvkm_vmm_page *page = &vmm->func->page[refd];
1858 nvkm_vmm_ptes_unmap(vmm, page, addr, size, sparse, false);
1862 nvkm_vmm_raw_put(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd)
1864 const struct nvkm_vmm_page *page = vmm->func->page;
1866 nvkm_vmm_ptes_put(vmm, &page[refd], addr, size);
1870 nvkm_vmm_raw_get(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd)
1872 const struct nvkm_vmm_page *page = vmm->func->page;
1877 return nvkm_vmm_ptes_get(vmm, &page[refd], addr, size);
1881 nvkm_vmm_raw_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
1885 mutex_lock(&vmm->mutex.ref);
1886 ret = nvkm_vmm_ptes_sparse(vmm, addr, size, ref);
1887 mutex_unlock(&vmm->mutex.ref);
1893 nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
1895 if (inst && vmm && vmm->func->part) {
1896 mutex_lock(&vmm->mutex.vmm);
1897 vmm->func->part(vmm, inst);
1898 mutex_unlock(&vmm->mutex.vmm);
1903 nvkm_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
1906 if (vmm->func->join) {
1907 mutex_lock(&vmm->mutex.vmm);
1908 ret = vmm->func->join(vmm, inst);
1909 mutex_unlock(&vmm->mutex.vmm);
1919 nvkm_memory_boot(it->pt[0]->pt[type]->memory, it->vmm);
1924 nvkm_vmm_boot(struct nvkm_vmm *vmm)
1926 const struct nvkm_vmm_page *page = vmm->func->page;
1927 const u64 limit = vmm->limit - vmm->start;
1933 ret = nvkm_vmm_ptes_get(vmm, page, vmm->start, limit);
1937 nvkm_vmm_iter(vmm, page, vmm->start, limit, "bootstrap", false, false,
1939 vmm->bootstrapped = true;
1946 struct nvkm_vmm *vmm = container_of(kref, typeof(*vmm), kref);
1947 nvkm_vmm_dtor(vmm);
1948 kfree(vmm);
1954 struct nvkm_vmm *vmm = *pvmm;
1955 if (vmm) {
1956 kref_put(&vmm->kref, nvkm_vmm_del);
1962 nvkm_vmm_ref(struct nvkm_vmm *vmm)
1964 if (vmm)
1965 kref_get(&vmm->kref);
1966 return vmm;
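
nvkm_vmm_ref()/nvkm_vmm_unref() (lines 1946-1966) manage the vmm lifetime with a kref: the final unref runs nvkm_vmm_del(), which invokes the destructor and frees the object, and the caller's pointer is cleared. A user-space sketch of that pattern with C11 atomics, purely illustrative:

#include <stdlib.h>
#include <stdatomic.h>

struct obj {
        atomic_uint refs;
        /* ... payload ... */
};

static struct obj *obj_new(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (o)
                atomic_init(&o->refs, 1);
        return o;
}

static struct obj *obj_ref(struct obj *o)
{
        if (o)
                atomic_fetch_add(&o->refs, 1);
        return o;
}

/* Drop a reference; the last one frees the object and clears the caller's
 * pointer, in the spirit of nvkm_vmm_unref(&vmm). */
static void obj_unref(struct obj **po)
{
        struct obj *o = *po;

        if (o && atomic_fetch_sub(&o->refs, 1) == 1)
                free(o);  /* destructor body would run here */
        *po = NULL;
}
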
1975 struct nvkm_vmm *vmm = NULL;
1977 ret = mmu->func->vmm.ctor(mmu, false, addr, size, argv, argc,
1978 key, name, &vmm);
1980 nvkm_vmm_unref(&vmm);
1981 *pvmm = vmm;