Lines Matching defs:vmm

42 return nvkm_vmm_ref(nvkm_uvmm(object)->vmm);
51 struct nvkm_vmm *vmm = uvmm->vmm;
61 if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
65 mutex_lock(&vmm->mutex.vmm);
66 ret = nvkm_vmm_pfn_unmap(vmm, addr, size);
67 mutex_unlock(&vmm->mutex.vmm);
79 struct nvkm_vmm *vmm = uvmm->vmm;
94 if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
98 mutex_lock(&vmm->mutex.vmm);
99 ret = nvkm_vmm_pfn_map(vmm, page, addr, size, phys);
100 mutex_unlock(&vmm->mutex.vmm);
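
The pfnclr (lines 51-67) and pfnmap (lines 79-100) matches above share one shape: reject requests that land in a client-owned raw managed range, then hold vmm->mutex.vmm around the PFN operation. A minimal sketch of that shape, assuming the nvkm MMU internal headers; the -EINVAL for the raw case is an assumption, and the ioctl argument unpacking is omitted:

static int
sketch_pfn_clear(struct nvkm_vmm *vmm, u64 addr, u64 size)
{
	int ret;

	/* Raw-mode managed ranges are driven directly by the client, so the
	 * kernel PFN path refuses to touch them (error code assumed here). */
	if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
		return -EINVAL;

	mutex_lock(&vmm->mutex.vmm);
	ret = nvkm_vmm_pfn_unmap(vmm, addr, size);
	mutex_unlock(&vmm->mutex.vmm);
	return ret;
}

The pfnmap side (line 99) differs only in calling nvkm_vmm_pfn_map(vmm, page, addr, size, phys) inside the same critical section.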
112 struct nvkm_vmm *vmm = uvmm->vmm;
122 if (nvkm_vmm_in_managed_range(vmm, addr, 0) && vmm->managed.raw)
125 mutex_lock(&vmm->mutex.vmm);
126 vma = nvkm_vmm_node_search(vmm, addr);
128 VMM_DEBUG(vmm, "lookup %016llx: %016llx",
134 VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
139 VMM_DEBUG(vmm, "unmapped");
143 nvkm_vmm_unmap_locked(vmm, vma, false);
146 mutex_unlock(&vmm->mutex.vmm);
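
The unmap matches (lines 112-146) show a lookup-validate-unmap sequence done entirely under the lock. A simplified sketch; the specific error codes and the vma->memory check are assumptions, and the raw/pfnmap special case at line 122 is left out:

static int
sketch_unmap(struct nvkm_vmm *vmm, u64 addr)
{
	struct nvkm_vma *vma;
	int ret = -ENOENT;	/* error codes here are illustrative */

	mutex_lock(&vmm->mutex.vmm);
	vma = nvkm_vmm_node_search(vmm, addr);
	if (!vma || vma->addr != addr) {
		VMM_DEBUG(vmm, "lookup %016llx: %016llx",
			  addr, vma ? vma->addr : ~0ULL);
		goto done;
	}
	if (vma->busy) {
		VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
		goto done;
	}
	if (!vma->memory) {
		ret = -EINVAL;	/* assumed: nothing is mapped at this node */
		VMM_DEBUG(vmm, "unmapped");
		goto done;
	}
	nvkm_vmm_unmap_locked(vmm, vma, false);
	ret = 0;
done:
	mutex_unlock(&vmm->mutex.vmm);
	return ret;
}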
158 struct nvkm_vmm *vmm = uvmm->vmm;
171 if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
176 VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory));
180 mutex_lock(&vmm->mutex.vmm);
181 if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) {
182 VMM_DEBUG(vmm, "lookup %016llx", addr);
187 VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
192 VMM_DEBUG(vmm, "pfnmap %016llx", addr);
199 VMM_DEBUG(vmm, "split %d %d %d "
206 vma = nvkm_vmm_node_split(vmm, vma, addr, size);
213 mutex_unlock(&vmm->mutex.vmm);
215 ret = nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
222 mutex_lock(&vmm->mutex.vmm);
224 nvkm_vmm_unmap_region(vmm, vma);
226 mutex_unlock(&vmm->mutex.vmm);
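
The map matches (lines 158-226) add the one interesting locking twist in this file: the target node is found and, if necessary, split while holding vmm->mutex.vmm, the lock is dropped for the potentially slow nvkm_memory_map() call, and it is re-taken only to tear the region back down if that call fails. A stripped-down sketch of that flow; the busy flagging, error codes, and the skipped busy/pfnmap/split diagnostics are assumptions or omissions:

static int
sketch_map(struct nvkm_vmm *vmm, struct nvkm_memory *memory,
	   u64 addr, u64 size, u64 offset, void *argv, u32 argc)
{
	struct nvkm_vma *vma;
	int ret;

	mutex_lock(&vmm->mutex.vmm);
	vma = nvkm_vmm_node_search(vmm, addr);
	if (!vma) {
		mutex_unlock(&vmm->mutex.vmm);
		return -ENOENT;
	}

	/* Carve the requested window out of the existing node if needed. */
	vma = nvkm_vmm_node_split(vmm, vma, addr, size);
	if (!vma) {
		mutex_unlock(&vmm->mutex.vmm);
		return -ENOMEM;
	}
	vma->busy = true;	/* assumed: keep others off the node while unlocked */
	mutex_unlock(&vmm->mutex.vmm);

	/* Backend mapping happens outside the lock. */
	ret = nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
	if (ret) {
		mutex_lock(&vmm->mutex.vmm);
		vma->busy = false;
		nvkm_vmm_unmap_region(vmm, vma);
		mutex_unlock(&vmm->mutex.vmm);
	}
	return ret;
}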
237 struct nvkm_vmm *vmm = uvmm->vmm;
247 mutex_lock(&vmm->mutex.vmm);
248 vma = nvkm_vmm_node_search(vmm, args->v0.addr);
250 VMM_DEBUG(vmm, "lookup %016llx: %016llx %d", addr,
256 VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
260 nvkm_vmm_put_locked(vmm, vma);
263 mutex_unlock(&vmm->mutex.vmm);
273 struct nvkm_vmm *vmm = uvmm->vmm;
290 mutex_lock(&vmm->mutex.vmm);
291 ret = nvkm_vmm_get_locked(vmm, getref, mapref, sparse,
293 mutex_unlock(&vmm->mutex.vmm);
311 page = uvmm->vmm->func->page;
332 struct nvkm_vmm *vmm = uvmm->vmm;
336 for (page = vmm->func->page; page->shift; page++) {
342 VMM_DEBUG(vmm, "page %d %016llx", shift, size);
348 *refd = page - vmm->func->page;
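
Lines 332-348 are the raw-path page-size lookup: walk the per-backend page array until the requested shift is found and hand back its index, which the raw helpers below use as their refd argument. A sketch with the parameter types and the size-alignment check assumed:

static int
sketch_page_index(struct nvkm_vmm *vmm, u8 shift, u64 size, u8 *refd)
{
	const struct nvkm_vmm_page *page;

	/* Find the page size whose shift matches the request. */
	for (page = vmm->func->page; page->shift; page++) {
		if (page->shift == shift)
			break;
	}
	if (!page->shift || !IS_ALIGNED(size, 1ULL << page->shift)) {
		VMM_DEBUG(vmm, "page %d %016llx", shift, size);
		return -EINVAL;
	}
	*refd = page - vmm->func->page;
	return 0;
}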
356 struct nvkm_vmm *vmm = uvmm->vmm;
360 if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
367 return nvkm_vmm_raw_get(vmm, args->addr, args->size, refd);
373 struct nvkm_vmm *vmm = uvmm->vmm;
377 if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
384 nvkm_vmm_raw_put(vmm, args->addr, args->size, refd);
393 struct nvkm_vmm *vmm = uvmm->vmm;
408 if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
419 VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory));
423 ret = nvkm_memory_map(memory, args->offset, vmm, &vma, argv, argc);
433 struct nvkm_vmm *vmm = uvmm->vmm;
437 if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
444 nvkm_vmm_raw_unmap(vmm, args->addr, args->size,
453 struct nvkm_vmm *vmm = uvmm->vmm;
455 if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
458 return nvkm_vmm_raw_sparse(vmm, args->addr, args->size, args->ref);
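
Every raw entry point above (get 356-367, put 373-384, map 393-423, unmap 433-444, sparse 453-458) opens with the same guard: the request must fall inside the kernel-managed window reported by nvkm_vmm_in_managed_range(). A minimal sketch of the sparse case, with the error code assumed and args->ref treated as a bool for illustration:

static int
sketch_raw_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
{
	/* Raw operations are only legal inside the managed window. */
	if (!nvkm_vmm_in_managed_range(vmm, addr, size))
		return -EINVAL;

	return nvkm_vmm_raw_sparse(vmm, addr, size, ref);
}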
469 if (!uvmm->vmm->managed.raw)
505 if (uvmm->vmm->func->mthd) {
506 return uvmm->vmm->func->mthd(uvmm->vmm,
521 nvkm_vmm_unref(&uvmm->vmm);
560 if (!mmu->vmm) {
561 ret = mmu->func->vmm.ctor(mmu, managed || raw, addr, size,
562 argv, argc, NULL, "user", &uvmm->vmm);
566 uvmm->vmm->debug = max(uvmm->vmm->debug, oclass->client->debug);
571 uvmm->vmm = nvkm_vmm_ref(mmu->vmm);
573 uvmm->vmm->managed.raw = raw;
576 ret = mmu->func->promote_vmm(uvmm->vmm);
581 page = uvmm->vmm->func->page;
585 args->v0.addr = uvmm->vmm->start;
586 args->v0.size = uvmm->vmm->limit;
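
Finally, the constructor matches (lines 560-586) show the two ways a uvmm gets its address space: build a private VMM through mmu->func->vmm.ctor() when no shared one exists, or take a reference on the device-wide mmu->vmm; either way the raw flag is recorded, the VMM may be promoted, and the usable range is reported back. A simplified sketch; error handling is trimmed and the promote_vmm guard is an assumption:

static int
sketch_ctor(struct nvkm_mmu *mmu, const struct nvkm_oclass *oclass,
	    bool managed, bool raw, u64 addr, u64 size,
	    void *argv, u32 argc, struct nvkm_uvmm *uvmm,
	    u64 *start, u64 *limit)
{
	int ret;

	if (!mmu->vmm) {
		/* No shared VMM: build a private one for this client. */
		ret = mmu->func->vmm.ctor(mmu, managed || raw, addr, size,
					  argv, argc, NULL, "user", &uvmm->vmm);
		if (ret)
			return ret;
		uvmm->vmm->debug = max(uvmm->vmm->debug, oclass->client->debug);
	} else {
		/* Share the device-wide VMM. */
		uvmm->vmm = nvkm_vmm_ref(mmu->vmm);
	}
	uvmm->vmm->managed.raw = raw;

	if (mmu->func->promote_vmm) {	/* assumed guard around the promote hook */
		ret = mmu->func->promote_vmm(uvmm->vmm);
		if (ret)
			return ret;
	}

	*start = uvmm->vmm->start;
	*limit = uvmm->vmm->limit;
	return 0;
}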