Lines Matching refs:vm (cross-reference hits from drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c; the leading number on each hit is its line in that file)

119 	 * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
121 struct amdgpu_vm *vm;
130 * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
133 * @vm: amdgpu_vm pointer
140 int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
145 if (vm->pasid == pasid)
148 if (vm->pasid) {
149 r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
153 vm->pasid = 0;
157 r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
162 vm->pasid = pasid;
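
Taken together, the amdgpu_vm_set_pasid hits (lines 140-162) describe a pasid-to-VM mapping kept in an xarray, with the old entry erased before a new one is stored. A minimal sketch of that pattern, assuming nothing beyond the fields visible above (example_vm is an illustrative stand-in, not the driver type):

        #include <linux/xarray.h>

        /* Illustrative stand-in for the single amdgpu_vm field this needs. */
        struct example_vm {
                u32 pasid;      /* 0 means "no pasid assigned" */
        };

        /* Mirror of the xa_erase_irq()/xa_store_irq() pattern in the hits
         * above: passing pasid == 0 only removes the existing mapping. */
        static int example_set_pasid(struct xarray *pasids,
                                     struct example_vm *vm, u32 pasid)
        {
                int r;

                if (vm->pasid == pasid)
                        return 0;               /* nothing to change */

                if (vm->pasid) {                /* drop the old mapping first */
                        r = xa_err(xa_erase_irq(pasids, vm->pasid));
                        if (r < 0)
                                return r;
                        vm->pasid = 0;
                }

                if (pasid) {                    /* install the new mapping */
                        r = xa_err(xa_store_irq(pasids, pasid, vm, GFP_KERNEL));
                        if (r < 0)
                                return r;
                        vm->pasid = pasid;
                }

                return 0;
        }
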
179 struct amdgpu_vm *vm = vm_bo->vm;
183 spin_lock(&vm_bo->vm->status_lock);
185 list_move(&vm_bo->vm_status, &vm->evicted);
187 list_move_tail(&vm_bo->vm_status, &vm->evicted);
188 spin_unlock(&vm_bo->vm->status_lock);
200 spin_lock(&vm_bo->vm->status_lock);
201 list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
202 spin_unlock(&vm_bo->vm->status_lock);
215 spin_lock(&vm_bo->vm->status_lock);
216 list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
217 spin_unlock(&vm_bo->vm->status_lock);
231 spin_lock(&vm_bo->vm->status_lock);
232 list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
233 spin_unlock(&vm_bo->vm->status_lock);
247 spin_lock(&vm_bo->vm->status_lock);
248 list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user);
249 spin_unlock(&vm_bo->vm->status_lock);
263 spin_lock(&vm_bo->vm->status_lock);
264 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
265 spin_unlock(&vm_bo->vm->status_lock);
281 spin_lock(&vm_bo->vm->status_lock);
282 list_move(&vm_bo->vm_status, &vm_bo->vm->done);
283 spin_unlock(&vm_bo->vm->status_lock);
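
The repeated spin_lock/list_move/spin_unlock hits between lines 179 and 283 are the per-BO state machine: each helper only moves a BO's vm_status node onto a different per-VM list (evicted, evicted_user, moved, idle, invalidated, relocated, done). One such transition in isolation, as a sketch using stand-in types for the fields the listing shows:

        #include <linux/list.h>
        #include <linux/spinlock.h>

        struct example_vm {
                spinlock_t status_lock;
                struct list_head moved;         /* one of the per-VM state lists */
        };

        struct example_vm_bo_base {
                struct example_vm *vm;
                struct list_head vm_status;     /* membership in exactly one state list */
        };

        /* Move a BO into the "moved" state; the lock protects only list
         * membership, so the critical section stays tiny. */
        static void example_vm_bo_moved(struct example_vm_bo_base *vm_bo)
        {
                spin_lock(&vm_bo->vm->status_lock);
                list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
                spin_unlock(&vm_bo->vm->status_lock);
        }
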
288 * @vm: the VM which state machine to reset
293 static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
297 spin_lock(&vm->status_lock);
298 list_splice_init(&vm->done, &vm->invalidated);
299 list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
301 list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
306 list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
308 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
310 spin_unlock(&vm->status_lock);
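
amdgpu_vm_bo_reset_state_machine (lines 293-310) folds the done list back into invalidated and re-sorts idle entries. The two list primitives doing that work, shown out of context as a sketch (the classification predicate here is hypothetical, not the driver's actual test):

        spin_lock(&vm->status_lock);

        /* O(1): empty 'done' and prepend its entries onto 'invalidated'. */
        list_splice_init(&vm->done, &vm->invalidated);

        /* The _safe variant allows list_move() while walking 'idle'. */
        list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
                if (example_needs_relocation(vm_bo))    /* hypothetical predicate */
                        list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
                else
                        list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
        }

        spin_unlock(&vm->status_lock);
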
314 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
317 * @vm: vm to which bo is to be added
324 struct amdgpu_vm *vm, struct amdgpu_bo *bo)
326 base->vm = vm;
336 if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
339 dma_resv_assert_held(vm->root.bo->tbo.base.resv);
341 ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
352 * we checked all the prerequisites, but it looks like this per vm bo
354 * is validated on next vm use to avoid fault.
362 * @vm: vm providing the BOs
368 int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
372 return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base,
380 * @vm: vm providing the BOs
386 struct amdgpu_vm *vm)
389 ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
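
amdgpu_vm_lock_pd (line 368) is a thin wrapper around drm_exec_prepare_obj() for the root page-directory BO. A hedged sketch of how a caller typically drives it from a drm_exec loop; the fence-slot count and the surrounding validation step are assumptions, and the third drm_exec_init() argument (an object-count hint) only exists on recent kernels:

        struct drm_exec exec;
        int r;

        drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
        drm_exec_until_all_locked(&exec) {
                /* reserve the PD and, e.g., two fence slots (assumed count) */
                r = amdgpu_vm_lock_pd(vm, &exec, 2);
                drm_exec_retry_on_contention(&exec);
                if (r)
                        goto out;
        }

        /* ... validate BOs / update page tables while the PD stays locked ... */

out:
        drm_exec_fini(&exec);
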
395 struct amdgpu_vm *vm)
399 r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
405 return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
410 drm_sched_entity_destroy(&vm->immediate);
415 static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm)
417 drm_sched_entity_destroy(&vm->immediate);
418 drm_sched_entity_destroy(&vm->delayed);
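
The entity hits at lines 399-418 show one scheduler entity for immediate updates and one for delayed updates, both torn down in amdgpu_vm_fini_entities. A sketch of the init-with-unwind pairing; sched_list and num_scheds stand in for whatever the real code pulls out of the VM manager:

        /* Create the "immediate" entity first ... */
        r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
                                  sched_list, num_scheds, NULL);
        if (r)
                return r;

        /* ... and undo it again if the "delayed" entity cannot be created. */
        r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
                                  sched_list, num_scheds, NULL);
        if (r)
                drm_sched_entity_destroy(&vm->immediate);

        return r;
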
424 * @vm: optional VM to check, might be NULL
430 uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
434 if (!vm)
437 result += vm->generation;
439 if (drm_sched_entity_error(&vm->delayed))
449 * @vm: vm providing the BOs
461 int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
471 if (drm_sched_entity_error(&vm->delayed)) {
472 ++vm->generation;
473 amdgpu_vm_bo_reset_state_machine(vm);
474 amdgpu_vm_fini_entities(vm);
475 r = amdgpu_vm_init_entities(adev, vm);
480 spin_lock(&vm->status_lock);
481 while (!list_empty(&vm->evicted)) {
482 bo_base = list_first_entry(&vm->evicted,
485 spin_unlock(&vm->status_lock);
502 vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
505 spin_lock(&vm->status_lock);
507 while (ticket && !list_empty(&vm->evicted_user)) {
508 bo_base = list_first_entry(&vm->evicted_user,
511 spin_unlock(&vm->status_lock);
516 struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm);
533 spin_lock(&vm->status_lock);
535 spin_unlock(&vm->status_lock);
537 amdgpu_vm_eviction_lock(vm);
538 vm->evicting = false;
539 amdgpu_vm_eviction_unlock(vm);
547 * @vm: VM to check
554 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
559 amdgpu_vm_eviction_lock(vm);
560 ret = !vm->evicting;
561 amdgpu_vm_eviction_unlock(vm);
563 spin_lock(&vm->status_lock);
564 empty = list_empty(&vm->evicted);
565 spin_unlock(&vm->status_lock);
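
amdgpu_vm_ready (lines 554-565) combines two independent checks taken under two different locks; joining them with && at the end is an assumption, but follows directly from the hits above:

        bool ret, empty;

        amdgpu_vm_eviction_lock(vm);
        ret = !vm->evicting;                    /* not currently being evicted */
        amdgpu_vm_eviction_unlock(vm);

        spin_lock(&vm->status_lock);
        empty = list_empty(&vm->evicted);       /* no BOs waiting for validation */
        spin_unlock(&vm->status_lock);

        return ret && empty;
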
571 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
637 * amdgpu_vm_flush - hardware flush the vm
749 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
751 * @vm: requested vm
754 * Find @bo inside the requested vm.
755 * Search inside the @bos vm list for the requested vm
763 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
769 if (base->vm != vm)
808 * @vm: requested vm
817 struct amdgpu_vm *vm, bool immediate)
825 spin_lock(&vm->status_lock);
826 list_splice_init(&vm->relocated, &relocated);
827 spin_unlock(&vm->status_lock);
837 params.vm = vm;
840 r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
853 r = vm->update_funcs->commit(&params, &vm->last_update);
858 atomic64_inc(&vm->tlb_seq);
884 atomic64_inc(&tlb_cb->vm->tlb_seq);
902 struct amdgpu_vm *vm = params->vm;
907 tlb_cb->vm = vm;
910 dma_fence_put(vm->last_tlb_flush);
911 vm->last_tlb_flush = dma_fence_get(*fence);
917 if (!params->unlocked && vm->is_compute_context) {
918 amdgpu_vm_tlb_fence_create(params->adev, vm, fence);
921 dma_resv_add_fence(vm->root.bo->tbo.base.resv, *fence,
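
The TLB hits around lines 858-921 suggest two pieces of bookkeeping: a per-VM sequence counter that advances whenever a flush becomes necessary, and a last_tlb_flush fence that always points at the newest flush. A sketch of just those two operations (the callback wiring and the compute-context TLB fence are omitted):

        /* Let amdgpu_vm_tlb_seq() readers notice that a flush is pending. */
        atomic64_inc(&vm->tlb_seq);

        /* Swap in the newest flush fence, dropping the reference held on
         * the previous one. */
        dma_fence_put(vm->last_tlb_flush);
        vm->last_tlb_flush = dma_fence_get(*fence);
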
927 * amdgpu_vm_update_range - update a range in the vm page table
930 * @vm: the VM to update the range
950 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
985 params.vm = vm;
1001 amdgpu_vm_eviction_lock(vm);
1002 if (vm->evicting) {
1007 if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
1010 amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
1011 swap(vm->last_unlocked, tmp);
1015 r = vm->update_funcs->prepare(&params, resv, sync_mode);
1073 r = vm->update_funcs->commit(&params, fence);
1086 amdgpu_vm_eviction_unlock(vm);
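
amdgpu_vm_update_range (lines 950-1086) appears to bracket the whole page-table update with the per-VM eviction lock and to route the actual writes through the vm->update_funcs backend (CPU or SDMA). A skeleton of that control flow with the details elided; the -EBUSY return is an assumption:

        amdgpu_vm_eviction_lock(vm);
        if (vm->evicting) {                     /* VM is being evicted, bail out */
                r = -EBUSY;
                goto error_unlock;
        }

        r = vm->update_funcs->prepare(&params, resv, sync_mode);
        if (r)
                goto error_unlock;

        /* ... walk the affected range and write the new page-table entries ... */

        r = vm->update_funcs->commit(&params, fence);

error_unlock:
        amdgpu_vm_eviction_unlock(vm);
        return r;
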
1094 struct amdgpu_vm *vm = bo_va->base.vm;
1104 if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv &&
1109 if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
1113 void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
1118 spin_lock(&vm->status_lock);
1119 list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status)
1122 list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status)
1125 list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status)
1128 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status)
1131 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status)
1134 list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status)
1136 spin_unlock(&vm->status_lock);
1140 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1155 struct amdgpu_vm *vm = bo_va->base.vm;
1169 resv = vm->root.bo->tbo.base.resv;
1207 vm->root.bo->tbo.base.resv))
1208 last_update = &vm->last_update;
1236 r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
1249 if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
1361 * @vm: requested vm
1368 struct amdgpu_vm *vm,
1381 * @vm: requested vm
1385 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1387 struct dma_resv *resv = vm->root.bo->tbo.base.resv;
1402 * @vm: requested vm
1414 struct amdgpu_vm *vm,
1417 struct dma_resv *resv = vm->root.bo->tbo.base.resv;
1423 while (!list_empty(&vm->freed)) {
1424 mapping = list_first_entry(&vm->freed,
1428 r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
1432 amdgpu_vm_free_mapping(adev, vm, mapping, f);
1454 * @vm: requested vm
1465 struct amdgpu_vm *vm,
1473 spin_lock(&vm->status_lock);
1474 while (!list_empty(&vm->moved)) {
1475 bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
1477 spin_unlock(&vm->status_lock);
1483 spin_lock(&vm->status_lock);
1486 while (!list_empty(&vm->invalidated)) {
1487 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
1490 spin_unlock(&vm->status_lock);
1516 if (vm->is_compute_context &&
1522 spin_lock(&vm->status_lock);
1524 spin_unlock(&vm->status_lock);
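
The loops at lines 1473-1490 drain vm->moved and vm->invalidated with the usual "pop one entry under the lock, drop the lock for the sleeping work, re-take it" shape, since the per-BO update can block. The moved-list half in isolation (passing clear=false for per-VM BOs is an assumption):

        spin_lock(&vm->status_lock);
        while (!list_empty(&vm->moved)) {
                bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
                                         base.vm_status);
                spin_unlock(&vm->status_lock);

                /* may sleep, so it must run without the status lock held */
                r = amdgpu_vm_bo_update(adev, bo_va, false);
                if (r)
                        return r;

                spin_lock(&vm->status_lock);
        }
        spin_unlock(&vm->status_lock);
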
1533 * @vm: requested vm
1543 struct amdgpu_vm *vm,
1547 uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
1551 WARN_ON_ONCE(!vm->is_compute_context);
1558 if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq)
1566 r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type,
1575 * amdgpu_vm_bo_add - add a bo to a specific vm
1578 * @vm: requested vm
1581 * Add @bo into the requested vm.
1582 * Add @bo to the list of bos associated with the vm
1590 struct amdgpu_vm *vm,
1599 amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
1633 struct amdgpu_vm *vm = bo_va->base.vm;
1638 amdgpu_vm_it_insert(mapping, &vm->va);
1643 if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
1682 * amdgpu_vm_bo_map - map bo inside a vm
1705 struct amdgpu_vm *vm = bo_va->base.vm;
1716 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1740 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1776 r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
1796 * amdgpu_vm_bo_unmap - remove bo mapping from vm
1814 struct amdgpu_vm *vm = bo_va->base.vm;
1837 amdgpu_vm_it_remove(mapping, &vm->va);
1842 list_add(&mapping->list, &vm->freed);
1844 amdgpu_vm_free_mapping(adev, vm, mapping,
1854 * @vm: VM structure to use
1864 struct amdgpu_vm *vm,
1893 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1924 amdgpu_vm_it_remove(tmp, &vm->va);
1933 list_add(&tmp->list, &vm->freed);
1941 amdgpu_vm_it_insert(before, &vm->va);
1945 if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
1956 amdgpu_vm_it_insert(after, &vm->va);
1960 if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
1973 * @vm: the requested VM
1982 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
1985 return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
1991 * @vm: the requested vm
1996 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2003 for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2019 * amdgpu_vm_bo_del - remove a bo from a specific vm
2024 * Remove @bo_va->bo from the requested vm.
2033 struct amdgpu_vm *vm = bo_va->base.vm;
2036 dma_resv_assert_held(vm->root.bo->tbo.base.resv);
2040 if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
2053 spin_lock(&vm->status_lock);
2055 spin_unlock(&vm->status_lock);
2059 amdgpu_vm_it_remove(mapping, &vm->va);
2062 list_add(&mapping->list, &vm->freed);
2066 amdgpu_vm_it_remove(mapping, &vm->va);
2067 amdgpu_vm_free_mapping(adev, vm, mapping,
2091 if (!bo_base || !bo_base->vm)
2099 if (!amdgpu_vm_eviction_trylock(bo_base->vm))
2103 if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
2104 amdgpu_vm_eviction_unlock(bo_base->vm);
2108 bo_base->vm->evicting = true;
2109 amdgpu_vm_eviction_unlock(bo_base->vm);
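
The cluster at lines 2091-2109 reads like the "may these page tables be evicted?" check that runs in TTM's eviction path, so it must not block: it trylocks the eviction lock and refuses if the last unlocked update has not signaled yet. Sketch:

        /* Never block here; TTM is trying to make room right now. */
        if (!amdgpu_vm_eviction_trylock(bo_base->vm))
                return false;

        /* An unlocked (page-fault) page-table update may still be running. */
        if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
                amdgpu_vm_eviction_unlock(bo_base->vm);
                return false;
        }

        bo_base->vm->evicting = true;           /* fence off further PT updates */
        amdgpu_vm_eviction_unlock(bo_base->vm);
        return true;
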
2132 struct amdgpu_vm *vm = bo_base->vm;
2134 if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
2145 else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
2174 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2177 * @min_vm_size: the minimum vm size in GB if it's set auto
2191 /* adjust vm size first */
2245 /* block size depends on vm size and hw setup*/
2261 DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2270 * @vm: VM object to wait for
2273 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2275 timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
2281 return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
2294 struct amdgpu_vm *vm;
2298 vm = xa_load(&adev->vm_manager.pasids, pasid);
2301 return vm;
2305 * amdgpu_vm_put_task_info - reference down the vm task_info ptr
2309 * frees the vm task_info ptr at the last put
2317 * amdgpu_vm_get_task_info_vm - Extracts task info for a vm.
2319 * @vm: VM to get info from
2325 amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm)
2329 if (vm) {
2330 ti = vm->task_info;
2331 kref_get(&vm->task_info->refcount);
2353 static int amdgpu_vm_create_task_info(struct amdgpu_vm *vm)
2355 vm->task_info = kzalloc(sizeof(struct amdgpu_task_info), GFP_KERNEL);
2356 if (!vm->task_info)
2359 kref_init(&vm->task_info->refcount);
2366 * @vm: vm for which to set the info
2368 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
2370 if (!vm->task_info)
2373 if (vm->task_info->pid == current->pid)
2376 vm->task_info->pid = current->pid;
2377 get_task_comm(vm->task_info->task_name, current);
2382 vm->task_info->tgid = current->group_leader->pid;
2383 get_task_comm(vm->task_info->process_name, current->group_leader);
2387 * amdgpu_vm_init - initialize a vm instance
2390 * @vm: requested vm
2393 * Init @vm fields.
2398 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2405 vm->va = RB_ROOT_CACHED;
2407 vm->reserved_vmid[i] = NULL;
2408 INIT_LIST_HEAD(&vm->evicted);
2409 INIT_LIST_HEAD(&vm->evicted_user);
2410 INIT_LIST_HEAD(&vm->relocated);
2411 INIT_LIST_HEAD(&vm->moved);
2412 INIT_LIST_HEAD(&vm->idle);
2413 INIT_LIST_HEAD(&vm->invalidated);
2414 spin_lock_init(&vm->status_lock);
2415 INIT_LIST_HEAD(&vm->freed);
2416 INIT_LIST_HEAD(&vm->done);
2417 INIT_LIST_HEAD(&vm->pt_freed);
2418 INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
2419 INIT_KFIFO(vm->faults);
2421 r = amdgpu_vm_init_entities(adev, vm);
2425 vm->is_compute_context = false;
2427 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2431 vm->use_cpu_for_update ? "CPU" : "SDMA");
2432 WARN_ONCE((vm->use_cpu_for_update &&
2436 if (vm->use_cpu_for_update)
2437 vm->update_funcs = &amdgpu_vm_cpu_funcs;
2439 vm->update_funcs = &amdgpu_vm_sdma_funcs;
2441 vm->last_update = dma_fence_get_stub();
2442 vm->last_unlocked = dma_fence_get_stub();
2443 vm->last_tlb_flush = dma_fence_get_stub();
2444 vm->generation = 0;
2446 mutex_init(&vm->eviction_lock);
2447 vm->evicting = false;
2448 vm->tlb_fence_context = dma_fence_context_alloc(1);
2450 r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
2463 amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
2468 r = amdgpu_vm_pt_clear(adev, vm, root, false);
2472 r = amdgpu_vm_create_task_info(vm);
2476 amdgpu_bo_unreserve(vm->root.bo);
2482 amdgpu_vm_pt_free_root(adev, vm);
2483 amdgpu_bo_unreserve(vm->root.bo);
2487 dma_fence_put(vm->last_tlb_flush);
2488 dma_fence_put(vm->last_unlocked);
2489 amdgpu_vm_fini_entities(vm);
2498 * @vm: requested vm
2513 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2517 r = amdgpu_bo_reserve(vm->root.bo, true);
2522 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2525 vm->use_cpu_for_update ? "CPU" : "SDMA");
2526 WARN_ONCE((vm->use_cpu_for_update &&
2530 if (vm->use_cpu_for_update) {
2532 r = amdgpu_bo_sync_wait(vm->root.bo,
2537 vm->update_funcs = &amdgpu_vm_cpu_funcs;
2538 r = amdgpu_vm_pt_map_tables(adev, vm);
2543 vm->update_funcs = &amdgpu_vm_sdma_funcs;
2546 dma_fence_put(vm->last_update);
2547 vm->last_update = dma_fence_get_stub();
2548 vm->is_compute_context = true;
2551 amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);
2556 amdgpu_bo_unreserve(vm->root.bo);
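
amdgpu_vm_make_compute (lines 2513-2556) re-selects the update backend under the root-BO reservation: CPU updates first wait for the page tables to go idle and map them for CPU access, otherwise the SDMA backend is kept. A skeleton under those assumptions (the sync-wait owner argument is assumed):

        r = amdgpu_bo_reserve(vm->root.bo, true);
        if (r)
                return r;

        if (vm->use_cpu_for_update) {
                /* CPU writes need idle, CPU-mapped page tables. */
                r = amdgpu_bo_sync_wait(vm->root.bo,
                                        AMDGPU_FENCE_OWNER_UNDEFINED, true);
                if (r)
                        goto unreserve;

                vm->update_funcs = &amdgpu_vm_cpu_funcs;
                r = amdgpu_vm_pt_map_tables(adev, vm);
                if (r)
                        goto unreserve;
        } else {
                vm->update_funcs = &amdgpu_vm_sdma_funcs;
        }

        dma_fence_put(vm->last_update);
        vm->last_update = dma_fence_get_stub();
        vm->is_compute_context = true;

unreserve:
        amdgpu_bo_unreserve(vm->root.bo);
        return r;
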
2561 * amdgpu_vm_release_compute - release a compute vm
2563 * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
2566 * pasid from vm. Compute should stop use of vm after this call.
2568 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2570 amdgpu_vm_set_pasid(adev, vm, 0);
2571 vm->is_compute_context = false;
2575 * amdgpu_vm_fini - tear down a vm instance
2578 * @vm: requested vm
2580 * Tear down @vm.
2581 * Unbind the VM and remove all bos from the vm bo list
2583 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2591 amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2593 flush_work(&vm->pt_free_work);
2595 root = amdgpu_bo_ref(vm->root.bo);
2597 amdgpu_vm_put_task_info(vm->task_info);
2598 amdgpu_vm_set_pasid(adev, vm, 0);
2599 dma_fence_wait(vm->last_unlocked, false);
2600 dma_fence_put(vm->last_unlocked);
2601 dma_fence_wait(vm->last_tlb_flush, false);
2603 spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
2604 spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
2605 dma_fence_put(vm->last_tlb_flush);
2607 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2609 amdgpu_vm_prt_fini(adev, vm);
2614 amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2617 amdgpu_vm_pt_free_root(adev, vm);
2620 WARN_ON(vm->root.bo);
2622 amdgpu_vm_fini_entities(vm);
2624 if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
2625 dev_err(adev->dev, "still active bo inside vm\n");
2628 &vm->va.rb_root, rb) {
2636 dma_fence_put(vm->last_update);
2639 if (vm->reserved_vmid[i]) {
2641 vm->reserved_vmid[i] = false;
2713 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2735 if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2737 fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true;
2742 if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2744 fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false;
2775 struct amdgpu_vm *vm;
2779 vm = xa_load(&adev->vm_manager.pasids, pasid);
2780 if (vm) {
2781 root = amdgpu_bo_ref(vm->root.bo);
2782 is_compute_context = vm->is_compute_context;
2805 vm = xa_load(&adev->vm_manager.pasids, pasid);
2806 if (vm && vm->root.bo != root)
2807 vm = NULL;
2809 if (!vm)
2838 r = amdgpu_vm_update_range(adev, vm, true, false, false, false,
2843 r = amdgpu_vm_update_pdes(adev, vm, true);
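
The fault-handling hits (lines 2775-2843) look up the VM by pasid twice: once to take a reference on the root PD, and again after reserving it, to detect the pasid having been recycled for a different VM in between. A sketch of that re-validation; holding the xarray lock around each lookup is an assumption based on the usual xa_load() pattern:

        /* First lookup: pin the root PD so the VM cannot vanish under us. */
        xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
        vm = xa_load(&adev->vm_manager.pasids, pasid);
        root = vm ? amdgpu_bo_ref(vm->root.bo) : NULL;
        xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
        if (!root)
                return false;

        /* ... reserve 'root' (may sleep) ... */

        /* Second lookup: the pasid may now belong to a different VM. */
        xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
        vm = xa_load(&adev->vm_manager.pasids, pasid);
        if (vm && vm->root.bo != root)
                vm = NULL;                      /* same pasid, different VM */
        xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
        if (!vm)
                goto error_unref;
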
2860 * @vm: Requested VM for printing BO info
2865 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
2882 spin_lock(&vm->status_lock);
2884 list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
2893 list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
2902 list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
2911 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
2920 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
2929 list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
2934 spin_unlock(&vm->status_lock);
2968 struct amdgpu_vm *vm;
2973 vm = xa_load(&adev->vm_manager.pasids, pasid);
2979 if (vm && status) {
2980 vm->fault_info.addr = addr;
2981 vm->fault_info.status = status;
2984 * when vm could be stale or freed.
2991 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX;
2992 vm->fault_info.vmhub |=
2995 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM0;
2996 vm->fault_info.vmhub |=
2999 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM1;
3000 vm->fault_info.vmhub |=