Lines matching refs: vm (line-numbered matches from the i915 GGTT code)

58 struct drm_i915_private *i915 = ggtt->vm.i915;
60 i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
62 ggtt->vm.is_ggtt = true;
65 ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
68 ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;
74 ggtt->vm.cleanup(&ggtt->vm);
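
Lines 58-74 show the GGTT being set up as an address space: is_ggtt is set, read-only support is enabled on Valleyview, and line 68 wires drm_mm's color_adjust hook to i915_ggtt_color_adjust. The point of that hook is to shrink a candidate hole by one page at each boundary whose neighbour carries a different cache "color", so differently-cached objects never sit back to back. A minimal userspace model of that adjustment; the node type and page size are stand-ins:

    #include <stdint.h>

    #define GTT_PAGE_SIZE 4096ULL

    struct node { uint64_t color; };

    /* Shrink [start, end) by one page at each side whose neighbour has a
     * different color, mirroring the i915_ggtt_color_adjust idea. */
    static void color_adjust(const struct node *prev, const struct node *next,
                             uint64_t color, uint64_t *start, uint64_t *end)
    {
        if (prev && prev->color != color)
            *start += GTT_PAGE_SIZE;
        if (next && next->color != color)
            *end -= GTT_PAGE_SIZE;
    }
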
110 * @vm: The VM to suspend the mappings for
115 void i915_ggtt_suspend_vm(struct i915_address_space *vm)
120 drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
123 i915_gem_drain_freed_objects(vm->i915);
125 mutex_lock(&vm->mutex);
131 save_skip_rewrite = vm->skip_pte_rewrite;
132 vm->skip_pte_rewrite = true;
134 list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
150 mutex_unlock(&vm->mutex);
157 vm->skip_pte_rewrite = save_skip_rewrite;
171 vm->clear_range(vm, 0, vm->total);
173 vm->skip_pte_rewrite = save_skip_rewrite;
175 mutex_unlock(&vm->mutex);
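
Lines 110-175 outline i915_ggtt_suspend_vm: drain freed objects, take the VM mutex, set skip_pte_rewrite so unbinding skips per-PTE writes, walk bound_list, then wipe the whole range once with clear_range and restore the flag (the trylock-failure retry path at lines 150-157 unlocks, restores the flag, and starts over). A compact standalone model of that shape; all types here are stand-ins and the locking/retry is elided:

    #include <stdint.h>

    struct vma { struct vma *next; };

    struct vm {
        int skip_pte_rewrite;
        uint64_t total;
        void (*clear_range)(struct vm *vm, uint64_t start, uint64_t length);
        struct vma *bound;              /* bound_list stand-in */
    };

    static void unbind(struct vma *v) { (void)v; /* bookkeeping only */ }

    static void suspend_vm(struct vm *vm)
    {
        int save = vm->skip_pte_rewrite;

        vm->skip_pte_rewrite = 1;       /* unbinds skip per-PTE writes */

        for (struct vma *v = vm->bound; v; v = v->next)
            unbind(v);

        vm->clear_range(vm, 0, vm->total);  /* one bulk scratch fill */
        vm->skip_pte_rewrite = save;
    }
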
182 i915_ggtt_suspend_vm(&ggtt->vm);
191 struct intel_uncore *uncore = ggtt->vm.gt->uncore;
216 struct intel_uncore *uncore = ggtt->vm.gt->uncore;
224 if (needs_wc_ggtt_mapping(ggtt->vm.i915))
243 struct drm_i915_private *i915 = ggtt->vm.i915;
295 struct intel_gt *gt = ggtt->vm.gt;
303 struct intel_gt *gt = ggtt->vm.gt;
337 struct intel_gt *gt = ggtt->vm.gt;
338 const gen8_pte_t scratch_pte = ggtt->vm.scratch[0]->encode;
439 static void gen8_ggtt_insert_page(struct i915_address_space *vm,
445 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
449 gen8_set_pte(pte, ggtt->vm.pte_encode(addr, pat_index, flags));
454 static void gen8_ggtt_insert_page_bind(struct i915_address_space *vm,
458 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
461 pte = ggtt->vm.pte_encode(addr, pat_index, flags);
462 if (should_update_ggtt_with_bind(i915_vm_to_ggtt(vm)) &&
466 gen8_ggtt_insert_page(vm, addr, offset, pat_index, flags);
469 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
474 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
475 const gen8_pte_t pte_encode = ggtt->vm.pte_encode(0, pat_index, flags);
490 gen8_set_pte(gte++, vm->scratch[0]->encode);
499 gen8_set_pte(gte++, vm->scratch[0]->encode);
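
Lines 469-499 are the gen8 bulk-fill: the PTE encoding is computed once for address 0 (line 475), scratch PTEs fill the guard pages around the object and any tail padding (lines 490 and 499), and each backing page gets the encoding combined with its DMA address. A standalone model with a plain array standing in for the mapped GTT; the encode function is illustrative, not the real gen8 bit layout:

    #include <stdint.h>
    #include <stddef.h>

    #define GTT_PAGE_SIZE 4096ULL

    /* Illustrative encode: address plus a validity bit. */
    static uint64_t pte_encode(uint64_t addr) { return addr | 1; }

    /* Fill PTEs for one object: scratch in the guard before it, one encoded
     * PTE per backing page, scratch in any tail padding and trailing guard.
     * gsm stands in for the mapped GTT, scratch_pte for scratch[0]->encode. */
    static void insert_entries(uint64_t *gsm, uint64_t scratch_pte,
                               uint64_t start, uint64_t guard,
                               const uint64_t *pages, size_t npages,
                               uint64_t node_size)
    {
        uint64_t *gte = gsm + (start - guard) / GTT_PAGE_SIZE;
        uint64_t *end = gte + guard / GTT_PAGE_SIZE;

        while (gte < end)               /* leading guard pages */
            *gte++ = scratch_pte;
        end += (node_size + guard) / GTT_PAGE_SIZE;

        for (size_t i = 0; i < npages; i++)
            *gte++ = pte_encode(pages[i]);

        while (gte < end)               /* tail padding + trailing guard */
            *gte++ = scratch_pte;
    }
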
508 static bool __gen8_ggtt_insert_entries_bind(struct i915_address_space *vm,
512 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
513 gen8_pte_t scratch_pte = vm->scratch[0]->encode;
517 pte_encode = ggtt->vm.pte_encode(0, pat_index, flags);
539 static void gen8_ggtt_insert_entries_bind(struct i915_address_space *vm,
543 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
545 if (should_update_ggtt_with_bind(i915_vm_to_ggtt(vm)) &&
546 __gen8_ggtt_insert_entries_bind(vm, vma_res, pat_index, flags))
549 gen8_ggtt_insert_entries(vm, vma_res, pat_index, flags);
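
Lines 539-549 show the pattern shared by the *_bind entry points: when the GGTT can be updated through bind requests (should_update_ggtt_with_bind), try that path first, and fall back to direct MMIO PTE writes if it does not apply or fails. A stripped-down model of that try-then-fallback shape:

    static int fast_bind(void *vm)  { (void)vm; return 0; /* may fail */ }
    static void mmio_bind(void *vm) { (void)vm; /* direct PTE writes */ }

    static void insert_entries_bind(void *vm, int fast_path_usable)
    {
        if (fast_path_usable && fast_bind(vm))
            return;                 /* bound via the optimized path */
        mmio_bind(vm);              /* always-correct fallback */
    }
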
552 static void gen8_ggtt_clear_range(struct i915_address_space *vm,
555 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
558 const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
573 static void gen8_ggtt_scratch_range_bind(struct i915_address_space *vm,
576 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
579 const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
591 gen8_ggtt_clear_range(vm, start, length);
594 static void gen6_ggtt_insert_page(struct i915_address_space *vm,
600 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
604 iowrite32(vm->pte_encode(addr, pat_index, flags), pte);
615 static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
620 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
631 iowrite32(vm->scratch[0]->encode, gte++);
634 iowrite32(vm->pte_encode(addr, pat_index, flags), gte++);
639 iowrite32(vm->scratch[0]->encode, gte++);
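
The gen6 path (lines 594-639) does the same fill with 32-bit PTEs, and every store goes through iowrite32() because the GTT is reached via an I/O mapping. A model of the 32-bit encode; the bit layout is illustrative, since the real encoders (snb/ivb/byt/hsw/iris) place cache and high address bits differently:

    #include <stdint.h>

    #define GEN6_PTE_VALID 1u

    /* Stand-in for vm->pte_encode on gen6: a 32-bit page address
     * plus a validity bit. */
    static uint32_t gen6_encode(uint64_t addr)
    {
        return (uint32_t)(addr & 0xfffff000u) | GEN6_PTE_VALID;
    }

    /* In the kernel the store is iowrite32(pte, gte++) into the
     * ioremapped GTT; a plain array element stands in for that here. */
    static void write_pte(uint32_t *gte, uint64_t addr)
    {
        *gte = gen6_encode(addr);
    }
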
648 static void nop_clear_range(struct i915_address_space *vm,
653 static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
662 intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
666 struct i915_address_space *vm;
676 gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset,
678 bxt_vtd_ggtt_wa(arg->vm);
683 static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
689 struct insert_page arg = { vm, addr, offset, pat_index };
695 struct i915_address_space *vm;
705 gen8_ggtt_insert_entries(arg->vm, arg->vma_res,
707 bxt_vtd_ggtt_wa(arg->vm);
712 static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
717 struct insert_entries arg = { vm, vma_res, pat_index, flags };
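
Lines 653-717 are the Broxton VT-d workaround: the PTE update plus the GFX_FLSH_CNTL_GEN6 posting read (line 662) must run with the whole machine serialized, so the arguments are bundled into a struct and handed to stop_machine() through an int (*)(void *) callback. A standalone model of that marshalling; serialize() stands in for stop_machine():

    #include <stdint.h>

    struct insert_page_args {
        void     *vm;
        uint64_t  addr;
        uint64_t  offset;
        unsigned  pat_index;
    };

    static void insert_page(void *vm, uint64_t addr, uint64_t offset,
                            unsigned pat_index)
    {
        (void)vm; (void)addr; (void)offset; (void)pat_index;
        /* PTE write + GFX_FLSH_CNTL_GEN6 posting read would go here */
    }

    /* Callback with the int (*)(void *) signature stop_machine() expects. */
    static int insert_page_cb(void *data)
    {
        struct insert_page_args *a = data;

        insert_page(a->vm, a->addr, a->offset, a->pat_index);
        return 0;
    }

    /* Stand-in for stop_machine(fn, arg, NULL): here it just calls fn. */
    static int serialize(int (*fn)(void *), void *arg) { return fn(arg); }

    static void insert_page_bkl(void *vm, uint64_t addr, uint64_t offset,
                                unsigned pat_index)
    {
        struct insert_page_args args = { vm, addr, offset, pat_index };

        serialize(insert_page_cb, &args);
    }
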
722 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
725 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
738 scratch_pte = vm->scratch[0]->encode;
743 void intel_ggtt_bind_vma(struct i915_address_space *vm,
763 vm->insert_entries(vm, vma_res, pat_index, pte_flags);
767 void intel_ggtt_unbind_vma(struct i915_address_space *vm,
770 vm->clear_range(vm, vma_res->start, vma_res->vma_size);
789 if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
792 GEM_BUG_ON(ggtt->vm.total <= GUC_TOP_RESERVE_SIZE);
793 offset = ggtt->vm.total - GUC_TOP_RESERVE_SIZE;
795 ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw,
799 drm_dbg(&ggtt->vm.i915->drm,
842 intel_wopcm_guc_size(&ggtt->vm.gt->wopcm));
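
Lines 789-799 carve GUC_TOP_RESERVE_SIZE out of the top of the GGTT when GuC is in use, by reserving a node at offset total - reserve. The arithmetic as a tiny standalone check; the sizes are made up, the reserve is a stand-in for GUC_TOP_RESERVE_SIZE:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        const uint64_t total = 1ULL << 32;      /* 4 GiB GGTT, say */
        const uint64_t reserve = 1ULL << 20;    /* GUC_TOP_RESERVE_SIZE stand-in */
        uint64_t offset;

        assert(total > reserve);                /* mirrors the GEM_BUG_ON */
        offset = total - reserve;               /* node pinned at the very top */
        assert(offset + reserve == total);
        return 0;
    }
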
874 if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture))
875 drm_mm_insert_node_in_range(&ggtt->vm.mm,
886 ggtt->vm.scratch_range(&ggtt->vm, start, size);
887 drm_dbg(&ggtt->vm.i915->drm,
902 drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
903 drm_dbg(&ggtt->vm.i915->drm,
906 ggtt->vm.clear_range(&ggtt->vm, hole_start,
911 ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
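
Lines 874-911 reserve the error-capture node (exact placement first, then anywhere in the mappable range), point it at scratch, then walk every remaining hole in the drm_mm and clear it, and finally clear the guard page kept at the very top of the GGTT (vm.total - PAGE_SIZE). A model of that "clear each hole, then the top guard page" sweep over a plain interval list:

    #include <stdint.h>

    #define PAGE_SIZE 4096ULL

    struct hole { uint64_t start, end; };

    static void clear_range(uint64_t start, uint64_t length)
    {
        (void)start; (void)length;      /* scratch PTEs would be written here */
    }

    static void scrub_unused(const struct hole *holes, int nholes, uint64_t total)
    {
        for (int i = 0; i < nholes; i++)    /* drm_mm_for_each_hole stand-in */
            clear_range(holes[i].start, holes[i].end - holes[i].start);

        /* The final page of the GGTT is kept as a guard and scrubbed too. */
        clear_range(total - PAGE_SIZE, PAGE_SIZE);
    }
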
920 static void aliasing_gtt_bind_vma(struct i915_address_space *vm,
934 ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm,
938 vm->insert_entries(vm, vma_res, pat_index, pte_flags);
943 static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
947 vm->clear_range(vm, vma_res->start, vma_res->vma_size);
950 ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma_res);
959 ppgtt = i915_ppgtt_create(ggtt->vm.gt, 0);
963 if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
968 err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total);
972 i915_gem_object_lock(ppgtt->vm.scratch[0], NULL);
973 err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
974 i915_gem_object_unlock(ppgtt->vm.scratch[0]);
984 ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total);
987 ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;
989 GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != intel_ggtt_bind_vma);
990 ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
992 GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != intel_ggtt_unbind_vma);
993 ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
995 i915_vm_free_pt_stash(&ppgtt->vm, &stash);
999 i915_vm_free_pt_stash(&ppgtt->vm, &stash);
1001 i915_vm_put(&ppgtt->vm);
1013 i915_vm_put(&ppgtt->vm);
1015 ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
1016 ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
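
Lines 920-1016 set up the aliasing PPGTT: a ppgtt at least as large as the GGTT is created, page tables for the whole GGTT range are preallocated from a stash (allocate_va_range over 0..ggtt->vm.total), and the GGTT's vma_ops are swapped so every bind lands in both the GGTT and the alias; teardown restores the plain ops. A model of that ops swap, with all function bodies elided:

    struct vma_ops {
        void (*bind_vma)(void *vm);
        void (*unbind_vma)(void *vm);
    };

    static void ggtt_bind(void *vm)       { (void)vm; /* GGTT PTEs only */ }
    static void ggtt_unbind(void *vm)     { (void)vm; }
    static void aliasing_bind(void *vm)   { (void)vm; /* GGTT + alias PPGTT */ }
    static void aliasing_unbind(void *vm) { (void)vm; }

    static void enable_aliasing(struct vma_ops *ops)
    {
        ops->bind_vma = aliasing_bind;    /* future binds hit both VMs */
        ops->unbind_vma = aliasing_unbind;
    }

    static void disable_aliasing(struct vma_ops *ops)
    {
        ops->bind_vma = ggtt_bind;        /* back to plain GGTT binds */
        ops->unbind_vma = ggtt_unbind;
    }
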
1040 flush_workqueue(ggtt->vm.i915->wq);
1041 i915_gem_drain_freed_objects(ggtt->vm.i915);
1043 mutex_lock(&ggtt->vm.mutex);
1045 ggtt->vm.skip_pte_rewrite = true;
1047 list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
1066 ggtt->vm.cleanup(&ggtt->vm);
1068 mutex_unlock(&ggtt->vm.mutex);
1069 i915_address_space_fini(&ggtt->vm);
1100 GEM_WARN_ON(kref_read(&ggtt->vm.resv_ref) != 1);
1101 dma_resv_fini(&ggtt->vm._resv);
1155 struct drm_i915_private *i915 = ggtt->vm.i915;
1156 struct intel_uncore *uncore = ggtt->vm.gt->uncore;
1181 kref_init(&ggtt->vm.resv_ref);
1182 ret = setup_scratch_page(&ggtt->vm);
1191 if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
1194 ggtt->vm.scratch[0]->encode =
1195 ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
1203 static void gen6_gmch_remove(struct i915_address_space *vm)
1205 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
1208 free_scratch(vm);
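
Lines 1181-1195 allocate the scratch page and precompute its encoded PTE once (scratch[0]->encode from px_dma of the scratch page), so clears can splat a single ready-made value instead of re-encoding per entry. A model of that compute-once pattern; the encode is illustrative:

    #include <stdint.h>

    struct scratch {
        uint64_t dma_addr;      /* px_dma(scratch page) stand-in */
        uint64_t encode;        /* cached PTE value */
    };

    static uint64_t pte_encode(uint64_t addr) { return addr | 1; }

    static void setup_scratch(struct scratch *s, uint64_t dma_addr)
    {
        s->dma_addr = dma_addr;
        s->encode = pte_encode(dma_addr);  /* computed once, reused by clears */
    }
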
1219 struct drm_i915_private *i915 = ggtt->vm.i915;
1238 ggtt->vm.alloc_pt_dma = alloc_pt_dma;
1239 ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
1240 ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY;
1242 ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
1243 ggtt->vm.cleanup = gen6_gmch_remove;
1244 ggtt->vm.insert_page = gen8_ggtt_insert_page;
1245 ggtt->vm.clear_range = nop_clear_range;
1246 ggtt->vm.scratch_range = gen8_ggtt_clear_range;
1248 ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
1255 ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
1256 ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
1264 ggtt->vm.raw_insert_page = gen8_ggtt_insert_page;
1265 ggtt->vm.raw_insert_entries = gen8_ggtt_insert_entries;
1267 ggtt->vm.bind_async_flags =
1272 ggtt->vm.scratch_range = gen8_ggtt_scratch_range_bind;
1273 ggtt->vm.insert_page = gen8_ggtt_insert_page_bind;
1274 ggtt->vm.insert_entries = gen8_ggtt_insert_entries_bind;
1279 ggtt->vm.raw_insert_page = gen8_ggtt_insert_page;
1282 if (intel_uc_wants_guc_submission(&ggtt->vm.gt->uc))
1287 ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
1288 ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
1291 ggtt->vm.pte_encode = mtl_ggtt_pte_encode;
1293 ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
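
In gen8_gmch_probe (lines 1238-1293), vm.total is derived from the probed GTT size at line 1242: each 8-byte gen8 PTE maps one 4 KiB page, so total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE. The same arithmetic standalone:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        const uint64_t pte_size = 8;          /* sizeof(gen8_pte_t) */
        const uint64_t page_size = 4096;      /* I915_GTT_PAGE_SIZE */
        const uint64_t gsm_size = 8ULL << 20; /* 8 MiB of PTEs, say */

        uint64_t total = (gsm_size / pte_size) * page_size;

        assert(total == 4ULL << 30);          /* 8 MiB of PTEs map 4 GiB */
        return 0;
    }
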
1396 struct drm_i915_private *i915 = ggtt->vm.i915;
1421 ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
1423 ggtt->vm.alloc_pt_dma = alloc_pt_dma;
1424 ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
1426 ggtt->vm.clear_range = nop_clear_range;
1428 ggtt->vm.clear_range = gen6_ggtt_clear_range;
1429 ggtt->vm.scratch_range = gen6_ggtt_clear_range;
1430 ggtt->vm.insert_page = gen6_ggtt_insert_page;
1431 ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
1432 ggtt->vm.cleanup = gen6_gmch_remove;
1437 ggtt->vm.pte_encode = iris_pte_encode;
1439 ggtt->vm.pte_encode = hsw_pte_encode;
1441 ggtt->vm.pte_encode = byt_pte_encode;
1443 ggtt->vm.pte_encode = ivb_pte_encode;
1445 ggtt->vm.pte_encode = snb_pte_encode;
1447 ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
1448 ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
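
gen6_gmch_probe (lines 1421-1448) picks a pte_encode callback once per platform (iris/hsw/byt/ivb/snb at lines 1437-1445). A model of that one-time function-pointer selection; the encoders here are placeholders:

    #include <stdint.h>

    typedef uint32_t (*pte_encode_fn)(uint64_t addr);

    static uint32_t snb_encode(uint64_t a) { return (uint32_t)a | 1; }
    static uint32_t ivb_encode(uint64_t a) { return (uint32_t)a | 1; }
    /* byt/hsw/iris variants elided; each packs cache bits differently. */

    enum platform { SNB, IVB };

    static pte_encode_fn pick_pte_encode(enum platform p)
    {
        switch (p) {
        case IVB: return ivb_encode;
        default:  return snb_encode;
        }
    }
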
1458 ggtt->vm.gt = gt;
1459 ggtt->vm.i915 = i915;
1460 ggtt->vm.dma = i915->drm.dev;
1461 dma_resv_init(&ggtt->vm._resv);
1471 dma_resv_fini(&ggtt->vm._resv);
1475 if ((ggtt->vm.total - 1) >> 32) {
1479 ggtt->vm.total >> 20);
1480 ggtt->vm.total = 1ULL << 32;
1482 min_t(u64, ggtt->mappable_end, ggtt->vm.total);
1485 if (ggtt->mappable_end > ggtt->vm.total) {
1489 &ggtt->mappable_end, ggtt->vm.total);
1490 ggtt->mappable_end = ggtt->vm.total;
1494 drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20);
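
Lines 1475-1490 apply two clamps after probing: a GGTT larger than 4 GiB is cut back to exactly 4 GiB, since the driver restricts itself to 32-bit GGTT offsets, and mappable_end is clamped to vm.total. The same two clamps standalone:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t total = 1ULL << 33;        /* pretend HW reported 8 GiB */
        uint64_t mappable_end = 1ULL << 33;

        if ((total - 1) >> 32)              /* anything above the 32-bit range? */
            total = 1ULL << 32;             /* use only the first 4 GiB */

        if (mappable_end > total)
            mappable_end = total;

        assert(total == 1ULL << 32 && mappable_end == total);
        return 0;
    }
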
1551 * @vm: The VM to restore the mappings for
1559 bool i915_ggtt_resume_vm(struct i915_address_space *vm)
1564 drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
1567 vm->clear_range(vm, 0, vm->total);
1570 list_for_each_entry(vma, &vm->bound_list, vm_link) {
1582 vma->ops->bind_vma(vm, NULL, vma->resource,
1584 i915_gem_get_pat_index(vm->i915,
1605 flush = i915_ggtt_resume_vm(&ggtt->vm);
1608 ggtt->vm.scratch_range(&ggtt->vm, ggtt->error_capture.start,
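
i915_ggtt_resume_vm (lines 1559-1584) is the suspend routine's mirror image: wipe the whole range, then walk bound_list and rebind every vma through vma->ops->bind_vma, reporting whether a chipset flush is needed; the caller (line 1608) then re-scratches the error-capture window. A compact model of that rebind walk, with stand-in types and an illustrative flush hint:

    #include <stdbool.h>
    #include <stdint.h>

    struct vma {
        struct vma *next;
        bool ggtt_write;        /* stand-in for the GGTT_WRITE flush hint */
    };

    static void clear_range(uint64_t start, uint64_t len)
    {
        (void)start; (void)len; /* scratch PTEs */
    }

    static void bind_vma(struct vma *v) { (void)v; /* rewrite this vma's PTEs */ }

    static bool resume_vm(struct vma *bound, uint64_t total)
    {
        bool flush = false;

        clear_range(0, total);              /* start from a clean slate */

        for (struct vma *v = bound; v; v = v->next) {
            bind_vma(v);                    /* restore the mapping */
            flush |= v->ggtt_write;         /* any writable mapping => flush */
        }
        return flush;
    }
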