Lines Matching defs:xe

71 struct xe_device *xe = vm->xe;
74 xe_assert(xe, xe_vma_is_userptr(vma));
168 xe_assert(vm->xe, link != list);
234 xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
476 xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
541 spin_lock(&vm->xe->ttm.lru_lock);
543 spin_unlock(&vm->xe->ttm.lru_lock);
559 drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
581 xe_assert(vm->xe, xe_vma_is_userptr(vma));
587 vm_dbg(&xe_vma_vm(vma)->xe->drm,
651 xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
728 xe_assert(vm->xe, vma->tile_present);
769 xe_assert(vm->xe, start < end);
770 xe_assert(vm->xe, end < vm->size);
805 for_each_tile(tile, vm->xe, id)
808 if (GRAPHICS_VER(vm->xe) >= 20 || vm->xe->info.platform == XE_PVC)
911 xe_assert(vm->xe, list_empty(&vma->combined_links.destroy));
914 xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
992 xe_assert(vm->xe, start + range <= vm->size);
1003 xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1016 xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1046 static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index)
1059 static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index,
1101 struct xe_device *xe = xe_bo_device(bo);
1106 pde |= pde_encode_pat_index(xe, pat_index);
1114 struct xe_device *xe = xe_bo_device(bo);
1119 pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1131 struct xe_device *xe = xe_vma_vm(vma)->xe;
1138 pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1147 static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
1154 xe_assert(xe, !(flags & ~XE_PTE_PS64));
1158 pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1179 * @xe: xe device.
1190 static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
1215 for_each_tile(tile, vm->xe, id) {
1227 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1239 vm->xe = xe;
1241 vm->size = 1ull << xe->info.va_bits;
1258 for_each_tile(tile, xe, id)
1264 xe_pm_runtime_get_noresume(xe);
1266 vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
1272 drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm,
1281 if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1284 for_each_tile(tile, xe, id) {
1289 vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
1298 for_each_tile(tile, xe, id) {
1302 err = xe_vm_create_scratch(xe, tile, vm);
1315 for_each_tile(tile, xe, id) {
1325 for_each_tile(tile, xe, id) {
1335 q = xe_exec_queue_create_class(xe, gt, migrate_vm,
1351 mutex_lock(&xe->usm.lock);
1353 xe->usm.num_vm_in_fault_mode++;
1355 xe->usm.num_vm_in_non_fault_mode++;
1356 mutex_unlock(&xe->usm.lock);
1370 for_each_tile(tile, xe, id)
1374 xe_pm_runtime_put(xe);
1388 struct xe_device *xe = vm->xe;
1394 xe_assert(xe, !vm->preempt.num_exec_queues);
1401 for_each_tile(tile, xe, id) {
1407 for_each_tile(tile, xe, id) {
1448 for_each_tile(tile, xe, id) {
1469 mutex_lock(&xe->usm.lock);
1471 xe->usm.num_vm_in_fault_mode--;
1473 xe->usm.num_vm_in_non_fault_mode--;
1478 xe_assert(xe, xe->info.has_asid);
1479 xe_assert(xe, !(vm->flags & XE_VM_FLAG_MIGRATION));
1481 lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1482 xe_assert(xe, lookup == vm);
1484 mutex_unlock(&xe->usm.lock);
1486 for_each_tile(tile, xe, id)
1495 struct xe_device *xe = vm->xe;
1500 xe_assert(xe, !vm->size);
1508 xe_pm_runtime_put(xe);
1510 for_each_tile(tile, xe, id)
1577 for_each_tile(tile, vm->xe, id) {
1652 for_each_tile(tile, vm->xe, id) {
1743 xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
1808 struct xe_device *xe = to_xe_device(dev);
1817 if (XE_IOCTL_DBG(xe, args->extensions))
1820 if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
1823 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1824 !xe->info.has_usm))
1827 if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1830 if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1833 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
1837 if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) &&
1841 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1842 xe_device_in_non_fault_mode(xe)))
1845 if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) &&
1846 xe_device_in_fault_mode(xe)))
1849 if (XE_IOCTL_DBG(xe, args->extensions))
1859 vm = xe_vm_create(xe, flags);
1869 if (xe->info.has_asid) {
1870 mutex_lock(&xe->usm.lock);
1871 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
1873 &xe->usm.next_asid, GFP_KERNEL);
1874 mutex_unlock(&xe->usm.lock);
1885 for_each_tile(tile, xe, id)
1909 struct xe_device *xe = to_xe_device(dev);
1915 if (XE_IOCTL_DBG(xe, args->pad) ||
1916 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1921 if (XE_IOCTL_DBG(xe, !vm))
1923 else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
1949 xe_assert(vm->xe, region < ARRAY_SIZE(region_to_mem_type));
1992 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
1998 vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
2003 vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2007 vm_dbg(&xe->drm,
2012 vm_dbg(&xe->drm,
2019 vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2025 vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
2029 drm_warn(&xe->drm, "NOT POSSIBLE");
2033 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2056 vm_dbg(&vm->xe->drm,
2074 xe_assert(vm->xe, bo);
2091 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2112 print_op(vm->xe, __op);
2260 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2272 struct xe_device *xe = vm->xe;
2353 vm_dbg(&xe->drm, "REMAP:SKIP_PREV: addr=0x%016llx, range=0x%016llx",
2389 vm_dbg(&xe->drm, "REMAP:SKIP_NEXT: addr=0x%016llx, range=0x%016llx",
2401 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2506 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2589 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2665 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2706 drm_warn(&vm->xe->drm, "VM op(%d) failed with %d",
2728 static int vm_bind_ioctl_check_args(struct xe_device *xe,
2735 if (XE_IOCTL_DBG(xe, args->pad || args->pad2) ||
2736 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2739 if (XE_IOCTL_DBG(xe, args->extensions))
2755 if (XE_IOCTL_DBG(xe, err)) {
2775 if (XE_IOCTL_DBG(xe, pat_index >= xe->pat.n_entries)) {
2780 pat_index = array_index_nospec(pat_index, xe->pat.n_entries);
2782 coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
2783 if (XE_IOCTL_DBG(xe, !coh_mode)) { /* hw reserved */
2793 if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
2794 XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
2795 XE_IOCTL_DBG(xe, obj && is_null) ||
2796 XE_IOCTL_DBG(xe, obj_offset && is_null) ||
2797 XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
2799 XE_IOCTL_DBG(xe, !obj &&
2802 XE_IOCTL_DBG(xe, !obj &&
2804 XE_IOCTL_DBG(xe, addr &&
2806 XE_IOCTL_DBG(xe, range &&
2808 XE_IOCTL_DBG(xe, obj &&
2810 XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
2812 XE_IOCTL_DBG(xe, obj &&
2814 XE_IOCTL_DBG(xe, prefetch_region &&
2816 XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
2817 xe->info.mem_region_mask)) ||
2818 XE_IOCTL_DBG(xe, obj &&
2824 if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
2825 XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
2826 XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
2827 XE_IOCTL_DBG(xe, !range &&
2867 struct xe_device *xe = to_xe_device(dev);
2882 err = vm_bind_ioctl_check_args(xe, args, &bind_ops);
2888 if (XE_IOCTL_DBG(xe, !q)) {
2893 if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
2900 if (XE_IOCTL_DBG(xe, !vm)) {
2909 if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
2918 if (XE_IOCTL_DBG(xe, range > vm->size) ||
2919 XE_IOCTL_DBG(xe, addr > vm->size - range)) {
2954 if (XE_IOCTL_DBG(xe, !gem_obj)) {
2960 if (XE_IOCTL_DBG(xe, range > bos[i]->size) ||
2961 XE_IOCTL_DBG(xe, obj_offset >
2968 if (XE_IOCTL_DBG(xe, obj_offset &
2970 XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
2971 XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
2977 coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
2979 if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
2984 } else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
3006 err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3019 if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
3149 struct xe_device *xe = xe_vma_vm(vma)->xe;
3156 xe_assert(xe, !xe_vma_is_null(vma));
3159 vm_dbg(&xe_vma_vm(vma)->xe->drm,
3177 for_each_tile(tile, xe, id) {
3180 xe_device_wmb(xe);
3191 for_each_tile(tile, xe, id) {
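
Nearly every match above reaches the device through vm->xe (or xe_vma_vm(vma)->xe) and hands it to xe_assert(), for_each_tile(), or one of the drm logging helpers. The sketch below is a minimal condensation of that pattern; example_vm_check_tiles() and the chosen include set are illustrative only and are not part of xe_vm.c.

#include "xe_assert.h"
#include "xe_device.h"
#include "xe_vm.h"
#include "xe_vm_types.h"

/*
 * Hypothetical helper (not in the driver): shows how the matches above
 * fetch the struct xe_device pointer from a VM and then use it.
 */
static void example_vm_check_tiles(struct xe_vm *vm)
{
	struct xe_device *xe = vm->xe;	/* as at lines 71, 1388, 1495, 2272 */
	struct xe_tile *tile;
	u8 id;

	/* Device-scoped sanity check, same shape as the xe_assert() hits. */
	xe_assert(xe, vm->size);

	/* Per-tile walk, same shape as the for_each_tile() hits. */
	for_each_tile(tile, xe, id) {
		if (!vm->pt_root[id])
			drm_warn(&xe->drm, "tile%u has no page-table root\n", id);
	}
}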
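
The ioctl paths (lines 1817-1849, 2735-2827 and 2882-3019 above) funnel their argument validation through XE_IOCTL_DBG(xe, cond), chaining several checks with || into a single rejection that is logged against xe->drm. A cut-down sketch of that shape follows; example_vm_create_check_args() is a hypothetical name, the include paths are assumed, and only drm_xe_vm_create fields that appear in the hits are used (ALL_DRM_XE_VM_CREATE_FLAGS is a mask defined locally in xe_vm.c).

#include <uapi/drm/xe_drm.h>	/* struct drm_xe_vm_create; path assumed */

#include "xe_device.h"
#include "xe_macros.h"		/* XE_IOCTL_DBG(); path assumed */

/* Hypothetical, cut-down argument check mirroring xe_vm_create_ioctl(). */
static int example_vm_create_check_args(struct xe_device *xe,
					struct drm_xe_vm_create *args)
{
	if (XE_IOCTL_DBG(xe, args->extensions) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]) ||
	    XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
		return -EINVAL;	/* each failed check is reported via xe->drm */

	return 0;
}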