Lines Matching defs:xe

125 static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr)
131 addr -= xe->mem.vram.dpa_base;
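
The function above converts a VRAM device-physical address into an offset usable inside the migration VM, where VRAM is mapped 1:1. A minimal sketch of that conversion, assuming a hypothetical IDENTITY_MAP_OFS_SKETCH base for the identity map (the real driver derives this offset from its page-table layout); the only fact taken from the listing is the dpa_base subtraction at line 131:

#include <stdint.h>

/* Hypothetical base of the migrate VM's 1:1 VRAM mapping. */
#define IDENTITY_MAP_OFS_SKETCH (256ULL << 30)

static uint64_t vram_ofs_sketch(uint64_t addr, uint64_t dpa_base)
{
	/* Strip the device-physical base (cf. "addr -= xe->mem.vram.dpa_base"
	 * at line 131) so the result indexes the identity map. */
	return (addr - dpa_base) + IDENTITY_MAP_OFS_SKETCH;
}
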
138 struct xe_device *xe = tile_to_xe(tile);
139 u16 pat_index = xe->pat.idx[XE_CACHE_WB];
156 bo = xe_bo_create_pin_map(vm->xe, tile, vm,
165 xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
174 xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, entry);
182 if (!IS_DGFX(xe)) {
191 xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
195 if (xe->info.has_usm) {
208 xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
216 m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr);
218 if (xe->info.has_usm) {
221 m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr);
233 xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
242 xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
248 xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level + 255 * 8, u64,
249 vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0)
254 if (IS_DGFX(xe)) {
259 flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
266 for (pos = xe->mem.vram.dpa_base;
267 pos < xe->mem.vram.actual_physical_size + xe->mem.vram.dpa_base;
269 xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
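
Lines 254-269 fill the migrate VM's identity map of VRAM: the PTE flags are encoded once, then every device-physical address from dpa_base to the end of VRAM is written as `pos | flags` into consecutive entries. A simplified sketch, assuming each entry covers a fixed 1 GiB stride (the real stride depends on the page-table level used):

#include <stddef.h>
#include <stdint.h>

#define HUGE_1G_SKETCH (1ULL << 30) /* assumed per-entry coverage */

static void fill_identity_map_sketch(uint64_t *entries, uint64_t dpa_base,
				     uint64_t vram_size, uint64_t flags)
{
	uint64_t pos;
	size_t i = 0;

	/* Mirrors the loop at lines 266-269: every VRAM address gets an entry
	 * with the same encode flags, i.e. "xe_map_wr(..., pos | flags)". */
	for (pos = dpa_base; pos < dpa_base + vram_size; pos += HUGE_1G_SKETCH)
		entries[i++] = pos | flags;
}
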
336 struct xe_device *xe = tile_to_xe(tile);
342 m = drmm_kzalloc(&xe->drm, sizeof(*m), GFP_KERNEL);
349 vm = xe_vm_create(xe, XE_VM_FLAG_MIGRATION |
362 if (xe->info.has_usm) {
372 m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
377 m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
389 err = drmm_add_action_or_reset(&xe->drm, xe_migrate_fini, m);
393 if (IS_DGFX(xe)) {
394 if (xe_device_has_flat_ccs(xe))
397 xe_device_ccs_bytes(xe, SZ_64K);
402 drm_dbg(&xe->drm, "Migrate min chunk size is 0x%08llx\n",
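
Lines 393-402 pick a minimum migration chunk size on discrete parts and log it. A rough sketch under two loud assumptions: a hypothetical ccs_bytes_per() standing in for xe_device_ccs_bytes(), and a 1:256 CCS-to-data ratio; only the SZ_64K argument and the drm_dbg() message are taken from the listing:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_4K  0x1000ULL
#define SZ_64K 0x10000ULL

/* Hypothetical: CCS metadata bytes needed to back `size` bytes of VRAM. */
static uint64_t ccs_bytes_per(uint64_t size)
{
	return size / 256; /* assumption: 1 CCS byte per 256 main-memory bytes */
}

static uint64_t min_chunk_size_sketch(bool has_flat_ccs)
{
	uint64_t min_chunk;

	if (has_flat_ccs)
		/* Assumption: size the chunk so a pass moves a useful amount
		 * of CCS metadata. */
		min_chunk = SZ_4K * SZ_64K / ccs_bytes_per(SZ_64K);
	else
		min_chunk = SZ_64K; /* assumed lower bound to limit blit count */

	printf("Migrate min chunk size is 0x%08llx\n",
	       (unsigned long long)min_chunk); /* cf. drm_dbg() at line 402 */
	return min_chunk;
}
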
409 static u64 max_mem_transfer_per_pass(struct xe_device *xe)
411 if (!IS_DGFX(xe) && xe_device_has_flat_ccs(xe))
419 struct xe_device *xe = tile_to_xe(m->tile);
420 u64 size = min_t(u64, max_mem_transfer_per_pass(xe), cur->remaining);
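
Lines 409-420 bound how much memory a single pass may move: max_mem_transfer_per_pass() returns a smaller cap when an integrated device has flat CCS, and each pass size is the minimum of that cap and the bytes still remaining. A sketch with hypothetical cap values:

#include <stdbool.h>
#include <stdint.h>

#define MAX_TRANSFER_SKETCH     (8ULL << 20) /* assumed default per-pass cap */
#define MAX_CCS_TRANSFER_SKETCH (4ULL << 20) /* assumed reduced cap */

static uint64_t max_transfer_per_pass_sketch(bool is_dgfx, bool has_flat_ccs)
{
	/* Mirrors the check at line 411: integrated parts with flat CCS get
	 * the reduced cap. */
	if (!is_dgfx && has_flat_ccs)
		return MAX_CCS_TRANSFER_SKETCH;
	return MAX_TRANSFER_SKETCH;
}

static uint64_t pass_size_sketch(bool is_dgfx, bool has_flat_ccs,
				 uint64_t remaining)
{
	uint64_t cap = max_transfer_per_pass_sketch(is_dgfx, has_flat_ccs);

	/* Equivalent of the min_t() clamp at line 420. */
	return remaining < cap ? remaining : cap;
}
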
490 struct xe_device *xe = tile_to_xe(m->tile);
498 if (GRAPHICS_VERx100(xe) >= 2000)
499 pat_index = is_comp_pte ? xe->pat.idx[XE_CACHE_NONE_COMPRESSION] :
500 xe->pat.idx[XE_CACHE_WB];
502 pat_index = xe->pat.idx[XE_CACHE_WB];
526 xe_assert(xe, (va & (SZ_64K - 1)) ==
536 addr = vm->pt_ops->pte_encode_addr(m->tile->xe,
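
Lines 498-502 choose the PAT index for the PTEs being emitted: on Xe2+ (GRAPHICS_VERx100 >= 2000) compressed PTEs get the compression-aware uncached index, everything else stays on write-back. A sketch with stand-in enum values instead of the driver's xe->pat.idx[] table:

#include <stdbool.h>

/* Hypothetical PAT indices, for illustration only. */
enum pat_idx_sketch { PAT_WB_SKETCH, PAT_NONE_COMPRESSION_SKETCH };

static enum pat_idx_sketch pick_pat_sketch(unsigned int graphics_verx100,
					   bool is_comp_pte)
{
	/* Mirrors lines 498-502: compression-aware index only for compressed
	 * PTEs on Xe2+, plain write-back otherwise. */
	if (graphics_verx100 >= 2000)
		return is_comp_pte ? PAT_NONE_COMPRESSION_SKETCH : PAT_WB_SKETCH;
	return PAT_WB_SKETCH;
}
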
554 struct xe_device *xe = gt_to_xe(gt);
561 if (GRAPHICS_VERx100(xe) >= 2000) {
594 struct xe_device *xe = gt_to_xe(gt);
602 if (GRAPHICS_VER(xe) >= 20)
605 if (GRAPHICS_VERx100(xe) >= 1250)
700 struct xe_device *xe = gt_to_xe(gt);
713 bool copy_ccs = xe_device_has_flat_ccs(xe) &&
735 PAGE_ALIGN(xe_device_ccs_bytes(xe, size)),
747 bool usm = xe->info.has_usm;
748 u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
753 drm_dbg(&xe->drm, "Pass %u, sizes: %llu & %llu\n",
767 ccs_size = xe_device_ccs_bytes(xe, src_L0);
772 xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
777 ((xe_device_has_flat_ccs(xe) ? EMIT_COPY_CCS_DW : 0));
807 IS_DGFX(xe) ? src_is_vram : src_is_pltt,
809 IS_DGFX(xe) ? dst_is_vram : dst_is_pltt,
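
Lines 700-809 implement the chunked copy: the number of level-0 page-table slots available per pass is derived from the per-pass cap, each pass size is logged, and VRAM vs. PLTT encodings are chosen per side. A stripped-down sketch of the pass loop, with hypothetical cap and per-page-table coverage values:

#include <stdint.h>
#include <stdio.h>

#define PER_PASS_CAP_SKETCH   (8ULL << 20) /* assumed per-pass transfer cap */
#define PT_ENCODE_SIZE_SKETCH (2ULL << 20) /* assumed bytes mapped per level-0 PT */

static void copy_passes_sketch(uint64_t total_size)
{
	unsigned int avail_pts = PER_PASS_CAP_SKETCH / PT_ENCODE_SIZE_SKETCH; /* cf. line 748 */
	uint64_t remaining = total_size;
	unsigned int pass = 0;

	while (remaining) {
		uint64_t chunk = remaining < PER_PASS_CAP_SKETCH ?
				 remaining : PER_PASS_CAP_SKETCH;

		printf("Pass %u, size: %llu (%u PT slots)\n", pass++,
		       (unsigned long long)chunk, avail_pts); /* cf. drm_dbg() at line 753 */
		/* ...emit PTEs into the reserved slots, emit the blit, fence... */
		remaining -= chunk;
	}
}
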
868 struct xe_device *xe = gt_to_xe(gt);
878 if (GRAPHICS_VERx100(xe) >= 2000)
891 struct xe_device *xe = gt_to_xe(gt);
895 if (GRAPHICS_VERx100(xe) < 1250)
900 if (GRAPHICS_VERx100(xe) >= 2000)
981 struct xe_device *xe = gt_to_xe(gt);
982 bool clear_system_ccs = (xe_bo_needs_ccs_pages(bo) && !IS_DGFX(xe)) ? true : false;
1003 bool usm = xe->info.has_usm;
1004 u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
1015 if (xe_device_has_flat_ccs(xe))
1043 if (xe_device_has_flat_ccs(xe)) {
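
Lines 981-1043 set up the clear path: CCS metadata kept in system pages (non-dGPU) is cleared separately, and flat-CCS devices additionally account for CCS state in the clear batch. A sketch of that decision with plain booleans standing in for the bo/device queries; only the line-982 expression is taken directly, the flat-CCS flag is a simplification:

#include <stdbool.h>

struct clear_plan_sketch {
	bool clear_system_ccs; /* clear CCS metadata held in system memory */
	bool clear_flat_ccs;   /* include flat-CCS state in the clear batch */
};

static struct clear_plan_sketch make_clear_plan_sketch(bool bo_needs_ccs_pages,
							bool is_dgfx,
							bool has_flat_ccs)
{
	struct clear_plan_sketch p = {
		/* Mirrors line 982: system-memory CCS pages only exist off-dGPU. */
		.clear_system_ccs = bo_needs_ccs_pages && !is_dgfx,
		/* Simplification of the checks at lines 1015 and 1043. */
		.clear_flat_ccs = has_flat_ccs,
	};
	return p;
}
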
1206 xe_device_wmb(vm->xe);
1279 struct xe_device *xe = tile_to_xe(tile);
1288 bool usm = !q && xe->info.has_usm;
1292 u16 pat_index = xe->pat.idx[XE_CACHE_WB];
1305 if (IS_DGFX(xe))
1325 bb = xe_bb_new(gt, batch_size, !q && xe->info.has_usm);
1330 if (!IS_DGFX(xe)) {
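
Lines 1279-1330 build the page-table-update batch: when the caller passes no exec queue, the batch is created on the USM path if the device supports it (the same `!q && xe->info.has_usm` test appears at lines 1288 and 1325). A tiny sketch of that selection with an illustrative queue type:

#include <stdbool.h>
#include <stddef.h>

struct queue_sketch; /* stand-in for the caller's exec queue */

static bool use_usm_batch_sketch(const struct queue_sketch *q, bool has_usm)
{
	/* Mirrors "bool usm = !q && xe->info.has_usm;" at line 1288. */
	return !q && has_usm;
}
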