Lines matching refs: bb

120 	/* First slot is reserved for mapping of PT bo and bb, start from 1 */
484 struct xe_bb *bb, u32 at_pt,
508 bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
509 bb->cs[bb->len++] = ofs;
510 bb->cs[bb->len++] = 0;
538 bb->cs[bb->len++] = lower_32_bits(addr);
539 bb->cs[bb->len++] = upper_32_bits(addr);
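
The emit_pte() matches above (lines 484-539, from what appears to be the Xe GPU driver's xe_migrate.c) already show the shape of PTE emission: an MI_STORE_DATA_IMM header sized in quadwords, a destination offset split across two dwords, then the lower/upper halves of each entry. A minimal standalone sketch of that pattern, with pared-down types and placeholder opcode encodings (the real MI_* values live in the driver's command headers):

    #include <stdint.h>

    typedef uint32_t u32;
    typedef uint64_t u64;

    /* Pared-down stand-in for struct xe_bb: a dword stream plus a write index. */
    struct xe_bb { u32 *cs; u32 len; };

    /* Placeholder encodings, for illustration only. */
    #define MI_STORE_DATA_IMM  (0x20u << 23)
    #define MI_SDI_NUM_QW(qw)  (2u * (qw) + 1)  /* assumed: trailing-dword count */

    static u32 lower_32_bits(u64 v) { return (u32)v; }
    static u32 upper_32_bits(u64 v) { return (u32)(v >> 32); }

    /* Write 'count' 64-bit PTEs at offset 'ofs', at most 'max_qw' per command. */
    static void emit_ptes(struct xe_bb *bb, u32 ofs, const u64 *ptes,
                          u32 count, u32 max_qw)
    {
        while (count) {
            u32 chunk = count < max_qw ? count : max_qw;
            u32 i;

            bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
            bb->cs[bb->len++] = ofs;  /* destination address, low dword */
            bb->cs[bb->len++] = 0;    /* destination address, high dword */
            for (i = 0; i < chunk; i++, ptes++) {
                bb->cs[bb->len++] = lower_32_bits(*ptes);
                bb->cs[bb->len++] = upper_32_bits(*ptes);
            }
            ofs += chunk * 8;         /* each quadword entry is 8 bytes */
            count -= chunk;
        }
    }
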
548 static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb,
554 u32 *cs = bb->cs + bb->len;
585 bb->len = cs - bb->cs;
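
emit_copy_ccs() (548-585) uses the file's other emission style: instead of indexing through bb->len for every dword, it takes a local cursor into the stream and commits the length once at the end. The clear helpers at 864-925 do the same and additionally assert that the cursor advanced by exactly the expected amount. A sketch of that idiom, reusing the pared-down struct xe_bb from the previous sketch (packet contents here are dummies):

    #include <assert.h>

    #define PACKET_LEN_DW 3  /* hypothetical fixed packet size */

    static void emit_fixed_packet(struct xe_bb *bb)
    {
        u32 *cs = bb->cs + bb->len;  /* local cursor into the stream */

        *cs++ = 0x0;  /* dummy header dword */
        *cs++ = 0x0;  /* dummy payload */
        *cs++ = 0x0;  /* dummy payload */

        /* Mirrors xe_gt_assert(gt, cs - bb->cs == len + bb->len): the cursor
         * must have advanced by exactly the advertised packet length. */
        assert(cs - bb->cs == PACKET_LEN_DW + bb->len);
        bb->len += PACKET_LEN_DW;
    }
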
589 static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
607 bb->cs[bb->len++] = XY_FAST_COPY_BLT_CMD | (10 - 2);
608 bb->cs[bb->len++] = XY_FAST_COPY_BLT_DEPTH_32 | pitch | tile_y | mocs;
609 bb->cs[bb->len++] = 0;
610 bb->cs[bb->len++] = (size / pitch) << 16 | pitch / 4;
611 bb->cs[bb->len++] = lower_32_bits(dst_ofs);
612 bb->cs[bb->len++] = upper_32_bits(dst_ofs);
613 bb->cs[bb->len++] = 0;
614 bb->cs[bb->len++] = pitch | mocs;
615 bb->cs[bb->len++] = lower_32_bits(src_ofs);
616 bb->cs[bb->len++] = upper_32_bits(src_ofs);
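
The ten dwords at 607-616 are one complete XY_FAST_COPY_BLT packet. The same sequence, with field meanings inferred from the blitter command layout (treat the per-dword comments as a best-effort reading, not documentation):

    bb->cs[bb->len++] = XY_FAST_COPY_BLT_CMD | (10 - 2);  /* opcode | (packet len - 2) */
    bb->cs[bb->len++] = XY_FAST_COPY_BLT_DEPTH_32 | pitch | tile_y | mocs;  /* dst control */
    bb->cs[bb->len++] = 0;                                 /* dst top-left (X1, Y1) = (0, 0) */
    bb->cs[bb->len++] = (size / pitch) << 16 | pitch / 4;  /* dst bottom-right: rows << 16 | pixels per row at 32bpp */
    bb->cs[bb->len++] = lower_32_bits(dst_ofs);            /* dst address, low */
    bb->cs[bb->len++] = upper_32_bits(dst_ofs);            /* dst address, high */
    bb->cs[bb->len++] = 0;                                 /* src top-left (X1, Y1) = (0, 0) */
    bb->cs[bb->len++] = pitch | mocs;                      /* src control */
    bb->cs[bb->len++] = lower_32_bits(src_ofs);            /* src address, low */
    bb->cs[bb->len++] = upper_32_bits(src_ofs);            /* src address, high */
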
631 struct xe_bb *bb,
650 emit_copy_ccs(gt, bb,
663 emit_copy_ccs(gt, bb, dst_ofs, dst_is_indirect, src_ofs,
740 struct xe_bb *bb;
778 bb = xe_bb_new(gt, batch_size, usm);
779 if (IS_ERR(bb)) {
780 err = PTR_ERR(bb);
787 emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs,
793 emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs,
797 emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
799 bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
800 update_idx = bb->len;
803 emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
805 flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
812 job = xe_bb_create_migration_job(m->q, bb,
841 xe_bb_free(bb, fence);
849 xe_bb_free(bb, NULL);
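
Taken together, the matches at 740-849 trace the life of one batch buffer in the copy path: allocate, emit the PTE-setup section, terminate it with MI_BATCH_BUFFER_END and record where the copy section starts (update_idx), emit the blit(s), wrap everything in a migration job, and free the bb either against the job's fence or immediately if job creation failed. A paraphrased skeleton, assuming it compiles against the driver's internal headers (the label name, batch_base, and arguments cut off in the matches are guesses):

    bb = xe_bb_new(gt, batch_size, usm);
    if (IS_ERR(bb)) {
        err = PTR_ERR(bb);
        goto err_sync;  /* label name assumed */
    }

    /* Section 1: PTE setup for this chunk. */
    emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs, &src_it, src_L0, src);
    emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs, &dst_it, src_L0, dst);
    if (copy_system_ccs)
        emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);

    /* END terminates section 1; section 2 (the blits) starts at update_idx. */
    bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
    update_idx = bb->len;

    emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
    /* plus xe_migrate_ccs_copy(m, bb, src_L0_ofs, ...) when CCS state must move */

    job = xe_bb_create_migration_job(m->q, bb, batch_base, update_idx);
    if (IS_ERR(job)) {
        err = PTR_ERR(job);
        xe_bb_free(bb, NULL);  /* no fence yet: free immediately (line 849) */
        goto err_sync;
    }

    /* ... submit the job, obtain its fence ... */
    xe_bb_free(bb, fence);     /* deferred free: bb lives until the GPU is done (line 841) */
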
864 static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
868 u32 *cs = bb->cs + bb->len;
882 xe_gt_assert(gt, cs - bb->cs == len + bb->len);
884 bb->len += len;
887 static void emit_clear_main_copy(struct xe_gt *gt, struct xe_bb *bb,
891 u32 *cs = bb->cs + bb->len;
923 xe_gt_assert(gt, cs - bb->cs == len + bb->len);
925 bb->len += len;
950 static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
954 emit_clear_link_copy(gt, bb, src_ofs, size, pitch);
956 emit_clear_main_copy(gt, bb, src_ofs, size, pitch,
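
emit_clear() at 950-956 is just a dispatcher over the two helpers above: one path for hardware with link copy engines (a MEM_SET-style clear), one for the main copy engine blit. A reconstruction, with the predicate name assumed from context rather than taken from the source:

    static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
                           u32 size, u32 pitch, bool is_vram)
    {
        /* xe_device_has_link_copy_engines() is an assumed predicate name. */
        if (xe_device_has_link_copy_engines(gt_to_xe(gt)))
            emit_clear_link_copy(gt, bb, src_ofs, size, pitch);
        else
            emit_clear_main_copy(gt, bb, src_ofs, size, pitch, is_vram);
    }
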
1000 struct xe_bb *bb;
1025 bb = xe_bb_new(gt, batch_size, usm);
1026 if (IS_ERR(bb)) {
1027 err = PTR_ERR(bb);
1036 emit_pte(m, bb, clear_L0_pt, clear_vram, clear_system_ccs,
1039 bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1040 update_idx = bb->len;
1043 emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram);
1046 emit_copy_ccs(gt, bb, clear_L0_ofs, true,
1052 job = xe_bb_create_migration_job(m->q, bb,
1084 xe_bb_free(bb, fence);
1091 xe_bb_free(bb, NULL);
1108 static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
1136 /* Ensure populatefn can do memset64 by aligning bb->cs */
1137 if (!(bb->len & 1))
1138 bb->cs[bb->len++] = MI_NOOP;
1140 bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
1141 bb->cs[bb->len++] = lower_32_bits(addr);
1142 bb->cs[bb->len++] = upper_32_bits(addr);
1143 ops->populate(pt_update, tile, NULL, bb->cs + bb->len, ofs, chunk,
1146 bb->len += chunk * 2;
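
The MI_NOOP at 1137-1138 deserves a note: the MI_STORE_DATA_IMM header that follows is three dwords, so forcing bb->len to be odd beforehand leaves the payload starting at an even dword index, i.e. 8-byte aligned (given that bb->cs itself starts 8-byte aligned), which is what lets the populate callback fill the PTEs with memset64(). The same fragment with that reasoning spelled out in comments:

    /* Make bb->len odd: odd len + 3 header dwords = even payload index,
     * so the u64 PTE payload is naturally aligned for memset64(). */
    if (!(bb->len & 1))
        bb->cs[bb->len++] = MI_NOOP;

    bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
    bb->cs[bb->len++] = lower_32_bits(addr);
    bb->cs[bb->len++] = upper_32_bits(addr);
    /* populate() writes 'chunk' 64-bit entries straight into the batch. */
    ops->populate(pt_update, tile, NULL, bb->cs + bb->len, ofs, chunk, update);
    bb->len += chunk * 2;  /* two dwords per 64-bit entry */
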
1286 struct xe_bb *bb;
1327 bb = xe_bb_new(gt, batch_size, !q && xe->info.has_usm);
1328 if (IS_ERR(bb))
1329 return ERR_CAST(bb);
1353 bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(num_updates);
1354 bb->cs[bb->len++] = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
1355 bb->cs[bb->len++] = 0; /* upper_32_bits */
1363 bb->cs[bb->len++] = lower_32_bits(addr);
1364 bb->cs[bb->len++] = upper_32_bits(addr);
1367 bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1368 update_idx = bb->len;
1373 write_pgtable(tile, bb, addr + i * XE_PAGE_SIZE,
1377 bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1378 update_idx = bb->len;
1381 write_pgtable(tile, bb, 0, &updates[i], pt_update);
1387 job = xe_bb_create_migration_job(q ?: m->q, bb,
1434 xe_bb_free(bb, fence);
1444 xe_bb_free(bb, NULL);
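
Finally, the matches at 1286-1444 show the same two-section job shape in the page-table-update path, with one twist: when the updates cannot be written in place, the first section maps the page-table BOs into reserved migrate-VM slots (1353-1364) before the END/update_idx split; otherwise the batch goes straight to the END marker and the write_pgtable() calls. A paraphrased skeleton, assuming the driver's headers; the branch condition, batch_addr, and the per-update address encoding are elided or assumed:

    if (map_updates_through_migrate_vm) {  /* condition paraphrased */
        /* Point the reserved PT slots at the page-table BOs. */
        bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(num_updates);
        bb->cs[bb->len++] = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
        bb->cs[bb->len++] = 0;  /* upper_32_bits of the destination */
        for (i = 0; i < num_updates; i++) {
            /* 'addr' stands in for the per-BO PTE encoding, computed per update. */
            bb->cs[bb->len++] = lower_32_bits(addr);
            bb->cs[bb->len++] = upper_32_bits(addr);
        }

        bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
        update_idx = bb->len;

        for (i = 0; i < num_updates; i++)
            write_pgtable(tile, bb, addr + i * XE_PAGE_SIZE,
                          &updates[i], pt_update);
    } else {
        /* Page tables already mapped: only the update section is needed. */
        bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
        update_idx = bb->len;

        for (i = 0; i < num_updates; i++)
            write_pgtable(tile, bb, 0, &updates[i], pt_update);
    }

    /* Either way the job splits at update_idx, and bb is freed against the
     * job's fence on success (1434) or immediately on failure (1444). */
    job = xe_bb_create_migration_job(q ?: m->q, bb, batch_addr, update_idx);
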