/linux-master/drivers/gpu/drm/xe/
xe_lrc.h
    20  int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe, ...
    22  void xe_lrc_finish(struct xe_lrc *lrc);
    25  u32 xe_lrc_pphwsp_offset(struct xe_lrc *lrc);
    27  void xe_lrc_set_ring_head(struct xe_lrc *lrc, u32 head);
    28  u32 xe_lrc_ring_head(struct xe_lrc *lrc);
    29  u32 xe_lrc_ring_space(struct xe_lrc *lrc);
    30  void xe_lrc_write_ring(struct xe_lrc *lrc, const void *data, size_t size);
    32  u32 xe_lrc_ggtt_addr(struct xe_lrc *lrc);
    33  u32 *xe_lrc_regs(struct xe_lrc *lrc);
    35  u32 xe_lrc_read_ctx_reg(struct xe_lrc *lrc, int reg_nr);
        [all...]
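Read as a group, these declarations describe the lifecycle of a software LRC: initialize it against a hardware engine, exec queue and VM, query the ring, write commands into it, and tear it down. The following is a minimal caller sketch built only from the signatures above, assuming a kernel context and the SZ_16K ring size that xe_exec_queue.c passes further down; it is illustrative, not the driver's actual code:

    /* Hypothetical caller; error paths and locking elided. */
    static int example_lrc_use(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
                               struct xe_exec_queue *q, struct xe_vm *vm,
                               const u32 *cmds, u32 bytes)
    {
        int err;

        err = xe_lrc_init(lrc, hwe, q, vm, SZ_16K); /* allocate + seed context image */
        if (err)
            return err;

        if (xe_lrc_ring_space(lrc) >= bytes)        /* emit only if the ring has room */
            xe_lrc_write_ring(lrc, cmds, bytes);

        xe_lrc_finish(lrc);                         /* release backing storage */
        return 0;
    }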
xe_lrc.c
    35  lrc_to_xe(struct xe_lrc *lrc)
    37          return gt_to_xe(lrc->fence_ctx.gt);
   574  static inline u32 __xe_lrc_ring_offset(struct xe_lrc *lrc)
   579  u32 xe_lrc_pphwsp_offset(struct xe_lrc *lrc)
   581          return lrc->ring.size;
   605  static inline u32 __xe_lrc_seqno_offset(struct xe_lrc *lrc)
   608          return xe_lrc_pphwsp_offset(lrc) + LRC_SEQNO_PPHWSP_OFFSET;
   611  static inline u32 __xe_lrc_start_seqno_offset(struct xe_lrc *lrc)
   614          return xe_lrc_pphwsp_offset(lrc) + LRC_START_SEQNO_PPHWSP_OFFSET;
   617  static inline u32 __xe_lrc_parallel_offset(struct xe_lrc *lrc)
   623  __xe_lrc_regs_offset(struct xe_lrc *lrc)
   656  xe_lrc_read_ctx_reg(struct xe_lrc *lrc, int reg_nr)
   666  xe_lrc_write_ctx_reg(struct xe_lrc *lrc, int reg_nr, u32 val)
   696  xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)
   707  xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe, struct xe_exec_queue *q, struct xe_vm *vm, u32 ring_size)
   809  xe_lrc_finish(struct xe_lrc *lrc)
   818  xe_lrc_set_ring_head(struct xe_lrc *lrc, u32 head)
   823  xe_lrc_ring_head(struct xe_lrc *lrc)
   828  xe_lrc_ring_space(struct xe_lrc *lrc)
   837  __xe_lrc_write_ring(struct xe_lrc *lrc, struct iosys_map ring, const void *data, size_t size)
   847  xe_lrc_write_ring(struct xe_lrc *lrc, const void *data, size_t size)
   875  xe_lrc_descriptor(struct xe_lrc *lrc)
   880  xe_lrc_seqno_ggtt_addr(struct xe_lrc *lrc)
   885  xe_lrc_create_seqno_fence(struct xe_lrc *lrc)
   891  xe_lrc_seqno(struct xe_lrc *lrc)
   898  xe_lrc_start_seqno(struct xe_lrc *lrc)
   905  xe_lrc_start_seqno_ggtt_addr(struct xe_lrc *lrc)
   910  xe_lrc_parallel_ggtt_addr(struct xe_lrc *lrc)
   915  xe_lrc_parallel_map(struct xe_lrc *lrc)
        [all...]
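The offset helpers imply the layout of the LRC buffer object: the ring sits at the start, the per-process hardware status page (PPHWSP) begins where the ring ends (line 581 returns lrc->ring.size as the PPHWSP offset), and the seqno slots live at fixed byte offsets inside that page. A sketch of the arithmetic, with the two PPHWSP offsets given assumed example values rather than the driver's real constants:

    #define LRC_SEQNO_PPHWSP_OFFSET         512     /* assumed example value */
    #define LRC_START_SEQNO_PPHWSP_OFFSET   (LRC_SEQNO_PPHWSP_OFFSET + 8)

    /* Mirrors the offset chain in the listing above. */
    static u32 example_seqno_offset(u32 ring_size)
    {
        u32 pphwsp = ring_size; /* PPHWSP starts where the ring ends */

        return pphwsp + LRC_SEQNO_PPHWSP_OFFSET;
    }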
xe_ring_ops.c
   217  static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc, ...
   226          i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc), ...
   230          i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc), ...
   241          i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);
   247          xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
   264  static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc, ...
   284          i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc), ...
   290          i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc), ...
   300          i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);
   306          xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
   309  __emit_job_gen12_render_compute(struct xe_sched_job *job, struct xe_lrc *lrc, u64 batch_addr, u32 seqno)
   358  emit_migration_job_gen12(struct xe_sched_job *job, struct xe_lrc *lrc, u32 seqno)
        [all...]
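All of these emitters share one pattern: stage the commands in an on-stack dword array, advancing an index with each emit_* helper, then hand the finished buffer to xe_lrc_write_ring() in a single call. A schematic version, with the buffer size and the elided middle section as assumptions:

    static void example_emit(struct xe_lrc *lrc, u32 seqno)
    {
        u32 dw[64]; /* staging buffer; size is illustrative */
        int i = 0;

        /* Publish the job's start seqno behind a flush. */
        i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
                                seqno, false, dw, i);

        /* ... batch-buffer start and payload commands staged here ... */

        /* Publish the completion seqno, then copy everything to the ring. */
        i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);
        xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
    }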
xe_guc_submit.c
   485          struct xe_lrc *lrc = q->lrc + i;
   487          action[len++] = lower_32_bits(xe_lrc_descriptor(lrc));
   488          action[len++] = upper_32_bits(xe_lrc_descriptor(lrc));
   522          struct xe_lrc *lrc = q->lrc;
   531          info.hwlrca_lo = lower_32_bits(xe_lrc_descriptor(lrc));
   532          info.hwlrca_hi = upper_32_bits(xe_lrc_descriptor(lrc));
   536          u32 ggtt_addr = xe_lrc_parallel_ggtt_addr(lrc);
   537          struct iosys_map map = xe_lrc_parallel_map(lrc);
   650          struct xe_lrc *lrc = q->lrc + j;
   674          struct xe_lrc *lrc = q->lrc;
  1822          struct xe_lrc *lrc = q->lrc + i;
        [all...]
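The GuC message format is 32-bit, so the 64-bit LRC descriptor is split into low and high halves before it goes into the H2G action buffer (lines 487-488) or the context registration info (lines 531-532). In miniature, using the kernel's lower_32_bits()/upper_32_bits() helpers:

    #include <linux/kernel.h>   /* lower_32_bits(), upper_32_bits() */

    /* Append a 64-bit descriptor to a 32-bit action stream. */
    static int pack_lrc_desc(u32 *action, int len, u64 desc)
    {
        action[len++] = lower_32_bits(desc);    /* bits 31:0  */
        action[len++] = upper_32_bits(desc);    /* bits 63:32 */
        return len;
    }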
xe_sched_job.c
   112  job->fence = xe_lrc_create_seqno_fence(q->lrc);
   127  fences[j] = xe_lrc_create_seqno_fence(q->lrc + j);
   168  --q->lrc[j].fence_ctx.next_seqno;
   231  struct xe_lrc *lrc = job->q->lrc;
   234          xe_lrc_start_seqno(lrc),
   240  struct xe_lrc *lrc = job->q->lrc;
   247  return !__dma_fence_is_later(xe_sched_job_seqno(job), xe_lrc_seqno(lrc), ...
xe_execlist.c
    44  static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc, ...
    51  lrc_desc = xe_lrc_descriptor(lrc);
    65  xe_lrc_write_ctx_reg(lrc, CTX_RING_TAIL, lrc->ring.tail);
    66  lrc->ring.old_tail = lrc->ring.tail;
   113  __start_lrc(port->hwe, exl->q->lrc, port->last_ctx_id);
   134  struct xe_lrc *lrc = exl->q->lrc;
   136  return lrc ...
        [all...]
xe_guc_submit_types.h
   111  /** @lrc: LRC Snapshot */
   112  struct lrc_snapshot *lrc;   (member of struct xe_guc_submit_exec_queue_snapshot)
xe_exec_queue.c
    49  q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL);
   113  err = xe_lrc_init(q->lrc + i, q->hwe, q, q->vm, SZ_16K);
   137  xe_lrc_finish(q->lrc + i);
   218  xe_lrc_finish(q->lrc + i);
   731  return q->lrc->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc) - 1;
   742  struct xe_lrc *lrc = q->lrc;
   743  s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;
   768  if (xe_lrc_seqno(&q->lrc[ ...
        [all...]
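Lines 731 and 743 are the two halves of the queue's throttling logic: jobs in flight are the gap between the next seqno to be assigned and the last seqno the LRC has signalled, and the ring is budgeted as a whole number of worst-case jobs. A worked version of that arithmetic, with MAX_JOB_SIZE_BYTES given an assumed value purely to make the numbers concrete:

    #define MAX_JOB_SIZE_BYTES  1024    /* assumed; the real value lives in xe */

    static s32 example_ring_budget(struct xe_lrc *lrc)
    {
        /* With the SZ_16K ring above: 16384 / 1024 = 16 jobs. */
        return lrc->ring.size / MAX_JOB_SIZE_BYTES;
    }

    static s32 example_jobs_inflight(struct xe_exec_queue *q)
    {
        /* Seqnos increase monotonically, so next_seqno minus the last
         * signalled seqno (minus one) counts jobs still on the ring. */
        return q->lrc->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc) - 1;
    }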
xe_exec_queue_types.h
   156  /** @lrc: logical ring context for this exec queue */
   157  struct xe_lrc lrc[];    (flexible-array member of struct xe_exec_queue)
xe_gt_types.h
   353  /** @wa_active.lrc: bitmap with active LRC workarounds */
   354  unsigned long *lrc;     (member of the anonymous wa_active struct in struct xe_gt)
xe_wa.c
   658  xe_rtp_process_ctx_enable_active_tracking(&ctx, hwe->gt->wa_active.lrc, ...
   689  gt->wa_active.lrc = p;
   709  for_each_set_bit(idx, gt->wa_active.lrc, ARRAY_SIZE(lrc_was))
xe_gt.c
   293          &q->lrc[0].bo->vmap,
   294          xe_lrc_pphwsp_offset(&q->lrc[0]),
/linux-master/drivers/nfc/fdp/
i2c.c
    66  u8 lrc = 0;
    74  /* Compute and add lrc */
    76          lrc ^= skb->data[i];
    78  skb_put_u8(skb, lrc);
   132  u8 tmp[FDP_NCI_I2C_MAX_PAYLOAD], lrc, k;
   151  for (lrc = i = 0; i < r; i++)
   152          lrc ^= tmp[i];
   159  if (lrc) {
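Here "lrc" is a longitudinal redundancy check rather than a logical ring context: the XOR of every payload byte, appended on transmit (skb_put_u8 above) and recomputed over payload plus trailing LRC on receive, where any non-zero result means corruption (the if (lrc) error path at line 159). A standalone sketch of both directions:

    #include <linux/types.h>    /* u8, bool; plain uint8_t works outside the kernel */

    /* XOR of all bytes, as in the FDP framing above. */
    static u8 lrc_compute(const u8 *buf, size_t len)
    {
        u8 lrc = 0;
        size_t i;

        for (i = 0; i < len; i++)
            lrc ^= buf[i];
        return lrc;
    }

    /* XOR over the payload *and* its trailing LRC must come out zero. */
    static bool lrc_ok(const u8 *frame, size_t len_with_lrc)
    {
        return lrc_compute(frame, len_with_lrc) == 0;
    }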
/linux-master/drivers/gpu/drm/i915/gt/
selftest_lrc.c
   160  u32 *lrc;
   168  lrc = (u32 *)__get_free_page(GFP_KERNEL); /* requires page alignment */
   169  if (!lrc)
   171  GEM_BUG_ON(offset_in_page(lrc));
   188  __lrc_init_regs(memset(lrc, POISON_INUSE, PAGE_SIZE), ...
   201  if (lrc[dw] == 0) {
   215  if (lrc[dw] != lri) {
   217          engine->name, dw, lri, lrc[dw]);
   241  if ((offset ^ lrc[dw]) & lri_mask) {
   243          engine->name, dw, offset, lrc[dw]);
  1267  u32 x, dw, *hw, *lrc;
        [all...]
intel_context_types.h
   145  } lrc;  (member of struct intel_context)
intel_execlists_submission.c
   400  rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE;
   466  ce->lrc.lrca = lrc_update_regs(ce, engine, head);
   495  ce->lrc.ccid = ce->tag;
   502  ce->lrc.ccid = tag << (XEHP_SW_CTX_ID_SHIFT - 32);
   512  ce->lrc.ccid = (1 + tag) << (GEN11_SW_CTX_ID_SHIFT - 32);
   517  ce->lrc.ccid |= engine->execlists.ccid;
   525  CE_TRACE(ce, "schedule-in, ccid:%x\n", ce->lrc.ccid);
   601  CE_TRACE(ce, "schedule-out, ccid:%x\n", ce->lrc.ccid);
   615  ccid = ce->lrc.ccid;
   681  desc = ce->lrc ...
        [all...]
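The ccid assignments at lines 502 and 512 subtract 32 from the hardware shift because ce->lrc.ccid holds only the upper dword of the 64-bit context descriptor, and the software context ID field sits above bit 32 of that descriptor. A schematic of the Gen11 case, with the shift value written out as an assumption rather than quoted from the i915 headers:

    #define GEN11_SW_CTX_ID_SHIFT   37  /* assumed here for illustration */

    /* tag occupies the SW context ID field; -32 because ccid is the
     * high 32 bits of the descriptor, not the full 64-bit value. */
    static u32 example_ccid(u32 tag)
    {
        return (1 + tag) << (GEN11_SW_CTX_ID_SHIFT - 32);
    }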
intel_lrc.c
  1226  ce->lrc.lrca = lrc_update_regs(ce, ce->engine, ce->ring->tail);
  1257  ce->lrc.lrca = lrc_update_regs(ce, engine, ce->ring->tail);
  1556  * (Context ID) per lrc.
  1656  WARN_ONCE(!valid, "Invalid lrc state found %s submission\n", when);
intel_engine_cs.c
  2212          rq->context->lrc.ccid,
  2226          rq->context->lrc.ccid,
/linux-master/drivers/media/usb/dvb-usb/
technisat-usb2.c
   410  u8 lrc = 0;
   412          lrc ^= *b++;
   413  return lrc;
/linux-master/drivers/gpu/drm/i915/gt/uc/
intel_guc_capture.c
  1559  (n->lrca & CTX_GTT_ADDRESS_MASK) == (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK))
  1591  (n->lrca & CTX_GTT_ADDRESS_MASK) == (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK)) {
  1601  ce->guc_id.id, ce->lrc.lrca);
intel_guc_submission.c
   154  * We reserve 1/16 of the guc_ids for multi-lrc as these need to be contiguous
   156  * (bitmap vs. ida) between multi-lrc and single-lrc hence the reason to
   157  * partition the guc_id space. We believe the number of multi-lrc contexts in
   159  * multi-lrc.
   410  * When using multi-lrc submission a scratch memory area is reserved in the
   730  * gets enabled in the unblock. For multi-lrc we still submit the
   757  * Without multi-lrc KMD does the submission step (moving the
   758  * lrc tail) so enabling scheduling is sufficient to submit the
   759  * context. This isn't the case in multi-lrc submission ...
        [all...]
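The comment block at lines 154-159 describes partitioning the guc_id space: multi-lrc (parallel) contexts need contiguous IDs, so they get a reserved 1/16 slice managed by a bitmap, while single-lrc contexts draw from the remainder via an ida. A sketch of that split, with the total ID count assumed purely for the arithmetic:

    #define EXAMPLE_NUM_GUC_IDS     65535                       /* assumed total */
    #define EXAMPLE_MULTI_LRC_IDS   (EXAMPLE_NUM_GUC_IDS / 16)  /* contiguous slice */

    /* Multi-lrc ids come from a bitmap over the reserved slice (so a
     * width-N queue can grab N adjacent ids); single-lrc ids come from
     * an ida over everything above it. */
    static bool example_is_multi_lrc_id(u32 guc_id)
    {
        return guc_id < EXAMPLE_MULTI_LRC_IDS;
    }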
/linux-master/drivers/gpu/drm/i915/gvt/
scheduler.c
   348  u64 desc = ce->lrc.desc;
   358  ce->lrc.desc = desc;
/linux-master/drivers/scsi/ibmvscsi_tgt/
ibmvscsi_tgt.c
  3931  long lrc;
  3936  lrc = ibmvscsis_enable_change_state(vscsi);
  3937  if (lrc)
  3939          lrc, vscsi->state);
/linux-master/drivers/scsi/
FlashPoint.c
  7483  unsigned char lrc;
  7484  lrc = 0;
  7486          lrc ^= buffer[i];
  7487  return lrc;
/linux-master/drivers/gpu/drm/i915/
i915_perf.c
  1571  stream->specific_ctx_id = ce->lrc.lrca >> 12;