Searched refs:guc (Results 1 - 25 of 88) sorted by last modified time


/linux-master/drivers/gpu/drm/xe/
xe_guc_ct.c
71 return container_of(ct, struct xe_gt, uc.guc.ct);
229 struct xe_guc *guc = ct_to_guc(ct); local
237 err = xe_guc_self_cfg64(guc,
243 err = xe_guc_self_cfg64(guc,
249 return xe_guc_self_cfg32(guc,
256 struct xe_guc *guc = ct_to_guc(ct); local
265 err = xe_guc_self_cfg64(guc,
271 err = xe_guc_self_cfg64(guc,
277 return xe_guc_self_cfg32(guc,
1014 struct xe_guc *guc = ct_to_guc(ct); local
1174 struct xe_guc *guc = ct_to_guc(ct); local
[all...]
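
The xe_guc_ct.c matches above (and the ct_to_guc()/guc_to_gt() helpers that recur throughout these results) all lean on the kernel's container_of() idiom: given a pointer to an embedded member, recover the enclosing object. A minimal userspace sketch of that idiom; the struct names are simplified stand-ins, not the real xe definitions:

/* container_of: walk back from an embedded member to its container. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct guc_ct { int pending; };
struct guc    { struct guc_ct ct; };
struct gt     { struct guc guc; int id; };

static struct guc *ct_to_guc(struct guc_ct *ct)
{
	return container_of(ct, struct guc, ct);
}

static struct gt *guc_to_gt(struct guc *guc)
{
	return container_of(guc, struct gt, guc);
}

int main(void)
{
	struct gt gt = { .id = 7 };
	struct guc_ct *ct = &gt.guc.ct;

	printf("gt id reached from ct: %d\n", guc_to_gt(ct_to_guc(ct))->id);
	return 0;
}

Because the member offsets are known at compile time, each helper is pure pointer arithmetic, which is why these functions can stay trivial one-liners in the driver.
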
xe_guc_ads.c
35 return container_of(ads, struct xe_gt, uc.guc.ads);
xe_huc.c
39 return &container_of(huc, struct xe_uc, huc)->guc;
241 struct xe_guc *guc = huc_to_guc(huc); local
258 ret = xe_guc_auth_huc(guc, xe_bo_ggtt_addr(huc->fw.bo) +
xe_gt.c
78 gt->uc.guc.submission_state.enabled = false;
263 hwe->name, ERR_PTR(err), q->guc->id);
280 hwe->name, ERR_PTR(err), nop_q->guc->id);
288 hwe->name, ERR_PTR(err), q->guc->id);
xe_exec_queue_types.h
104 /** @guc: GuC backend specific state for exec queue */
105 struct xe_guc_exec_queue *guc; member in union:xe_exec_queue::__anon85
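
The xe_exec_queue_types.h match shows the queue's backend state kept in an anonymous union (the indexer names it __anon85): exactly one submission backend owns the queue at a time. A hedged sketch of that shape with invented type and field names:

/* Backend-specific state behind an anonymous union (C11). */
#include <stdio.h>

struct guc_exec_queue { int guc_id; };

struct exec_queue {
	const char *name;
	union {                      /* one backend is active at a time */
		struct guc_exec_queue *guc;
		void *execlist;      /* placeholder for another backend */
	};
};

int main(void)
{
	struct guc_exec_queue gq = { .guc_id = 3 };
	struct exec_queue q = { .name = "rcs0", .guc = &gq };

	printf("%s -> guc_id %d\n", q.name, q.guc->guc_id);
	return 0;
}
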
xe_gt_pagefault.c
252 static int send_pagefault_reply(struct xe_guc *guc, argument
261 return xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
326 int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len) argument
328 struct xe_gt *gt = guc_to_gt(guc);
393 send_pagefault_reply(&gt->uc.guc, &reply);
629 int xe_guc_access_counter_notify_handler(struct xe_guc *guc, u32 *msg, u32 len) argument
631 struct xe_gt *gt = guc_to_gt(guc);
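
send_pagefault_reply() above follows the H2G convention visible across these results: build a fixed u32 action[] whose first dword is the action opcode, then hand the array and its ARRAY_SIZE() to the CT send helper. A standalone sketch of that convention; the opcode value and the ct_send() stub are invented:

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum { HYPOTHETICAL_ACTION_PAGE_FAULT_REPLY = 0x1001 };

static int ct_send(const uint32_t *action, uint32_t len)
{
	/* A real implementation would queue this on the CT buffer. */
	printf("sending action 0x%04x, %u dwords\n",
	       (unsigned)action[0], (unsigned)len);
	return 0;
}

static int send_pagefault_reply(uint32_t pagefault_desc, uint32_t reply)
{
	uint32_t action[] = {
		HYPOTHETICAL_ACTION_PAGE_FAULT_REPLY,
		pagefault_desc,
		reply,
	};

	return ct_send(action, ARRAY_SIZE(action));
}

int main(void)
{
	return send_pagefault_reply(0xdead, 0x1);
}
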
xe_gt_tlb_invalidation.c
94 struct xe_guc *guc = &gt->uc.guc; local
102 mutex_lock(&gt->uc.guc.ct.lock);
117 wake_up_all(&guc->ct.wq);
123 mutex_unlock(&gt->uc.guc.ct.lock);
139 static int send_tlb_invalidation(struct xe_guc *guc, argument
143 struct xe_gt *gt = guc_to_gt(guc);
153 mutex_lock(&guc->ct.lock);
160 ret = xe_guc_ct_send_locked(&guc->ct, action, len,
193 mutex_unlock(&guc->ct.lock);
323 struct xe_guc *guc = &gt->uc.guc; local
361 xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len) argument
[all...]
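
The xe_gt_tlb_invalidation.c hits outline a request/completion handshake: the send path queues the H2G action while holding guc->ct.lock, and the G2H done handler wakes everyone sleeping on guc->ct.wq. A userspace approximation using pthreads in place of the kernel mutex and wait queue; all names are illustrative:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ct_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  ct_wq   = PTHREAD_COND_INITIALIZER;
static int latest_done_seqno;

static int send_tlb_invalidation(int seqno)
{
	pthread_mutex_lock(&ct_lock);
	/* ... build and queue the action while the CT lock is held ... */
	pthread_mutex_unlock(&ct_lock);
	return seqno;
}

/* Called when the G2H "invalidation done" message arrives. */
static void tlb_invalidation_done_handler(int seqno)
{
	pthread_mutex_lock(&ct_lock);
	latest_done_seqno = seqno;
	pthread_cond_broadcast(&ct_wq); /* kernel: wake_up_all(&guc->ct.wq) */
	pthread_mutex_unlock(&ct_lock);
}

static void wait_for_invalidation(int seqno)
{
	pthread_mutex_lock(&ct_lock);
	while (latest_done_seqno < seqno)
		pthread_cond_wait(&ct_wq, &ct_lock);
	pthread_mutex_unlock(&ct_lock);
}

int main(void)
{
	int seqno = send_tlb_invalidation(1);

	tlb_invalidation_done_handler(seqno); /* normally from IRQ context */
	wait_for_invalidation(seqno);
	puts("tlb invalidation complete");
	return 0;
}
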
xe_query.c
421 size_t size = xe_guc_hwconfig_size(&gt->uc.guc);
437 xe_guc_hwconfig_copy(&gt->uc.guc, hwconfig);
542 struct xe_guc *guc = &xe->tiles[0].primary_gt->uc.guc; local
544 version = &guc->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY];
xe_guc_submit.c
45 return &q->gt->uc.guc;
64 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_REGISTERED;
69 atomic_or(EXEC_QUEUE_STATE_REGISTERED, &q->guc->state);
74 atomic_and(~EXEC_QUEUE_STATE_REGISTERED, &q->guc->state);
79 return atomic_read(&q->guc->state) & ENGINE_STATE_ENABLED;
84 atomic_or(ENGINE_STATE_ENABLED, &q->guc->state);
89 atomic_and(~ENGINE_STATE_ENABLED, &q->guc->state);
94 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_ENABLE;
99 atomic_or(EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state);
104 atomic_and(~EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state);
183 alloc_submit_wq(struct xe_guc *guc) argument
203 free_submit_wq(struct xe_guc *guc) argument
211 get_submit_wq(struct xe_guc *guc) argument
218 alloc_submit_wq(struct xe_guc *guc) argument
223 free_submit_wq(struct xe_guc *guc) argument
228 get_submit_wq(struct xe_guc *guc) argument
236 struct xe_guc *guc = arg; local
252 primelockdep(struct xe_guc *guc) argument
266 xe_guc_submit_init(struct xe_guc *guc) argument
301 __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count) argument
318 alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q) argument
366 release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q) argument
421 init_policies(struct xe_guc *guc, struct xe_exec_queue *q) argument
440 set_min_preemption_timeout(struct xe_guc *guc, struct xe_exec_queue *q) argument
458 __register_mlrc_engine(struct xe_guc *guc, struct xe_exec_queue *q, struct guc_ctxt_registration_info *info) argument
497 __register_engine(struct xe_guc *guc, struct guc_ctxt_registration_info *info) argument
520 struct xe_guc *guc = exec_queue_to_guc(q); local
579 struct xe_guc *guc = exec_queue_to_guc(q); local
607 struct xe_guc *guc = exec_queue_to_guc(q); local
627 struct xe_guc *guc = exec_queue_to_guc(q); local
672 struct xe_guc *guc = exec_queue_to_guc(q); local
727 struct xe_guc *guc = exec_queue_to_guc(q); local
762 guc_read_stopped(struct xe_guc *guc) argument
774 disable_scheduling_deregister(struct xe_guc *guc, struct xe_exec_queue *q) argument
814 struct xe_guc *guc = exec_queue_to_guc(q); local
858 struct xe_guc *guc = exec_queue_to_guc(q); local
875 struct xe_guc *guc = exec_queue_to_guc(q); local
897 struct xe_guc *guc = exec_queue_to_guc(q); local
967 struct xe_guc *guc = exec_queue_to_guc(q); local
1029 struct xe_guc *guc = exec_queue_to_guc(q); local
1054 __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q) argument
1069 struct xe_guc *guc = exec_queue_to_guc(q); local
1089 struct xe_guc *guc = exec_queue_to_guc(q); local
1098 struct xe_guc *guc = exec_queue_to_guc(q); local
1113 struct xe_guc *guc = exec_queue_to_guc(q); local
1148 struct xe_guc *guc = exec_queue_to_guc(q); local
1206 struct xe_guc *guc = exec_queue_to_guc(q); local
1370 struct xe_guc *guc = exec_queue_to_guc(q); local
1379 struct xe_guc *guc = exec_queue_to_guc(q); local
1411 guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q) argument
1455 xe_guc_submit_reset_prepare(struct xe_guc *guc) argument
1473 xe_guc_submit_reset_wait(struct xe_guc *guc) argument
1478 xe_guc_submit_stop(struct xe_guc *guc) argument
1517 xe_guc_submit_start(struct xe_guc *guc) argument
1537 g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id) argument
1559 deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q) argument
1571 xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len) argument
1616 xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len) argument
1650 xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len) argument
1684 xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg, u32 len) argument
1711 xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len) argument
1739 struct xe_guc *guc = exec_queue_to_guc(q); local
1975 xe_guc_submit_print(struct xe_guc *guc, struct drm_printer *p) argument
[all...]
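
The long run of one-line helpers in xe_guc_submit.c (exec_queue_registered(), set_exec_queue_enabled(), and so on) tracks exec-queue lifecycle bits in a single atomic word via atomic_read()/atomic_or()/atomic_and(). A C11 stdatomic sketch of the same pattern; the bit values are invented:

#include <stdatomic.h>
#include <stdio.h>

#define EXEC_QUEUE_STATE_REGISTERED     (1 << 0)
#define ENGINE_STATE_ENABLED            (1 << 1)
#define EXEC_QUEUE_STATE_PENDING_ENABLE (1 << 2)

struct exec_queue { atomic_int state; };

static int exec_queue_registered(struct exec_queue *q)
{
	return atomic_load(&q->state) & EXEC_QUEUE_STATE_REGISTERED;
}

static void set_exec_queue_registered(struct exec_queue *q)
{
	atomic_fetch_or(&q->state, EXEC_QUEUE_STATE_REGISTERED);
}

static void clear_exec_queue_registered(struct exec_queue *q)
{
	atomic_fetch_and(&q->state, ~EXEC_QUEUE_STATE_REGISTERED);
}

int main(void)
{
	struct exec_queue q = { 0 };

	set_exec_queue_registered(&q);
	printf("registered: %d\n", !!exec_queue_registered(&q));
	clear_exec_queue_registered(&q);
	printf("registered: %d\n", !!exec_queue_registered(&q));
	return 0;
}

Packing the flags into one word lets state be read and updated from IRQ-driven G2H handlers without taking a lock.
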
xe_trace.h
150 __entry->guc_id = q->guc->id;
151 __entry->guc_state = atomic_read(&q->guc->state);
267 __entry->guc_id = job->q->guc->id;
269 atomic_read(&job->q->guc->state);
329 ((struct xe_exec_queue *)msg->private_data)->guc->id;
xe_wopcm_types.h
17 /** @guc: GuC WOPCM Region info */
19 /** @guc.base: GuC WOPCM base which is offset from WOPCM base */
21 /** @guc.size: Size of the GuC WOPCM region */
23 } guc; member in struct:xe_wopcm
xe_uc_fw.c
106 fw_def(LUNARLAKE, major_ver(xe, guc, lnl, 70, 19, 2)) \
107 fw_def(METEORLAKE, major_ver(i915, guc, mtl, 70, 19, 2)) \
108 fw_def(DG2, major_ver(i915, guc, dg2, 70, 19, 2)) \
109 fw_def(DG1, major_ver(i915, guc, dg1, 70, 19, 2)) \
110 fw_def(ALDERLAKE_N, major_ver(i915, guc, tgl, 70, 19, 2)) \
111 fw_def(ALDERLAKE_P, major_ver(i915, guc, adlp, 70, 19, 2)) \
112 fw_def(ALDERLAKE_S, major_ver(i915, guc, tgl, 70, 19, 2)) \
113 fw_def(ROCKETLAKE, major_ver(i915, guc, tgl, 70, 19, 2)) \
114 fw_def(TIGERLAKE, major_ver(i915, guc, tgl, 70, 19, 2))
171 return container_of(uc_fw, struct xe_gt, uc.guc.fw);
[all...]
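
The xe_uc_fw.c firmware list above is an X-macro: each fw_def(...) row is written once and expanded several ways (enum entries, filename tables, version checks). A compact illustration of the technique with made-up entries, not the real firmware list:

#include <stdio.h>

#define GUC_FW_LIST(fw_def) \
	fw_def(TIGERLAKE, "tgl", 70, 19, 2) \
	fw_def(DG1,       "dg1", 70, 19, 2)

/* Expansion 1: an enum of platforms. */
#define MAKE_ENUM(plat, name, maj, min, patch) PLAT_##plat,
enum platform { GUC_FW_LIST(MAKE_ENUM) PLAT_COUNT };

/* Expansion 2: a table of firmware file names. */
#define MAKE_ENTRY(plat, name, maj, min, patch) \
	{ PLAT_##plat, name "_guc_" #maj "." #min "." #patch ".bin" },
static const struct { enum platform plat; const char *file; } fw_table[] = {
	GUC_FW_LIST(MAKE_ENTRY)
};

int main(void)
{
	for (int i = 0; i < PLAT_COUNT; i++)
		printf("%s\n", fw_table[i].file);
	return 0;
}

Keeping one authoritative list means a new platform is added in exactly one place and every expansion stays in sync.
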
xe_uc.c
44 ret = xe_guc_init(&uc->guc);
63 ret = xe_guc_submit_init(&uc->guc);
67 ret = xe_guc_db_mgr_init(&uc->guc.dbm, ~0);
99 err = xe_guc_init_post_hwconfig(&uc->guc);
115 ret = xe_guc_reset(&uc->guc);
127 xe_guc_sanitize(&uc->guc);
151 ret = xe_guc_min_load_for_hwconfig(&uc->guc);
174 ret = xe_guc_upload(&uc->guc);
178 ret = xe_guc_enable_communication(&uc->guc);
186 ret = xe_guc_post_load_init(&uc->guc);
[all...]
xe_pm.c
310 xe->d3cold.power_lost = xe_guc_in_reset(&gt->uc.guc);
xe_memirq.c
38 static const char *guc_name(struct xe_guc *guc) argument
40 return xe_gt_is_media_type(guc_to_gt(guc)) ? "media GuC" : "GuC";
264 * @guc: the &xe_guc to setup
274 int xe_memirq_init_guc(struct xe_memirq *memirq, struct xe_guc *guc) argument
276 bool is_media = xe_gt_is_media_type(guc_to_gt(guc));
288 err = xe_guc_self_cfg64(guc, GUC_KLV_SELF_CFG_MEMIRQ_SOURCE_ADDR_KEY,
293 err = xe_guc_self_cfg64(guc, GUC_KLV_SELF_CFG_MEMIRQ_STATUS_ADDR_KEY,
303 guc_name(guc), ERR_PTR(err));
369 memirq_dispatch_guc(struct xe_memirq *memirq, struct iosys_map *status, struct xe_guc *guc) argument
370 struct xe_guc *guc)
372 const char *name = guc_name(guc);
[all...]
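
xe_memirq_init_guc() above pushes its addresses to the firmware through xe_guc_self_cfg64(), i.e. a key/length/value (KLV) item whose 64-bit payload spans two dwords. A sketch of that encoding; the header layout (key in bits 31:16, dword count in bits 15:0) and the key value are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

/* Emit one KLV item: header dword, then the 64-bit value as two dwords. */
static void emit_klv64(uint32_t *out, uint16_t key, uint64_t value)
{
	out[0] = (uint32_t)key << 16 | 2;        /* assumed header layout */
	out[1] = (uint32_t)(value & 0xffffffff); /* low dword */
	out[2] = (uint32_t)(value >> 32);        /* high dword */
}

int main(void)
{
	uint32_t klv[3];

	emit_klv64(klv, 0x1234 /* hypothetical key */, 0xdeadbeefcafef00dULL);
	printf("%08x %08x %08x\n",
	       (unsigned)klv[0], (unsigned)klv[1], (unsigned)klv[2]);
	return 0;
}
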
xe_memirq.h
24 int xe_memirq_init_guc(struct xe_memirq *memirq, struct xe_guc *guc);
xe_irq.c
245 return xe_guc_irq_handler(&gt->uc.guc, iir);
247 return xe_guc_irq_handler(&gt->uc.guc, iir);
xe_guc_submit.h
15 int xe_guc_submit_init(struct xe_guc *guc);
17 int xe_guc_submit_reset_prepare(struct xe_guc *guc);
18 void xe_guc_submit_reset_wait(struct xe_guc *guc);
19 int xe_guc_submit_stop(struct xe_guc *guc);
20 int xe_guc_submit_start(struct xe_guc *guc);
22 int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
23 int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
24 int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len);
25 int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
27 int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len);
[all...]
xe_guc_submit_types.h
119 /** @guc: GuC Engine Snapshot */
121 /** @guc.wqi_head: work queue item head */
123 /** @guc.wqi_tail: work queue item tail */
125 /** @guc.id: GuC id for this exec_queue */
127 } guc; member in struct:xe_guc_submit_exec_queue_snapshot
xe_guc_pc.c
78 struct xe_guc *guc = pc_to_guc(pc); local
79 struct xe_gt *gt = container_of(guc, struct xe_gt, uc.guc);
87 return container_of(pc, struct xe_gt, uc.guc.pc);
xe_guc_hwconfig.c
17 static int send_get_hwconfig(struct xe_guc *guc, u32 ggtt_addr, u32 size) argument
26 return xe_guc_mmio_send(guc, action, ARRAY_SIZE(action));
29 static int guc_hwconfig_size(struct xe_guc *guc, u32 *size) argument
31 int ret = send_get_hwconfig(guc, 0, 0);
40 static int guc_hwconfig_copy(struct xe_guc *guc) argument
42 int ret = send_get_hwconfig(guc, xe_bo_ggtt_addr(guc->hwconfig.bo),
43 guc->hwconfig.size);
51 int xe_guc_hwconfig_init(struct xe_guc *guc) argument
53 struct xe_device *xe = guc_to_xe(guc);
91 xe_guc_hwconfig_size(struct xe_guc *guc) argument
96 xe_guc_hwconfig_copy(struct xe_guc *guc, void *dst) argument
[all...]
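
xe_guc_hwconfig.c shows a two-phase query: guc_hwconfig_size() calls send_get_hwconfig(guc, 0, 0) so the firmware replies with just the blob size, a buffer is allocated, and guc_hwconfig_copy() repeats the action with a real GGTT address. A self-contained sketch of the same probe-then-copy flow; the fake data source is invented for the demo:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const uint8_t fake_firmware_hwconfig[] = { 1, 2, 3, 4, 5, 6 };

/* Returns the blob size; copies it only when a buffer is supplied. */
static int send_get_hwconfig(uint8_t *addr, size_t size)
{
	if (addr == NULL && size == 0)
		return (int)sizeof(fake_firmware_hwconfig); /* size probe */
	if (size < sizeof(fake_firmware_hwconfig))
		return -1;
	memcpy(addr, fake_firmware_hwconfig, sizeof(fake_firmware_hwconfig));
	return (int)sizeof(fake_firmware_hwconfig);
}

int main(void)
{
	int size = send_get_hwconfig(NULL, 0);
	uint8_t *buf = malloc(size);

	if (!buf || send_get_hwconfig(buf, size) < 0)
		return 1;
	printf("copied %d bytes of hwconfig\n", size);
	free(buf);
	return 0;
}
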
/linux-master/drivers/gpu/drm/i915/gt/uc/
intel_uc.c
90 gt_dbg(gt, "enable_guc=%d (guc:%s submission:%s huc:%s slpc:%s)\n",
123 intel_guc_init_early(&uc->guc);
137 intel_guc_init_late(&uc->guc);
154 intel_guc_init_send_regs(&uc->guc);
159 struct intel_guc *guc = &uc->guc; local
161 if (guc->log.vma && !uc->load_err_log)
162 uc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
183 * communication channel with guc is turned off at this point, we can save the
186 static void guc_clear_mmio_msg(struct intel_guc *guc) argument
191 guc_get_mmio_msg(struct intel_guc *guc) argument
210 guc_handle_mmio_msg(struct intel_guc *guc) argument
223 guc_enable_communication(struct intel_guc *guc) argument
255 guc_disable_communication(struct intel_guc *guc) argument
320 struct intel_guc *guc = &uc->guc; local
354 struct intel_guc *guc = &uc->guc; local
460 struct intel_guc *guc = &uc->guc; local
589 struct intel_guc *guc = &uc->guc; local
608 struct intel_guc *guc = &uc->guc; local
629 struct intel_guc *guc = &uc->guc; local
638 struct intel_guc *guc = &uc->guc; local
653 struct intel_guc *guc = &uc->guc; local
662 struct intel_guc *guc = &uc->guc; local
683 struct intel_guc *guc = &uc->guc; local
714 struct intel_guc *guc = &uc->guc; local
[all...]
intel_guc_submission.c
108 * guc->submission_state.lock
122 * guc->submission_state.lock -> ce->guc_state.lock
161 #define NUMBER_MULTI_LRC_GUC_ID(guc) \
162 ((guc)->submission_state.num_guc_ids / 16)
401 return &ce->engine->gt->uc.guc;
451 GEM_BUG_ON(!ce->parallel.guc.parent_page);
453 return ce->parallel.guc.parent_page * PAGE_SIZE;
471 * parallel.guc.parent_page is the offset into ce->state while
504 CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE)
516 __get_context(struct intel_guc *guc, u32 id) argument
525 __get_lrc_desc_v69(struct intel_guc *guc, u32 index) argument
537 guc_lrc_desc_pool_create_v69(struct intel_guc *guc) argument
552 guc_lrc_desc_pool_destroy_v69(struct intel_guc *guc) argument
561 guc_submission_initialized(struct intel_guc *guc) argument
566 _reset_lrc_desc_v69(struct intel_guc *guc, u32 id) argument
574 ctx_id_mapped(struct intel_guc *guc, u32 id) argument
579 set_ctx_id_mapping(struct intel_guc *guc, u32 id, struct intel_context *ce) argument
593 clr_ctx_id_mapping(struct intel_guc *guc, u32 id) argument
611 decr_outstanding_submission_g2h(struct intel_guc *guc) argument
617 guc_submission_send_busy_loop(struct intel_guc *guc, const u32 *action, u32 len, u32 g2h_len_dw, bool loop) argument
642 intel_guc_wait_for_pending_msg(struct intel_guc *guc, atomic_t *wait_var, bool interruptible, long timeout) argument
683 intel_guc_wait_for_idle(struct intel_guc *guc, long timeout) argument
696 __guc_add_request(struct intel_guc *guc, struct i915_request *rq) argument
779 guc_add_request(struct intel_guc *guc, struct i915_request *rq) argument
893 guc_wq_item_append(struct intel_guc *guc, struct i915_request *rq) argument
927 guc_dequeue_one_context(struct intel_guc *guc) argument
1077 scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc) argument
1182 __extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start) argument
1249 struct intel_guc *guc = &engine->gt->uc.guc; local
1284 guc_update_pm_timestamp(struct intel_guc *guc, ktime_t *now) argument
1314 struct intel_guc *guc = &gt->uc.guc; local
1371 guc_enable_busyness_worker(struct intel_guc *guc) argument
1376 guc_cancel_busyness_worker(struct intel_guc *guc) argument
1422 __reset_guc_busyness_stats(struct intel_guc *guc) argument
1441 __update_guc_busyness_stats(struct intel_guc *guc) argument
1462 struct intel_guc *guc = ce_to_guc(ce); local
1481 struct intel_guc *guc = container_of(wrk, typeof(*guc), local
1544 guc_action_enable_usage_stats(struct intel_guc *guc) argument
1556 guc_init_engine_stats(struct intel_guc *guc) argument
1573 guc_fini_engine_stats(struct intel_guc *guc) argument
1580 struct intel_guc *guc = &gt->uc.guc; local
1607 struct intel_guc *guc = &gt->uc.guc; local
1621 submission_disabled(struct intel_guc *guc) argument
1630 disable_submission(struct intel_guc *guc) argument
1641 enable_submission(struct intel_guc *guc) argument
1659 guc_flush_submissions(struct intel_guc *guc) argument
1668 intel_guc_submission_flush_work(struct intel_guc *guc) argument
1675 intel_guc_submission_reset_prepare(struct intel_guc *guc) argument
1859 wake_up_all_tlb_invalidate(struct intel_guc *guc) argument
1873 intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled) argument
1971 intel_guc_submission_cancel_requests(struct intel_guc *guc) argument
2006 intel_guc_submission_reset_finish(struct intel_guc *guc) argument
2038 intel_guc_tlb_invalidation_is_available(struct intel_guc *guc) argument
2044 init_tlb_lookup(struct intel_guc *guc) argument
2071 fini_tlb_lookup(struct intel_guc *guc) argument
2090 intel_guc_submission_init(struct intel_guc *guc) argument
2128 intel_guc_submission_fini(struct intel_guc *guc) argument
2153 guc_bypass_tasklet_submit(struct intel_guc *guc, struct i915_request *rq) argument
2179 need_tasklet(struct intel_guc *guc, struct i915_request *rq) argument
2192 struct intel_guc *guc = &rq->engine->gt->uc.guc; local
2206 new_guc_id(struct intel_guc *guc, struct intel_context *ce) argument
2233 __release_guc_id(struct intel_guc *guc, struct intel_context *ce) argument
2255 release_guc_id(struct intel_guc *guc, struct intel_context *ce) argument
2264 steal_guc_id(struct intel_guc *guc, struct intel_context *ce) argument
2301 assign_guc_id(struct intel_guc *guc, struct intel_context *ce) argument
2330 pin_guc_id(struct intel_guc *guc, struct intel_context *ce) argument
2380 unpin_guc_id(struct intel_guc *guc, struct intel_context *ce) argument
2399 __guc_action_register_multi_lrc_v69(struct intel_guc *guc, struct intel_context *ce, u32 guc_id, u32 offset, bool loop) argument
2423 __guc_action_register_multi_lrc_v70(struct intel_guc *guc, struct intel_context *ce, struct guc_ctxt_registration_info *info, bool loop) argument
2466 __guc_action_register_context_v69(struct intel_guc *guc, u32 guc_id, u32 offset, bool loop) argument
2481 __guc_action_register_context_v70(struct intel_guc *guc, struct guc_ctxt_registration_info *info, bool loop) argument
2509 register_context_v69(struct intel_guc *guc, struct intel_context *ce, bool loop) argument
2525 register_context_v70(struct intel_guc *guc, struct intel_context *ce, bool loop) argument
2539 struct intel_guc *guc = ce_to_guc(ce); local
2564 __guc_action_deregister_context(struct intel_guc *guc, u32 guc_id) argument
2579 struct intel_guc *guc = ce_to_guc(ce); local
2646 __guc_context_set_context_policies(struct intel_guc *guc, struct context_policy *policy, bool loop) argument
2658 struct intel_guc *guc = &engine->gt->uc.guc; local
2734 struct intel_guc *guc = &engine->gt->uc.guc; local
2803 struct intel_guc *guc = &engine->gt->uc.guc; local
2866 struct intel_guc *guc = &engine->gt->uc.guc; local
2972 struct intel_guc *guc = ce_to_guc(ce); local
2987 __guc_context_sched_enable(struct intel_guc *guc, struct intel_context *ce) argument
3002 __guc_context_sched_disable(struct intel_guc *guc, struct intel_context *ce, u16 guc_id) argument
3059 struct intel_guc *guc = ce_to_guc(ce); local
3115 struct intel_guc *guc = ce_to_guc(ce); local
3169 __guc_context_set_preemption_timeout(struct intel_guc *guc, u16 guc_id, u32 preemption_timeout) argument
3194 struct intel_guc *guc = ce_to_guc(ce); local
3262 bypass_sched_disable(struct intel_guc *guc, struct intel_context *ce) argument
3281 struct intel_guc *guc = ce_to_guc(ce); local
3294 guc_id_pressure(struct intel_guc *guc, struct intel_context *ce) argument
3312 struct intel_guc *guc = ce_to_guc(ce); local
3347 struct intel_guc *guc = ce_to_guc(ce); local
3422 guc_flush_destroyed_contexts(struct intel_guc *guc) argument
3447 deregister_destroyed_contexts(struct intel_guc *guc) argument
3485 struct intel_guc *guc = container_of(w, struct intel_guc, local
3508 struct intel_guc *guc = ce_to_guc(ce); local
3547 __guc_context_set_prio(struct intel_guc *guc, struct intel_context *ce) argument
3567 guc_context_set_prio(struct intel_guc *guc, struct intel_context *ce, u8 prio) argument
3625 struct intel_guc *guc = &ce->engine->gt->uc.guc; local
3805 struct intel_guc *guc = ce_to_guc(ce); local
3940 struct intel_guc *guc = ce_to_guc(ce); local
4010 struct intel_guc *guc = ce_to_guc(ce); local
4036 struct intel_guc *guc = ce_to_guc(ce); local
4372 guc_kernel_context_pin(struct intel_guc *guc, struct intel_context *ce) argument
4401 guc_init_submission(struct intel_guc *guc) argument
4537 struct intel_guc *guc = sched_engine->private_data; local
4547 struct intel_guc *guc = &engine->gt->uc.guc; local
4630 __guc_action_set_scheduling_policies(struct intel_guc *guc, struct scheduling_policy *policy) argument
4653 guc_init_global_schedule_policy(struct intel_guc *guc) argument
4681 guc_route_semaphores(struct intel_guc *guc, bool to_guc) argument
4697 intel_guc_submission_enable(struct intel_guc *guc) argument
4726 intel_guc_submission_disable(struct intel_guc *guc) argument
4734 __guc_submission_supported(struct intel_guc *guc) argument
4741 __guc_submission_selected(struct intel_guc *guc) argument
4751 intel_guc_sched_disable_gucid_threshold_max(struct intel_guc *guc) argument
4771 intel_guc_submission_init_early(struct intel_guc *guc) argument
4796 g2h_context_lookup(struct intel_guc *guc, u32 ctx_id) argument
4819 wait_wake_outstanding_tlb_g2h(struct intel_guc *guc, u32 seqno) argument
4836 intel_guc_tlb_invalidation_done(struct intel_guc *guc, const u32 *payload, u32 len) argument
4878 guc_send_invalidate_tlb(struct intel_guc *guc, enum intel_guc_tlb_invalidation_type type) argument
4956 intel_guc_invalidate_tlb_engines(struct intel_guc *guc) argument
4962 intel_guc_invalidate_tlb_guc(struct intel_guc *guc) argument
4967 intel_guc_deregister_done_process_msg(struct intel_guc *guc, const u32 *msg, u32 len) argument
5018 intel_guc_sched_done_process_msg(struct intel_guc *guc, const u32 *msg, u32 len) argument
5096 capture_error_state(struct intel_guc *guc, struct intel_context *ce) argument
5143 guc_handle_context_reset(struct intel_guc *guc, struct intel_context *ce) argument
5162 intel_guc_context_reset_process_msg(struct intel_guc *guc, const u32 *msg, u32 len) argument
5197 intel_guc_error_capture_process_msg(struct intel_guc *guc, const u32 *msg, u32 len) argument
5217 intel_guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance) argument
5230 struct intel_guc *guc = container_of(w, struct intel_guc, local
5261 intel_guc_engine_failure_process_msg(struct intel_guc *guc, const u32 *msg, u32 len) argument
5306 struct intel_guc *guc = &engine->gt->uc.guc; local
5368 struct intel_guc *guc = &engine->gt->uc.guc; local
5407 intel_guc_submission_print_info(struct intel_guc *guc, struct drm_printer *p) argument
5474 intel_guc_submission_print_context_info(struct intel_guc *guc, struct drm_printer *p) argument
5812 struct intel_guc *guc; local
[all...]
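
Among the intel_guc_submission.c hits, guc_wq_item_append() and the CIRC_SPACE() use at line 504 manage the per-context work queue as a power-of-two ring indexed by wqi_head/wqi_tail. A sketch using the CIRC_CNT()/CIRC_SPACE() formulas from the kernel's <linux/circ_buf.h>; WQ_SIZE and the byte-wise item copy are simplified for the demo:

#include <stdint.h>
#include <stdio.h>

#define WQ_SIZE 64 /* must be a power of two */

/* Same definitions as the kernel's circ_buf.h. */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

struct wq { uint8_t buf[WQ_SIZE]; unsigned head, tail; };

static int wq_item_append(struct wq *wq, const uint8_t *item, unsigned len)
{
	if (CIRC_SPACE(wq->head, wq->tail, WQ_SIZE) < len)
		return -1; /* not enough room, caller must retry later */
	for (unsigned i = 0; i < len; i++)
		wq->buf[(wq->head + i) & (WQ_SIZE - 1)] = item[i];
	wq->head = (wq->head + len) & (WQ_SIZE - 1);
	return 0;
}

int main(void)
{
	struct wq wq = { 0 };
	uint8_t item[8] = { 0 };

	printf("space before: %u\n", CIRC_SPACE(wq.head, wq.tail, WQ_SIZE));
	wq_item_append(&wq, item, sizeof(item));
	printf("space after:  %u\n", CIRC_SPACE(wq.head, wq.tail, WQ_SIZE));
	return 0;
}

Masking with size - 1 keeps both indices in range without a division, which is why the queue size must be a power of two.
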
/linux-master/drivers/gpu/drm/i915/gt/
intel_gt.h
102 static inline struct intel_gt *guc_to_gt(struct intel_guc *guc) argument
104 return container_of(guc, struct intel_gt, uc.guc);
122 static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc) argument
124 return guc_to_gt(guc)->i915;
intel_engine_cs.c
592 if (intel_guc_submission_is_wanted(&engine->gt->uc.guc))
613 if (intel_guc_submission_is_wanted(&engine->gt->uc.guc))
