Searched refs:job (Results 1 - 25 of 189) sorted by relevance

/linux-master/drivers/gpu/drm/xe/
xe_devcoredump.h
13 void xe_devcoredump(struct xe_sched_job *job);
15 static inline void xe_devcoredump(struct xe_sched_job *job) argument
xe_sched_job.h
25 * xe_sched_job_get - get reference to XE schedule job
26 * @job: XE schedule job object
28 * Increment XE schedule job's reference count
30 static inline struct xe_sched_job *xe_sched_job_get(struct xe_sched_job *job) argument
32 kref_get(&job->refcount);
33 return job;
37 * xe_sched_job_put - put reference to XE schedule job
38 @job: XE schedule job object
43 xe_sched_job_put(struct xe_sched_job *job) argument
49 xe_sched_job_is_error(struct xe_sched_job *job) argument
68 xe_sched_job_seqno(struct xe_sched_job *job) argument
74 xe_sched_job_add_migrate_flush(struct xe_sched_job *job, u32 flags) argument
[all...]
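
The xe_sched_job.h hits above show the usual kref-based get/put pair: xe_sched_job_get() bumps job->refcount with kref_get() and returns the job, and xe_sched_job_put() drops the reference. As a hedged illustration of that pattern only (not the xe code; all names below are invented), a reference-counted job type typically looks like this:

/* Hedged example; "demo_job" and friends are invented names, not xe API. */
#include <linux/container_of.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_job {
	struct kref refcount;	/* initialised with kref_init() at allocation */
	/* ... job payload ... */
};

static void demo_job_release(struct kref *ref)
{
	struct demo_job *job = container_of(ref, struct demo_job, refcount);

	kfree(job);
}

static inline struct demo_job *demo_job_get(struct demo_job *job)
{
	kref_get(&job->refcount);	/* mirrors xe_sched_job_get() above */
	return job;
}

static inline void demo_job_put(struct demo_job *job)
{
	kref_put(&job->refcount, demo_job_release);	/* mirrors xe_sched_job_put() */
}

The release callback only runs when the last reference is dropped, which is why get/put pairs like this are used to keep a job alive across the scheduler and hardware completion paths.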
xe_ring_ops_types.h
18 /** @emit_job: Write job to ring */
19 void (*emit_job)(struct xe_sched_job *job);
xe_sched_job.c
65 static void job_free(struct xe_sched_job *job) argument
67 struct xe_exec_queue *q = job->q;
70 kmem_cache_free(xe_exec_queue_is_parallel(job->q) || is_migration ?
71 xe_sched_job_parallel_slab : xe_sched_job_slab, job);
74 static struct xe_device *job_to_xe(struct xe_sched_job *job) argument
76 return gt_to_xe(job->q->gt);
82 struct xe_sched_job *job; local
89 /* only a kernel context can submit a vm-less job */
99 job = job_alloc(xe_exec_queue_is_parallel(q) || is_migration);
100 if (!job)
189 struct xe_sched_job *job = local
200 xe_sched_job_set_error(struct xe_sched_job *job, int error) argument
229 xe_sched_job_started(struct xe_sched_job *job) argument
238 xe_sched_job_completed(struct xe_sched_job *job) argument
251 xe_sched_job_arm(struct xe_sched_job *job) argument
266 xe_sched_job_push(struct xe_sched_job *job) argument
282 xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm) argument
292 xe_sched_job_snapshot_capture(struct xe_sched_job *job) argument
[all...]
xe_ring_ops.c
163 static int emit_render_cache_flush(struct xe_sched_job *job, u32 *dw, int i) argument
165 struct xe_gt *gt = job->q->gt;
181 else if (job->q->class == XE_ENGINE_CLASS_COMPUTE)
211 static u32 get_ppgtt_flag(struct xe_sched_job *job) argument
213 return job->q->vm ? BIT(8) : 0;
217 static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc, argument
221 u32 ppgtt_flag = get_ppgtt_flag(job);
222 struct xe_gt *gt = job->q->gt;
224 if (job->ring_ops_flush_tlb) {
236 if (job
264 __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc, u64 batch_addr, u32 seqno) argument
309 __emit_job_gen12_render_compute(struct xe_sched_job *job, struct xe_lrc *lrc, u64 batch_addr, u32 seqno) argument
358 emit_migration_job_gen12(struct xe_sched_job *job, struct xe_lrc *lrc, u32 seqno) argument
390 emit_job_gen12_gsc(struct xe_sched_job *job) argument
401 emit_job_gen12_copy(struct xe_sched_job *job) argument
417 emit_job_gen12_video(struct xe_sched_job *job) argument
428 emit_job_gen12_render_compute(struct xe_sched_job *job) argument
[all...]
/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_job.c
37 struct amdgpu_job *job = to_amdgpu_job(s_job); local
47 /* Effectively the job is aborted as the device is gone */
55 amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
62 job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
65 ti = amdgpu_vm_get_task_info_pasid(ring->adev, job->pasid);
82 r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
99 unsigned int num_ibs, struct amdgpu_job **job)
104 *job = kzalloc(struct_size(*job, ibs, num_ibs), GFP_KERNEL);
105 if (!*job)
97 amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct drm_sched_entity *entity, void *owner, unsigned int num_ibs, struct amdgpu_job **job) argument
125 amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, struct drm_sched_entity *entity, void *owner, size_t size, enum amdgpu_ib_pool_type pool_type, struct amdgpu_job **job) argument
147 amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds, struct amdgpu_bo *gws, struct amdgpu_bo *oa) argument
164 amdgpu_job_free_resources(struct amdgpu_job *job) argument
184 struct amdgpu_job *job = to_amdgpu_job(s_job); local
197 amdgpu_job_set_gang_leader(struct amdgpu_job *job, struct amdgpu_job *leader) argument
213 amdgpu_job_free(struct amdgpu_job *job) argument
229 amdgpu_job_submit(struct amdgpu_job *job) argument
241 amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring, struct dma_fence **fence) argument
261 struct amdgpu_job *job = to_amdgpu_job(sched_job); local
293 struct amdgpu_job *job; local
[all...]
amdgpu_job.h
42 #define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)
76 /* job_run_counter >= 1 means a resubmit job */
83 static inline struct amdgpu_ring *amdgpu_job_ring(struct amdgpu_job *job) argument
85 return to_amdgpu_ring(job->base.entity->rq->sched);
90 unsigned int num_ibs, struct amdgpu_job **job);
94 struct amdgpu_job **job);
95 void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
97 void amdgpu_job_free_resources(struct amdgpu_job *job);
[all...]
amdgpu_ib.c
110 * @job: job to schedule
127 struct amdgpu_ib *ibs, struct amdgpu_job *job,
140 int vmid = AMDGPU_JOB_GET_VMID(job);
150 /* ring tests don't use a job */
151 if (job) {
152 vm = job->vm;
153 fence_ctx = job->base.s_fence ?
154 job->base.s_fence->scheduled.context : 0;
155 shadow_va = job
126 amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, struct amdgpu_ib *ibs, struct amdgpu_job *job, struct dma_fence **f) argument
[all...]
amdgpu_ids.c
169 struct amdgpu_job *job)
171 return id->gds_base != job->gds_base ||
172 id->gds_size != job->gds_size ||
173 id->gws_base != job->gws_base ||
174 id->gws_size != job->gws_size ||
175 id->oa_base != job->oa_base ||
176 id->oa_size != job->oa_size;
179 /* Check if the id is compatible with the job */
181 struct amdgpu_job *job)
183 return id->pd_gpu_addr == job
168 amdgpu_vmid_gds_switch_needed(struct amdgpu_vmid *id, struct amdgpu_job *job) argument
180 amdgpu_vmid_compatible(struct amdgpu_vmid *id, struct amdgpu_job *job) argument
270 amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_vmid **id, struct dma_fence **fence) argument
331 amdgpu_vmid_grab_used(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_vmid **id, struct dma_fence **fence) argument
394 amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_job *job, struct dma_fence **fence) argument
[all...]
/linux-master/drivers/gpu/drm/imagination/
pvr_job.h
34 /** @ref_count: Refcount for job. */
37 /** @type: Type of job. */
44 * @paired_job: Job paired to this job.
50 * fragment job to execute when the Parameter Manager runs out of memory.
52 * The geometry job should point to the fragment job it's paired with,
53 * and the fragment job should point to the geometry job it's paired with.
63 /** @done_fence: Fence to signal when the job is done. */
87 * @has_pm_ref: True if the job ha
104 pvr_job_get(struct pvr_job *job) argument
119 pvr_job_release_pm_ref(struct pvr_job *job) argument
136 pvr_job_get_pm_ref(struct pvr_job *job) argument
[all...]
pvr_job.c
25 struct pvr_job *job = container_of(kref, struct pvr_job, ref_count); local
27 xa_erase(&job->pvr_dev->job_ids, job->id);
29 pvr_hwrt_data_put(job->hwrt);
30 pvr_context_put(job->ctx);
32 WARN_ON(job->paired_job);
34 pvr_queue_job_cleanup(job);
35 pvr_job_release_pm_ref(job);
37 kfree(job->cmd);
38 kfree(job);
46 pvr_job_put(struct pvr_job *job) argument
68 pvr_job_process_stream(struct pvr_device *pvr_dev, const struct pvr_stream_cmd_defs *cmd_defs, void *stream, u32 stream_size, struct pvr_job *job) argument
86 pvr_fw_cmd_init(struct pvr_device *pvr_dev, struct pvr_job *job, const struct pvr_stream_cmd_defs *stream_def, u64 stream_userptr, u32 stream_len) argument
149 pvr_geom_job_fw_cmd_init(struct pvr_job *job, struct drm_pvr_job *args) argument
178 pvr_frag_job_fw_cmd_init(struct pvr_job *job, struct drm_pvr_job *args) argument
222 pvr_compute_job_fw_cmd_init(struct pvr_job *job, struct drm_pvr_job *args) argument
258 pvr_transfer_job_fw_cmd_init(struct pvr_job *job, struct drm_pvr_job *args) argument
283 pvr_job_fw_cmd_init(struct pvr_job *job, struct drm_pvr_job *args) argument
310 struct pvr_job *job; member in struct:pvr_job_data
414 struct pvr_job *job = NULL; local
548 struct pvr_job *job = job_data[i].job; local
586 update_job_resvs(struct pvr_job *job) argument
637 struct pvr_job *job = job_data[i - 1].job; local
[all...]
pvr_queue.c
230 * Call this function to allocate job CCCB and done fences. This only
294 * pvr_queue_job_fence_init() - Initializes a job done fence object.
299 * a job.
340 static u32 job_cmds_size(struct pvr_job *job, u32 ufo_wait_count) argument
343 * and a command for the job itself.
346 pvr_cccb_get_size_of_cmd_with_hdr(job->cmd_len);
351 * @job: Job to operate on.
355 static unsigned long job_count_remaining_native_deps(struct pvr_job *job) argument
361 xa_for_each(&job->base.dependencies, index, fence) {
376 * pvr_queue_get_job_cccb_fence() - Get the CCCB fence attached to a job
388 pvr_queue_get_job_cccb_fence(struct pvr_queue *queue, struct pvr_job *job) argument
441 pvr_queue_get_job_kccb_fence(struct pvr_queue *queue, struct pvr_job *job) argument
461 pvr_queue_get_paired_frag_job_dep(struct pvr_queue *queue, struct pvr_job *job) argument
498 struct pvr_job *job = container_of(sched_job, struct pvr_job, base); local
601 pvr_queue_submit_job_to_cccb(struct pvr_job *job) argument
688 struct pvr_job *job = container_of(sched_job, struct pvr_job, base); local
763 struct pvr_job *job; local
804 struct pvr_job *job; local
856 struct pvr_job *job = container_of(sched_job, struct pvr_job, base); local
904 struct pvr_job *job, *tmp_job; local
936 struct pvr_job *job; local
1071 pvr_queue_job_init(struct pvr_job *job) argument
1116 pvr_queue_job_arm(struct pvr_job *job) argument
1129 pvr_queue_job_cleanup(struct pvr_job *job) argument
1149 pvr_queue_job_push(struct pvr_job *job) argument
[all...]
/linux-master/drivers/gpu/drm/nouveau/
nouveau_sched.c
26 nouveau_job_init(struct nouveau_job *job, argument
32 INIT_LIST_HEAD(&job->entry);
34 job->file_priv = args->file_priv;
35 job->cli = nouveau_cli(args->file_priv);
36 job->sched = sched;
38 job->sync = args->sync;
39 job->resv_usage = args->resv_usage;
41 job->ops = args->ops;
43 job->in_sync.count = args->in_sync.count;
44 if (job
110 nouveau_job_fini(struct nouveau_job *job) argument
119 nouveau_job_done(struct nouveau_job *job) argument
131 nouveau_job_free(struct nouveau_job *job) argument
140 sync_find_fence(struct nouveau_job *job, struct drm_nouveau_sync *sync, struct dma_fence **fence) argument
165 nouveau_job_add_deps(struct nouveau_job *job) argument
190 nouveau_job_fence_attach_cleanup(struct nouveau_job *job) argument
207 nouveau_job_fence_attach_prepare(struct nouveau_job *job) argument
249 nouveau_job_fence_attach(struct nouveau_job *job) argument
274 nouveau_job_submit(struct nouveau_job *job) argument
347 nouveau_job_run(struct nouveau_job *job) argument
363 struct nouveau_job *job = to_nouveau_job(sched_job); local
372 struct nouveau_job *job = to_nouveau_job(sched_job); local
390 struct nouveau_job *job = to_nouveau_job(sched_job); local
[all...]
nouveau_exec.c
65 * DRM_NOUVEAU_EXEC ioctl is called to submit an exec job.
67 * A VM_BIND job can be executed either synchronously or asynchronously. If
68 * exectued asynchronously, userspace may provide a list of syncobjs this job
70 * VM_BIND job finished execution. If executed synchronously the ioctl will
71 * block until the bind job is finished. For synchronous jobs the kernel will
80 * Since VM_BIND jobs update the GPU's VA space on job submit, EXEC jobs do have
87 nouveau_exec_job_submit(struct nouveau_job *job, argument
90 struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
91 struct nouveau_cli *cli = job->cli;
118 nouveau_exec_job_armed_submit(struct nouveau_job *job, argument
127 nouveau_exec_job_run(struct nouveau_job *job) argument
164 nouveau_exec_job_free(struct nouveau_job *job) argument
177 nouveau_exec_job_timeout(struct nouveau_job *job) argument
203 struct nouveau_exec_job *job; local
268 struct nouveau_exec_job *job; local
[all...]
/linux-master/drivers/gpu/drm/panfrost/
panfrost_dump.h
10 void panfrost_core_dump(struct panfrost_job *job);
panfrost_job.c
106 int panfrost_job_get_slot(struct panfrost_job *job) argument
112 if (job->requirements & PANFROST_JD_REQ_FS)
117 if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
118 if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
119 (job->pfdev->features.nr_core_groups == 2))
121 if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
146 panfrost_get_job_chain_flag(const struct panfrost_job *job) argument
148 struct panfrost_fence *f = to_panfrost_fence(job->done_fence);
150 if (!panfrost_has_hw_feature(job->pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION))
159 struct panfrost_job *job local
179 panfrost_enqueue_job(struct panfrost_device *pfdev, int slot, struct panfrost_job *job) argument
197 panfrost_job_hw_submit(struct panfrost_job *job, int js) argument
261 panfrost_acquire_object_fences(struct drm_gem_object **bos, int bo_count, struct drm_sched_job *job) argument
292 panfrost_job_push(struct panfrost_job *job) argument
332 struct panfrost_job *job = container_of(ref, struct panfrost_job, local
360 panfrost_job_put(struct panfrost_job *job) argument
367 struct panfrost_job *job = to_panfrost_job(sched_job); local
376 struct panfrost_job *job = to_panfrost_job(sched_job); local
426 panfrost_job_handle_err(struct panfrost_device *pfdev, struct panfrost_job *job, unsigned int js) argument
480 panfrost_job_handle_done(struct panfrost_device *pfdev, struct panfrost_job *job) argument
743 struct panfrost_job *job = to_panfrost_job(sched_job); local
945 struct panfrost_job *job = pfdev->jobs[i][j]; local
[all...]
/linux-master/drivers/gpu/host1x/hw/
channel_hw.c
17 #include "../job.h"
50 static void submit_wait(struct host1x_job *job, u32 id, u32 threshold, argument
53 struct host1x_cdma *cdma = &job->channel->cdma;
63 if (job->memory_context)
64 stream_id = job->memory_context->stream_id;
66 stream_id = job->engine_fallback_streamid;
79 host1x_cdma_push_wide(&job->channel->cdma,
80 host1x_opcode_setclass(job->class, 0, 0),
82 host1x_opcode_setstreamid(job->engine_streamid_offset / 4),
113 static void submit_gathers(struct host1x_job *job, u3 argument
163 synchronize_syncpt_base(struct host1x_job *job) argument
212 channel_program_cdma(struct host1x_job *job) argument
281 struct host1x_job *job = container_of(cb, struct host1x_job, fence_cb); local
287 channel_submit(struct host1x_job *job) argument
[all...]
/linux-master/drivers/gpu/host1x/
job.c
21 #include "job.h"
30 struct host1x_job *job = NULL; local
51 mem = job = kzalloc(total, GFP_KERNEL);
52 if (!job)
55 job->enable_firewall = enable_firewall;
57 kref_init(&job->ref);
58 job->channel = ch;
62 job->relocs = num_relocs ? mem : NULL;
64 job->unpins = num_unpins ? mem : NULL;
66 job
77 host1x_job_get(struct host1x_job *job) argument
86 struct host1x_job *job = container_of(ref, struct host1x_job, ref); local
107 host1x_job_put(struct host1x_job *job) argument
113 host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo, unsigned int words, unsigned int offset) argument
126 host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh, bool relative, u32 next_class) argument
141 pin_job(struct host1x *host, struct host1x_job *job) argument
279 do_relocs(struct host1x_job *job, struct host1x_job_gather *g) argument
339 struct host1x_job *job; member in struct:host1x_firewall
524 copy_gathers(struct device *host, struct host1x_job *job, struct device *dev) argument
598 host1x_job_pin(struct host1x_job *job, struct device *dev) argument
653 host1x_job_unpin(struct host1x_job *job) argument
682 host1x_job_dump(struct device *dev, struct host1x_job *job) argument
[all...]
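
The job.c matches above (lines 51-66) show host1x sizing a single allocation for the job plus its trailing reloc and unpin arrays, then carving pointers into it. A hedged sketch of that single-allocation pattern follows; the names are invented, and the real code additionally aligns each sub-array and guards the size arithmetic against overflow:

/* Hedged sketch of the "one kzalloc, several arrays" layout, not host1x code. */
#include <linux/slab.h>
#include <linux/types.h>

struct carved_reloc { u32 cmdbuf_offset; u64 target; };
struct carved_unpin { void *ptr; };

struct carved_job {
	struct carved_reloc *relocs;
	unsigned int num_relocs;
	struct carved_unpin *unpins;
	unsigned int num_unpins;
};

static struct carved_job *carved_job_alloc(unsigned int num_relocs,
					   unsigned int num_unpins)
{
	struct carved_job *job;
	void *mem;

	mem = job = kzalloc(sizeof(*job) +
			    num_relocs * sizeof(struct carved_reloc) +
			    num_unpins * sizeof(struct carved_unpin),
			    GFP_KERNEL);
	if (!job)
		return NULL;

	/* Carve the trailing arrays out of the same allocation. */
	mem += sizeof(*job);
	job->relocs = num_relocs ? mem : NULL;
	job->num_relocs = num_relocs;

	mem += num_relocs * sizeof(struct carved_reloc);
	job->unpins = num_unpins ? mem : NULL;
	job->num_unpins = num_unpins;

	return job;
}

One kfree() then releases the job and both arrays together, which is the point of the pattern.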
/linux-master/block/
bsg-lib.c
31 struct bsg_job *job; local
49 job = blk_mq_rq_to_pdu(rq);
50 reply = job->reply;
51 memset(job, 0, sizeof(*job));
52 job->reply = reply;
53 job->reply_len = SCSI_SENSE_BUFFERSIZE;
54 job->dd_data = job + 1;
56 job
158 struct bsg_job *job = container_of(kref, struct bsg_job, kref); local
169 bsg_job_put(struct bsg_job *job) argument
175 bsg_job_get(struct bsg_job *job) argument
189 bsg_job_done(struct bsg_job *job, int result, unsigned int reply_payload_rcv_len) argument
207 struct bsg_job *job = blk_mq_rq_to_pdu(rq); local
234 struct bsg_job *job = blk_mq_rq_to_pdu(req); local
304 struct bsg_job *job = blk_mq_rq_to_pdu(req); local
315 struct bsg_job *job = blk_mq_rq_to_pdu(req); local
[all...]
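
The bsg-lib.c hits show the per-request setup: the bsg_job lives in the blk-mq PDU, and job->dd_data is pointed at the memory immediately following the structure (job + 1, line 54 above), which is where the LLD's per-command data goes; the extra room is reserved when the queue is set up (via the dd_job_size argument). A hedged illustration, with an invented per-command structure:

/* Hedged illustration only; "struct my_lld_data" is made up. */
#include <linux/bsg-lib.h>

struct my_lld_data {
	int state;
};

static struct my_lld_data *my_job_data(struct bsg_job *job)
{
	/* bsg-lib set dd_data to job + 1, i.e. the memory directly behind
	 * struct bsg_job inside the blk-mq PDU. */
	return job->dd_data;
}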
/linux-master/drivers/md/
dm-kcopyd.c
41 MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
353 * Error state of the job.
373 * Set this to ensure you are notified when the job has
380 * These fields are only used if the job has been split
414 * Functions to push and pop a job onto the head of a given job
420 struct kcopyd_job *job; local
426 list_for_each_entry(job, jobs, list) {
427 if (job->op == REQ_OP_READ ||
428 !(job
446 struct kcopyd_job *job = NULL; local
463 push(struct list_head *jobs, struct kcopyd_job *job) argument
474 push_head(struct list_head *jobs, struct kcopyd_job *job) argument
492 run_complete_job(struct kcopyd_job *job) argument
522 struct kcopyd_job *job = context; local
555 run_io_job(struct kcopyd_job *job) argument
588 run_pages_job(struct kcopyd_job *job) argument
614 struct kcopyd_job *job; local
679 dispatch_job(struct kcopyd_job *job) argument
700 struct kcopyd_job *job = sub_job->master_job; local
782 struct kcopyd_job *job; local
869 struct kcopyd_job *job; local
887 struct kcopyd_job *job = j; local
[all...]
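
The dm-kcopyd comments above refer to splitting one large copy into sub-jobs of a configurable size (the kcopyd_subjob_size_kb module parameter). A stand-alone sketch of that chunking arithmetic, using a placeholder sub-job size rather than the real parameter or data structures:

/* Toy sketch of sub-job chunking; SUB_JOB_SECTORS is a placeholder. */
#include <stdio.h>

#define SUB_JOB_SECTORS 1024ULL	/* 512 KiB in 512-byte sectors */

static void split_into_sub_jobs(unsigned long long start,
				unsigned long long count)
{
	while (count) {
		unsigned long long len =
			count < SUB_JOB_SECTORS ? count : SUB_JOB_SECTORS;

		printf("sub-job: sector %llu, %llu sectors\n", start, len);
		start += len;
		count -= len;
	}
}

int main(void)
{
	split_into_sub_jobs(0, 2500);	/* emits 1024 + 1024 + 452 */
	return 0;
}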
/linux-master/drivers/gpu/drm/v3d/
v3d_submit.c
21 v3d_lock_bo_reservations(struct v3d_job *job, argument
26 ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
30 for (i = 0; i < job->bo_count; i++) {
31 ret = dma_resv_reserve_fences(job->bo[i]->resv, 1);
35 ret = drm_sched_job_add_implicit_dependencies(&job->base,
36 job->bo[i], true);
44 drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
49 * v3d_lookup_bos() - Sets up job
65 v3d_lookup_bos(struct drm_device *dev, struct drm_file *file_priv, struct v3d_job *job, u64 bo_handles, u32 bo_count) argument
89 struct v3d_job *job = container_of(ref, struct v3d_job, refcount); local
110 struct v3d_render_job *job = container_of(ref, struct v3d_render_job, local
121 v3d_job_cleanup(struct v3d_job *job) argument
130 v3d_job_put(struct v3d_job *job) argument
158 v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv, struct v3d_job *job, void (*free)(struct kref *ref), u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue) argument
212 v3d_push_job(struct v3d_job *job) argument
225 v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv, struct v3d_job *job, struct ww_acquire_ctx *acquire_ctx, u32 out_sync, struct v3d_submit_ext *se, struct dma_fence *done_fence) argument
267 v3d_setup_csd_jobs_and_bos(struct drm_file *file_priv, struct v3d_dev *v3d, struct drm_v3d_submit_csd *args, struct v3d_csd_job **job, struct v3d_job **clean_job, struct v3d_submit_ext *se, struct ww_acquire_ctx *acquire_ctx) argument
407 v3d_get_cpu_indirect_csd_params(struct drm_file *file_priv, struct drm_v3d_extension __user *ext, struct v3d_cpu_job *job) argument
449 v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv, struct drm_v3d_extension __user *ext, struct v3d_cpu_job *job) argument
506 v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv, struct drm_v3d_extension __user *ext, struct v3d_cpu_job *job) argument
555 v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv, struct drm_v3d_extension __user *ext, struct v3d_cpu_job *job) argument
619 v3d_get_cpu_reset_performance_params(struct drm_file *file_priv, struct drm_v3d_extension __user *ext, struct v3d_cpu_job *job) argument
687 v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv, struct drm_v3d_extension __user *ext, struct v3d_cpu_job *job) argument
768 v3d_get_extensions(struct drm_file *file_priv, u64 ext_handles, struct v3d_submit_ext *se, struct v3d_cpu_job *job) argument
1010 struct v3d_tfu_job *job = NULL; local
1110 struct v3d_csd_job *job = NULL; local
[all...]
v3d_sched.c
10 * scheduler will round-robin between clients to submit the next job.
13 * jobs when bulk background jobs are queued up, we submit a new job
71 struct v3d_job *job = to_v3d_job(sched_job); local
73 v3d_job_cleanup(job);
79 struct v3d_cpu_job *job = to_cpu_job(sched_job); local
80 struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
81 struct v3d_performance_query_info *performance_query = &job->performance_query;
95 v3d_job_cleanup(&job->base);
99 v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job) argument
101 if (job
110 struct v3d_bin_job *job = to_bin_job(sched_job); local
169 struct v3d_render_job *job = to_render_job(sched_job); local
218 struct v3d_tfu_job *job = to_tfu_job(sched_job); local
261 struct v3d_csd_job *job = to_csd_job(sched_job); local
298 v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job) argument
333 v3d_timestamp_query(struct v3d_cpu_job *job) argument
353 v3d_reset_timestamp_queries(struct v3d_cpu_job *job) argument
387 v3d_copy_query_results(struct v3d_cpu_job *job) argument
428 v3d_reset_performance_queries(struct v3d_cpu_job *job) argument
456 v3d_write_performance_query_result(struct v3d_cpu_job *job, void *data, u32 query) argument
486 v3d_copy_performance_query(struct v3d_cpu_job *job) argument
531 struct v3d_cpu_job *job = to_cpu_job(sched_job); local
569 struct v3d_job *job = to_v3d_job(sched_job); local
632 struct v3d_job *job = to_v3d_job(sched_job); local
649 struct v3d_bin_job *job = to_bin_job(sched_job); local
658 struct v3d_render_job *job = to_render_job(sched_job); local
667 struct v3d_job *job = to_v3d_job(sched_job); local
675 struct v3d_csd_job *job = to_csd_job(sched_job); local
[all...]
/linux-master/drivers/accel/ivpu/
ivpu_job.c
136 * Mark the doorbell as unregistered and reset job queue pointers.
138 * and FW loses job queue state. The next time job queue is used it
181 static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job) argument
183 struct ivpu_device *vdev = job->vdev;
189 /* Check if there is space left in job queue */
192 job->file_priv->ctx.id, job->engine_idx, cmdq->db_id, header->head, tail);
196 entry = &cmdq->jobq->job[tail];
197 entry->batch_buf_addr = job
252 ivpu_job_destroy(struct ivpu_job *job) argument
273 struct ivpu_job *job; local
300 struct ivpu_job *job; local
324 struct ivpu_job *job; local
331 ivpu_job_submit(struct ivpu_job *job) argument
402 ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 *buf_handles, u32 buf_count, u32 commands_offset) argument
472 struct ivpu_job *job; local
[all...]
/linux-master/drivers/gpu/drm/
drm_writeback.c
312 int drm_writeback_prepare_job(struct drm_writeback_job *job) argument
314 struct drm_writeback_connector *connector = job->connector;
320 ret = funcs->prepare_writeback_job(connector, job);
325 job->prepared = true;
331 * drm_writeback_queue_job - Queue a writeback job for later signalling
332 * @wb_connector: The writeback connector to queue a job on
333 * @conn_state: The connector state containing the job to queue
335 * This function adds the job contained in @conn_state to the job_queue for a
336 * writeback connector. It takes ownership of the writeback job and sets the
337 * @conn_state->writeback_job to NULL, and so no access to the job ma
352 struct drm_writeback_job *job; local
364 drm_writeback_cleanup_job(struct drm_writeback_job *job) argument
392 struct drm_writeback_job *job = container_of(work, local
420 struct drm_writeback_job *job; local
[all...]
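
The drm_writeback.c matches document the ownership handover: drm_writeback_queue_job() moves the job from @conn_state->writeback_job onto the connector's job queue and sets the state pointer to NULL. A hedged sketch of how a driver's commit and completion paths might use these helpers; the my_* names are invented:

/* Hedged usage sketch, not a real driver. */
#include <drm/drm_connector.h>
#include <drm/drm_writeback.h>

static void my_wb_atomic_commit(struct drm_writeback_connector *wb_conn,
				struct drm_connector_state *conn_state)
{
	if (!conn_state->writeback_job)
		return;		/* nothing queued for this commit */

	/*
	 * Hands the job over to wb_conn's job_queue and NULLs
	 * conn_state->writeback_job, as described above.
	 */
	drm_writeback_queue_job(wb_conn, conn_state);

	/* ... then program the hardware to write the frame out ... */
}

static void my_wb_done_irq(struct drm_writeback_connector *wb_conn, int status)
{
	/* Completes the oldest queued job and signals its out-fence. */
	drm_writeback_signal_completion(wb_conn, status);
}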
/linux-master/drivers/gpu/drm/ci/
lava-submit.sh
16 mkdir -p results/job-rootfs-overlay/
18 cp artifacts/ci-common/capture-devcoredump.sh results/job-rootfs-overlay/
19 cp artifacts/ci-common/init-*.sh results/job-rootfs-overlay/
20 cp artifacts/ci-common/intel-gpu-freq.sh results/job-rootfs-overlay/
21 cp "$SCRIPTS_DIR"/setup-test-env.sh results/job-rootfs-overlay/
26 artifacts/ci-common/generate-env.sh | tee results/job-rootfs-overlay/set-job-env-vars.sh
29 tar zcf job-rootfs-overlay.tar.gz -C results/job-rootfs-overlay/ .
30 ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" job
[all...]

Completed in 215 milliseconds
