Lines matching refs: job (drivers/gpu/drm/amd/amdgpu/amdgpu_job.c)

37 	struct amdgpu_job *job = to_amdgpu_job(s_job);
47 /* Effectively the job is aborted as the device is gone */
55 amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
62 job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
65 ti = amdgpu_vm_get_task_info_pasid(ring->adev, job->pasid);
82 r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
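The matches above come from the timeout handler, amdgpu_job_timedout(): when a job's hardware fence does not signal in time, the driver first attempts a per-ring soft recovery and only escalates to a full GPU reset if that fails. Below is a condensed, hedged sketch of that escalation; the ENODEV early return behind the comment at line 47, the task-info logging around line 65, and the reset_context flag setup are all omitted, so take the authoritative details from the driver itself.

    /* Hedged sketch of the escalation path, not the full handler. */
    static enum drm_gpu_sched_stat sketch_timedout(struct drm_sched_job *s_job)
    {
            struct amdgpu_job *job = to_amdgpu_job(s_job);
            struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
            struct amdgpu_reset_context reset_context;
            int r;

            /* Step 1: try to un-stick the ring without resetting the GPU. */
            if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent))
                    return DRM_GPU_SCHED_STAT_NOMINAL;

            /* Step 2: full reset built around the hanging job; the
             * reset_context field setup is elided here. */
            memset(&reset_context, 0, sizeof(reset_context));
            r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
            if (r)
                    dev_err(ring->adev->dev, "GPU recovery failed (%d)\n", r);

            return DRM_GPU_SCHED_STAT_NOMINAL;
    }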
99 unsigned int num_ibs, struct amdgpu_job **job)
104 *job = kzalloc(struct_size(*job, ibs, num_ibs), GFP_KERNEL);
105 if (!*job)
112 (*job)->base.sched = &adev->rings[0]->sched;
113 (*job)->vm = vm;
115 amdgpu_sync_create(&(*job)->explicit_sync);
116 (*job)->generation = amdgpu_vm_generation(adev, vm);
117 (*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;
122 return drm_sched_job_init(&(*job)->base, entity, 1, owner);
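Lines 104-122 show the allocation pattern: the job and its trailing array of IBs come from a single kzalloc() sized with struct_size(), and drm_sched_job_init() at line 122 then binds the job to its scheduler entity (the 1 passed there is the job's credit count in recent scheduler versions). A minimal, self-contained illustration of the flexible-array idiom, using a stand-in struct rather than the real struct amdgpu_job layout:

    /* Stand-in struct: not the real struct amdgpu_job. */
    struct sketch_job {
            struct drm_sched_job    base;
            unsigned int            num_ibs;
            struct amdgpu_ib        ibs[];  /* flexible array member */
    };

    static struct sketch_job *sketch_job_alloc(unsigned int num_ibs)
    {
            struct sketch_job *job;

            /* struct_size() == sizeof(*job) + num_ibs * sizeof(job->ibs[0]),
             * with overflow checking, as on line 104 above. */
            job = kzalloc(struct_size(job, ibs, num_ibs), GFP_KERNEL);
            if (!job)
                    return NULL;

            job->num_ibs = num_ibs;
            return job;
    }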
128 struct amdgpu_job **job)
132 r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job);
136 (*job)->num_ibs = 1;
137 r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
140 drm_sched_job_cleanup(&(*job)->base);
141 kfree(*job);
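amdgpu_job_alloc_with_ib() is the convenience wrapper most internal users call: one job, one IB from the requested pool, and on IB allocation failure the half-built scheduler job is cleaned up again (lines 140-141). A hedged caller sketch follows; the signature, the AMDGPU_IB_POOL_DELAYED / AMDGPU_FENCE_OWNER_UNDEFINED constants, and the entity are assumptions modelled on current kernels, and the packet contents are placeholders.

    static struct dma_fence *sketch_submit_one_ib(struct amdgpu_device *adev,
                                                  struct drm_sched_entity *entity)
    {
            struct amdgpu_job *job;
            int r;

            /* One job carrying a single 64-dword IB from the delayed pool. */
            r = amdgpu_job_alloc_with_ib(adev, entity, AMDGPU_FENCE_OWNER_UNDEFINED,
                                         64 * 4, AMDGPU_IB_POOL_DELAYED, &job);
            if (r)
                    return ERR_PTR(r);

            /* Emit packets into job->ibs[0].ptr here and set length_dw to the
             * number of dwords actually written. */
            job->ibs[0].length_dw = 0;

            /* Ownership of the job passes to the scheduler; the returned
             * finished fence is the only handle the caller keeps. */
            return amdgpu_job_submit(job);
    }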
147 void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
151 job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
152 job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
155 job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
156 job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
159 job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
160 job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
164 void amdgpu_job_free_resources(struct amdgpu_job *job)
166 struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
171 if (job->base.s_fence && job->base.s_fence->finished.ops)
172 f = &job->base.s_fence->finished;
173 else if (job->hw_fence.ops)
174 f = &job->hw_fence;
178 for (i = 0; i < job->num_ibs; ++i)
179 amdgpu_ib_free(ring->adev, &job->ibs[i], f);
184 struct amdgpu_job *job = to_amdgpu_job(s_job);
188 amdgpu_sync_free(&job->explicit_sync);
191 if (!job->hw_fence.ops)
192 kfree(job);
194 dma_fence_put(&job->hw_fence);
197 void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
202 WARN_ON(job->gang_submit);
208 if (job != leader)
210 job->gang_submit = fence;
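amdgpu_job_set_gang_leader() stores the leader's scheduled fence on a member job; the leader itself skips taking a reference (the check at line 208) to avoid a reference cycle through its own fence. A hedged sketch of how a gang submission might wire this up, with jobs[], gang_size and leader_idx as placeholder names:

    static void sketch_link_gang(struct amdgpu_job **jobs, unsigned int gang_size,
                                 unsigned int leader_idx)
    {
            unsigned int i;

            /* Every member, the leader included, records the leader's
             * scheduled fence as its gang_submit dependency. */
            for (i = 0; i < gang_size; ++i)
                    amdgpu_job_set_gang_leader(jobs[i], jobs[leader_idx]);
    }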
213 void amdgpu_job_free(struct amdgpu_job *job)
215 if (job->base.entity)
216 drm_sched_job_cleanup(&job->base);
218 amdgpu_job_free_resources(job);
219 amdgpu_sync_free(&job->explicit_sync);
220 if (job->gang_submit != &job->base.s_fence->scheduled)
221 dma_fence_put(job->gang_submit);
223 if (!job->hw_fence.ops)
224 kfree(job);
226 dma_fence_put(&job->hw_fence);
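Both free paths above, amdgpu_job_free_cb() at lines 191-194 and amdgpu_job_free() at lines 223-226, end with the same rule: once the embedded hardware fence has been initialised (hw_fence.ops != NULL), the job's memory belongs to that fence's refcount, so "freeing" the job means dropping a fence reference and letting the fence release path do the kfree(). The extra check at line 220 mirrors the gang rule above: the leader's own scheduled fence was never referenced, so it must not be put. A hedged restatement of the ownership rule, not a third free path in the driver:

    static void sketch_put_job(struct amdgpu_job *job)
    {
            if (!job->hw_fence.ops)
                    kfree(job);                     /* fence never armed */
            else
                    dma_fence_put(&job->hw_fence);  /* fence release frees the job */
    }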
229 struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
233 drm_sched_job_arm(&job->base);
234 f = dma_fence_get(&job->base.s_fence->finished);
235 amdgpu_job_free_resources(job);
236 drm_sched_entity_push_job(&job->base);
241 int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
246 job->base.sched = &ring->sched;
247 r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, fence);
252 amdgpu_job_free(job);
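amdgpu_job_submit_direct() bypasses the scheduler (used where the entity/scheduler path is not wanted): it points job->base.sched at the target ring, schedules the IBs immediately, and on success frees the job itself, so the caller keeps only the returned hardware fence (line 252 sits on the success path; on error the caller still owns the job). A hedged caller sketch:

    static int sketch_direct_submit(struct amdgpu_job *job, struct amdgpu_ring *ring)
    {
            struct dma_fence *fence;
            int r;

            r = amdgpu_job_submit_direct(job, ring, &fence);
            if (r)
                    return r;       /* caller still owns the job here */

            /* Job is already freed; only the hardware fence remains. */
            dma_fence_wait(fence, false);   /* wait-result handling elided */
            dma_fence_put(fence);
            return 0;
    }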
261 struct amdgpu_job *job = to_amdgpu_job(sched_job);
270 if (!fence && job->gang_submit)
271 fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);
273 while (!fence && job->vm && !job->vmid) {
274 r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
284 dma_fence_set_error(&job->base.s_fence->finished, r);
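These matches come from the scheduler's dependency callback, amdgpu_job_prepare_job(): its contract is to return a fence the scheduler must wait on before the job may run, or NULL once everything (gang switch, VMID) is in place; on error the job's finished fence is poisoned with dma_fence_set_error(). A condensed, hedged sketch of that shape, with the error handling simplified relative to the real callback:

    static struct dma_fence *sketch_prepare_job(struct drm_sched_job *sched_job,
                                                struct drm_sched_entity *entity)
    {
            struct amdgpu_job *job = to_amdgpu_job(sched_job);
            struct amdgpu_ring *ring = to_amdgpu_ring(entity->rq->sched);
            struct dma_fence *fence = NULL;
            int r;

            /* Switch the gang first if this job belongs to one. */
            if (job->gang_submit)
                    fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);

            /* Grab a VMID; amdgpu_vmid_grab() may hand back a fence to wait on. */
            while (!fence && job->vm && !job->vmid) {
                    r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
                    if (r) {
                            dma_fence_set_error(&job->base.s_fence->finished, r);
                            break;
                    }
            }

            return fence;   /* NULL means the job is ready to run */
    }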
293 struct amdgpu_job *job;
296 job = to_amdgpu_job(sched_job);
297 finished = &job->base.s_fence->finished;
299 trace_amdgpu_sched_run_job(job);
301 /* Skip job if VRAM is lost and never resubmit gangs */
302 if (job->generation != amdgpu_vm_generation(adev, job->vm) ||
303 (job->job_run_counter && job->gang_submit))
310 r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
318 job->job_run_counter++;
319 amdgpu_job_free_resources(job);
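Finally, run_job: before the IBs are handed to the ring, the job is dropped if its VM generation no longer matches the device's (VRAM was lost across a reset) or if it is a gang job being run a second time, since gangs are never resubmitted; the error is carried on the finished fence instead of executing stale work. A hedged sketch of that guard and of the bookkeeping that follows amdgpu_ib_schedule(), with tracing and the skip-path debug print omitted:

    static struct dma_fence *sketch_run_job(struct drm_sched_job *sched_job)
    {
            struct amdgpu_job *job = to_amdgpu_job(sched_job);
            struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
            struct dma_fence *finished = &job->base.s_fence->finished;
            struct amdgpu_device *adev = ring->adev;
            struct dma_fence *fence = NULL;
            int r = 0;

            /* Skip job if VRAM is lost (generation mismatch) and never
             * resubmit gangs. */
            if (job->generation != amdgpu_vm_generation(adev, job->vm) ||
                (job->job_run_counter && job->gang_submit))
                    dma_fence_set_error(finished, -ECANCELED);

            if (finished->error >= 0) {
                    r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
                                           &fence);
                    if (r)
                            dev_err(adev->dev, "Error scheduling IBs (%d)\n", r);
            }

            job->job_run_counter++;
            amdgpu_job_free_resources(job);
            return r ? ERR_PTR(r) : fence;
    }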