Lines matching refs:id (amdgpu VMID management helpers; numbers are source line numbers)

156  * @id: VMID structure
161 struct amdgpu_vmid *id)
163 return id->current_gpu_reset_count !=
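
The comparison cut off at line 163 is a GPU-reset check: each VMID caches the device's reset counter from when it was last used, and the helper reports whether a reset has happened since. A minimal sketch of that helper, reconstructed from the matched lines; the function name, the adev parameter and the gpu_reset_counter field are not in the listing and are assumptions:

bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,      /* assumed name */
                               struct amdgpu_vmid *id)
{
        /* A mismatch means the GPU was reset after this id was last
         * used, so any state cached in it can no longer be trusted. */
        return id->current_gpu_reset_count !=
                atomic_read(&adev->gpu_reset_counter);           /* assumed field */
}
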
168 static bool amdgpu_vmid_gds_switch_needed(struct amdgpu_vmid *id,
171 return id->gds_base != job->gds_base ||
172 id->gds_size != job->gds_size ||
173 id->gws_base != job->gws_base ||
174 id->gws_size != job->gws_size ||
175 id->oa_base != job->oa_base ||
176 id->oa_size != job->oa_size;
179 /* Check if the id is compatible with the job */
180 static bool amdgpu_vmid_compatible(struct amdgpu_vmid *id,
183 return id->pd_gpu_addr == job->vm_pd_addr &&
184 !amdgpu_vmid_gds_switch_needed(id, job);
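
Lines 168-184 give the two id/job comparison helpers almost verbatim; only the second parameter of each signature is cut off by the match. A sketch of both, assuming the missing parameter is struct amdgpu_job *job:

static bool amdgpu_vmid_gds_switch_needed(struct amdgpu_vmid *id,
                                          struct amdgpu_job *job)
{
        /* A GDS switch is needed when the job's GDS/GWS/OA ranges
         * differ from the ones currently programmed for this id. */
        return id->gds_base != job->gds_base ||
               id->gds_size != job->gds_size ||
               id->gws_base != job->gws_base ||
               id->gws_size != job->gws_size ||
               id->oa_base != job->oa_base ||
               id->oa_size != job->oa_size;
}

/* Check if the id is compatible with the job */
static bool amdgpu_vmid_compatible(struct amdgpu_vmid *id,
                                   struct amdgpu_job *job)
{
        /* Compatible means: same page directory and no GDS switch. */
        return id->pd_gpu_addr == job->vm_pd_addr &&
               !amdgpu_vmid_gds_switch_needed(id, job);
}
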
192 * @fence: fence to wait for if no id could be grabbed
262 * @vm: vm to allocate id for
265 * @id: resulting VMID
266 * @fence: fence to wait for if no id could be grabbed
273 struct amdgpu_vmid **id,
284 *id = id_mgr->reserved;
285 if ((*id)->owner != vm->immediate.fence_context ||
286 !amdgpu_vmid_compatible(*id, job) ||
287 (*id)->flushed_updates < updates ||
288 !(*id)->last_flush ||
289 ((*id)->last_flush->context != fence_context &&
290 !dma_fence_is_signaled((*id)->last_flush))) {
298 (*id)->pd_gpu_addr = 0;
299 tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
301 *id = NULL;
311 r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished);
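
Lines 262-311 are the reserved-VMID grab path. The sketch below condenses its control flow from the matched lines; the local setup (id_mgr, fence_context, updates) and the early-return bookkeeping are assumptions, and the needs_flush handling is omitted:

static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
                                     struct amdgpu_ring *ring,
                                     struct amdgpu_job *job,
                                     struct amdgpu_vmid **id,
                                     struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;                    /* assumed */
        struct amdgpu_vmid_mgr *id_mgr =
                &adev->vm_manager.id_mgr[ring->vm_hub];             /* assumed */
        uint64_t fence_context = adev->fence_context + ring->idx;   /* assumed */
        uint64_t updates = amdgpu_vm_tlb_seq(vm);                   /* assumed */
        struct dma_fence *tmp;
        int r;

        *id = id_mgr->reserved;
        /* Reuse the reserved id only if it still belongs to this VM,
         * matches the job, has seen all page-table updates and has a
         * usable last_flush fence. */
        if ((*id)->owner != vm->immediate.fence_context ||
            !amdgpu_vmid_compatible(*id, job) ||
            (*id)->flushed_updates < updates ||
            !(*id)->last_flush ||
            ((*id)->last_flush->context != fence_context &&
             !dma_fence_is_signaled((*id)->last_flush))) {
                /* Otherwise force a flush and, if a previous user is
                 * still active, make the caller wait on it first. */
                (*id)->pd_gpu_addr = 0;
                tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
                if (tmp) {
                        *id = NULL;
                        *fence = dma_fence_get(tmp);                /* assumed */
                        return 0;
                }
        }

        /* Remember this submission as a user of the VMID. */
        r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished);
        return r;
}
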
323 * @vm: vm to allocate id for
326 * @id: resulting VMID
327 * @fence: fence to wait for if no id could be grabbed
334 struct amdgpu_vmid **id,
347 list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
351 if ((*id)->owner != vm->immediate.fence_context)
354 if (!amdgpu_vmid_compatible(*id, job))
357 if (!(*id)->last_flush ||
358 ((*id)->last_flush->context != fence_context &&
359 !dma_fence_is_signaled((*id)->last_flush)))
362 if ((*id)->flushed_updates < updates)
371 r = amdgpu_sync_fence(&(*id)->active,
380 *id = NULL;
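
Lines 323-380 are the fallback that tries to reuse a VMID this VM already owns. Condensed from the matched lines; the setup locals and the needs_flush bookkeeping are assumptions, and bail-outs not visible in the listing are omitted:

static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
                                 struct amdgpu_ring *ring,
                                 struct amdgpu_job *job,
                                 struct amdgpu_vmid **id,
                                 struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;                    /* assumed */
        struct amdgpu_vmid_mgr *id_mgr =
                &adev->vm_manager.id_mgr[ring->vm_hub];             /* assumed */
        uint64_t fence_context = adev->fence_context + ring->idx;   /* assumed */
        uint64_t updates = amdgpu_vm_tlb_seq(vm);                   /* assumed */
        int r;

        /* Walk the LRU from most to least recently used and look for
         * a VMID owned by this VM that still fits the job. */
        list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
                bool needs_flush = false;                           /* assumed */

                if ((*id)->owner != vm->immediate.fence_context)
                        continue;       /* belongs to another VM */

                if (!amdgpu_vmid_compatible(*id, job))
                        continue;       /* different PD or GDS setup */

                /* Usable, but may still need a VM flush first. */
                if (!(*id)->last_flush ||
                    ((*id)->last_flush->context != fence_context &&
                     !dma_fence_is_signaled((*id)->last_flush)))
                        needs_flush = true;

                if ((*id)->flushed_updates < updates)
                        needs_flush = true;

                /* Remember this submission as a user of the VMID. */
                r = amdgpu_sync_fence(&(*id)->active,
                                      &job->base.s_fence->finished);
                if (r)
                        return r;

                job->vm_needs_flush |= needs_flush;                 /* assumed */
                return 0;
        }

        *id = NULL;     /* nothing reusable; caller falls back to an idle id */
        return 0;
}
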
387 * @vm: vm to allocate id for
390 * @fence: fence to wait for if no id could be grabbed
392 * Allocate an id for the vm, adding fences to the sync obj as necessary.
401 struct amdgpu_vmid *id = NULL;
410 r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
411 if (r || !id)
414 r = amdgpu_vmid_grab_used(vm, ring, job, &id, fence);
418 if (!id) {
420 id = idle;
423 r = amdgpu_sync_fence(&id->active,
431 list_move_tail(&id->list, &id_mgr->ids_lru);
434 job->gds_switch_needed = amdgpu_vmid_gds_switch_needed(id, job);
436 id->flushed_updates = amdgpu_vm_tlb_seq(vm);
437 dma_fence_put(id->last_flush);
438 id->last_flush = NULL;
440 job->vmid = id - id_mgr->ids;
443 id->gds_base = job->gds_base;
444 id->gds_size = job->gds_size;
445 id->gws_base = job->gws_base;
446 id->gws_size = job->gws_size;
447 id->oa_base = job->oa_base;
448 id->oa_size = job->oa_size;
449 id->pd_gpu_addr = job->vm_pd_addr;
450 id->owner = vm->immediate.fence_context;
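
Lines 387-450 tie the pieces together: try the reserved id, then a used one, then fall back to an idle id, and finally program the chosen id for the job. A condensed sketch of that flow, reconstructed from the matched lines; the id-manager lookup, the idle-id grab, the reserved-VMID test, the flush guard and the locking/error handling are assumptions or are omitted:

/* Allocate an id for the vm, adding fences to the sync obj as necessary. */
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                     struct amdgpu_job *job, struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;                    /* assumed */
        struct amdgpu_vmid_mgr *id_mgr =
                &adev->vm_manager.id_mgr[ring->vm_hub];             /* assumed */
        struct amdgpu_vmid *idle = NULL, *id = NULL;
        int r;

        /* Assumed helper: pick an idle id from the LRU up front so
         * there is always a fallback (or a fence to wait for). */
        r = amdgpu_vmid_grab_idle(ring, &idle, fence);              /* assumed */
        if (r || !idle)
                return r;

        if (vm->reserved_vmid[ring->vm_hub]) {                      /* assumed test */
                r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
                if (r || !id)
                        return r;
        } else {
                r = amdgpu_vmid_grab_used(vm, ring, job, &id, fence);
                if (r)
                        return r;

                if (!id) {
                        /* Still nothing reusable, take the idle one. */
                        id = idle;
                        r = amdgpu_sync_fence(&id->active,
                                              &job->base.s_fence->finished);
                        if (r)
                                return r;
                }
                list_move_tail(&id->list, &id_mgr->ids_lru);
        }

        job->gds_switch_needed = amdgpu_vmid_gds_switch_needed(id, job);

        if (job->vm_needs_flush) {                                  /* assumed guard */
                /* The old flush fence is stale once a flush is due. */
                id->flushed_updates = amdgpu_vm_tlb_seq(vm);
                dma_fence_put(id->last_flush);
                id->last_flush = NULL;
        }

        /* The hardware VMID is simply the index into the id array. */
        job->vmid = id - id_mgr->ids;

        /* Program the id with the job's view of the world. */
        id->gds_base = job->gds_base;
        id->gds_size = job->gds_size;
        id->gws_base = job->gws_base;
        id->gws_size = job->gws_size;
        id->oa_base = job->oa_base;
        id->oa_size = job->oa_size;
        id->pd_gpu_addr = job->vm_pd_addr;
        id->owner = vm->immediate.fence_context;

        return 0;
}
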
468 struct amdgpu_vmid *id;
470 id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid,
473 list_del_init(&id->list);
474 id_mgr->reserved = id;
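
Lines 468-474 come from the reserved-VMID allocation: the head of the LRU (the least recently used id) is taken off the list so normal round-robin assignment can no longer hand it out, and it is stashed as id_mgr->reserved. A short sketch; the function name, signature and locking are assumptions:

int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,          /* assumed */
                               unsigned vmhub)
{
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

        mutex_lock(&id_mgr->lock);                                   /* assumed */
        if (!id_mgr->reserved) {
                struct amdgpu_vmid *id;

                id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid,
                                      list);
                /* Remove the id from normal round-robin handling. */
                list_del_init(&id->list);
                id_mgr->reserved = id;
        }
        mutex_unlock(&id_mgr->lock);                                 /* assumed */
        return 0;
}
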
509 struct amdgpu_vmid *id = &id_mgr->ids[vmid];
512 id->owner = 0;
513 id->gds_base = 0;
514 id->gds_size = 0;
515 id->gws_base = 0;
516 id->gws_size = 0;
517 id->oa_base = 0;
518 id->oa_size = 0;
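
Lines 509-518 reset a single VMID: the owner and the GDS/GWS/OA state are cleared, so the id will not be considered compatible with any job until it is reassigned. A sketch; the enclosing function name, signature and locking are assumptions:

void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,   /* assumed */
                       unsigned vmid)
{
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        struct amdgpu_vmid *id = &id_mgr->ids[vmid];

        mutex_lock(&id_mgr->lock);                                   /* assumed */
        id->owner = 0;
        id->gds_base = 0;
        id->gds_size = 0;
        id->gws_base = 0;
        id->gws_size = 0;
        id->oa_base = 0;
        id->oa_size = 0;
        mutex_unlock(&id_mgr->lock);                                 /* assumed */
}
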
594 struct amdgpu_vmid *id = &id_mgr->ids[j];
596 amdgpu_sync_free(&id->active);
597 dma_fence_put(id->last_flush);
598 dma_fence_put(id->pasid_mapping);
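
Lines 594-598 are the per-id teardown in the id-manager fini path: the active sync object is freed and the remaining fence references are dropped. A sketch of the surrounding loop; the function name, the hub/id loop bounds and the mutex_destroy are assumptions:

void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)                /* assumed */
{
        unsigned i, j;

        for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {                    /* assumed bound */
                struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[i];

                mutex_destroy(&id_mgr->lock);                        /* assumed */
                for (j = 0; j < AMDGPU_NUM_VMID; ++j) {              /* assumed bound */
                        struct amdgpu_vmid *id = &id_mgr->ids[j];

                        /* Drop everything the id still references. */
                        amdgpu_sync_free(&id->active);
                        dma_fence_put(id->last_flush);
                        dma_fence_put(id->pasid_mapping);
                }
        }
}
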