Lines Matching defs:eb (drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c)

321 static int eb_parse(struct i915_execbuffer *eb);
322 static int eb_pin_engine(struct i915_execbuffer *eb, bool throttle);
323 static void eb_unpin_engine(struct i915_execbuffer *eb);
324 static void eb_capture_release(struct i915_execbuffer *eb);
326 static bool eb_use_cmdparser(const struct i915_execbuffer *eb)
328 return intel_engine_requires_cmd_parser(eb->context->engine) ||
329 (intel_engine_using_cmd_parser(eb->context->engine) &&
330 eb->args->batch_len);
333 static int eb_create(struct i915_execbuffer *eb)
335 if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
336 unsigned int size = 1 + ilog2(eb->buffer_count);
362 eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
364 if (eb->buckets)
371 eb->lut_size = size;
373 eb->lut_size = -eb->buffer_count;
439 eb_pin_vma(struct i915_execbuffer *eb,
457 err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, pin_flags);
466 err = i915_vma_pin_ww(vma, &eb->ww,
501 eb_validate_vma(struct i915_execbuffer *eb,
509 GRAPHICS_VER(eb->i915) >= 12 && !IS_TIGERLAKE(eb->i915))
512 if (unlikely(entry->flags & eb->invalid_flags))
541 if (!eb->reloc_cache.has_fence) {
545 eb->reloc_cache.needs_unfenced) &&
554 is_batch_buffer(struct i915_execbuffer *eb, unsigned int buffer_idx)
556 return eb->args->flags & I915_EXEC_BATCH_FIRST ?
557 buffer_idx < eb->num_batches :
558 buffer_idx >= eb->args->buffer_count - eb->num_batches;
562 eb_add_vma(struct i915_execbuffer *eb,
567 struct drm_i915_private *i915 = eb->i915;
568 struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
569 struct eb_vma *ev = &eb->vma[i];
575 if (eb->lut_size > 0) {
578 &eb->buckets[hash_32(entry->handle,
579 eb->lut_size)]);
583 list_add_tail(&ev->reloc_link, &eb->relocs);
594 if (is_batch_buffer(eb, i)) {
598 if (eb->reloc_cache.has_fence)
601 eb->batches[*current_batch] = ev;
610 eb->batch_start_offset,
611 eb->args->batch_len,
617 if (eb->args->batch_len == 0)
618 eb->batch_len[*current_batch] = ev->vma->size -
619 eb->batch_start_offset;
621 eb->batch_len[*current_batch] = eb->args->batch_len;
622 if (unlikely(eb->batch_len[*current_batch] == 0)) { /* impossible! */
656 static int eb_reserve_vma(struct i915_execbuffer *eb,
671 err = i915_vma_pin_ww(vma, &eb->ww,
679 eb->args->flags |= __EXEC_HAS_RELOC;
697 static bool eb_unbind(struct i915_execbuffer *eb, bool force)
699 const unsigned int count = eb->buffer_count;
705 INIT_LIST_HEAD(&eb->unbound);
709 struct eb_vma *ev = &eb->vma[i];
721 list_add(&ev->bind_link, &eb->unbound);
724 list_add_tail(&ev->bind_link, &eb->unbound);
732 list_splice_tail(&last, &eb->unbound);
736 static int eb_reserve(struct i915_execbuffer *eb)
780 eb_unbind(eb, pass >= 2);
783 err = mutex_lock_interruptible(&eb->context->vm->mutex);
785 err = i915_gem_evict_vm(eb->context->vm, &eb->ww, NULL);
786 mutex_unlock(&eb->context->vm->mutex);
794 err = mutex_lock_interruptible(&eb->context->vm->mutex);
798 err = i915_gem_evict_vm(eb->context->vm, &eb->ww, &busy_bo);
799 mutex_unlock(&eb->context->vm->mutex);
801 err = i915_gem_object_lock(busy_bo, &eb->ww);
811 list_for_each_entry(ev, &eb->unbound, bind_link) {
812 err = eb_reserve_vma(eb, ev, pin_flags);
824 static int eb_select_context(struct i915_execbuffer *eb)
828 ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
832 eb->gem_context = ctx;
834 eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
839 static int __eb_add_lut(struct i915_execbuffer *eb,
842 struct i915_gem_context *ctx = eb->gem_context;
867 if (idr_find(&eb->file->object_idr, handle) == obj) {
889 static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
891 struct i915_address_space *vm = eb->context->vm;
899 vma = radix_tree_lookup(&eb->gem_context->handles_vma, handle);
906 obj = i915_gem_object_lookup(eb->file, handle);
917 if (i915_gem_context_uses_protected_content(eb->gem_context) &&
919 err = intel_pxp_key_check(eb->i915->pxp, obj, true);
932 err = __eb_add_lut(eb, handle, vma);
942 static int eb_lookup_vmas(struct i915_execbuffer *eb)
947 INIT_LIST_HEAD(&eb->relocs);
949 for (i = 0; i < eb->buffer_count; i++) {
952 vma = eb_lookup_vma(eb, eb->exec[i].handle);
958 err = eb_validate_vma(eb, &eb->exec[i], vma);
964 err = eb_add_vma(eb, &current_batch, i, vma);
971 if (i + 1 < eb->buffer_count) {
978 eb->vma[i + 1].vma = NULL;
984 eb->vma[i].flags |= __EXEC_OBJECT_USERPTR_INIT;
985 eb->args->flags |= __EXEC_USERPTR_USED;
992 eb->vma[i].vma = NULL;
996 static int eb_lock_vmas(struct i915_execbuffer *eb)
1001 for (i = 0; i < eb->buffer_count; i++) {
1002 struct eb_vma *ev = &eb->vma[i];
1005 err = i915_gem_object_lock(vma->obj, &eb->ww);
1013 static int eb_validate_vmas(struct i915_execbuffer *eb)
1018 INIT_LIST_HEAD(&eb->unbound);
1020 err = eb_lock_vmas(eb);
1024 for (i = 0; i < eb->buffer_count; i++) {
1025 struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
1026 struct eb_vma *ev = &eb->vma[i];
1029 err = eb_pin_vma(eb, entry, ev);
1036 eb->args->flags |= __EXEC_HAS_RELOC;
1041 list_add_tail(&ev->bind_link, &eb->unbound);
1050 err = dma_resv_reserve_fences(vma->obj->base.resv, eb->num_batches);
1055 eb_vma_misplaced(&eb->exec[i], vma, ev->flags));
1058 if (!list_empty(&eb->unbound))
1059 return eb_reserve(eb);
1065 eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
1067 if (eb->lut_size < 0) {
1068 if (handle >= -eb->lut_size)
1070 return &eb->vma[handle];
1075 head = &eb->buckets[hash_32(handle, eb->lut_size)];
1084 static void eb_release_vmas(struct i915_execbuffer *eb, bool final)
1086 const unsigned int count = eb->buffer_count;
1090 struct eb_vma *ev = &eb->vma[i];
1102 eb_capture_release(eb);
1103 eb_unpin_engine(eb);
1106 static void eb_destroy(const struct i915_execbuffer *eb)
1108 if (eb->lut_size > 0)
1109 kfree(eb->buckets);
1193 static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer *eb)
1269 struct i915_execbuffer *eb,
1273 struct reloc_cache *cache = &eb->reloc_cache;
1304 vma = i915_gem_object_ggtt_pin_ww(obj, &eb->ww, NULL, 0, 0,
1351 struct i915_execbuffer *eb,
1354 struct reloc_cache *cache = &eb->reloc_cache;
1362 vaddr = reloc_iomap(vma, eb, page);
1394 struct i915_execbuffer *eb,
1399 bool wide = eb->reloc_cache.use_64bit_reloc;
1403 vaddr = reloc_vaddr(vma, eb,
1411 eb->reloc_cache.vaddr);
1424 eb_relocate_entry(struct i915_execbuffer *eb,
1428 struct drm_i915_private *i915 = eb->i915;
1433 target = eb_get_vma(eb, reloc->target_handle);
1470 GRAPHICS_VER(eb->i915) == 6 &&
1474 reloc_cache_unmap(&eb->reloc_cache);
1480 reloc_cache_remap(&eb->reloc_cache, ev->vma->obj);
1496 ev->vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
1523 return relocate_entry(ev->vma, reloc, eb, target->vma);
1526 static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
1570 u64 offset = eb_relocate_entry(eb, ev, r);
1606 reloc_cache_reset(&eb->reloc_cache, eb);
1611 eb_relocate_vma_slow(struct i915_execbuffer *eb, struct eb_vma *ev)
1620 u64 offset = eb_relocate_entry(eb, ev, &relocs[i]);
1629 reloc_cache_reset(&eb->reloc_cache, eb);
1660 static int eb_copy_relocations(const struct i915_execbuffer *eb)
1663 const unsigned int count = eb->buffer_count;
1668 const unsigned int nreloc = eb->exec[i].relocation_count;
1676 err = check_relocations(&eb->exec[i]);
1680 urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
1722 eb->exec[i].relocs_ptr = (uintptr_t)relocs;
1734 relocs = u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
1735 if (eb->exec[i].relocation_count)
1741 static int eb_prefault_relocations(const struct i915_execbuffer *eb)
1743 const unsigned int count = eb->buffer_count;
1749 err = check_relocations(&eb->exec[i]);
1757 static int eb_reinit_userptr(struct i915_execbuffer *eb)
1759 const unsigned int count = eb->buffer_count;
1763 if (likely(!(eb->args->flags & __EXEC_USERPTR_USED)))
1767 struct eb_vma *ev = &eb->vma[i];
1782 static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb)
1795 eb_release_vmas(eb, false);
1796 i915_gem_ww_ctx_fini(&eb->ww);
1812 err = eb_prefault_relocations(eb);
1814 err = eb_copy_relocations(eb);
1822 err = eb_reinit_userptr(eb);
1824 i915_gem_ww_ctx_init(&eb->ww, true);
1830 err = eb_pin_engine(eb, false);
1834 err = eb_validate_vmas(eb);
1838 GEM_BUG_ON(!eb->batches[0]);
1840 list_for_each_entry(ev, &eb->relocs, reloc_link) {
1842 err = eb_relocate_vma(eb, ev);
1846 err = eb_relocate_vma_slow(eb, ev);
1862 err = eb_parse(eb);
1875 eb_release_vmas(eb, false);
1876 err = i915_gem_ww_ctx_backoff(&eb->ww);
1886 const unsigned int count = eb->buffer_count;
1891 &eb->exec[i];
1905 static int eb_relocate_parse(struct i915_execbuffer *eb)
1911 err = eb_pin_engine(eb, throttle);
1922 err = eb_validate_vmas(eb);
1929 if (eb->args->flags & __EXEC_HAS_RELOC) {
1932 list_for_each_entry(ev, &eb->relocs, reloc_link) {
1933 err = eb_relocate_vma(eb, ev);
1945 err = eb_parse(eb);
1949 eb_release_vmas(eb, false);
1950 err = i915_gem_ww_ctx_backoff(&eb->ww);
1958 err = eb_relocate_parse_slow(eb);
1967 eb->args->flags &= ~__EXEC_HAS_RELOC;
1988 eb_find_first_request_added(struct i915_execbuffer *eb)
1992 for_each_batch_add_order(eb, i)
1993 if (eb->requests[i])
1994 return eb->requests[i];
2004 static int eb_capture_stage(struct i915_execbuffer *eb)
2006 const unsigned int count = eb->buffer_count;
2010 struct eb_vma *ev = &eb->vma[i];
2017 if (i915_gem_context_is_recoverable(eb->gem_context) &&
2018 (IS_DGFX(eb->i915) || GRAPHICS_VER_FULL(eb->i915) > IP_VER(12, 0)))
2021 for_each_batch_create_order(eb, j) {
2028 capture->next = eb->capture_lists[j];
2030 eb->capture_lists[j] = capture;
2038 static void eb_capture_commit(struct i915_execbuffer *eb)
2042 for_each_batch_create_order(eb, j) {
2043 struct i915_request *rq = eb->requests[j];
2048 rq->capture_list = eb->capture_lists[j];
2049 eb->capture_lists[j] = NULL;
2057 static void eb_capture_release(struct i915_execbuffer *eb)
2061 for_each_batch_create_order(eb, j) {
2062 if (eb->capture_lists[j]) {
2063 i915_request_free_capture_list(eb->capture_lists[j]);
2064 eb->capture_lists[j] = NULL;
2069 static void eb_capture_list_clear(struct i915_execbuffer *eb)
2071 memset(eb->capture_lists, 0, sizeof(eb->capture_lists));
2076 static int eb_capture_stage(struct i915_execbuffer *eb)
2081 static void eb_capture_commit(struct i915_execbuffer *eb)
2085 static void eb_capture_release(struct i915_execbuffer *eb)
2089 static void eb_capture_list_clear(struct i915_execbuffer *eb)
2095 static int eb_move_to_gpu(struct i915_execbuffer *eb)
2097 const unsigned int count = eb->buffer_count;
2102 struct eb_vma *ev = &eb->vma[i];
2140 (eb_find_first_request_added(eb), obj,
2144 for_each_batch_add_order(eb, j) {
2147 if (!eb->requests[j])
2150 err = _i915_vma_move_to_active(vma, eb->requests[j],
2152 eb->composite_fence ?
2153 eb->composite_fence :
2154 &eb->requests[j]->fence,
2161 if (!err && (eb->args->flags & __EXEC_USERPTR_USED)) {
2163 struct eb_vma *ev = &eb->vma[i];
2180 intel_gt_chipset_flush(eb->gt);
2181 eb_capture_commit(eb);
2186 for_each_batch_create_order(eb, j) {
2187 if (!eb->requests[j])
2190 i915_request_set_error_once(eb->requests[j], err);
2247 shadow_batch_pin(struct i915_execbuffer *eb,
2259 err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, flags | PIN_VALIDATE);
2266 static struct i915_vma *eb_dispatch_secure(struct i915_execbuffer *eb, struct i915_vma *vma)
2272 if (eb->batch_flags & I915_DISPATCH_SECURE)
2273 return i915_gem_object_ggtt_pin_ww(vma->obj, &eb->ww, NULL, 0, 0, PIN_VALIDATE);
2278 static int eb_parse(struct i915_execbuffer *eb)
2280 struct drm_i915_private *i915 = eb->i915;
2281 struct intel_gt_buffer_pool_node *pool = eb->batch_pool;
2286 if (!eb_use_cmdparser(eb)) {
2287 batch = eb_dispatch_secure(eb, eb->batches[0]->vma);
2294 if (intel_context_is_parallel(eb->context))
2297 len = eb->batch_len[0];
2298 if (!CMDPARSER_USES_GGTT(eb->i915)) {
2303 if (!eb->context->vm->has_read_only) {
2311 if (unlikely(len < eb->batch_len[0])) /* last paranoid check of overflow */
2315 pool = intel_gt_get_buffer_pool(eb->gt, len,
2319 eb->batch_pool = pool;
2322 err = i915_gem_object_lock(pool->obj, &eb->ww);
2326 shadow = shadow_batch_pin(eb, pool->obj, eb->context->vm, PIN_USER);
2335 if (CMDPARSER_USES_GGTT(eb->i915)) {
2338 shadow = shadow_batch_pin(eb, pool->obj,
2339 &eb->gt->ggtt->vm,
2346 eb->batch_flags |= I915_DISPATCH_SECURE;
2349 batch = eb_dispatch_secure(eb, shadow);
2357 err = intel_engine_cmd_parser(eb->context->engine,
2358 eb->batches[0]->vma,
2359 eb->batch_start_offset,
2360 eb->batch_len[0],
2365 eb->batches[0] = &eb->vma[eb->buffer_count++];
2366 eb->batches[0]->vma = i915_vma_get(shadow);
2367 eb->batches[0]->flags = __EXEC_OBJECT_HAS_PIN;
2369 eb->trampoline = trampoline;
2370 eb->batch_start_offset = 0;
2374 if (intel_context_is_parallel(eb->context))
2377 eb->batches[0] = &eb->vma[eb->buffer_count++];
2378 eb->batches[0]->flags = __EXEC_OBJECT_HAS_PIN;
2379 eb->batches[0]->vma = i915_vma_get(batch);
2384 static int eb_request_submit(struct i915_execbuffer *eb,
2394 if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
2414 eb->batch_start_offset,
2416 eb->batch_flags);
2420 if (eb->trampoline) {
2422 GEM_BUG_ON(eb->batch_start_offset);
2424 i915_vma_offset(eb->trampoline) +
2433 static int eb_submit(struct i915_execbuffer *eb)
2438 err = eb_move_to_gpu(eb);
2440 for_each_batch_create_order(eb, i) {
2441 if (!eb->requests[i])
2444 trace_i915_request_queue(eb->requests[i], eb->batch_flags);
2446 err = eb_request_submit(eb, eb->requests[i],
2447 eb->batches[i]->vma,
2448 eb->batch_len[i]);
2480 static struct i915_request *eb_throttle(struct i915_execbuffer *eb, struct intel_context *ce)
2514 static int eb_pin_timeline(struct i915_execbuffer *eb, struct intel_context *ce,
2534 rq = eb_throttle(eb, ce);
2538 bool nonblock = eb->file->filp->f_flags & O_NONBLOCK;
2565 static int eb_pin_engine(struct i915_execbuffer *eb, bool throttle)
2567 struct intel_context *ce = eb->context, *child;
2571 GEM_BUG_ON(eb->args->flags & __EXEC_ENGINE_PINNED);
2581 err = intel_context_pin_ww(ce, &eb->ww);
2585 err = intel_context_pin_ww(child, &eb->ww);
2590 err = eb_pin_timeline(eb, child, throttle);
2595 err = eb_pin_timeline(eb, ce, throttle);
2599 eb->args->flags |= __EXEC_ENGINE_PINNED;
2616 static void eb_unpin_engine(struct i915_execbuffer *eb)
2618 struct intel_context *ce = eb->context, *child;
2620 if (!(eb->args->flags & __EXEC_ENGINE_PINNED))
2623 eb->args->flags &= ~__EXEC_ENGINE_PINNED;
2641 eb_select_legacy_ring(struct i915_execbuffer *eb)
2643 struct drm_i915_private *i915 = eb->i915;
2644 struct drm_i915_gem_execbuffer2 *args = eb->args;
2660 bsd_idx = gen8_dispatch_bsd_engine(i915, eb->file);
2685 eb_select_engine(struct i915_execbuffer *eb)
2691 if (i915_gem_context_user_engines(eb->gem_context))
2692 idx = eb->args->flags & I915_EXEC_RING_MASK;
2694 idx = eb_select_legacy_ring(eb);
2696 ce = i915_gem_context_get_engine(eb->gem_context, idx);
2701 if (eb->buffer_count < ce->parallel.number_children + 1) {
2705 if (eb->batch_start_offset || eb->args->batch_len) {
2710 eb->num_batches = ce->parallel.number_children + 1;
2714 eb->wakeref = intel_gt_pm_get(ce->engine->gt);
2742 eb->context = ce;
2743 eb->gt = ce->engine->gt;
2753 intel_gt_pm_put(ce->engine->gt, eb->wakeref);
2761 eb_put_engine(struct i915_execbuffer *eb)
2765 i915_vm_put(eb->context->vm);
2766 intel_gt_pm_put(eb->context->engine->gt, eb->wakeref);
2767 for_each_child(eb->context, child)
2769 intel_context_put(eb->context);
2784 add_timeline_fence_array(struct i915_execbuffer *eb,
2801 SIZE_MAX / sizeof(*f)) - eb->num_fences)
2812 f = krealloc(eb->fences,
2813 (eb->num_fences + nfences) * sizeof(*f),
2818 eb->fences = f;
2819 f += eb->num_fences;
2841 syncobj = drm_syncobj_find(eb->file, user_fence.handle);
2843 drm_dbg(&eb->i915->drm,
2852 drm_dbg(&eb->i915->drm,
2862 drm_dbg(&eb->i915->drm,
2890 drm_dbg(&eb->i915->drm,
2911 eb->num_fences++;
2917 static int add_fence_array(struct i915_execbuffer *eb)
2919 struct drm_i915_gem_execbuffer2 *args = eb->args;
2934 SIZE_MAX / sizeof(*f) - eb->num_fences))
2941 f = krealloc(eb->fences,
2942 (eb->num_fences + num_fences) * sizeof(*f),
2947 eb->fences = f;
2948 f += eb->num_fences;
2960 syncobj = drm_syncobj_find(eb->file, user_fence.handle);
2962 drm_dbg(&eb->i915->drm,
2970 drm_dbg(&eb->i915->drm,
2985 eb->num_fences++;
2998 await_fence_array(struct i915_execbuffer *eb,
3004 for (n = 0; n < eb->num_fences; n++) {
3005 if (!eb->fences[n].dma_fence)
3008 err = i915_request_await_dma_fence(rq, eb->fences[n].dma_fence);
3016 static void signal_fence_array(const struct i915_execbuffer *eb,
3021 for (n = 0; n < eb->num_fences; n++) {
3025 syncobj = ptr_unpack_bits(eb->fences[n].syncobj, &flags, 2);
3029 if (eb->fences[n].chain_fence) {
3031 eb->fences[n].chain_fence,
3033 eb->fences[n].value);
3038 eb->fences[n].chain_fence = NULL;
3048 struct i915_execbuffer *eb = data;
3054 return add_timeline_fence_array(eb, &timeline_fences);
3066 static int eb_request_add(struct i915_execbuffer *eb, struct i915_request *rq,
3081 if (likely(!intel_context_is_closed(eb->context))) {
3082 attr = eb->gem_context->sched;
3090 if (intel_context_is_parallel(eb->context)) {
3112 static int eb_requests_add(struct i915_execbuffer *eb, int err)
3120 for_each_batch_add_order(eb, i) {
3121 struct i915_request *rq = eb->requests[i];
3125 err |= eb_request_add(eb, rq, err, i == 0);
3137 struct i915_execbuffer *eb)
3145 if (eb->args->flags & I915_EXEC_FENCE_ARRAY)
3154 eb);
3157 static void eb_requests_get(struct i915_execbuffer *eb)
3161 for_each_batch_create_order(eb, i) {
3162 if (!eb->requests[i])
3165 i915_request_get(eb->requests[i]);
3169 static void eb_requests_put(struct i915_execbuffer *eb)
3173 for_each_batch_create_order(eb, i) {
3174 if (!eb->requests[i])
3177 i915_request_put(eb->requests[i]);
3182 eb_composite_fence_create(struct i915_execbuffer *eb, int out_fence_fd)
3189 GEM_BUG_ON(!intel_context_is_parent(eb->context));
3191 fences = kmalloc_array(eb->num_batches, sizeof(*fences), GFP_KERNEL);
3195 for_each_batch_create_order(eb, i) {
3196 fences[i] = &eb->requests[i]->fence;
3198 &eb->requests[i]->fence.flags);
3201 fence_array = dma_fence_array_create(eb->num_batches,
3203 eb->context->parallel.fence_context,
3204 eb->context->parallel.seqno++,
3212 for_each_batch_create_order(eb, i)
3223 eb->composite_fence = &fence_array->base;
3229 eb_fences_add(struct i915_execbuffer *eb, struct i915_request *rq,
3235 if (unlikely(eb->gem_context->syncobj)) {
3238 fence = drm_syncobj_fence_get(eb->gem_context->syncobj);
3246 if (eb->args->flags & I915_EXEC_FENCE_SUBMIT)
3254 if (eb->fences) {
3255 err = await_fence_array(eb, rq);
3260 if (intel_context_is_parallel(eb->context)) {
3261 out_fence = eb_composite_fence_create(eb, out_fence_fd);
3274 eb_find_context(struct i915_execbuffer *eb, unsigned int context_number)
3279 return eb->context;
3281 for_each_child(eb->context, child)
3291 eb_requests_create(struct i915_execbuffer *eb, struct dma_fence *in_fence,
3297 for_each_batch_create_order(eb, i) {
3299 eb->requests[i] = i915_request_create(eb_find_context(eb, i));
3300 if (IS_ERR(eb->requests[i])) {
3301 out_fence = ERR_CAST(eb->requests[i]);
3302 eb->requests[i] = NULL;
3311 if (i + 1 == eb->num_batches) {
3312 out_fence = eb_fences_add(eb, eb->requests[i],
3323 if (eb->batches[i]->vma)
3324 eb->requests[i]->batch_res =
3325 i915_vma_resource_get(eb->batches[i]->vma->resource);
3326 if (eb->batch_pool) {
3327 GEM_BUG_ON(intel_context_is_parallel(eb->context));
3328 intel_gt_buffer_pool_mark_active(eb->batch_pool,
3329 eb->requests[i]);
3343 struct i915_execbuffer eb;
3353 eb.i915 = i915;
3354 eb.file = file;
3355 eb.args = args;
3359 eb.exec = exec;
3360 eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
3361 eb.vma[0].vma = NULL;
3362 eb.batch_pool = NULL;
3364 eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
3365 reloc_cache_init(&eb.reloc_cache, eb.i915);
3367 eb.buffer_count = args->buffer_count;
3368 eb.batch_start_offset = args->batch_start_offset;
3369 eb.trampoline = NULL;
3371 eb.fences = NULL;
3372 eb.num_fences = 0;
3374 eb_capture_list_clear(&eb);
3376 memset(eb.requests, 0, sizeof(struct i915_request *) *
3377 ARRAY_SIZE(eb.requests));
3378 eb.composite_fence = NULL;
3380 eb.batch_flags = 0;
3392 eb.batch_flags |= I915_DISPATCH_SECURE;
3395 eb.batch_flags |= I915_DISPATCH_PINNED;
3397 err = parse_execbuf2_extensions(args, &eb);
3401 err = add_fence_array(&eb);
3426 err = eb_create(&eb);
3430 GEM_BUG_ON(!eb.lut_size);
3432 err = eb_select_context(&eb);
3436 err = eb_select_engine(&eb);
3440 err = eb_lookup_vmas(&eb);
3442 eb_release_vmas(&eb, true);
3446 i915_gem_ww_ctx_init(&eb.ww, true);
3448 err = eb_relocate_parse(&eb);
3461 ww_acquire_done(&eb.ww.ctx);
3462 err = eb_capture_stage(&eb);
3466 out_fence = eb_requests_create(&eb, in_fence, out_fence_fd);
3470 if (eb.requests[0])
3476 err = eb_submit(&eb);
3479 eb_requests_get(&eb);
3480 err = eb_requests_add(&eb, err);
3482 if (eb.fences)
3483 signal_fence_array(&eb, eb.composite_fence ?
3484 eb.composite_fence :
3485 &eb.requests[0]->fence);
3487 if (unlikely(eb.gem_context->syncobj)) {
3488 drm_syncobj_replace_fence(eb.gem_context->syncobj,
3489 eb.composite_fence ?
3490 eb.composite_fence :
3491 &eb.requests[0]->fence);
3505 if (!out_fence && eb.composite_fence)
3506 dma_fence_put(eb.composite_fence);
3508 eb_requests_put(&eb);
3511 eb_release_vmas(&eb, true);
3513 i915_gem_ww_ctx_fini(&eb.ww);
3515 if (eb.batch_pool)
3516 intel_gt_buffer_pool_put(eb.batch_pool);
3518 eb_put_engine(&eb);
3520 i915_gem_context_put(eb.gem_context);
3522 eb_destroy(&eb);
3529 put_fence_array(eb.fences, eb.num_fences);
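
The bucket table set up in eb_create() (source lines 333-373 above) and the lookup in eb_get_vma() (lines 1065-1075) together form the handle-to-vma table consulted while processing relocations. Below is a minimal userspace sketch of that scheme, under stated assumptions: fake_ev, hash32() and lut_bits() are hypothetical stand-ins for the kernel's struct eb_vma, hash_32() and ilog2(), and a plain singly linked next pointer replaces the hlist node.

    /*
     * Hypothetical userspace sketch (not kernel code) of the two lookup
     * schemes visible above: without I915_EXEC_HANDLE_LUT, eb_create() sizes
     * a hash-bucket table of 1 << (1 + ilog2(buffer_count)) heads; with the
     * flag, lut_size is stored negative and eb_get_vma() indexes eb->vma[]
     * directly with the handle.
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct fake_ev {                /* stand-in for struct eb_vma */
            uint32_t handle;
            struct fake_ev *next;   /* stand-in for the hlist node */
    };

    /* 32-bit multiplicative hash, same idea as the kernel's hash_32() */
    static unsigned int hash32(uint32_t val, unsigned int bits)
    {
            return (uint32_t)(val * 0x61C88647u) >> (32 - bits);
    }

    static unsigned int lut_bits(unsigned int buffer_count)
    {
            unsigned int bits = 0;

            while ((1u << bits) <= buffer_count)    /* 1 + ilog2(count) */
                    bits++;
            return bits;
    }

    int main(void)
    {
            unsigned int count = 5, bits = lut_bits(count);
            struct fake_ev objs[5] = {
                    { .handle = 3 }, { .handle = 17 }, { .handle = 42 },
                    { .handle = 7 }, { .handle = 99 },
            };
            struct fake_ev **buckets = calloc(1u << bits, sizeof(*buckets));

            if (!buckets)
                    return 1;

            /* eb_add_vma(): hash each handle into its bucket */
            for (unsigned int i = 0; i < count; i++) {
                    unsigned int b = hash32(objs[i].handle, bits);

                    objs[i].next = buckets[b];
                    buckets[b] = &objs[i];
            }

            /* eb_get_vma() path when I915_EXEC_HANDLE_LUT is NOT set */
            for (struct fake_ev *ev = buckets[hash32(42, bits)]; ev; ev = ev->next)
                    if (ev->handle == 42)
                            printf("found handle %u via %u-bit hash LUT\n",
                                   ev->handle, bits);

            /*
             * With I915_EXEC_HANDLE_LUT, relocation target "handles" are
             * indices into the exec-object array, so the kernel stores
             * lut_size as -buffer_count and bounds-checks the index instead
             * of hashing (lines 1067-1070 above).
             */
            free(buckets);
            return 0;
    }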
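
Everything the listing reads through eb->args and eb->exec originates in the DRM_IOCTL_I915_GEM_EXECBUFFER2 payload supplied by userspace. The sketch below shows one plausible way a caller might populate it; it is an assumption-laden illustration, not a reference: fd, bo_handle and batch_len are presumed to exist already, the uapi header path varies between systems, and error handling is reduced to the bare minimum.

    /*
     * Hedged userspace sketch of the submission that feeds eb->args and
     * eb->exec above. Assumes: "fd" is an open i915 render node, "bo_handle"
     * is an existing GEM handle holding batch_len bytes of valid commands
     * terminated by MI_BATCH_BUFFER_END, and the default context (rsvd1 == 0)
     * is acceptable. The header may live at <libdrm/i915_drm.h> instead.
     */
    #include <errno.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    static int submit_batch(int fd, uint32_t bo_handle, uint32_t batch_len)
    {
            struct drm_i915_gem_exec_object2 obj[1];
            struct drm_i915_gem_execbuffer2 execbuf;

            memset(obj, 0, sizeof(obj));
            memset(&execbuf, 0, sizeof(execbuf));

            /*
             * Without I915_EXEC_BATCH_FIRST the batch buffer is the LAST
             * entry in the object array, the default case is_batch_buffer()
             * distinguishes at lines 554-558 above.
             */
            obj[0].handle = bo_handle;

            execbuf.buffers_ptr = (uintptr_t)obj;
            execbuf.buffer_count = 1;
            execbuf.batch_start_offset = 0;   /* becomes eb->batch_start_offset */
            execbuf.batch_len = batch_len;    /* becomes eb->args->batch_len  */
            execbuf.flags = I915_EXEC_RENDER; /* legacy ring select, see
                                                 eb_select_legacy_ring() above */

            if (ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf))
                    return -errno;
            return 0;
    }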