Searched refs:timeline (Results 1 - 25 of 35) sorted by relevance

/openbsd-current/sys/dev/pci/drm/i915/gt/selftests/
mock_timeline.c
11 void mock_timeline_init(struct intel_timeline *timeline, u64 context) argument
13 timeline->gt = NULL;
14 timeline->fence_context = context;
16 rw_init(&timeline->mutex, "mktmln");
18 INIT_ACTIVE_FENCE(&timeline->last_request);
19 INIT_LIST_HEAD(&timeline->requests);
21 i915_syncmap_init(&timeline->sync);
23 INIT_LIST_HEAD(&timeline->link);
26 void mock_timeline_fini(struct intel_timeline *timeline) argument
28 i915_syncmap_free(&timeline
[all...]
mock_timeline.h
14 void mock_timeline_init(struct intel_timeline *timeline, u64 context);
15 void mock_timeline_fini(struct intel_timeline *timeline);
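
The mock_timeline matches above amount to a tiny init/fini pair: record the identity, initialise the mutex, list heads and syncmap, then free the syncmap on teardown. Below is a minimal userspace analogue of that pattern; the struct layout and helper names are invented for the sketch and use plain pthread/list stand-ins rather than the i915 types.

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

struct list_node { struct list_node *prev, *next; };

/* An empty list points at itself, like INIT_LIST_HEAD(). */
static void list_init(struct list_node *n) { n->prev = n->next = n; }

struct mock_timeline {
	uint64_t fence_context;    /* identity of this timeline */
	pthread_mutex_t mutex;     /* orders request construction */
	struct list_node requests; /* requests submitted on this timeline */
	void *sync;                /* stand-in for the syncmap cache */
};

static void mock_timeline_init(struct mock_timeline *tl, uint64_t context)
{
	tl->fence_context = context;
	pthread_mutex_init(&tl->mutex, NULL);
	list_init(&tl->requests);
	tl->sync = NULL;           /* allocated lazily, as the syncmap is */
}

static void mock_timeline_fini(struct mock_timeline *tl)
{
	free(tl->sync);            /* drop whatever the syncmap grew into */
	pthread_mutex_destroy(&tl->mutex);
}
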
/openbsd-current/sys/dev/pci/drm/i915/gt/
intel_timeline.c
58 intel_timeline_pin_map(struct intel_timeline *timeline) argument
60 struct drm_i915_gem_object *obj = timeline->hwsp_ggtt->obj;
61 u32 ofs = offset_in_page(timeline->hwsp_offset);
68 timeline->hwsp_map = vaddr;
69 timeline->hwsp_seqno = memset(vaddr + ofs, 0, TIMELINE_SEQNO_BYTES);
75 static int intel_timeline_init(struct intel_timeline *timeline, argument
80 kref_init(&timeline->kref);
81 atomic_set(&timeline->pin_count, 0);
83 timeline->gt = gt;
86 timeline
125 struct intel_timeline *timeline = local
150 struct intel_timeline *timeline; local
393 struct intel_timeline *timeline = local
[all...]
intel_timeline.h
33 intel_timeline_get(struct intel_timeline *timeline) argument
35 kref_get(&timeline->kref);
36 return timeline;
40 static inline void intel_timeline_put(struct intel_timeline *timeline) argument
42 kref_put(&timeline->kref, __intel_timeline_free);
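
The intel_timeline.h matches show the usual kref idiom: get bumps the reference count and returns the pointer so it can be chained, put drops it and hands a release callback to run when the count hits zero. A self-contained sketch of that idiom with C11 atomics follows; the names are invented for illustration and this is not the kernel kref API.

#include <stdatomic.h>

struct timeline_ref {
	atomic_uint refcount;
	/* ... payload ... */
};

static struct timeline_ref *timeline_get(struct timeline_ref *tl)
{
	/* Bump and hand the pointer back so callers can chain it. */
	atomic_fetch_add_explicit(&tl->refcount, 1, memory_order_relaxed);
	return tl;
}

static void timeline_put(struct timeline_ref *tl,
			 void (*release)(struct timeline_ref *))
{
	/* Last reference gone: the release callback frees the object,
	 * mirroring kref_put(&timeline->kref, __intel_timeline_free). */
	if (atomic_fetch_sub_explicit(&tl->refcount, 1,
				      memory_order_acq_rel) == 1)
		release(tl);
}
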
intel_engine_pm.c
84 ce->timeline->seqno,
85 READ_ONCE(*ce->timeline->hwsp_seqno),
87 GEM_BUG_ON(ce->timeline->seqno !=
88 READ_ONCE(*ce->timeline->hwsp_seqno));
129 * engine->wakeref.counter or our timeline->active_count.
144 /* Let new submissions commence (and maybe retire this timeline) */
174 GEM_BUG_ON(ce->timeline->hwsp_ggtt != engine->status_page.vma);
181 * Note, we do this without taking the timeline->mutex. We cannot
183 * already underneath the timeline->mutex. Instead we rely on the
187 * the context, as they assume protection by the timeline
[all...]
intel_engine_heartbeat.c
212 if (!mutex_trylock(&ce->timeline->mutex)) {
213 /* Unable to lock the kernel timeline, is the engine stuck? */
229 mutex_unlock(&ce->timeline->mutex);
280 lockdep_assert_held(&ce->timeline->mutex);
332 err = mutex_lock_interruptible(&ce->timeline->mutex);
347 mutex_unlock(&ce->timeline->mutex);
366 if (!mutex_lock_interruptible(&ce->timeline->mutex)) {
368 mutex_unlock(&ce->timeline->mutex);
389 if (mutex_lock_interruptible(&ce->timeline->mutex)) {
404 mutex_unlock(&ce->timeline
[all...]
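
The heartbeat matches above only ever trylock the kernel context's timeline mutex, treating contention as a hint that the engine may be stuck rather than blocking the heartbeat worker. A rough userspace sketch of that trylock-or-defer shape, with made-up function and field names:

#include <pthread.h>
#include <stdbool.h>

struct timeline { pthread_mutex_t mutex; };

static bool heartbeat_tick(struct timeline *tl)
{
	if (pthread_mutex_trylock(&tl->mutex) != 0) {
		/* Unable to lock the timeline: someone else is busy with
		 * it (or stuck). Report "not idle" and retry next tick. */
		return false;
	}

	/* ... emit the heartbeat request under the lock ... */

	pthread_mutex_unlock(&tl->mutex);
	return true;
}
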
intel_context.c
181 err = intel_timeline_pin(ce->timeline, ww);
196 intel_timeline_unpin(ce->timeline);
207 intel_timeline_unpin(ce->timeline);
225 * We always pin the context/ring/timeline here, to ensure a pin
230 err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww);
299 i915_gem_ww_unlock_single(ce->timeline->hwsp_ggtt->obj);
365 __intel_timeline_pin(ce->timeline);
434 if (ce->timeline)
435 intel_timeline_put(ce->timeline);
474 intel_timeline_enter(ce->timeline);
[all...]
intel_context.h
25 ce__->timeline->fence_context, \
210 lockdep_assert_held(&ce->timeline->mutex);
220 lockdep_assert(lockdep_is_held(&ce->timeline->mutex) ||
227 lockdep_assert_held(&ce->timeline->mutex);
249 __acquires(&ce->timeline->mutex)
251 struct intel_timeline *tl = ce->timeline;
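
The intel_context.h matches lean on lockdep_assert_held() to document that callers already hold ce->timeline->mutex. Outside the kernel the same contract can be approximated with a mutex wrapper that records its owner; the sketch below is only an illustration of that idea, not the lockdep API, and all names are invented.

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

struct checked_mutex {
	pthread_mutex_t lock;
	pthread_t owner;
	bool owned;
};

static void checked_lock(struct checked_mutex *m)
{
	pthread_mutex_lock(&m->lock);
	m->owner = pthread_self();
	m->owned = true;
}

static void checked_unlock(struct checked_mutex *m)
{
	m->owned = false;
	pthread_mutex_unlock(&m->lock);
}

static void checked_assert_held(const struct checked_mutex *m)
{
	/* Best-effort debug check: helpers that require the caller to
	 * already hold the lock call this on entry instead of locking. */
	assert(m->owned && pthread_equal(m->owner, pthread_self()));
}
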
mock_engine.c
156 mock_timeline_unpin(ce->timeline);
171 ce->timeline = intel_timeline_create(ce->engine->gt);
172 if (IS_ERR(ce->timeline)) {
174 return PTR_ERR(ce->timeline);
177 err = mock_timeline_pin(ce->timeline);
179 intel_timeline_put(ce->timeline);
180 ce->timeline = NULL;
267 * Virtual engines complicate acquiring the engine timeline lock,
417 engine->status_page.vma = ce->timeline->hwsp_ggtt;
intel_ring_submission.c
581 ce->timeline = intel_timeline_get(engine->legacy.timeline);
1075 intel_timeline_unpin(engine->legacy.timeline);
1076 intel_timeline_put(engine->legacy.timeline);
1148 * Using a global execution timeline; the previous final breadcrumb is
1321 struct intel_timeline *timeline; local
1346 timeline = intel_timeline_create_from_engine(engine,
1348 if (IS_ERR(timeline)) {
1349 err = PTR_ERR(timeline);
1352 GEM_BUG_ON(timeline
[all...]
selftest_timeline.c
520 pr_err("Failed to write to timeline!\n");
585 GEM_TRACE_ERR("Invalid seqno:%lu stored in timeline %llu @ %x, found 0x%x\n",
657 GEM_TRACE_ERR("Invalid seqno:%lu stored in timeline %llu @ %x, found 0x%x\n",
751 pr_err("Wait for timeline writes timed out!\n");
758 pr_err("Bad timeline values: found (%x, %x), expected (%x, %x)\n",
838 /* keep the same cache settings as timeline */
863 lockdep_unpin_lock(&from->context->timeline->mutex, from->cookie);
864 mutex_unlock(&from->context->timeline->mutex);
868 mutex_lock(&to->context->timeline->mutex);
869 to->cookie = lockdep_pin_lock(&to->context->timeline
[all...]
intel_context_types.h
114 struct intel_timeline *timeline; member in struct:intel_context
160 unsigned int active_count; /* protected by timeline->mutex */
intel_breadcrumbs.c
231 if (intel_timeline_is_last(ce->timeline, rq))
232 add_retire(b, ce->timeline);
373 * hasn't event started). We could walk the timeline->requests,
intel_engine_cs.c
1365 rcu_assign_pointer(frame->rq.timeline, ce->timeline);
1366 frame->rq.hwsp_seqno = ce->timeline->hwsp_seqno;
1376 mutex_lock(&ce->timeline->mutex);
1382 mutex_unlock(&ce->timeline->mutex);
1406 ce->timeline = page_pack_bits(NULL, hwsp);
1427 lockdep_set_class_and_name(&ce->timeline->mutex, key, name);
1437 GEM_BUG_ON(ce->timeline->hwsp_ggtt != hwsp);
1440 list_del(&ce->timeline->engine_link);
2008 * unknown queue. Play safe and make sure the timeline remain
[all...]
selftest_rc6.c
148 *cs++ = ce->timeline->hwsp_offset + 8;
intel_gt_requests.c
31 return !list_empty(&engine->kernel_context->timeline->requests);
74 * If the timeline is currently locked, either it is being
96 * so that we know when the timeline is already on a
intel_execlists_submission.c
177 * (each request in the timeline waits for the completion fence of
451 * requests so that inter-timeline dependencies (i.e other timelines)
462 head = __active_request(ce->timeline, rq, -EIO)->head;
614 if (intel_timeline_is_last(ce->timeline, rq) &&
616 intel_engine_add_retire(engine, ce->timeline);
819 ce->timeline->fence_context,
828 ccid, ce->timeline->fence_context,
847 ce->timeline->fence_context,
862 ce->timeline->fence_context,
878 ce->timeline
[all...]
/openbsd-current/sys/dev/pci/drm/i915/gem/
i915_gem_throttle.c
66 if (!ce->timeline)
69 mutex_lock(&ce->timeline->mutex);
71 &ce->timeline->requests,
83 mutex_unlock(&ce->timeline->mutex);
i915_gem_execbuffer.c
1514 * timeline). To be completely sure, and since we are required to
1974 * to parent. This is done for locking reasons as the timeline lock is acquired
2489 struct intel_timeline *tl = ce->timeline;
2531 * until the timeline is idle, which in turn releases the wakeref
2560 mutex_lock(&ce->timeline->mutex);
2562 mutex_unlock(&ce->timeline->mutex);
2615 mutex_lock(&child->timeline->mutex);
2617 mutex_unlock(&child->timeline->mutex);
2636 mutex_lock(&child->timeline->mutex);
2638 mutex_unlock(&child->timeline
[all...]
/openbsd-current/sys/dev/pci/drm/i915/
i915_active.c
30 u64 timeline; member in struct:active_node
156 /* Make the cached node available for reuse with any timeline */
157 ref->cache->timeline = 0; /* needs cmpxchg(u64) */
169 /* Finally free the discarded timeline tree */
240 GEM_BUG_ON(idx == 0); /* 0 is the unordered timeline, rsvd for cache */
243 * We track the most recently used timeline to skip a rbtree search
247 * current timeline.
251 u64 cached = READ_ONCE(it->timeline);
258 * An unclaimed cache [.timeline=0] can only be claimed once.
262 * idx. If, and only if, the timeline i
[all...]
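
The i915_active.c comments above describe a claim-once cache: timeline id 0 is reserved for the unclaimed state, and a single cmpxchg over the u64 id hands the cached node to exactly one timeline, with everyone else falling back to the rbtree lookup. A compact sketch of that claim step using C11 atomics; the types and names are invented for the sketch.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct active_cache {
	_Atomic uint64_t timeline;	/* 0 == unclaimed (reserved value) */
};

/* idx must be nonzero; 0 is reserved for the unclaimed cache slot. */
static bool cache_claim(struct active_cache *cache, uint64_t idx)
{
	uint64_t expected = 0;

	/* Fast path: the cache already belongs to this timeline. */
	if (atomic_load_explicit(&cache->timeline,
				 memory_order_relaxed) == idx)
		return true;

	/* An unclaimed cache can be claimed exactly once; losers go to
	 * the tree lookup instead. */
	return atomic_compare_exchange_strong(&cache->timeline,
					      &expected, idx);
}
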
i915_request.h
214 struct intel_timeline __rcu *timeline; member in struct:i915_request
228 * We pin the timeline->mutex while constructing the request to
230 * The timeline->mutex must be held to ensure that only this caller
231 * can use the ring and manipulate the associated timeline during
275 * the HW status page (or our timeline's local equivalent). The full
276 * path would be rq->hw_context->ring->timeline->hwsp_seqno.
320 /* timeline->request entry for this request */
504 * the local timeline's equivalent) for this request. The request itself
528 * If the timeline is not using initial breadcrumbs, a request is
529 * considered started if the previous request on its timeline (
[all...]
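
The i915_request.h comments spell out the construction rule: the timeline mutex is held for the whole request build, so only one caller at a time can advance the seqno and touch the ring and requests list. A minimal sketch of building a request under that rule; the struct layout and names here are invented for illustration, not the i915 definitions.

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

struct request {
	uint32_t seqno;
	struct request *next;	/* timeline->requests entry */
};

struct timeline {
	pthread_mutex_t mutex;
	uint32_t seqno;		/* last seqno handed out */
	struct request *requests;
};

static struct request *request_create(struct timeline *tl)
{
	struct request *rq = calloc(1, sizeof(*rq));

	if (!rq)
		return NULL;

	/* Only the holder of timeline->mutex may advance the seqno and
	 * link new requests, so construction is serialised per timeline. */
	pthread_mutex_lock(&tl->mutex);
	rq->seqno = ++tl->seqno;
	rq->next = tl->requests;
	tl->requests = rq;
	pthread_mutex_unlock(&tl->mutex);

	return rq;
}
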
i915_request.c
71 * The timeline struct (as part of the ppgtt underneath a context)
792 * to the engine timeline (__i915_request_submit()). The waiters
967 struct intel_timeline *tl = ce->timeline;
1036 RCU_INIT_POINTER(rq->timeline, tl);
1146 if (i915_request_timeline(rq) == rcu_access_pointer(signal->timeline))
1170 /* Is signal the earliest request on its timeline? */
1171 if (pos == &rcu_dereference(signal->timeline)->requests)
1175 * Peek at the request before us in the timeline. That
1178 * still part of the signaler's timeline.
1253 * the timeline HWS
1717 __i915_request_ensure_parallel_ordering(struct i915_request *rq, struct intel_timeline *timeline) argument
1752 __i915_request_ensure_ordering(struct i915_request *rq, struct intel_timeline *timeline) argument
1803 struct intel_timeline *timeline = i915_request_timeline(rq); local
[all...]
i915_scheduler.c
147 * Virtual engines complicate acquiring the engine timeline lock,
257 /* Recheck after acquiring the engine->timeline.lock */
399 * However, retirement is run independently on each timeline and
441 /* Dependencies along the same timeline are expected. */
442 if (signaler->timeline == rq->timeline)
/openbsd-current/sys/dev/pci/drm/amd/amdgpu/
amdgpu_trace.h
171 __string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
181 __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job));
187 TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
188 __entry->sched_job_id, __get_str(timeline), __entry->context,
197 __string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
206 __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job));
212 TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
213 __entry->sched_job_id, __get_str(timeline), __entry->context,
/openbsd-current/sys/dev/pci/drm/i915/selftests/
i915_active.c
300 "\ttimeline: %llx\n", it->timeline);

Completed in 181 milliseconds
