Lines matching refs:tl — uses of the intel_timeline pointer tl (apparently from drivers/gpu/drm/i915/gt/intel_timeline.c)

40 struct intel_timeline *tl =
41 	container_of(active, typeof(*tl), active);
43 i915_vma_unpin(tl->hwsp_ggtt);
44 intel_timeline_put(tl);
49 struct intel_timeline *tl =
50 	container_of(active, typeof(*tl), active);
52 __i915_vma_pin(tl->hwsp_ggtt);
53 intel_timeline_get(tl);
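
Lines 40-53 are mirror-image i915_active callbacks: the first activation (__timeline_active, 49-53) pins the HWSP vma and takes a timeline reference, and the final retirement (__timeline_retire, 40-44) drops both, so neither the timeline nor its status page can disappear while requests are in flight. A minimal userspace sketch of that pairing, assuming only C11 atomics; every name here is an illustrative stand-in, not i915 API:

    #include <stdatomic.h>
    #include <stdio.h>

    struct obj {
        atomic_int refcount;    /* cf. the timeline's kref */
        atomic_int pinned;      /* cf. the hwsp_ggtt vma pin */
    };

    static void obj_active(struct obj *o)   /* cf. __timeline_active() */
    {
        atomic_fetch_add(&o->pinned, 1);    /* __i915_vma_pin() */
        atomic_fetch_add(&o->refcount, 1);  /* intel_timeline_get() */
    }

    static void obj_retire(struct obj *o)   /* cf. __timeline_retire() */
    {
        atomic_fetch_sub(&o->pinned, 1);    /* i915_vma_unpin() */
        atomic_fetch_sub(&o->refcount, 1);  /* intel_timeline_put() */
    }

    int main(void)
    {
        struct obj o = { 1, 0 };            /* creator's reference, unpinned */

        obj_active(&o);
        obj_retire(&o);
        printf("refcount=%d pinned=%d\n",
               atomic_load(&o.refcount), atomic_load(&o.pinned));
        return 0;
    }
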
171 struct intel_timeline *tl;
173 tl = __intel_timeline_create(engine->gt, hwsp, offset);
174 if (IS_ERR(tl))
175 return tl;
179 list_add_tail(&tl->engine_link, &engine->status_page.timelines);
182 return tl;
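
intel_timeline_create_from_engine() (171-182) leans on the kernel's error-pointer idiom: __intel_timeline_create() returns either a valid pointer or an errno encoded into the pointer value itself, which IS_ERR() detects and the caller propagates unchanged (173-175). A self-contained sketch of the idiom, with ERR_PTR()/IS_ERR() re-implemented here purely for illustration:

    #include <errno.h>
    #include <stdio.h>

    /* The top 4095 pointer values encode negative errnos. */
    #define MAX_ERRNO 4095

    static void *ERR_PTR(long error) { return (void *)error; }
    static long PTR_ERR(const void *ptr) { return (long)ptr; }
    static int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *create_thing(int fail)
    {
        static int thing;

        return fail ? ERR_PTR(-ENOMEM) : &thing;
    }

    int main(void)
    {
        void *t = create_thing(1);

        if (IS_ERR(t))                  /* cf. "if (IS_ERR(tl)) return tl;" */
            printf("create failed: %ld\n", PTR_ERR(t));
        return 0;
    }

Encoding the error in the pointer keeps the create path to a single return value, at the cost of callers having to check IS_ERR() rather than NULL.
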
185 void __intel_timeline_pin(struct intel_timeline *tl)
187 GEM_BUG_ON(!atomic_read(&tl->pin_count));
188 atomic_inc(&tl->pin_count);
191 int intel_timeline_pin(struct intel_timeline *tl, struct i915_gem_ww_ctx *ww)
195 if (atomic_add_unless(&tl->pin_count, 1, 0))
198 if (!tl->hwsp_map) {
199 err = intel_timeline_pin_map(tl);
204 err = i915_ggtt_pin(tl->hwsp_ggtt, ww, 0, PIN_HIGH);
208 tl->hwsp_offset =
209 	i915_ggtt_offset(tl->hwsp_ggtt) +
210 	offset_in_page(tl->hwsp_offset);
211 GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
212 	 tl->fence_context, tl->hwsp_offset);
214 i915_active_acquire(&tl->active);
215 if (atomic_fetch_inc(&tl->pin_count)) {
216 i915_active_release(&tl->active);
217 __i915_vma_unpin(tl->hwsp_ggtt);
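
intel_timeline_pin() (191-217) is a classic fast-path/slow-path refcount: atomic_add_unless(&tl->pin_count, 1, 0) succeeds only if the timeline is already pinned, while the first pinner does the expensive map and GGTT pinning, and the closing atomic_fetch_inc() detects a concurrent pinner that got there first, in which case the duplicate acquire and vma pin are undone (215-217). A userspace sketch of the same shape, assuming C11 atomics; expensive_setup()/undo_setup() are placeholders for the mapping and pinning work:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int pin_count;

    /* cf. the kernel's atomic_add_unless(v, add, unless) */
    static bool add_unless(atomic_int *v, int add, int unless)
    {
        int c = atomic_load(v);

        while (c != unless)
            if (atomic_compare_exchange_weak(v, &c, c + add))
                return true;
        return false;
    }

    static void expensive_setup(void) { puts("setup"); } /* i915_ggtt_pin() etc. */
    static void undo_setup(void) { puts("undo"); }       /* __i915_vma_unpin() etc. */

    static int pin(void)
    {
        /* Fast path: already pinned, just add our pin (line 195). */
        if (add_unless(&pin_count, 1, 0))
            return 0;

        expensive_setup();

        /* A concurrent pinner beat us to it? Undo our duplicate setup,
         * as lines 215-217 do.
         */
        if (atomic_fetch_add(&pin_count, 1))
            undo_setup();
        return 0;
    }

    int main(void)
    {
        pin();  /* slow path: pin_count was 0 */
        pin();  /* fast path */
        printf("pin_count=%d\n", atomic_load(&pin_count));
        return 0;
    }

intel_timeline_unpin() (381-388 below) is the mirror: only the atomic_dec_and_test() that drops pin_count to zero releases the active reference and the vma pin.
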
223 void intel_timeline_reset_seqno(const struct intel_timeline *tl)
225 u32 *hwsp_seqno = (u32 *)tl->hwsp_seqno;
227 GEM_BUG_ON(!atomic_read(&tl->pin_count));
230 WRITE_ONCE(*hwsp_seqno, tl->seqno);
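
Line 230 publishes the seqno with WRITE_ONCE() because the status-page slot is polled concurrently by request-completion checks, so the compiler must emit exactly one untearable 32-bit store that it may not elide, split, or fuse. As a sketch, the closest portable C11 equivalent is a relaxed atomic store:

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint32_t hwsp_seqno; /* stand-in for the status-page slot */

    static void reset_seqno(uint32_t seqno)
    {
        /* cf. WRITE_ONCE(*hwsp_seqno, tl->seqno) on line 230 */
        atomic_store_explicit(&hwsp_seqno, seqno, memory_order_relaxed);
    }

    int main(void)
    {
        reset_seqno(42);
        return atomic_load_explicit(&hwsp_seqno, memory_order_relaxed) != 42;
    }
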
234 void intel_timeline_enter(struct intel_timeline *tl)
236 struct intel_gt_timelines *timelines = &tl->gt->timelines;
247 * The rule is generally tl->mutex, otherwise engine->wakeref.mutex.
251 * barrier, and there we use the tl->active_count as a means to
254 * use atomic to manipulate tl->active_count.
256 lockdep_assert_held(&tl->mutex);
258 if (atomic_add_unless(&tl->active_count, 1, 0))
262 if (!atomic_fetch_inc(&tl->active_count)) {
269 intel_timeline_reset_seqno(tl);
270 list_add_tail(&tl->link, &timelines->active_list);
275 void intel_timeline_exit(struct intel_timeline *tl)
277 struct intel_gt_timelines *timelines = &tl->gt->timelines;
280 lockdep_assert_held(&tl->mutex);
282 GEM_BUG_ON(!atomic_read(&tl->active_count));
283 if (atomic_add_unless(&tl->active_count, -1, 1))
287 if (atomic_dec_and_test(&tl->active_count))
288 list_del(&tl->link);
296 i915_syncmap_free(&tl->sync);
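
intel_timeline_enter() (234-270) and intel_timeline_exit() (275-296) apply the same fast-path trick to tl->active_count, but only the 0 <-> 1 transitions take the global timelines lock, since only those edit the active_list. Note the inverted guard in exit: atomic_add_unless(&tl->active_count, -1, 1) decrements locklessly only while the count stays above one, so the final decrement always happens under the lock. A sketch with a pthread mutex standing in for the spinlock and a flag for list membership; names are illustrative only:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static atomic_int active_count;
    static bool on_active_list;

    static bool add_unless(atomic_int *v, int add, int unless)
    {
        int c = atomic_load(v);

        while (c != unless)
            if (atomic_compare_exchange_weak(v, &c, c + add))
                return true;
        return false;
    }

    static void timeline_enter(void)
    {
        if (add_unless(&active_count, 1, 0))    /* fast path (line 258) */
            return;

        pthread_mutex_lock(&lock);
        if (!atomic_fetch_add(&active_count, 1))    /* 0 -> 1 (line 262) */
            on_active_list = true;                  /* list_add_tail (270) */
        pthread_mutex_unlock(&lock);
    }

    static void timeline_exit(void)
    {
        if (add_unless(&active_count, -1, 1))   /* fast path (line 283) */
            return;

        pthread_mutex_lock(&lock);
        if (atomic_fetch_sub(&active_count, 1) == 1)    /* 1 -> 0 (287) */
            on_active_list = false;                     /* list_del (288) */
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        timeline_enter();   /* takes the lock, lists */
        timeline_enter();   /* lockless */
        timeline_exit();    /* lockless */
        timeline_exit();    /* takes the lock, delists */
        printf("active=%d listed=%d\n",
               atomic_load(&active_count), on_active_list);
        return 0;
    }
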
299 static u32 timeline_advance(struct intel_timeline *tl)
301 GEM_BUG_ON(!atomic_read(&tl->pin_count));
302 GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);
304 return tl->seqno += 1 + tl->has_initial_breadcrumb;
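
timeline_advance() (299-304) bumps the seqno by 1, or by 2 when the timeline emits an initial breadcrumb, so a breadcrumb-bearing timeline only ever hands out even seqnos, reserving the odd value in between for the initial breadcrumb; that is exactly the invariant asserted by GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb) on line 302. Worked numerically:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint32_t has_initial_breadcrumb = 1;
        uint32_t seqno = 0;

        for (int i = 0; i < 4; i++) {
            assert(!(seqno & has_initial_breadcrumb));    /* line 302 */
            seqno += 1 + has_initial_breadcrumb;          /* line 304 */
            printf("request %d -> seqno %u\n", i, seqno); /* 2, 4, 6, 8 */
        }
        return 0;
    }
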
308 __intel_timeline_get_seqno(struct intel_timeline *tl,
311 u32 next_ofs = offset_in_page(tl->hwsp_offset + TIMELINE_SEQNO_BYTES);
317 tl->hwsp_offset = i915_ggtt_offset(tl->hwsp_ggtt) + next_ofs;
318 tl->hwsp_seqno = tl->hwsp_map + next_ofs;
319 intel_timeline_reset_seqno(tl);
321 *seqno = timeline_advance(tl);
322 GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno));
326 int intel_timeline_get_seqno(struct intel_timeline *tl,
330 *seqno = timeline_advance(tl);
333 if (unlikely(!*seqno && tl->has_initial_breadcrumb))
334 return __intel_timeline_get_seqno(tl, seqno);
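
On 32-bit wraparound of a semaphore-capable timeline (line 333: *seqno became 0 and the timeline has an initial breadcrumb), __intel_timeline_get_seqno() (308-322) abandons the current status-page slot and rotates hwsp_offset forward by one slot, modulo the page, so stale hardware waiters polling the old slot never observe the seqno jumping backwards. The offset arithmetic, as a sketch that assumes a 4 KiB page and an 8-byte TIMELINE_SEQNO_BYTES (the upstream value, to my knowledge):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE            4096u
    #define TIMELINE_SEQNO_BYTES 8u    /* assumed slot size */

    /* cf. the kernel's offset_in_page() */
    static uint32_t offset_in_page(uint32_t ofs) { return ofs & (PAGE_SIZE - 1); }

    int main(void)
    {
        uint32_t hwsp_offset = PAGE_SIZE - TIMELINE_SEQNO_BYTES; /* last slot */

        /* cf. line 311: step to the next slot, wrapping within the page */
        uint32_t next_ofs = offset_in_page(hwsp_offset + TIMELINE_SEQNO_BYTES);

        printf("slot %u -> slot %u\n", hwsp_offset, next_ofs); /* 4088 -> 0 */
        return 0;
    }
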
343 struct intel_timeline *tl;
347 tl = rcu_dereference(from->timeline);
349 	    !i915_active_acquire_if_busy(&tl->active))
350 	tl = NULL;
352 if (tl) {
354 *hwsp = i915_ggtt_offset(tl->hwsp_ggtt) +
359 if (tl && __i915_request_is_complete(from)) {
360 i915_active_release(&tl->active);
361 tl = NULL;
365 if (!tl)
369 if (!tl->has_initial_breadcrumb) {
374 err = i915_active_add_request(&tl->active, to);
377 i915_active_release(&tl->active);
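
intel_timeline_read_hwsp() (343-377) may dereference from->timeline only under RCU, and i915_active_acquire_if_busy() is an increment-unless-zero grab: it succeeds only while the timeline still has requests in flight, after which the code re-checks that `from` has not completed in the meantime and releases again if it has (359-361). The conditional-acquire half of that idiom, sketched in plain C11 with illustrative names:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int active;   /* number of requests keeping this busy */

    /* Succeed only if still busy; cf. i915_active_acquire_if_busy(). */
    static bool acquire_if_busy(void)
    {
        int c = atomic_load(&active);

        while (c != 0)
            if (atomic_compare_exchange_weak(&active, &c, c + 1))
                return true;
        return false;   /* already idle: caller must treat it as gone */
    }

    static void release(void) { atomic_fetch_sub(&active, 1); }

    int main(void)
    {
        atomic_store(&active, 1);   /* pretend one request is in flight */

        if (acquire_if_busy()) {
            /* ... compute the HWSP address ...; then, as on lines
             * 359-361, re-check for completion and release if we raced.
             */
            release();
        }
        printf("active=%d\n", atomic_load(&active));
        return 0;
    }
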
381 void intel_timeline_unpin(struct intel_timeline *tl)
383 GEM_BUG_ON(!atomic_read(&tl->pin_count));
384 if (!atomic_dec_and_test(&tl->pin_count))
387 i915_active_release(&tl->active);
388 __i915_vma_unpin(tl->hwsp_ggtt);
418 struct intel_timeline *tl, *tn;
422 list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
427 if (!mutex_trylock(&tl->mutex)) {
429 		   tl->fence_context);
433 intel_timeline_get(tl);
434 GEM_BUG_ON(!atomic_read(&tl->active_count));
435 atomic_inc(&tl->active_count); /* pin the list element */
441 list_for_each_entry_safe(rq, rn, &tl->requests, link) {
452 drm_printf(m, "Timeline %llx: { ", tl->fence_context);
456 		   *tl->hwsp_seqno, tl->seqno);
457 fence = i915_active_fence_get(&tl->last_request);
466 list_for_each_entry_safe(rq, rn, &tl->requests, link)
470 mutex_unlock(&tl->mutex);
474 list_safe_reset_next(tl, tn, link);
475 if (atomic_dec_and_test(&tl->active_count))
476 list_del(&tl->link);
479 if (refcount_dec_and_test(&tl->kref.refcount)) {
480 GEM_BUG_ON(atomic_read(&tl->active_count));
481 list_add(&tl->link, &free);
486 list_for_each_entry_safe(tl, tn, &free, link)
487 __intel_timeline_free(&tl->kref);
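
intel_gt_show_timelines() (418-487) shows the standard recipe for doing slow work (printing) while walking a spinlock-protected list: trylock the per-timeline mutex to avoid lock inversion (427), take a reference and bump active_count to pin the list element (433-435), drop the spinlock for the printing, then retake it and resynchronise the cursor with list_safe_reset_next() (474); a timeline whose last reference died during the walk is not freed under the spinlock but parked on a local `free` list and released after the unlock (479-487). A compressed, purely illustrative sketch of that shape, with a pthread mutex in place of the spinlock:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        struct node *prev, *next;   /* intrusive, circular list */
        int refs;                   /* cf. tl->kref */
        int id;
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node head = { &head, &head, 0, -1 };

    static void list_add(struct node *n)
    {
        n->next = head.next;
        n->prev = &head;
        head.next->prev = n;
        head.next = n;
    }

    static void list_del(struct node *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }

    static void show_all(void)
    {
        struct node *free_list = NULL, *n, *next;

        pthread_mutex_lock(&list_lock);
        for (n = head.next; n != &head; n = next) {
            n->refs++;                  /* pin the list element (435) */
            pthread_mutex_unlock(&list_lock);

            printf("node %d\n", n->id); /* slow work with the lock dropped */

            pthread_mutex_lock(&list_lock);
            next = n->next;             /* cf. list_safe_reset_next() (474) */
            if (n->id & 1)
                n->refs--;              /* simulate a concurrent final put */
            if (--n->refs == 0) {       /* cf. refcount_dec_and_test() (479) */
                list_del(n);
                n->next = free_list;    /* reuse the link; defer the free */
                free_list = n;
            }
        }
        pthread_mutex_unlock(&list_lock);

        while (free_list) {             /* cf. lines 486-487 */
            n = free_list;
            free_list = n->next;
            free(n);
        }
    }

    int main(void)
    {
        for (int i = 0; i < 4; i++) {
            struct node *n = malloc(sizeof(*n));

            n->refs = 1;                /* the list's own reference */
            n->id = i;
            list_add(n);
        }
        show_all();                     /* odd nodes get freed, even survive */
        return 0;
    }

The free list is drained only after the lock is dropped; the usual reason for that split is that the final free may sleep, which is forbidden while holding a spinlock.
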