Lines matching defs:ring in /freebsd-12-stable/sys/dev/drm2/i915/

Each entry below is prefixed with its line number in the matched source file.

1039 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
1043 DRM_LOCK_ASSERT(ring->dev);
1046 if (seqno == ring->outstanding_lazy_request)
1047 ret = i915_add_request(ring, NULL, NULL);
1054 * @ring: the ring expected to report seqno
1062 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1065 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1072 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1075 CTR2(KTR_DRM, "request_wait_begin %s %d", ring->name, seqno);
1084 if (WARN_ON(!ring->irq_get(ring)))
1091 (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
1099 ret = -msleep_sbt(&ring->irq_queue, &dev_priv->irq_lock, flags,
1133 ring->irq_put(ring);
1134 CTR3(KTR_DRM, "request_wait_end %s %d %d", ring->name, seqno, end);
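The __wait_seqno fragments above (lines 1062-1134) follow the classic interrupt-driven wait: read the hardware seqno, arm the ring's user interrupt (irq_get), sleep on ring->irq_queue, and drop the interrupt reference (irq_put) once the target seqno has passed. The comparison this relies on, i915_seqno_passed (called at lines 1072 and 1091), must survive 32-bit wraparound; the standard form of that idiom is sketched below, types simplified:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Wraparound-safe "has seq1 reached seq2?": the subtraction is done
     * modulo 2^32 and reinterpreted as signed, so the test stays correct
     * across a wrap as long as the two values are less than 2^31 apart.
     */
    static inline bool
    i915_seqno_passed(uint32_t seq1, uint32_t seq2)
    {
            return ((int32_t)(seq1 - seq2) >= 0);
    }

The -msleep_sbt() call at line 1099 is the FreeBSD-specific piece of this port: it atomically releases dev_priv->irq_lock while sleeping on &ring->irq_queue, and its positive errno return is negated into the driver's -Exxx convention.
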
1161 i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1163 struct drm_device *dev = ring->dev;
1175 ret = i915_gem_check_olr(ring, seqno);
1179 return __wait_seqno(ring, seqno, interruptible, NULL);
1190 struct intel_ring_buffer *ring = obj->ring;
1198 ret = i915_wait_seqno(ring, seqno);
1202 i915_gem_retire_requests_ring(ring);
1225 struct intel_ring_buffer *ring = obj->ring;
1240 ret = i915_gem_check_olr(ring, seqno);
1245 ret = __wait_seqno(ring, seqno, true, NULL);
1248 i915_gem_retire_requests_ring(ring);
2149 struct intel_ring_buffer *ring)
2153 u32 seqno = intel_ring_get_seqno(ring);
2155 BUG_ON(ring == NULL);
2156 obj->ring = ring;
2166 list_move_tail(&obj->ring_list, &ring->active_list);
2196 obj->ring = NULL;
2215 struct intel_ring_buffer *ring;
2223 for_each_ring(ring, dev_priv, i) {
2224 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
2225 ret |= ring->sync_seqno[j] != 0;
2235 for_each_ring(ring, dev_priv, i) {
2236 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
2237 ring->sync_seqno[j] = 0;
2262 i915_add_request(struct intel_ring_buffer *ring,
2266 drm_i915_private_t *dev_priv = ring->dev->dev_private;
2279 ret = intel_ring_flush_all_caches(ring);
2293 request_ring_position = intel_ring_get_tail(ring);
2295 ret = ring->add_request(ring);
2301 request->seqno = intel_ring_get_seqno(ring);
2302 request->ring = ring;
2305 was_empty = list_empty(&ring->request_list);
2306 list_add_tail(&request->list, &ring->request_list);
2319 CTR2(KTR_DRM, "request_add %s %d", ring->name, request->seqno);
2320 ring->outstanding_lazy_request = 0;
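The i915_add_request fragments (lines 2262-2320) show the request lifecycle in order: flush the ring's caches, record the ring tail (so buffer space can be reclaimed when the request retires), emit the seqno write through ring->add_request, queue the request on ring->request_list, and clear outstanding_lazy_request. Below is a compact model of that bookkeeping using FreeBSD's <sys/queue.h>; all types and names are simplified stand-ins, not the driver's real definitions, and the actual cache flush and hardware seqno write are elided:

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/queue.h>

    struct req_model {
            uint32_t seqno;
            uint32_t tail;                  /* ring tail when emitted */
            TAILQ_ENTRY(req_model) link;
    };

    struct ring_model {
            uint32_t next_seqno;
            uint32_t outstanding_lazy_request;
            uint32_t last_retired_head;     /* consumed on retire */
            int gpu_caches_dirty;
            TAILQ_HEAD(, req_model) request_list;
    };

    /* Bookkeeping order of the add-request path. */
    static int
    add_request_model(struct ring_model *ring, uint32_t tail)
    {
            struct req_model *req;

            req = malloc(sizeof(*req));
            if (req == NULL)
                    return (-1);
            req->tail = tail;                /* where the tail sits now */
            req->seqno = ring->next_seqno++; /* seqno the GPU will write */
            TAILQ_INSERT_TAIL(&ring->request_list, req, link);
            ring->outstanding_lazy_request = 0;     /* no longer lazy */
            ring->gpu_caches_dirty = 0;             /* flush happened */
            return (0);
    }

Clearing outstanding_lazy_request at the end is what the i915_gem_check_olr fragment at line 1039 relies on: a wait on a seqno that is still "lazy" first forces this path so the request actually reaches the hardware.
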
2356 struct intel_ring_buffer *ring)
2358 if (ring->dev != NULL)
2359 DRM_LOCK_ASSERT(ring->dev);
2361 while (!list_empty(&ring->request_list)) {
2364 request = list_first_entry(&ring->request_list,
2373 while (!list_empty(&ring->active_list)) {
2376 obj = list_first_entry(&ring->active_list,
2409 struct intel_ring_buffer *ring;
2412 for_each_ring(ring, dev_priv, i)
2413 i915_gem_reset_ring_lists(dev_priv, ring);
2433 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2437 if (list_empty(&ring->request_list))
2440 WARN_ON(i915_verify_lists(ring->dev));
2442 seqno = ring->get_seqno(ring, true);
2443 CTR2(KTR_DRM, "retire_request_ring %s %d", ring->name, seqno);
2445 while (!list_empty(&ring->request_list)) {
2448 request = list_first_entry(&ring->request_list,
2456 ring->name, seqno);
2462 ring->last_retired_head = request->tail;
2472 while (!list_empty(&ring->active_list)) {
2475 obj = list_first_entry(&ring->active_list,
2485 if (unlikely(ring->trace_irq_seqno &&
2486 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2487 ring->irq_put(ring);
2488 ring->trace_irq_seqno = 0;
2491 WARN_ON(i915_verify_lists(ring->dev));
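The retire fragments (lines 2433-2491) read the hardware seqno once, retire every queued request that seqno has passed, and record each request's tail in ring->last_retired_head so the ring buffer space becomes reusable; a second loop then moves completed objects off ring->active_list. The first loop, restated in terms of the model types and i915_seqno_passed sketch above:

    /* One hardware seqno read retires every request it has passed. */
    static void
    retire_requests_model(struct ring_model *ring, uint32_t hw_seqno)
    {
            struct req_model *req;

            /* Requests complete strictly oldest-first. */
            while ((req = TAILQ_FIRST(&ring->request_list)) != NULL) {
                    if (!i915_seqno_passed(hw_seqno, req->seqno))
                            break;          /* oldest not finished: stop */
                    /* Ring space up to this tail can now be reused. */
                    ring->last_retired_head = req->tail;
                    TAILQ_REMOVE(&ring->request_list, req, link);
                    free(req);
            }
    }
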
2498 struct intel_ring_buffer *ring;
2501 for_each_ring(ring, dev_priv, i)
2502 i915_gem_retire_requests_ring(ring);
2510 struct intel_ring_buffer *ring;
2528 /* Send a periodic flush down the ring so we don't hold onto GEM
2532 for_each_ring(ring, dev_priv, i) {
2533 if (ring->gpu_caches_dirty)
2534 i915_add_request(ring, NULL, NULL);
2536 idle &= list_empty(&ring->request_list);
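These fragments belong to the periodic retire handler introduced by the comment at line 2528: every ring with dirty GPU caches gets a flushing request (i915_add_request with no client data), and the device counts as idle only once every ring's request list has drained. One pass of that logic, reusing the model types above (gpu_caches_dirty is the hypothetical flag added to the model, standing in for the real ring field):

    /* Flush dirty rings, then report whether the device is idle. */
    static int
    retire_tick_model(struct ring_model *rings, int nrings)
    {
            int idle = 1, i;

            for (i = 0; i < nrings; i++) {
                    /* An empty flushing request keeps GEM from
                     * sitting on dirty GPU caches indefinitely. */
                    if (rings[i].gpu_caches_dirty)
                            (void)add_request_model(&rings[i], 0);
                    idle &= TAILQ_EMPTY(&rings[i].request_list);
            }
            return (idle);
    }
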
2559 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2563 i915_gem_retire_requests_ring(obj->ring);
2596 struct intel_ring_buffer *ring = NULL;
2624 ring = obj->ring;
2641 ret = __wait_seqno(ring, seqno, true, timeout);
2654 * i915_gem_object_sync - sync an object to a ring.
2656 * @obj: object which may be in use on another ring.
2657 * @to: ring we wish to use the object on. May be NULL.
2661 * rather than a particular GPU ring.
2669 struct intel_ring_buffer *from = obj->ring;
2685 ret = i915_gem_check_olr(obj->ring, seqno);
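The doc-comment fragments for i915_gem_object_sync (lines 2654-2685) state its contract: bring an object that may still be busy on one ring ("from", i.e. obj->ring) into a state where another ring "to" can use it, with to == NULL meaning a sync to the CPU rather than to a GPU ring. A hedged sketch of the decision structure follows; helper names are stand-ins, and the hardware-semaphore fast path real rings use between each other is elided:

    /* Stub standing in for a blocking __wait_seqno (line 1062). */
    static int
    wait_seqno_model(struct ring_model *ring, uint32_t seqno)
    {
            (void)ring; (void)seqno;        /* model: pretend it passed */
            return (0);
    }

    /* 'from' is the ring the object last ran on, 'to' the ring
     * (or NULL for the CPU) that wants it next. */
    static int
    object_sync_model(struct ring_model *from, uint32_t last_read_seqno,
        struct ring_model *to)
    {
            /* Never rendered, or already on the target timeline. */
            if (from == NULL || from == to)
                    return (0);
            /* to == NULL means "sync to CPU": a full wait is the only
             * option. Between two GPU rings the driver would rather
             * emit a hardware semaphore so the CPU never blocks; that
             * fast path is omitted from this sketch. */
            return (wait_seqno_model(from, last_read_seqno));
    }
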
2778 struct intel_ring_buffer *ring;
2782 for_each_ring(ring, dev_priv, i) {
2783 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2787 ret = intel_ring_idle(ring);
2981 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
3587 if (pipelined != obj->ring) {
3714 /* Throttle our rendering by waiting until the ring has completed our requests
3731 struct intel_ring_buffer *ring = NULL;
3743 ring = request->ring;
3751 ret = __wait_seqno(ring, seqno, true, NULL);
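These lines implement the comment at line 3714: to keep a client from queueing unboundedly far ahead of the GPU, the throttle ioctl picks one of the client's outstanding requests and blocks in __wait_seqno until the hardware has consumed it. (The stock driver scans the file's request list with a short recency window, roughly 20 ms; the model below simplifies that to the oldest queued request.)

    /* Throttle model: bound CPU run-ahead by waiting on an older
     * request, reusing the model types and stub wait above. */
    static int
    throttle_model(struct ring_model *ring)
    {
            struct req_model *req;

            req = TAILQ_FIRST(&ring->request_list);
            if (req == NULL)
                    return (0);             /* nothing outstanding */
            return (wait_seqno_model(ring, req->seqno));
    }
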
3933 if (obj->ring) {
3935 args->busy |= intel_ring_flag(obj->ring) << 16;
4279 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4281 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4354 struct intel_ring_buffer *ring;
4357 for_each_ring(ring, dev_priv, i)
4358 intel_cleanup_ring_buffer(ring);
4428 init_ring_lists(struct intel_ring_buffer *ring)
4430 INIT_LIST_HEAD(&ring->active_list);
4431 INIT_LIST_HEAD(&ring->request_list);
4446 init_ring_lists(&dev_priv->ring[i]);