Lines Matching refs:dev_priv

135 i915_gem_info_add_obj(struct drm_i915_private *dev_priv, size_t size)
138 dev_priv->mm.object_count++;
139 dev_priv->mm.object_memory += size;
143 i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, size_t size)
146 dev_priv->mm.object_count--;
147 dev_priv->mm.object_memory -= size;
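The matches at lines 135-147 outline the GEM object-accounting helpers in full; a minimal sketch of those helpers (storage class and braces assumed, field and function names as shown above):

    static void
    i915_gem_info_add_obj(struct drm_i915_private *dev_priv, size_t size)
    {
        /* Track how many GEM objects exist and their aggregate size. */
        dev_priv->mm.object_count++;
        dev_priv->mm.object_memory += size;
    }

    static void
    i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, size_t size)
    {
        dev_priv->mm.object_count--;
        dev_priv->mm.object_memory -= size;
    }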
153 struct drm_i915_private *dev_priv;
156 dev_priv = dev->dev_private;
157 if (!atomic_load_acq_int(&dev_priv->mm.wedged))
160 mtx_lock(&dev_priv->error_completion_lock);
161 while (dev_priv->error_completion == 0) {
162 ret = -msleep(&dev_priv->error_completion,
163 &dev_priv->error_completion_lock, PCATCH, "915wco", 0);
167 mtx_unlock(&dev_priv->error_completion_lock);
171 mtx_unlock(&dev_priv->error_completion_lock);
173 if (atomic_load_acq_int(&dev_priv->mm.wedged)) {
174 mtx_lock(&dev_priv->error_completion_lock);
175 dev_priv->error_completion++;
176 mtx_unlock(&dev_priv->error_completion_lock);
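Lines 153-176 trace the wedged-GPU wait pattern: sleep on error_completion until a reset completes, then re-take a reference so later waiters still block while the GPU stays wedged. A sketch assembled from those fragments (the function name i915_gem_wait_for_error and the early-return shape are assumptions):

    static int
    i915_gem_wait_for_error(struct drm_device *dev)
    {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        /* Nothing to wait for unless a GPU hang has been declared. */
        if (!atomic_load_acq_int(&dev_priv->mm.wedged))
            return (0);

        mtx_lock(&dev_priv->error_completion_lock);
        while (dev_priv->error_completion == 0) {
            ret = -msleep(&dev_priv->error_completion,
                &dev_priv->error_completion_lock, PCATCH, "915wco", 0);
            if (ret != 0) {
                mtx_unlock(&dev_priv->error_completion_lock);
                return (ret);
            }
        }
        mtx_unlock(&dev_priv->error_completion_lock);

        if (atomic_load_acq_int(&dev_priv->mm.wedged)) {
            /* Still wedged: keep the completion held for the next waiter. */
            mtx_lock(&dev_priv->error_completion_lock);
            dev_priv->error_completion++;
            mtx_unlock(&dev_priv->error_completion_lock);
        }
        return (0);
    }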
184 struct drm_i915_private *dev_priv;
187 dev_priv = dev->dev_private;
209 drm_i915_private_t *dev_priv;
212 dev_priv = dev->dev_private;
223 was_interruptible = dev_priv->mm.interruptible;
224 dev_priv->mm.interruptible = false;
229 dev_priv->mm.interruptible = was_interruptible;
234 i915_gem_info_remove_obj(dev_priv, obj->base.size);
252 drm_i915_private_t *dev_priv;
255 dev_priv = dev->dev_private;
257 INIT_LIST_HEAD(&dev_priv->mm.active_list);
258 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
259 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
260 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
261 INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
263 init_ring_lists(&dev_priv->rings[i]);
265 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
266 TIMEOUT_TASK_INIT(dev_priv->tq, &dev_priv->mm.retire_task, 0,
267 i915_gem_retire_task_handler, dev_priv);
268 dev_priv->error_completion = 0;
276 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
280 dev_priv->fence_reg_start = 3;
284 dev_priv->num_fence_regs = 16;
286 dev_priv->num_fence_regs = 8;
292 dev_priv->mm.interruptible = true;
294 dev_priv->mm.i915_lowmem = EVENTHANDLER_REGISTER(vm_lowmem,
303 drm_i915_private_t *dev_priv;
309 dev_priv = dev->dev_private;
316 if (mtx_initialized(&dev_priv->mm.gtt_space.unused_lock))
337 drm_i915_private_t *dev_priv;
342 dev_priv = dev->dev_private;
343 if (dev_priv->mm.suspended) {
370 dev_priv->mm.suspended = 1;
371 callout_stop(&dev_priv->hangcheck_timer);
379 taskqueue_cancel_timeout(dev_priv->tq, &dev_priv->mm.retire_task, NULL);
386 drm_i915_private_t *dev_priv;
388 dev_priv = dev->dev_private;
391 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
410 drm_i915_private_t *dev_priv;
417 dev_priv = dev->dev_private;
418 ppgtt = dev_priv->mm.aliasing_ppgtt;
453 for_each_ring(ring, dev_priv, i) {
466 drm_i915_private_t *dev_priv;
469 dev_priv = dev->dev_private;
489 dev_priv->next_seqno = 1;
495 intel_cleanup_ring_buffer(&dev_priv->rings[VCS]);
497 intel_cleanup_ring_buffer(&dev_priv->rings[RCS]);
516 struct drm_i915_private *dev_priv = dev->dev_private;
520 gtt_size = dev_priv->mm.gtt.gtt_total_entries << PAGE_SHIFT;
521 mappable_size = dev_priv->mm.gtt.gtt_mappable_entries << PAGE_SHIFT;
560 dev_priv->dri1.allow_batchbuffer = 1;
568 struct drm_i915_private *dev_priv;
573 dev_priv = dev->dev_private;
578 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
583 args->aper_size = dev_priv->mm.gtt_total;
776 struct drm_i915_private *dev_priv;
784 dev_priv = dev->dev_private;
785 if (atomic_load_acq_int(&dev_priv->mm.wedged))
806 taskqueue_enqueue_timeout(dev_priv->tq,
807 &dev_priv->mm.retire_task, 0);
868 drm_i915_private_t *dev_priv;
872 dev_priv = dev->dev_private;
873 for_each_ring(ring, dev_priv, i)
881 drm_i915_private_t *dev_priv;
886 dev_priv = dev->dev_private;
887 if (atomic_load_acq_int(&dev_priv->mm.wedged) != 0) {
889 atomic_store_rel_int(&dev_priv->mm.wedged, 0);
893 dev_priv->mm.suspended = 0;
901 KASSERT(list_empty(&dev_priv->mm.active_list), ("active list"));
902 KASSERT(list_empty(&dev_priv->mm.flushing_list), ("flushing list"));
903 KASSERT(list_empty(&dev_priv->mm.inactive_list), ("inactive list"));
915 dev_priv->mm.suspended = 1;
1918 drm_i915_private_t *dev_priv;
1926 dev_priv = dev->dev_private;
2018 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2106 struct drm_i915_private *dev_priv;
2110 dev_priv = dev->dev_private;
2122 if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
2150 struct drm_i915_private *dev_priv;
2153 dev_priv = dev->dev_private;
2162 struct drm_i915_private *dev_priv;
2165 dev_priv = dev->dev_private;
2192 i915_gem_info_add_obj(dev_priv, size);
2269 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2305 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2317 drm_i915_private_t *dev_priv;
2329 dev_priv = dev->dev_private;
2350 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
2589 struct drm_i915_private *dev_priv;
2596 dev_priv = dev->dev_private;
2623 dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
2632 &dev_priv->mm.gtt_space, size, alignment, 0,
2633 dev_priv->mm.gtt_mappable_end, 0);
2635 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2641 dev_priv->mm.gtt_mappable_end, 1);
2674 if (!dev_priv->mm.aliasing_ppgtt)
2677 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
2678 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2692 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
2767 drm_i915_private_t *dev_priv;
2770 dev_priv = obj->base.dev->dev_private;
2802 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2927 struct drm_i915_private *dev_priv;
2933 dev_priv = dev->dev_private;
2934 start = OFF_TO_IDX(dev_priv->mm.gtt_start);
2935 end = OFF_TO_IDX(dev_priv->mm.gtt_end);
3040 struct drm_i915_private *dev_priv = dev->dev_private;
3053 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
3062 reg = &dev_priv->fence_regs[obj->fence_reg];
3064 &dev_priv->mm.fence_list);
3081 drm_i915_private_t *dev_priv = dev->dev_private;
3084 list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
3093 struct drm_i915_private *dev_priv = dev->dev_private;
3095 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
3160 drm_i915_private_t *dev_priv;
3162 dev_priv = obj->base.dev->dev_private;
3163 return (dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
3248 drm_i915_private_t *dev_priv = dev->dev_private;
3253 for_each_ring(ring, dev_priv, i) {
3271 i915_gem_check_wedge(struct drm_i915_private *dev_priv)
3273 DRM_LOCK_ASSERT(dev_priv->dev);
3275 if (atomic_load_acq_int(&dev_priv->mm.wedged) != 0) {
3278 mtx_lock(&dev_priv->error_completion_lock);
3279 recovery_complete = dev_priv->error_completion > 0;
3280 mtx_unlock(&dev_priv->error_completion_lock);
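Lines 3271-3280 give the wedge check performed before blocking on the GPU; note that error_completion must be compared by value, not by address, for recovery_complete to mean anything. A sketch of the check (the -EIO/-EAGAIN split is an assumption about the return convention):

    static int
    i915_gem_check_wedge(struct drm_i915_private *dev_priv)
    {
        bool recovery_complete;

        DRM_LOCK_ASSERT(dev_priv->dev);

        if (atomic_load_acq_int(&dev_priv->mm.wedged) != 0) {
            mtx_lock(&dev_priv->error_completion_lock);
            recovery_complete = dev_priv->error_completion > 0;
            mtx_unlock(&dev_priv->error_completion_lock);

            /* Recovery done but still wedged: fail; reset in flight: retry. */
            return (recovery_complete ? -EIO : -EAGAIN);
        }
        return (0);
    }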
3318 drm_i915_private_t *dev_priv = ring->dev->dev_private;
3326 mtx_lock(&dev_priv->irq_lock);
3328 mtx_unlock(&dev_priv->irq_lock);
3334 && !atomic_load_acq_int(&dev_priv->mm.wedged) &&
3336 ret = -msleep(ring, &dev_priv->irq_lock, flags, "915gwr", 0);
3341 mtx_unlock(&dev_priv->irq_lock);
3351 drm_i915_private_t *dev_priv;
3356 dev_priv = ring->dev->dev_private;
3359 ret = i915_gem_check_wedge(dev_priv);
3367 ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible);
3368 if (atomic_load_acq_int(&dev_priv->mm.wedged))
3377 drm_i915_private_t *dev_priv = dev->dev_private;
3378 u32 seqno = dev_priv->next_seqno;
3381 if (++dev_priv->next_seqno == 0)
3382 dev_priv->next_seqno = 1;
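Lines 3377-3382 show seqno allocation: hand out the current value, post-increment, and skip 0 on wrap because a zero seqno is reserved to mean "no request outstanding". A sketch (the wrapper name and u32 return type are assumptions):

    static u32
    i915_gem_next_request_seqno(struct drm_device *dev)
    {
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 seqno = dev_priv->next_seqno;

        /* Reserve 0; wrap straight to 1. */
        if (++dev_priv->next_seqno == 0)
            dev_priv->next_seqno = 1;

        return (seqno);
    }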
3400 drm_i915_private_t *dev_priv;
3409 dev_priv = ring->dev->dev_private;
3439 if (!dev_priv->mm.suspended) {
3441 callout_schedule(&dev_priv->hangcheck_timer,
3445 taskqueue_enqueue_timeout(dev_priv->tq,
3446 &dev_priv->mm.retire_task, hz);
3493 i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
3526 struct drm_i915_private *dev_priv = dev->dev_private;
3529 for (i = 0; i < dev_priv->num_fence_regs; i++) {
3530 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
3542 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
3548 struct drm_i915_private *dev_priv = dev->dev_private;
3553 for_each_ring(ring, dev_priv, i)
3554 i915_gem_reset_ring_lists(dev_priv, ring);
3560 while (!list_empty(&dev_priv->mm.flushing_list)) {
3561 obj = list_first_entry(&dev_priv->mm.flushing_list,
3573 list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
3640 struct drm_i915_private *dev_priv = ring->dev->dev_private;
3641 mtx_lock(&dev_priv->irq_lock);
3643 mtx_unlock(&dev_priv->irq_lock);
3651 drm_i915_private_t *dev_priv = dev->dev_private;
3655 for_each_ring(ring, dev_priv, i)
3662 drm_i915_private_t *dev_priv = dev->dev_private;
3687 drm_i915_private_t *dev_priv = dev->dev_private;
3710 drm_i915_private_t *dev_priv = dev->dev_private;
3755 drm_i915_private_t *dev_priv = dev->dev_private;
3799 static inline int fence_number(struct drm_i915_private *dev_priv,
3802 return fence - dev_priv->fence_regs;
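fence_number() (lines 3799-3802) recovers a fence register's index by pointer arithmetic: fence_regs[] is a flat array, so subtracting its base from a fence pointer yields the hardware register number. Reconstructed with the parameter type implied by the fence_regs[] usage elsewhere in this listing:

    static inline int
    fence_number(struct drm_i915_private *dev_priv,
        struct drm_i915_fence_reg *fence)
    {
        return fence - dev_priv->fence_regs;
    }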
3809 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3810 int reg = fence_number(dev_priv, fence);
3817 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
3862 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3873 &dev_priv->fence_regs[obj->fence_reg],
3883 struct drm_i915_private *dev_priv = dev->dev_private;
3889 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3890 reg = &dev_priv->fence_regs[i];
3902 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
3916 struct drm_i915_private *dev_priv = dev->dev_private;
3933 reg = &dev_priv->fence_regs[obj->fence_reg];
3936 &dev_priv->mm.fence_list);
3980 drm_i915_private_t *dev_priv;
3986 dev_priv = arg;
3987 dev = dev_priv->dev;
3991 taskqueue_enqueue_timeout(dev_priv->tq,
3992 &dev_priv->mm.retire_task, hz);
4004 for_each_ring(ring, dev_priv, i) {
4005 struct intel_ring_buffer *ring = &dev_priv->rings[i];
4023 if (!dev_priv->mm.suspended && !idle)
4024 taskqueue_enqueue_timeout(dev_priv->tq,
4025 &dev_priv->mm.retire_task, hz);
4046 drm_i915_private_t *dev_priv;
4050 dev_priv = dev->dev_private;
4051 if (dev_priv->mm.phys_objs[id - 1] != NULL || size == 0)
4067 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4079 drm_i915_private_t *dev_priv;
4082 dev_priv = dev->dev_private;
4083 if (dev_priv->mm.phys_objs[id - 1] == NULL)
4086 phys_obj = dev_priv->mm.phys_objs[id - 1];
4092 dev_priv->mm.phys_objs[id - 1] = NULL;
4152 drm_i915_private_t *dev_priv;
4167 dev_priv = dev->dev_private;
4168 if (dev_priv->mm.phys_objs[id - 1] == NULL) {
4178 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4214 drm_i915_private_t *dev_priv;
4216 dev_priv = dev->dev_private;
4217 return (!list_empty(&dev_priv->mm.flushing_list) ||
4218 !list_empty(&dev_priv->mm.active_list));
4225 struct drm_i915_private *dev_priv;
4230 dev_priv = dev->dev_private;
4243 list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list,
4253 list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list,
4277 struct drm_i915_private *dev_priv;
4279 dev_priv = dev->dev_private;
4280 EVENTHANDLER_DEREGISTER(vm_lowmem, dev_priv->mm.i915_lowmem);
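The vm_lowmem hook registered at driver load (line 294) is paired with this deregistration at unload (line 4280). A sketch of the pairing; the handler name i915_gem_lowmem, its argument, and the priority are assumptions:

    /* at load time */
    dev_priv->mm.i915_lowmem = EVENTHANDLER_REGISTER(vm_lowmem,
        i915_gem_lowmem, dev, EVENTHANDLER_PRI_ANY);

    /* at unload */
    EVENTHANDLER_DEREGISTER(vm_lowmem, dev_priv->mm.i915_lowmem);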