Lines Matching refs:dev_priv in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/gpu/drm/i915/
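Almost every match below follows the same idiom: a function takes a struct drm_device * (or reaches one through an object), derives the driver-private pointer from dev->dev_private, and then works on the per-device GEM state kept under dev_priv->mm. The sketch below is a hypothetical helper, not code from the file; it assumes the i915 driver headers and uses only fields visible in the matches, mirroring the idle-check fragments around lines 2031-2036 and 4889-4894.

    /* Hypothetical helper, for illustration only: derive dev_priv from the
     * drm_device, then test the per-device lists under mm.active_list_lock,
     * the way the fragments at lines 2031-2036 and 4889-4894 do. */
    static int example_gpu_is_idle(struct drm_device *dev)
    {
            drm_i915_private_t *dev_priv = dev->dev_private;
            int lists_empty;

            spin_lock(&dev_priv->mm.active_list_lock);
            lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
                          list_empty(&dev_priv->render_ring.active_list);
            if (HAS_BSD(dev))
                    lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
            spin_unlock(&dev_priv->mm.active_list_lock);

            return lists_empty;
    }
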

72 drm_i915_private_t *dev_priv = dev->dev_private;
80 drm_mm_init(&dev_priv->mm.gtt_space, start,
172 drm_i915_private_t *dev_priv = obj->dev->dev_private;
175 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
575 drm_i915_private_t *dev_priv = dev->dev_private;
612 ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
647 drm_i915_private_t *dev_priv = dev->dev_private;
713 slow_kernel_write(dev_priv->mm.gtt_mapping,
993 struct drm_i915_private *dev_priv = dev->dev_private;
1038 &dev_priv->fence_regs[obj_priv->fence_reg];
1040 &dev_priv->mm.fence_list);
1056 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1159 drm_i915_private_t *dev_priv = dev->dev_private;
1190 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1478 drm_i915_private_t *dev_priv = dev->dev_private;
1489 spin_lock(&dev_priv->mm.active_list_lock);
1491 spin_unlock(&dev_priv->mm.active_list_lock);
1499 drm_i915_private_t *dev_priv = dev->dev_private;
1503 list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
1538 drm_i915_private_t *dev_priv = dev->dev_private;
1545 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1563 drm_i915_private_t *dev_priv = dev->dev_private;
1567 &dev_priv->mm.gpu_write_list,
1583 &dev_priv->fence_regs[obj_priv->fence_reg];
1585 &dev_priv->mm.fence_list);
1599 drm_i915_private_t *dev_priv = dev->dev_private;
1633 if (!dev_priv->mm.suspended) {
1634 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
1636 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1669 drm_i915_private_t *dev_priv = dev->dev_private;
1676 spin_lock(&dev_priv->mm.active_list_lock);
1709 spin_unlock(&dev_priv->mm.active_list_lock);
1711 spin_lock(&dev_priv->mm.active_list_lock);
1715 spin_unlock(&dev_priv->mm.active_list_lock);
1741 drm_i915_private_t *dev_priv = dev->dev_private;
1760 atomic_read(&dev_priv->mm.wedged)) {
1770 if (unlikely (dev_priv->trace_irq_seqno &&
1771 i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
1774 dev_priv->trace_irq_seqno = 0;
1781 drm_i915_private_t *dev_priv = dev->dev_private;
1783 if (!list_empty(&dev_priv->mm.deferred_free_list)) {
1792 &dev_priv->mm.deferred_free_list,
1797 i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
1799 i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
1805 drm_i915_private_t *dev_priv;
1808 dev_priv = container_of(work, drm_i915_private_t,
1810 dev = dev_priv->dev;
1815 if (!dev_priv->mm.suspended &&
1816 (!list_empty(&dev_priv->render_ring.request_list) ||
1818 !list_empty(&dev_priv->bsd_ring.request_list))))
1819 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1827 drm_i915_private_t *dev_priv = dev->dev_private;
1833 if (atomic_read(&dev_priv->mm.wedged))
1856 || atomic_read(&dev_priv->mm.wedged));
1861 || atomic_read(&dev_priv->mm.wedged));
1868 if (atomic_read(&dev_priv->mm.wedged))
1902 drm_i915_private_t *dev_priv = dev->dev_private;
1905 dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
1910 dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
1955 drm_i915_private_t *dev_priv = dev->dev_private;
2010 spin_lock(&dev_priv->mm.active_list_lock);
2013 spin_unlock(&dev_priv->mm.active_list_lock);
2026 drm_i915_private_t *dev_priv = dev->dev_private;
2031 spin_lock(&dev_priv->mm.active_list_lock);
2032 lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
2033 list_empty(&dev_priv->render_ring.active_list) &&
2035 list_empty(&dev_priv->bsd_ring.active_list)));
2036 spin_unlock(&dev_priv->mm.active_list_lock);
2044 &dev_priv->render_ring);
2047 ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
2051 &dev_priv->bsd_ring);
2055 ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
2124 drm_i915_private_t *dev_priv = dev->dev_private;
2146 drm_i915_private_t *dev_priv = dev->dev_private;
2166 drm_i915_private_t *dev_priv = dev->dev_private;
2214 drm_i915_private_t *dev_priv = dev->dev_private;
2248 struct drm_i915_private *dev_priv = dev->dev_private;
2254 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2255 reg = &dev_priv->fence_regs[i];
2269 list_for_each_entry(reg, &dev_priv->mm.fence_list,
2314 struct drm_i915_private *dev_priv = dev->dev_private;
2321 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2322 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2351 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2352 list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2383 * data structures in dev_priv and obj_priv.
2389 drm_i915_private_t *dev_priv = dev->dev_private;
2392 &dev_priv->fence_regs[obj_priv->fence_reg];
2426 * data structures in dev_priv and obj_priv.
2472 drm_i915_private_t *dev_priv = dev->dev_private;
2499 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2573 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
2969 drm_i915_private_t *dev_priv = dev->dev_private;
3033 if (obj_priv->ring == &dev_priv->render_ring)
3034 dev_priv->flush_rings |= FLUSH_RENDER_RING;
3035 else if (obj_priv->ring == &dev_priv->bsd_ring)
3036 dev_priv->flush_rings |= FLUSH_BSD_RING;
3173 drm_i915_private_t *dev_priv = dev->dev_private;
3347 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
3516 drm_i915_private_t *dev_priv = dev->dev_private;
3522 prepare_to_wait(&dev_priv->pending_flip_queue,
3541 finish_wait(&dev_priv->pending_flip_queue, &wait);
3553 drm_i915_private_t *dev_priv = dev->dev_private;
3575 ring = &dev_priv->bsd_ring;
3577 ring = &dev_priv->render_ring;
3621 if (atomic_read(&dev_priv->mm.wedged)) {
3627 if (dev_priv->mm.suspended) {
3754 dev_priv->flush_rings = 0;
3775 if (dev_priv->flush_rings & FLUSH_RENDER_RING)
3778 &dev_priv->render_ring);
3779 if (dev_priv->flush_rings & FLUSH_BSD_RING)
3782 &dev_priv->bsd_ring);
3793 &dev_priv->mm.gpu_write_list);
4090 drm_i915_private_t *dev_priv = dev->dev_private;
4106 &dev_priv->mm.inactive_list);
4350 drm_i915_private_t *dev_priv = dev->dev_private;
4357 &dev_priv->mm.deferred_free_list);
4390 drm_i915_private_t *dev_priv = dev->dev_private;
4395 if (dev_priv->mm.suspended ||
4396 (dev_priv->render_ring.gem_object == NULL) ||
4398 dev_priv->bsd_ring.gem_object == NULL)) {
4422 dev_priv->mm.suspended = 1;
4423 del_timer(&dev_priv->hangcheck_timer);
4431 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4443 drm_i915_private_t *dev_priv = dev->dev_private;
4461 dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
4462 dev_priv->seqno_page = kmap(obj_priv->pages[0]);
4463 if (dev_priv->seqno_page == NULL)
4466 dev_priv->seqno_obj = obj;
4467 memset(dev_priv->seqno_page, 0, PAGE_SIZE);
4483 drm_i915_private_t *dev_priv = dev->dev_private;
4487 obj = dev_priv->seqno_obj;
4492 dev_priv->seqno_obj = NULL;
4494 dev_priv->seqno_page = NULL;
4500 drm_i915_private_t *dev_priv = dev->dev_private;
4503 dev_priv->render_ring = render_ring;
4506 dev_priv->render_ring.status_page.page_addr
4507 = dev_priv->status_page_dmah->vaddr;
4508 memset(dev_priv->render_ring.status_page.page_addr,
4518 ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
4523 dev_priv->bsd_ring = bsd_ring;
4524 ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
4529 dev_priv->next_seqno = 1;
4534 intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
4544 drm_i915_private_t *dev_priv = dev->dev_private;
4546 intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
4548 intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
4557 drm_i915_private_t *dev_priv = dev->dev_private;
4563 if (atomic_read(&dev_priv->mm.wedged)) {
4565 atomic_set(&dev_priv->mm.wedged, 0);
4569 dev_priv->mm.suspended = 0;
4577 spin_lock(&dev_priv->mm.active_list_lock);
4578 BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
4579 BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
4580 spin_unlock(&dev_priv->mm.active_list_lock);
4582 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
4583 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
4584 BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
4585 BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
4597 dev_priv->mm.suspended = 1;
4631 drm_i915_private_t *dev_priv = dev->dev_private;
4633 spin_lock_init(&dev_priv->mm.active_list_lock);
4634 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
4635 INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
4636 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4637 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4638 INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
4639 INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
4640 INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
4642 INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
4643 INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
4646 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4647 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4650 list_add(&dev_priv->mm.shrink_list, &shrink_list);
4665 dev_priv->fence_reg_start = 3;
4668 dev_priv->num_fence_regs = 16;
4670 dev_priv->num_fence_regs = 8;
4684 init_waitqueue_head(&dev_priv->pending_flip_queue);
4694 drm_i915_private_t *dev_priv = dev->dev_private;
4698 if (dev_priv->mm.phys_objs[id - 1] || !size)
4716 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4726 drm_i915_private_t *dev_priv = dev->dev_private;
4729 if (!dev_priv->mm.phys_objs[id - 1])
4732 phys_obj = dev_priv->mm.phys_objs[id - 1];
4742 dev_priv->mm.phys_objs[id - 1] = NULL;
4793 drm_i915_private_t *dev_priv = dev->dev_private;
4811 if (!dev_priv->mm.phys_objs[id - 1]) {
4821 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
4886 drm_i915_private_t *dev_priv = dev->dev_private;
4889 spin_lock(&dev_priv->mm.active_list_lock);
4890 lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
4891 list_empty(&dev_priv->render_ring.active_list);
4893 lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
4894 spin_unlock(&dev_priv->mm.active_list_lock);
4902 drm_i915_private_t *dev_priv, *next_dev;
4910 list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
4911 struct drm_device *dev = dev_priv->dev;
4915 &dev_priv->mm.inactive_list,
4930 list_for_each_entry_safe(dev_priv, next_dev,
4932 struct drm_device *dev = dev_priv->dev;
4941 &dev_priv->mm.inactive_list,
4960 list_for_each_entry_safe(dev_priv, next_dev,
4962 struct drm_device *dev = dev_priv->dev;
4970 &dev_priv->mm.inactive_list,
4994 list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
4995 struct drm_device *dev = dev_priv->dev;
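The last group of matches (lines 4650 and 4902-4995) shows the memory-shrinker side of the same state: at load time each device adds dev_priv->mm.shrink_list to a global shrink_list, and the shrinker later walks that list to reach every device's mm.inactive_list. A hypothetical sketch of that walk, again assuming the i915 driver headers and with an illustrative function name:

    /* Illustrative only: walk the global shrink_list the way the fragments at
     * lines 4910 and 4994 do, reaching each device's inactive list through its
     * dev_priv.  Devices whose struct_mutex cannot be taken are skipped. */
    static int example_walk_shrink_list(struct list_head *shrink_list)
    {
            drm_i915_private_t *dev_priv;
            int devices = 0;

            list_for_each_entry(dev_priv, shrink_list, mm.shrink_list) {
                    struct drm_device *dev = dev_priv->dev;

                    if (!mutex_trylock(&dev->struct_mutex))
                            continue;
                    /* ... scan dev_priv->mm.inactive_list here ... */
                    mutex_unlock(&dev->struct_mutex);
                    devices++;
            }
            return devices;
    }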