Lines matching refs:dev_priv in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/gpu/drm/vmwgfx/ (each match is prefixed with its line number in the source file).

32 bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
34 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
37 if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
54 bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
56 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
59 if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
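Both vmw_fifo_have_3d (line 32) and vmw_fifo_have_pitchlock (line 54) share one guard: they refuse to read extended-FIFO registers unless the device advertises SVGA_CAP_EXTENDED_FIFO. A minimal user-space sketch of that guard follows; the struct, the capability bit value, and the register index are all made up for illustration (the real layout lives in svga_reg.h).

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CAP_EXTENDED_FIFO (1u << 5)  /* illustrative bit, not the real SVGA_CAP_* value */

    struct fake_private {
        uint32_t capabilities;
        const volatile uint32_t *fifo_mem;  /* would be ioremapped MMIO in the driver */
    };

    /* Never read an extended-FIFO register unless the device says it exists. */
    static bool fifo_has_feature(const struct fake_private *priv, size_t reg_index)
    {
        if (!(priv->capabilities & CAP_EXTENDED_FIFO))
            return false;
        return priv->fifo_mem[reg_index] != 0;
    }

    int main(void)
    {
        uint32_t fifo[16] = { [7] = 1 };
        struct fake_private priv = { .capabilities = CAP_EXTENDED_FIFO, .fifo_mem = fifo };
        printf("feature present: %d\n", fifo_has_feature(&priv, 7));
        return 0;
    }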
69 int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
71 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
102 DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
103 DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
104 DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
106 mutex_lock(&dev_priv->hw_mutex);
107 dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
108 dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
109 dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
110 vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
113 if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
114 min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
121 iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
128 vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
129 mutex_unlock(&dev_priv->hw_mutex);
140 atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
141 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
143 return vmw_fifo_send_fence(dev_priv, &dummy);
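The vmw_fifo_init matches (lines 106-110 and 128) show a save-then-modify pattern: the old values of SVGA_REG_ENABLE, SVGA_REG_CONFIG_DONE and SVGA_REG_TRACES are captured under hw_mutex before the device is enabled, so vmw_fifo_release can put them back at teardown. A toy model of that pattern, with stubbed register I/O standing in for vmw_read/vmw_write:

    #include <stdint.h>
    #include <stdio.h>

    enum { REG_ENABLE, REG_CONFIG_DONE, REG_TRACES, REG_COUNT };

    static uint32_t regs[REG_COUNT];  /* stand-in for the SVGA register ports */
    static uint32_t reg_read(int r)              { return regs[r]; }
    static void     reg_write(int r, uint32_t v) { regs[r] = v; }

    struct saved_state { uint32_t enable, config_done, traces; };

    /* Mirrors lines 107-110 and 128: capture the old register values first,
     * so teardown can undo every change made here. */
    static void fifo_init(struct saved_state *s)
    {
        s->enable      = reg_read(REG_ENABLE);
        s->config_done = reg_read(REG_CONFIG_DONE);
        s->traces      = reg_read(REG_TRACES);

        reg_write(REG_ENABLE, 1);       /* bring the device up */
        /* ... FIFO min/max layout would be programmed here ... */
        reg_write(REG_CONFIG_DONE, 1);  /* tell the host the FIFO is ready */
    }

    int main(void)
    {
        struct saved_state s;
        regs[REG_TRACES] = 1;
        fifo_init(&s);
        printf("saved traces=%u, enable now=%u\n", s.traces, reg_read(REG_ENABLE));
        return 0;
    }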
150 void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
152 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
154 mutex_lock(&dev_priv->hw_mutex);
158 vmw_write(dev_priv, SVGA_REG_SYNC, reason);
161 mutex_unlock(&dev_priv->hw_mutex);
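vmw_fifo_ping_host is a doorbell: a single write of `reason` to SVGA_REG_SYNC, serialized by the same hw_mutex as every other register access. A sketch of the shape, with a pthread mutex standing in for the kernel mutex and a plain variable for the register:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static pthread_mutex_t hw_mutex = PTHREAD_MUTEX_INITIALIZER;
    static uint32_t sync_reg;  /* stands in for SVGA_REG_SYNC */

    /* Doorbell pattern from lines 154-161: the write itself is trivial; what
     * matters is that it shares the hardware mutex with all other accesses. */
    static void ping_host(uint32_t reason)
    {
        pthread_mutex_lock(&hw_mutex);
        sync_reg = reason;  /* driver: vmw_write(dev_priv, SVGA_REG_SYNC, reason) */
        pthread_mutex_unlock(&hw_mutex);
    }

    int main(void)
    {
        ping_host(1 /* a made-up "generic sync" reason code */);
        printf("sync reg = %u\n", sync_reg);
        return 0;
    }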
164 void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
166 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
168 mutex_lock(&dev_priv->hw_mutex);
170 while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
171 vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
173 dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
175 vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
176 dev_priv->config_done_state);
177 vmw_write(dev_priv, SVGA_REG_ENABLE,
178 dev_priv->enable_state);
179 vmw_write(dev_priv, SVGA_REG_TRACES,
180 dev_priv->traces_state);
182 mutex_unlock(&dev_priv->hw_mutex);
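vmw_fifo_release (lines 170-180) drains the device before restoring state: it keeps writing SVGA_SYNC_GENERIC to SVGA_REG_SYNC until SVGA_REG_BUSY reads zero, then writes back the three values saved by vmw_fifo_init. A toy model in which the fake device goes idle after one sync ping:

    #include <stdint.h>
    #include <stdio.h>

    enum { REG_ENABLE, REG_CONFIG_DONE, REG_TRACES, REG_BUSY, REG_SYNC, REG_COUNT };
    static uint32_t regs[REG_COUNT];

    static uint32_t reg_read(int r) { return regs[r]; }
    static void     reg_write(int r, uint32_t v)
    {
        regs[r] = v;
        if (r == REG_SYNC)
            regs[REG_BUSY] = 0;  /* toy device: a sync ping finishes outstanding work */
    }

    struct saved_state { uint32_t enable, config_done, traces; };

    /* Mirrors lines 170-180: kick the device until it reports idle, then
     * restore the register values captured at init time. */
    static void fifo_release(const struct saved_state *s)
    {
        while (reg_read(REG_BUSY) != 0)
            reg_write(REG_SYNC, 1);  /* SVGA_SYNC_GENERIC in the driver */

        reg_write(REG_CONFIG_DONE, s->config_done);
        reg_write(REG_ENABLE, s->enable);
        reg_write(REG_TRACES, s->traces);
    }

    int main(void)
    {
        struct saved_state s = { .enable = 0, .config_done = 0, .traces = 1 };
        regs[REG_BUSY] = 1;
        fifo_release(&s);
        printf("busy=%u traces=%u\n", reg_read(REG_BUSY), reg_read(REG_TRACES));
        return 0;
    }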
201 static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
203 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
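vmw_fifo_is_full decides whether `bytes` more of command data fit in the ring. The listing only shows the signature, so the sketch below is a generic ring-buffer fullness test under assumed names: the data area spans [min, max), next_cmd is the guest's write offset and stop is the host's read offset.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical ring descriptor; the driver reads these four values from
     * the SVGA_FIFO_MIN/MAX/NEXT_CMD/STOP slots of the shared FIFO page. */
    struct ring {
        uint32_t min, max, next_cmd, stop;
    };

    /* Classic ring-buffer test: there is room for `bytes` only if writing
     * them would not make the producer catch up with the consumer. */
    static bool ring_is_full(const struct ring *r, uint32_t bytes)
    {
        uint32_t free_bytes;

        if (r->next_cmd >= r->stop)
            /* producer ahead of consumer: free space wraps around the end */
            free_bytes = (r->max - r->next_cmd) + (r->stop - r->min);
        else
            free_bytes = r->stop - r->next_cmd;

        /* keep a little slack so "full" never becomes indistinguishable from "empty" */
        return free_bytes <= bytes;
    }

    int main(void)
    {
        struct ring r = { .min = 16, .max = 1024, .next_cmd = 900, .stop = 32 };
        printf("full for 200 bytes? %d\n", ring_is_full(&r, 200));
        return 0;
    }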
212 static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
223 prepare_to_wait(&dev_priv->fifo_queue, &__wait,
226 if (!vmw_fifo_is_full(dev_priv, bytes))
239 finish_wait(&dev_priv->fifo_queue, &__wait);
240 wake_up_all(&dev_priv->fifo_queue);
245 static int vmw_fifo_wait(struct vmw_private *dev_priv,
252 if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
255 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
256 if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
257 return vmw_fifo_wait_noirq(dev_priv, bytes,
260 mutex_lock(&dev_priv->hw_mutex);
261 if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
262 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
264 dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
265 vmw_write(dev_priv, SVGA_REG_IRQMASK,
266 vmw_read(dev_priv, SVGA_REG_IRQMASK) |
268 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
270 mutex_unlock(&dev_priv->hw_mutex);
274 (dev_priv->fifo_queue,
275 !vmw_fifo_is_full(dev_priv, bytes), timeout);
278 (dev_priv->fifo_queue,
279 !vmw_fifo_is_full(dev_priv, bytes), timeout);
286 mutex_lock(&dev_priv->hw_mutex);
287 if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
288 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
289 vmw_write(dev_priv, SVGA_REG_IRQMASK,
290 vmw_read(dev_priv, SVGA_REG_IRQMASK) &
292 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
294 mutex_unlock(&dev_priv->hw_mutex);
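vmw_fifo_wait has two paths: a polling fallback (vmw_fifo_wait_noirq, lines 212-240) when the device lacks SVGA_CAP_IRQMASK, and an interrupt-driven sleep otherwise. Lines 261 and 287 show the interesting part of the latter: a waiter count decides when to unmask and re-mask the FIFO-progress interrupt, so the IRQ is enabled only while someone is actually waiting. A toy model of that accounting with C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int  fifo_queue_waiters;
    static _Atomic bool irq_enabled;

    /* First thread to start waiting unmasks the interrupt
     * (driver: vmw_write(SVGA_REG_IRQMASK, mask | progress-bit)). */
    static void begin_wait(void)
    {
        if (atomic_fetch_add(&fifo_queue_waiters, 1) + 1 > 0)  /* like atomic_add_return() */
            irq_enabled = true;
    }

    /* Last thread to stop waiting masks it again. */
    static void end_wait(void)
    {
        if (atomic_fetch_sub(&fifo_queue_waiters, 1) - 1 == 0)
            irq_enabled = false;
    }

    int main(void)
    {
        begin_wait();
        begin_wait();
        end_wait();
        printf("irq enabled with one waiter left: %d\n", (int)irq_enabled);
        end_wait();
        printf("irq enabled after last waiter:   %d\n", (int)irq_enabled);
        return 0;
    }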
299 void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
301 struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
302 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
332 else if (vmw_fifo_is_full(dev_priv, bytes)) {
333 ret = vmw_fifo_wait(dev_priv, bytes,
345 ret = vmw_fifo_wait(dev_priv, bytes,
423 void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
425 struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
426 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
465 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
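vmw_fifo_reserve and vmw_fifo_commit form a two-phase submission protocol: reserve returns a pointer into the FIFO (waiting via vmw_fifo_wait at lines 333 and 345 if the ring is full), the caller builds the command in place, and commit publishes it and pings the host (line 465). A deliberately simplified flat-buffer model; the real driver also handles ring wrap-around and a bounce buffer:

    #include <stdint.h>
    #include <stdio.h>

    #define FIFO_SIZE 256

    static _Alignas(uint32_t) uint8_t fifo[FIFO_SIZE];
    static uint32_t next_cmd;  /* producer offset, like SVGA_FIFO_NEXT_CMD */

    /* Phase 1: hand out space, but publish nothing yet. */
    static void *fifo_reserve(uint32_t bytes)
    {
        if (next_cmd + bytes > FIFO_SIZE)
            return NULL;  /* the driver would wait or wrap here instead */
        return fifo + next_cmd;
    }

    /* Phase 2: advance the producer offset so the host may read the command. */
    static void fifo_commit(uint32_t bytes)
    {
        next_cmd += bytes;
        /* driver: ping the host (SVGA_SYNC_GENERIC) so it starts processing */
    }

    int main(void)
    {
        uint32_t *cmd = fifo_reserve(8);
        if (cmd) {
            cmd[0] = 42;  /* build the command directly in the reserved space */
            cmd[1] = 7;
            fifo_commit(8);
        }
        printf("next_cmd = %u\n", next_cmd);
        return 0;
    }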
469 int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
471 struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
477 fm = vmw_fifo_reserve(dev_priv, bytes);
479 *sequence = atomic_read(&dev_priv->fence_seq);
481 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
487 *sequence = atomic_add_return(1, &dev_priv->fence_seq);
497 vmw_fifo_commit(dev_priv, 0);
507 vmw_fifo_commit(dev_priv, bytes);
510 vmw_update_sequence(dev_priv, fifo_state);
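vmw_fifo_send_fence (lines 479 and 487) draws each fence's sequence number from an atomic counter, so concurrent submitters get distinct, ordered numbers, and returns the number to the caller so it can later wait on that exact fence. The same idea with C11 atomics, with the fence command emission reduced to a single variable:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static atomic_uint fence_seq;
    static uint32_t last_fence_in_fifo;  /* stands in for the SVGA_FIFO_FENCE slot */

    /* Each fence gets a fresh sequence number; the number is both written
     * into the command stream and handed back to the caller. */
    static int send_fence(uint32_t *sequence)
    {
        *sequence = atomic_fetch_add(&fence_seq, 1) + 1;  /* like atomic_add_return() */
        last_fence_in_fifo = *sequence;  /* driver: emit a real fence command */
        return 0;
    }

    int main(void)
    {
        uint32_t seq;
        send_fence(&seq);
        send_fence(&seq);
        printf("latest fence sequence: %u\n", seq);
        return 0;
    }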
546 struct vmw_private *dev_priv;
549 dev_priv = vmw_priv(file_priv->minor->dev);
551 if (vma->vm_pgoff != (dev_priv->mmio_start >> PAGE_SHIFT) ||
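The final fragment is from the driver's mmap handler: line 551 rejects any mapping whose page offset is not exactly mmio_start >> PAGE_SHIFT, i.e. user space may map the MMIO window only at its true offset. A sketch of that check, with a made-up BAR address:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12  /* 4 KiB pages, as on the x86 targets of this driver */

    /* An mmap() request is honored only if its page offset points exactly
     * at the start of the device's MMIO window. */
    static bool mmap_offset_ok(uint64_t vm_pgoff, uint64_t mmio_start)
    {
        return vm_pgoff == (mmio_start >> PAGE_SHIFT);
    }

    int main(void)
    {
        uint64_t mmio_start = 0xfe000000;  /* illustrative BAR address */
        printf("ok:  %d\n", mmap_offset_ok(mmio_start >> PAGE_SHIFT, mmio_start));
        printf("bad: %d\n", mmap_offset_ok(0, mmio_start));
        return 0;
    }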