Lines Matching refs:dev_priv
in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/gpu/drm/vmwgfx/
(The leading numbers are line numbers in the matched source file, which by its content appears to be vmwgfx_irq.c. The sketches interleaved below reconstruct the surrounding functions from the mainline Linux 2.6.36 vmwgfx driver and are hedged reconstructions, not the vendor tree verbatim; they assume the driver's usual includes, drmP.h and vmwgfx_drv.h.)

36 struct vmw_private *dev_priv = vmw_priv(dev);
39 spin_lock(&dev_priv->irq_lock);
40 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
41 spin_unlock(&dev_priv->irq_lock);
44 wake_up_all(&dev_priv->fence_queue);
46 wake_up_all(&dev_priv->fifo_queue);
49 outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
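
Lines 36-49 are the top half of the driver's interrupt handler: latch the pending bits from the IRQ status port, wake any waiters, then ack by writing the bits back. A sketch of the whole routine, following the mainline 2.6.36 source (the SVGA_IRQFLAG_* tests do not reference dev_priv and so are absent from the matches above; they are assumptions taken from mainline):

irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
{
        struct drm_device *dev = (struct drm_device *)arg;
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t status;

        /* Latch the pending interrupt bits from the I/O status port. */
        spin_lock(&dev_priv->irq_lock);
        status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
        spin_unlock(&dev_priv->irq_lock);

        /* Wake sleepers waiting on fence completion or FIFO progress. */
        if (status & SVGA_IRQFLAG_ANY_FENCE)
                wake_up_all(&dev_priv->fence_queue);
        if (status & SVGA_IRQFLAG_FIFO_PROGRESS)
                wake_up_all(&dev_priv->fifo_queue);

        /* Ack by writing the latched bits back to the same port. */
        if (likely(status)) {
                outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

dev_priv->irq_lock serializes the status-port read against the mask and status updates done in vmw_wait_fence() below.
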
56 static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
60 mutex_lock(&dev_priv->hw_mutex);
61 busy = vmw_read(dev_priv, SVGA_REG_BUSY);
62 mutex_unlock(&dev_priv->hw_mutex);
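
Lines 56-62 are almost all of vmw_fifo_idle(); only its return is missing from the matches. A minimal completion, following the mainline source:

static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
{
        uint32_t busy;

        /* SVGA_REG_BUSY reads nonzero while the device is still working. */
        mutex_lock(&dev_priv->hw_mutex);
        busy = vmw_read(dev_priv, SVGA_REG_BUSY);
        mutex_unlock(&dev_priv->hw_mutex);

        return (busy == 0);
}
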
67 void vmw_update_sequence(struct vmw_private *dev_priv,
70 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
74 if (dev_priv->last_read_sequence != sequence) {
75 dev_priv->last_read_sequence = sequence;
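
Lines 67-75 form vmw_update_sequence(), which refreshes the cached last_read_sequence from the fence counter the device publishes in FIFO memory. Sketch per the mainline source (the SVGA_FIFO_FENCE read and the vmw_fence_pull() call do not reference dev_priv, so they are assumptions taken from mainline):

void vmw_update_sequence(struct vmw_private *dev_priv,
                         struct vmw_fifo_state *fifo_state)
{
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;

        /* The device writes the last completed fence into FIFO memory. */
        uint32_t sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);

        if (dev_priv->last_read_sequence != sequence) {
                dev_priv->last_read_sequence = sequence;
                vmw_fence_pull(&fifo_state->fence_queue, sequence);
        }
}
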
80 bool vmw_fence_signaled(struct vmw_private *dev_priv,
86 if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
89 fifo_state = &dev_priv->fifo;
90 vmw_update_sequence(dev_priv, fifo_state);
91 if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
95 vmw_fifo_idle(dev_priv, sequence))
103 ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
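
Lines 80-103 outline vmw_fence_signaled(). The repeated test last_read_sequence - sequence < VMW_FENCE_WRAP relies on unsigned wrap-around arithmetic: the fence has passed if the device's last reported sequence is at most VMW_FENCE_WRAP ahead of the queried one, modulo 2^32. Reconstruction following the mainline source:

bool vmw_fence_signaled(struct vmw_private *dev_priv,
                        uint32_t sequence)
{
        struct vmw_fifo_state *fifo_state;
        bool ret;

        if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
                return true;

        /* The cached value may be stale: re-read from FIFO memory, retry. */
        fifo_state = &dev_priv->fifo;
        vmw_update_sequence(dev_priv, fifo_state);
        if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
                return true;

        /* Without fence support, an idle FIFO implies completion. */
        if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
            vmw_fifo_idle(dev_priv, sequence))
                return true;

        /* A sequence ahead of everything ever emitted is stale, hence
         * treated as signaled. */
        ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
               > VMW_FENCE_WRAP);

        return ret;
}
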
109 int vmw_fallback_wait(struct vmw_private *dev_priv,
116 struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
134 signal_seq = atomic_read(&dev_priv->fence_seq);
138 prepare_to_wait(&dev_priv->fence_queue, &__wait,
141 if (wait_condition(dev_priv, sequence))
162 finish_wait(&dev_priv->fence_queue, &__wait);
164 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
167 wake_up_all(&dev_priv->fence_queue);
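
Lines 109-167 come from vmw_fallback_wait(), the polling path used when fence interrupts are unavailable. The prepare_to_wait()/finish_wait() pair (lines 138 and 162) brackets an open-coded wait loop that re-tests the wake condition on every pass. A condensed sketch following the mainline source; the periodic busy-poll rescheduling of the non-lazy mode is trimmed:

int vmw_fallback_wait(struct vmw_private *dev_priv,
                      bool lazy, bool fifo_idle, uint32_t sequence,
                      bool interruptible, unsigned long timeout)
{
        struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
        unsigned long end_jiffies = jiffies + timeout;
        bool (*wait_condition)(struct vmw_private *, uint32_t);
        uint32_t signal_seq;
        int ret = 0;
        DEFINE_WAIT(__wait);

        wait_condition = fifo_idle ? &vmw_fifo_idle : &vmw_fence_signaled;

        /* Block command submission while waiting for full idle. */
        if (fifo_idle)
                down_read(&fifo_state->rwsem);
        signal_seq = atomic_read(&dev_priv->fence_seq);

        for (;;) {
                prepare_to_wait(&dev_priv->fence_queue, &__wait,
                                interruptible ? TASK_INTERRUPTIBLE :
                                                TASK_UNINTERRUPTIBLE);
                if (wait_condition(dev_priv, sequence))
                        break;
                if (time_after_eq(jiffies, end_jiffies)) {
                        DRM_ERROR("SVGA device lockup.\n");
                        break;
                }
                if (lazy)
                        schedule_timeout(1);
                if (interruptible && signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
        }
        finish_wait(&dev_priv->fence_queue, &__wait);

        /* After a successful idle wait, stamp the latest emitted fence. */
        if (ret == 0 && fifo_idle) {
                __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
                iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
        }
        wake_up_all(&dev_priv->fence_queue);
        if (fifo_idle)
                up_read(&fifo_state->rwsem);

        return ret;
}
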
174 int vmw_wait_fence(struct vmw_private *dev_priv,
180 struct vmw_fifo_state *fifo = &dev_priv->fifo;
182 if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
185 if (likely(vmw_fence_signaled(dev_priv, sequence)))
188 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
191 return vmw_fallback_wait(dev_priv, lazy, true, sequence,
194 if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
195 return vmw_fallback_wait(dev_priv, lazy, false, sequence,
198 mutex_lock(&dev_priv->hw_mutex);
199 if (atomic_add_return(1, &dev_priv->fence_queue_waiters) > 0) {
200 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
202 dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
203 vmw_write(dev_priv, SVGA_REG_IRQMASK,
204 vmw_read(dev_priv, SVGA_REG_IRQMASK) |
206 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
208 mutex_unlock(&dev_priv->hw_mutex);
212 (dev_priv->fence_queue,
213 vmw_fence_signaled(dev_priv, sequence),
217 (dev_priv->fence_queue,
218 vmw_fence_signaled(dev_priv, sequence),
226 mutex_lock(&dev_priv->hw_mutex);
227 if (atomic_dec_and_test(&dev_priv->fence_queue_waiters)) {
228 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
229 vmw_write(dev_priv, SVGA_REG_IRQMASK,
230 vmw_read(dev_priv, SVGA_REG_IRQMASK) &
232 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
234 mutex_unlock(&dev_priv->hw_mutex);
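
Lines 174-234 are vmw_wait_fence(), the main wait entry point. It tries two fast paths, pings the host, falls back to polling when the FIFO lacks fence support or the device lacks IRQ masking, and otherwise brackets a wait_event with reference-counted unmasking and masking of the ANY_FENCE interrupt (the atomic waiter count at lines 199 and 227). Reconstruction following the mainline source:

int vmw_wait_fence(struct vmw_private *dev_priv,
                   bool lazy, uint32_t sequence,
                   bool interruptible, unsigned long timeout)
{
        struct vmw_fifo_state *fifo = &dev_priv->fifo;
        unsigned long irq_flags;
        long ret;

        /* Fast paths: the fence may already have signaled. */
        if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
                return 0;
        if (likely(vmw_fence_signaled(dev_priv, sequence)))
                return 0;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

        /* No fence support or no IRQ masking: poll instead. */
        if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
                return vmw_fallback_wait(dev_priv, lazy, true, sequence,
                                         interruptible, timeout);
        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return vmw_fallback_wait(dev_priv, lazy, false, sequence,
                                         interruptible, timeout);

        /* Register as a waiter: clear stale status, unmask the fence IRQ. */
        mutex_lock(&dev_priv->hw_mutex);
        if (atomic_add_return(1, &dev_priv->fence_queue_waiters) > 0) {
                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                outl(SVGA_IRQFLAG_ANY_FENCE,
                     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
                vmw_write(dev_priv, SVGA_REG_IRQMASK,
                          vmw_read(dev_priv, SVGA_REG_IRQMASK) |
                          SVGA_IRQFLAG_ANY_FENCE);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
        mutex_unlock(&dev_priv->hw_mutex);

        if (interruptible)
                ret = wait_event_interruptible_timeout
                    (dev_priv->fence_queue,
                     vmw_fence_signaled(dev_priv, sequence),
                     timeout);
        else
                ret = wait_event_timeout
                    (dev_priv->fence_queue,
                     vmw_fence_signaled(dev_priv, sequence),
                     timeout);

        if (unlikely(ret == 0))
                ret = -EBUSY;
        else if (likely(ret > 0))
                ret = 0;

        /* The last waiter out masks the fence IRQ again. */
        mutex_lock(&dev_priv->hw_mutex);
        if (atomic_dec_and_test(&dev_priv->fence_queue_waiters)) {
                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                vmw_write(dev_priv, SVGA_REG_IRQMASK,
                          vmw_read(dev_priv, SVGA_REG_IRQMASK) &
                          ~SVGA_IRQFLAG_ANY_FENCE);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
        mutex_unlock(&dev_priv->hw_mutex);

        return ret;
}

Masking the interrupt only when atomic_dec_and_test() sees the last waiter leave keeps SVGA_REG_IRQMASK writes off the hot path when several waiters overlap.
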
241 struct vmw_private *dev_priv = vmw_priv(dev);
244 if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
247 spin_lock_init(&dev_priv->irq_lock);
248 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
249 outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
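
Lines 241-249 make up vmw_irq_preinstall(): initialize the IRQ spinlock and clear any interrupt status left pending from before the handler was installed. Sketch per the mainline source:

void vmw_irq_preinstall(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t status;

        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return;

        spin_lock_init(&dev_priv->irq_lock);

        /* Ack anything left pending from before the handler existed. */
        status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
        outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}
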
259 struct vmw_private *dev_priv = vmw_priv(dev);
262 if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
265 mutex_lock(&dev_priv->hw_mutex);
266 vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
267 mutex_unlock(&dev_priv->hw_mutex);
269 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
270 outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
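
Lines 259-270 are vmw_irq_uninstall(): mask every SVGA interrupt via SVGA_REG_IRQMASK, then ack whatever was still pending so nothing fires after the handler is gone. Sketch per the mainline source:

void vmw_irq_uninstall(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t status;

        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return;

        /* Writing 0 to SVGA_REG_IRQMASK disables all device interrupts. */
        mutex_lock(&dev_priv->hw_mutex);
        vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
        mutex_unlock(&dev_priv->hw_mutex);

        /* Ack anything that fired before the mask took effect. */
        status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
        outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}
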