Directory: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/media/video/

Lines Matching defs:m2m_dev

157 void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
162 spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
163 if (m2m_dev->curr_ctx)
164 ret = m2m_dev->curr_ctx->priv;
165 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
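
Lines 157-165 are v4l2_m2m_get_curr_priv(): under job_spinlock it returns the driver-private pointer of the context whose job is currently running, or NULL when the device is idle. A typical caller is a completion interrupt handler that needs to recover the active context. The sketch below, and the ones that follow, use an invented mydrv_* driver purely for illustration; only the v4l2_m2m_* calls belong to the framework shown in this listing.

/* Minimal sketch of a hypothetical mem2mem driver built on the API above.
 * All mydrv_* names are invented; the v4l2_m2m_* calls are the framework's.
 */
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf-core.h>

struct mydrv_dev {
	struct v4l2_m2m_dev *m2m_dev;   /* returned by v4l2_m2m_init() */
	/* ... registers, clocks, video_device ... */
};

struct mydrv_ctx {
	struct v4l2_m2m_ctx *m2m_ctx;   /* returned by v4l2_m2m_ctx_init() */
	int aborting;                   /* set by the job_abort sketch below */
};

static irqreturn_t mydrv_irq(int irq, void *data)
{
	struct mydrv_dev *dev = data;
	struct mydrv_ctx *ctx;

	/* Returns the priv pointer passed to v4l2_m2m_ctx_init(), or NULL
	 * if no transaction is in flight (lines 162-165 above). */
	ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
	if (!ctx)
		return IRQ_NONE;

	/* ... acknowledge the hardware, then retire the job (see the
	 * v4l2_m2m_job_finish() sketch further down) ... */
	return IRQ_HANDLED;
}
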
176 static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
180 spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
181 if (NULL != m2m_dev->curr_ctx) {
182 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
187 if (list_empty(&m2m_dev->job_queue)) {
188 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
193 m2m_dev->curr_ctx = list_entry(m2m_dev->job_queue.next,
195 m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
196 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
198 m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
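
Lines 176-198 are the scheduler core, v4l2_m2m_try_run(): if no job is running and job_queue is not empty, it marks the head of the queue TRANS_RUNNING, drops the spinlock and invokes the driver's device_run callback with that context's priv pointer. device_run is only expected to start one transaction and return; completion is reported later through v4l2_m2m_job_finish(). A hedged sketch of such a callback, assuming the videobuf-based buffer helpers of this kernel generation:

/* Sketch: the device_run callback dispatched at line 198. It must only
 * kick off the transaction, never block waiting for it.
 * v4l2_m2m_next_src_buf()/v4l2_m2m_next_dst_buf() are assumed from the
 * framework header of this kernel version.
 */
static void mydrv_device_run(void *priv)
{
	struct mydrv_ctx *ctx = priv;
	struct videobuf_buffer *src, *dst;

	src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);   /* next queued OUTPUT buffer */
	dst = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);   /* next queued CAPTURE buffer */

	/* ... program the hardware with src/dst addresses and start it;
	 * mydrv_irq() will fire when the transaction completes ... */
}
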
219 struct v4l2_m2m_dev *m2m_dev;
222 m2m_dev = m2m_ctx->m2m_dev;
231 spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
233 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
241 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
247 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
253 if (m2m_dev->m2m_ops->job_ready
254 && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
255 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
260 list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
263 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
265 v4l2_m2m_try_run(m2m_dev);
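
Lines 219-265 are from the scheduling path (v4l2_m2m_try_schedule() in the corresponding mainline source): under job_spinlock it bails out if the context is already queued or running, optionally asks the driver's job_ready callback whether enough buffers are available (lines 253-254), and only then appends the context to job_queue and calls v4l2_m2m_try_run(). A sketch of a job_ready callback; the buffer-count helpers are assumed from the same framework header:

/* Sketch: job_ready returns non-zero when the context has everything one
 * transaction needs; returning 0 keeps it off the job queue until a later
 * qbuf/streamon retries the scheduling.
 */
static int mydrv_job_ready(void *priv)
{
	struct mydrv_ctx *ctx = priv;

	if (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) < 1 ||
	    v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) < 1)
		return 0;   /* need at least one source and one destination buffer */

	return 1;
}
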
280 void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
285 spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
286 if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
287 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
292 list_del(&m2m_dev->curr_ctx->queue);
293 m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
294 m2m_dev->curr_ctx = NULL;
296 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
302 v4l2_m2m_try_run(m2m_dev);
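
v4l2_m2m_job_finish() (lines 280-302) retires the running job: it removes the context from job_queue, clears TRANS_QUEUED/TRANS_RUNNING, resets curr_ctx and immediately calls v4l2_m2m_try_run() again so the next pending context starts. Drivers call it once per transaction from the completion path, after handing the processed buffers back. A sketch of that completion path for the hypothetical driver, using videobuf buffer handling assumed for this kernel generation:

/* Sketch: called from mydrv_irq() once the hardware signals completion.
 * v4l2_m2m_src_buf_remove()/v4l2_m2m_dst_buf_remove() are assumed from the
 * framework header; the VIDEOBUF_DONE handling is the usual videobuf idiom.
 */
static void mydrv_finish_job(struct mydrv_dev *dev, struct mydrv_ctx *ctx)
{
	struct videobuf_buffer *src, *dst;

	src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);   /* consumed source buffer */
	dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);   /* filled destination buffer */

	src->state = VIDEOBUF_DONE;
	dst->state = VIDEOBUF_DONE;
	wake_up(&src->done);
	wake_up(&dst->done);

	/* Drops curr_ctx and schedules the next queued context (line 302). */
	v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
}
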
493 struct v4l2_m2m_dev *m2m_dev;
501 m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
502 if (!m2m_dev)
505 m2m_dev->curr_ctx = NULL;
506 m2m_dev->m2m_ops = m2m_ops;
507 INIT_LIST_HEAD(&m2m_dev->job_queue);
508 spin_lock_init(&m2m_dev->job_spinlock);
510 return m2m_dev;
515 * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
519 void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
521 kfree(m2m_dev);
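
Lines 493-521 are device-level setup and teardown. v4l2_m2m_init() allocates the m2m_dev, stores the driver's m2m_ops and initializes the empty job_queue plus its spinlock; v4l2_m2m_release() simply frees the structure once all contexts are gone. A probe/remove sketch wiring up the three callbacks the listing shows the core invoking (device_run at line 198, job_ready at 253-254, job_abort at 583); the ERR_PTR-style error handling is assumed from the mainline function of the same vintage:

/* Sketch: hooking the hypothetical driver into the framework. A real probe
 * would also register a video_device; that part is omitted here.
 */
static struct v4l2_m2m_ops mydrv_m2m_ops = {
	.device_run = mydrv_device_run,
	.job_ready  = mydrv_job_ready,
	.job_abort  = mydrv_job_abort,   /* sketched after the last group below */
};

static int mydrv_probe(struct platform_device *pdev)
{
	struct mydrv_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->m2m_dev = v4l2_m2m_init(&mydrv_m2m_ops);
	if (IS_ERR(dev->m2m_dev)) {
		int ret = PTR_ERR(dev->m2m_dev);

		kfree(dev);
		return ret;
	}

	platform_set_drvdata(pdev, dev);
	return 0;
}

static int mydrv_remove(struct platform_device *pdev)
{
	struct mydrv_dev *dev = platform_get_drvdata(pdev);

	v4l2_m2m_release(dev->m2m_dev);   /* line 521: kfree(m2m_dev) */
	kfree(dev);
	return 0;
}
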
528 * @m2m_dev - a previously initialized m2m_dev struct
534 struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(void *priv, struct v4l2_m2m_dev *m2m_dev,
549 m2m_ctx->m2m_dev = m2m_dev;
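
v4l2_m2m_ctx_init() (lines 528-549) creates the per-open context and stores the back-pointer to m2m_dev that the other paths above dereference. The signature at line 534 is cut off in this listing; in the mainline source of this vintage the remaining argument is a videobuf queue-init callback, and that assumption is what the sketch below builds on:

/* Sketch: per-open setup. The third v4l2_m2m_ctx_init() argument and the
 * ERR_PTR error convention are assumptions based on the mainline 2.6.36
 * definition, not something visible in the listing above.
 */
static void mydrv_queue_init(void *priv, struct videobuf_queue *vq,
			     enum v4l2_buf_type type)
{
	/* ... videobuf_queue_*_init() for the OUTPUT or CAPTURE queue ... */
}

static int mydrv_open(struct file *file)
{
	struct mydrv_dev *dev = video_drvdata(file);
	struct mydrv_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->m2m_ctx = v4l2_m2m_ctx_init(ctx, dev->m2m_dev, mydrv_queue_init);
	if (IS_ERR(ctx->m2m_ctx)) {
		int ret = PTR_ERR(ctx->m2m_ctx);

		kfree(ctx);
		return ret;
	}

	file->private_data = ctx;
	return 0;
}
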
574 struct v4l2_m2m_dev *m2m_dev;
578 m2m_dev = m2m_ctx->m2m_dev;
580 spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
582 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
583 m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
592 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
597 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
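
The final group (lines 574-597) is from the context teardown path: with job_spinlock held it checks the context's state and, if a job is currently running, drops the lock and calls the driver's mandatory job_abort callback (line 583) so the running transaction can be wound down before the context is freed. A sketch of that callback:

/* Sketch: job_abort should make the running transaction finish as soon as
 * possible; the framework still relies on the normal completion path
 * (mydrv_finish_job() -> v4l2_m2m_job_finish()) actually retiring the job.
 */
static void mydrv_job_abort(void *priv)
{
	struct mydrv_ctx *ctx = priv;

	ctx->aborting = 1;   /* hypothetical flag checked by the completion path */

	/* ... optionally poke the hardware to cut the transfer short ... */
}
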