1/*
2 *
3 * generic helper functions for video4linux capture buffers, to handle
4 * memory management and PCI DMA.
5 * Right now, bttv, saa7134, saa7146 and cx88 use it.
6 *
7 * The functions expect the hardware being able to scatter gatter
8 * (i.e. the buffers are not linear in physical memory, but fragmented
9 * into PAGE_SIZE chunks).  They also assume the driver does not need
10 * to touch the video data.
11 *
12 * device specific map/unmap/sync stuff now are mapped as operations
13 * to allow its usage by USB and virtual devices.
14 *
15 * (c) 2001-2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
16 * (c) 2006 Mauro Carvalho Chehab <mchehab@infradead.org>
17 * (c) 2006 Ted Walther and John Sokol
18 *
19 * This program is free software; you can redistribute it and/or modify
20 * it under the terms of the GNU General Public License as published by
21 * the Free Software Foundation; either version 2 of the License, or
22 * (at your option) any later version.
23 */
24
25#include <linux/init.h>
26#include <linux/module.h>
27#include <linux/moduleparam.h>
28#include <linux/vmalloc.h>
29#include <linux/pagemap.h>
30#include <linux/slab.h>
31#include <linux/pci.h>
32#include <linux/interrupt.h>
33#include <asm/page.h>
34#include <asm/pgtable.h>
35
36#include <media/video-buf.h>
37
38#define MAGIC_DMABUF 0x19721112
39#define MAGIC_BUFFER 0x20040302
40#define MAGIC_CHECK(is,should)	if (unlikely((is) != (should))) \
41	{ printk(KERN_ERR "magic mismatch: %x (expected %x)\n",is,should); BUG(); }
42
43static int debug = 0;
44module_param(debug, int, 0644);
45
46MODULE_DESCRIPTION("helper module to manage video4linux pci dma buffers");
47MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
48MODULE_LICENSE("GPL");
49
50#define dprintk(level, fmt, arg...)	if (debug >= level) \
51	printk(KERN_DEBUG "vbuf: " fmt , ## arg)
52
53struct scatterlist*
54videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages)
55{
56	struct scatterlist *sglist;
57	struct page *pg;
58	int i;
59
60	sglist = kcalloc(nr_pages, sizeof(struct scatterlist), GFP_KERNEL);
61	if (NULL == sglist)
62		return NULL;
63	for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
64		pg = vmalloc_to_page(virt);
65		if (NULL == pg)
66			goto err;
67		BUG_ON(PageHighMem(pg));
68		sglist[i].page   = pg;
69		sglist[i].length = PAGE_SIZE;
70	}
71	return sglist;
72
73 err:
74	kfree(sglist);
75	return NULL;
76}
77
78struct scatterlist*
79videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset)
80{
81	struct scatterlist *sglist;
82	int i = 0;
83
84	if (NULL == pages[0])
85		return NULL;
86	sglist = kcalloc(nr_pages, sizeof(*sglist), GFP_KERNEL);
87	if (NULL == sglist)
88		return NULL;
89
90	if (NULL == pages[0])
91		goto nopage;
92	if (PageHighMem(pages[0]))
93		/* DMA to highmem pages might not work */
94		goto highmem;
95	sglist[0].page   = pages[0];
96	sglist[0].offset = offset;
97	sglist[0].length = PAGE_SIZE - offset;
98	for (i = 1; i < nr_pages; i++) {
99		if (NULL == pages[i])
100			goto nopage;
101		if (PageHighMem(pages[i]))
102			goto highmem;
103		sglist[i].page   = pages[i];
104		sglist[i].length = PAGE_SIZE;
105	}
106	return sglist;
107
108 nopage:
109	dprintk(2,"sgl: oops - no page\n");
110	kfree(sglist);
111	return NULL;
112
113 highmem:
114	dprintk(2,"sgl: oops - highmem page\n");
115	kfree(sglist);
116	return NULL;
117}
118
119/* --------------------------------------------------------------------- */
120
121void videobuf_dma_init(struct videobuf_dmabuf *dma)
122{
123	memset(dma,0,sizeof(*dma));
124	dma->magic = MAGIC_DMABUF;
125}
126
/*
 * Pin a userspace address range for DMA and record the page list.
 *
 * @direction: PCI_DMA_FROMDEVICE or PCI_DMA_TODEVICE; anything else
 *             is a driver bug and BUG()s.
 * @data:      userspace virtual start address (need not be page aligned)
 * @size:      length of the range in bytes
 *
 * Returns 0 on success, -ENOMEM if the page array cannot be allocated,
 * the get_user_pages() error, or -EINVAL on a short pin.
 *
 * NOTE(review): on a partial get_user_pages() result the pinned pages
 * stay referenced (nr_pages is trimmed to the pinned count); they are
 * only released by a later videobuf_dma_free() -- confirm callers
 * invoke that on this error path.
 */
int videobuf_dma_init_user(struct videobuf_dmabuf *dma, int direction,
			   unsigned long data, unsigned long size)
{
	unsigned long first,last;
	int err, rw = 0;

	dma->direction = direction;
	switch (dma->direction) {
	case PCI_DMA_FROMDEVICE: rw = READ;  break;
	case PCI_DMA_TODEVICE:   rw = WRITE; break;
	default:                 BUG();
	}

	/* first/last page frame numbers covered by [data, data+size) */
	first = (data          & PAGE_MASK) >> PAGE_SHIFT;
	last  = ((data+size-1) & PAGE_MASK) >> PAGE_SHIFT;
	dma->offset   = data & ~PAGE_MASK;
	dma->nr_pages = last-first+1;
	dma->pages = kmalloc(dma->nr_pages * sizeof(struct page*),
			     GFP_KERNEL);
	if (NULL == dma->pages)
		return -ENOMEM;
	dprintk(1,"init user [0x%lx+0x%lx => %d pages]\n",
		data,size,dma->nr_pages);

	dma->varea = (void *) data;

	down_read(&current->mm->mmap_sem);
	err = get_user_pages(current,current->mm,
			     data & PAGE_MASK, dma->nr_pages,
			     rw == READ, 1, /* force */
			     dma->pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (err != dma->nr_pages) {
		/* short pin: remember how many pages we actually hold */
		dma->nr_pages = (err >= 0) ? err : 0;
		dprintk(1,"get_user_pages: err=%d [%d]\n",err,dma->nr_pages);
		return err < 0 ? err : -EINVAL;
	}
	return 0;
}
166
167int videobuf_dma_init_kernel(struct videobuf_dmabuf *dma, int direction,
168			     int nr_pages)
169{
170	dprintk(1,"init kernel [%d pages]\n",nr_pages);
171	dma->direction = direction;
172	dma->vmalloc = vmalloc_32(nr_pages << PAGE_SHIFT);
173	if (NULL == dma->vmalloc) {
174		dprintk(1,"vmalloc_32(%d pages) failed\n",nr_pages);
175		return -ENOMEM;
176	}
177	dprintk(1,"vmalloc is at addr 0x%08lx, size=%d\n",
178				(unsigned long)dma->vmalloc,
179				nr_pages << PAGE_SHIFT);
180	memset(dma->vmalloc,0,nr_pages << PAGE_SHIFT);
181	dma->nr_pages = nr_pages;
182	return 0;
183}
184
185int videobuf_dma_init_overlay(struct videobuf_dmabuf *dma, int direction,
186			      dma_addr_t addr, int nr_pages)
187{
188	dprintk(1,"init overlay [%d pages @ bus 0x%lx]\n",
189		nr_pages,(unsigned long)addr);
190	dma->direction = direction;
191	if (0 == addr)
192		return -EINVAL;
193
194	dma->bus_addr = addr;
195	dma->nr_pages = nr_pages;
196	return 0;
197}
198
/*
 * Build a scatterlist for the dmabuf and map it for DMA.
 *
 * Exactly one of dma->pages (pinned user pages), dma->vmalloc (kernel
 * bounce buffer) or dma->bus_addr (overlay) is expected to be set and
 * determines how the scatterlist is built.  Overlay buffers get a
 * single pre-translated entry and skip the vb_map_sg call entirely.
 *
 * Returns 0 on success, -ENOMEM if no scatterlist could be built,
 * -EIO if the driver's vb_map_sg op mapped zero entries.
 */
int videobuf_dma_map(struct videobuf_queue* q,struct videobuf_dmabuf *dma)
{
	void                   *dev=q->dev;

	MAGIC_CHECK(dma->magic,MAGIC_DMABUF);
	BUG_ON(0 == dma->nr_pages);

	if (dma->pages) {
		dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages,
						   dma->offset);
	}
	if (dma->vmalloc) {
		dma->sglist = videobuf_vmalloc_to_sg
						(dma->vmalloc,dma->nr_pages);
	}
	if (dma->bus_addr) {
		/* overlay: one entry covering the whole range, already in
		 * bus-address space, so no vb_map_sg pass is needed */
		dma->sglist = kmalloc(sizeof(struct scatterlist), GFP_KERNEL);
		if (NULL != dma->sglist) {
			dma->sglen  = 1;
			sg_dma_address(&dma->sglist[0]) = dma->bus_addr & PAGE_MASK;
			dma->sglist[0].offset           = dma->bus_addr & ~PAGE_MASK;
			sg_dma_len(&dma->sglist[0])     = dma->nr_pages * PAGE_SIZE;
		}
	}
	if (NULL == dma->sglist) {
		dprintk(1,"scatterlist is NULL\n");
		return -ENOMEM;
	}
	if (!dma->bus_addr) {
		if (q->ops->vb_map_sg) {
			dma->sglen = q->ops->vb_map_sg(dev,dma->sglist,
					dma->nr_pages, dma->direction);
		}
		if (0 == dma->sglen) {
			/* mapping produced nothing: undo the sglist */
			printk(KERN_WARNING
			       "%s: videobuf_map_sg failed\n",__FUNCTION__);
			kfree(dma->sglist);
			dma->sglist = NULL;
			dma->sglen = 0;
			return -EIO;
		}
	}
	return 0;
}
243
244int videobuf_dma_sync(struct videobuf_queue* q,struct videobuf_dmabuf *dma)
245{
246	void                   *dev=q->dev;
247
248	MAGIC_CHECK(dma->magic,MAGIC_DMABUF);
249	BUG_ON(!dma->sglen);
250
251	if (!dma->bus_addr && q->ops->vb_dma_sync_sg)
252		q->ops->vb_dma_sync_sg(dev,dma->sglist,dma->nr_pages,
253							dma->direction);
254
255	return 0;
256}
257
258int videobuf_dma_unmap(struct videobuf_queue* q,struct videobuf_dmabuf *dma)
259{
260	void                   *dev=q->dev;
261
262	MAGIC_CHECK(dma->magic,MAGIC_DMABUF);
263	if (!dma->sglen)
264		return 0;
265
266	if (!dma->bus_addr && q->ops->vb_unmap_sg)
267			q->ops->vb_unmap_sg(dev,dma->sglist,dma->nr_pages,
268							dma->direction);
269	kfree(dma->sglist);
270	dma->sglist = NULL;
271	dma->sglen = 0;
272	return 0;
273}
274
275int videobuf_dma_free(struct videobuf_dmabuf *dma)
276{
277	MAGIC_CHECK(dma->magic,MAGIC_DMABUF);
278	BUG_ON(dma->sglen);
279
280	if (dma->pages) {
281		int i;
282		for (i=0; i < dma->nr_pages; i++)
283			page_cache_release(dma->pages[i]);
284		kfree(dma->pages);
285		dma->pages = NULL;
286	}
287
288	vfree(dma->vmalloc);
289	dma->vmalloc = NULL;
290	dma->varea = NULL;
291
292	if (dma->bus_addr) {
293		dma->bus_addr = 0;
294	}
295	dma->direction = PCI_DMA_NONE;
296	return 0;
297}
298
299/* --------------------------------------------------------------------- */
300
301void* videobuf_alloc(unsigned int size)
302{
303	struct videobuf_buffer *vb;
304
305	vb = kzalloc(size,GFP_KERNEL);
306	if (NULL != vb) {
307		videobuf_dma_init(&vb->dma);
308		init_waitqueue_head(&vb->done);
309		vb->magic     = MAGIC_BUFFER;
310	}
311	return vb;
312}
313
/*
 * Sleep until the buffer leaves the QUEUED/ACTIVE states (capture
 * finished or failed).
 *
 * @non_blocking: return -EAGAIN instead of sleeping
 * @intr:         sleep interruptibly; return -EINTR on a pending signal
 *
 * Returns 0 once the buffer is no longer in flight.
 */
int videobuf_waiton(struct videobuf_buffer *vb, int non_blocking, int intr)
{
	int retval = 0;
	DECLARE_WAITQUEUE(wait, current);

	MAGIC_CHECK(vb->magic,MAGIC_BUFFER);
	add_wait_queue(&vb->done, &wait);
	while (vb->state == STATE_ACTIVE || vb->state == STATE_QUEUED) {
		if (non_blocking) {
			retval = -EAGAIN;
			break;
		}
		/* set the task state BEFORE re-checking the condition so a
		 * wakeup between the check and schedule() is not lost */
		set_current_state(intr  ? TASK_INTERRUPTIBLE
					: TASK_UNINTERRUPTIBLE);
		if (vb->state == STATE_ACTIVE || vb->state == STATE_QUEUED)
			schedule();
		set_current_state(TASK_RUNNING);
		if (intr && signal_pending(current)) {
			dprintk(1,"buffer waiton: -EINTR\n");
			retval = -EINTR;
			break;
		}
	}
	remove_wait_queue(&vb->done, &wait);
	return retval;
}
340
/*
 * Lock down the memory behind a buffer and map it for DMA, according
 * to the buffer's memory type:
 *  - MMAP/USERPTR with a user address: pin the userspace pages
 *  - MMAP/USERPTR without one:         allocate a kernel bounce buffer
 *  - OVERLAY:                          use the framebuffer bus address
 * Returns 0 or a negative errno from the init/map helpers.
 */
int
videobuf_iolock(struct videobuf_queue* q, struct videobuf_buffer *vb,
		struct v4l2_framebuffer *fbuf)
{
	int err,pages;
	dma_addr_t bus;

	MAGIC_CHECK(vb->magic,MAGIC_BUFFER);
	switch (vb->memory) {
	case V4L2_MEMORY_MMAP:
	case V4L2_MEMORY_USERPTR:
		if (0 == vb->baddr) {
			/* no userspace addr -- kernel bounce buffer */
			pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT;
			err = videobuf_dma_init_kernel(&vb->dma,PCI_DMA_FROMDEVICE,
						       pages);
			if (0 != err)
				return err;
		} else {
			/* dma directly to userspace */
			err = videobuf_dma_init_user(&vb->dma,PCI_DMA_FROMDEVICE,
						     vb->baddr,vb->bsize);
			if (0 != err)
				return err;
		}
		break;
	case V4L2_MEMORY_OVERLAY:
		if (NULL == fbuf)
			return -EINVAL;
		/*
		 * Using a double cast to avoid compiler warnings when
		 * building for PAE. Compiler doesn't like direct casting
		 * of a 32 bit ptr to 64 bit integer.
		 */
		bus   = (dma_addr_t)(unsigned long)fbuf->base + vb->boff;
		pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT;
		err = videobuf_dma_init_overlay(&vb->dma,PCI_DMA_FROMDEVICE,
						bus, pages);
		if (0 != err)
			return err;
		break;
	default:
		BUG();
	}
	err = videobuf_dma_map(q,&vb->dma);
	if (0 != err)
		return err;

	return 0;
}
391
392/* --------------------------------------------------------------------- */
393
394void videobuf_queue_pci(struct videobuf_queue* q)
395{
396	/* If not specified, defaults to PCI map sg */
397	if (!q->ops->vb_map_sg)
398		q->ops->vb_map_sg=(vb_map_sg_t *)pci_map_sg;
399
400	if (!q->ops->vb_dma_sync_sg)
401		q->ops->vb_dma_sync_sg=(vb_map_sg_t *)pci_dma_sync_sg_for_cpu;
402	if (!q->ops->vb_unmap_sg)
403		q->ops->vb_unmap_sg=(vb_map_sg_t *)pci_unmap_sg;
404}
405
406int videobuf_pci_dma_map(struct pci_dev *pci,struct videobuf_dmabuf *dma)
407{
408	struct videobuf_queue q;
409	struct videobuf_queue_ops qops;
410
411	q.dev=pci;
412	qops.vb_map_sg=(vb_map_sg_t *)pci_map_sg;
413	qops.vb_unmap_sg=(vb_map_sg_t *)pci_unmap_sg;
414	q.ops = &qops;
415
416	return (videobuf_dma_map(&q,dma));
417}
418
419int videobuf_pci_dma_unmap(struct pci_dev *pci,struct videobuf_dmabuf *dma)
420{
421	struct videobuf_queue q;
422	struct videobuf_queue_ops qops;
423
424	q.dev=pci;
425	qops.vb_map_sg=(vb_map_sg_t *)pci_map_sg;
426	qops.vb_unmap_sg=(vb_map_sg_t *)pci_unmap_sg;
427	q.ops = &qops;
428
429	return (videobuf_dma_unmap(&q,dma));
430}
431
432void videobuf_queue_init(struct videobuf_queue* q,
433			 struct videobuf_queue_ops *ops,
434			 void *dev,
435			 spinlock_t *irqlock,
436			 enum v4l2_buf_type type,
437			 enum v4l2_field field,
438			 unsigned int msize,
439			 void *priv)
440{
441	memset(q,0,sizeof(*q));
442	q->irqlock = irqlock;
443	q->dev     = dev;
444	q->type    = type;
445	q->field   = field;
446	q->msize   = msize;
447	q->ops     = ops;
448	q->priv_data = priv;
449
450	videobuf_queue_pci(q);
451
452	mutex_init(&q->lock);
453	INIT_LIST_HEAD(&q->stream);
454}
455
456int
457videobuf_queue_is_busy(struct videobuf_queue *q)
458{
459	int i;
460
461	if (q->streaming) {
462		dprintk(1,"busy: streaming active\n");
463		return 1;
464	}
465	if (q->reading) {
466		dprintk(1,"busy: pending read #1\n");
467		return 1;
468	}
469	if (q->read_buf) {
470		dprintk(1,"busy: pending read #2\n");
471		return 1;
472	}
473	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
474		if (NULL == q->bufs[i])
475			continue;
476		if (q->bufs[i]->map) {
477			dprintk(1,"busy: buffer #%d mapped\n",i);
478			return 1;
479		}
480		if (q->bufs[i]->state == STATE_QUEUED) {
481			dprintk(1,"busy: buffer #%d queued\n",i);
482			return 1;
483		}
484		if (q->bufs[i]->state == STATE_ACTIVE) {
485			dprintk(1,"busy: buffer #%d avtive\n",i);
486			return 1;
487		}
488	}
489	return 0;
490}
491
/*
 * Abort all pending I/O on the queue: pull QUEUED buffers off the
 * driver's list (under irqlock, since the interrupt handler walks the
 * same list) and mark them errored, then let the driver release every
 * buffer.  The stream list is reinitialized afterwards.
 */
void
videobuf_queue_cancel(struct videobuf_queue *q)
{
	unsigned long flags=0;
	int i;

	/* remove queued buffers from list */
	if (q->irqlock)
		spin_lock_irqsave(q->irqlock,flags);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		if (q->bufs[i]->state == STATE_QUEUED) {
			list_del(&q->bufs[i]->queue);
			q->bufs[i]->state = STATE_ERROR;
		}
	}
	if (q->irqlock)
		spin_unlock_irqrestore(q->irqlock,flags);

	/* free all buffers + clear queue */
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		q->ops->buf_release(q,q->bufs[i]);
	}
	INIT_LIST_HEAD(&q->stream);
}
520
521/* --------------------------------------------------------------------- */
522
523enum v4l2_field
524videobuf_next_field(struct videobuf_queue *q)
525{
526	enum v4l2_field field = q->field;
527
528	BUG_ON(V4L2_FIELD_ANY == field);
529
530	if (V4L2_FIELD_ALTERNATE == field) {
531		if (V4L2_FIELD_TOP == q->last) {
532			field   = V4L2_FIELD_BOTTOM;
533			q->last = V4L2_FIELD_BOTTOM;
534		} else {
535			field   = V4L2_FIELD_TOP;
536			q->last = V4L2_FIELD_TOP;
537		}
538	}
539	return field;
540}
541
542void
543videobuf_status(struct v4l2_buffer *b, struct videobuf_buffer *vb,
544		enum v4l2_buf_type type)
545{
546	MAGIC_CHECK(vb->magic,MAGIC_BUFFER);
547
548	b->index    = vb->i;
549	b->type     = type;
550
551	b->memory   = vb->memory;
552	switch (b->memory) {
553	case V4L2_MEMORY_MMAP:
554		b->m.offset  = vb->boff;
555		b->length    = vb->bsize;
556		break;
557	case V4L2_MEMORY_USERPTR:
558		b->m.userptr = vb->baddr;
559		b->length    = vb->bsize;
560		break;
561	case V4L2_MEMORY_OVERLAY:
562		b->m.offset  = vb->boff;
563		break;
564	}
565
566	b->flags    = 0;
567	if (vb->map)
568		b->flags |= V4L2_BUF_FLAG_MAPPED;
569
570	switch (vb->state) {
571	case STATE_PREPARED:
572	case STATE_QUEUED:
573	case STATE_ACTIVE:
574		b->flags |= V4L2_BUF_FLAG_QUEUED;
575		break;
576	case STATE_DONE:
577	case STATE_ERROR:
578		b->flags |= V4L2_BUF_FLAG_DONE;
579		break;
580	case STATE_NEEDS_INIT:
581	case STATE_IDLE:
582		/* nothing */
583		break;
584	}
585
586	if (vb->input != UNSET) {
587		b->flags |= V4L2_BUF_FLAG_INPUT;
588		b->input  = vb->input;
589	}
590
591	b->field     = vb->field;
592	b->timestamp = vb->ts;
593	b->bytesused = vb->size;
594	b->sequence  = vb->field_count >> 1;
595}
596
/*
 * VIDIOC_REQBUFS helper: validate the request, ask the driver for its
 * preferred buffer count/size via buf_setup, and allocate the buffer
 * structures via videobuf_mmap_setup().  req->count is updated to the
 * number actually granted.
 *
 * NOTE(review): q->streaming and the stream list are tested BEFORE
 * q->lock is taken, so a concurrent streamon could race past these
 * checks -- confirm callers serialize ioctls.
 */
int
videobuf_reqbufs(struct videobuf_queue *q,
		 struct v4l2_requestbuffers *req)
{
	unsigned int size,count;
	int retval;

	if (req->type != q->type) {
		dprintk(1,"reqbufs: queue type invalid\n");
		return -EINVAL;
	}
	if (req->count < 1) {
		dprintk(1,"reqbufs: count invalid (%d)\n",req->count);
		return -EINVAL;
	}
	if (req->memory != V4L2_MEMORY_MMAP     &&
	    req->memory != V4L2_MEMORY_USERPTR  &&
	    req->memory != V4L2_MEMORY_OVERLAY) {
		dprintk(1,"reqbufs: memory type invalid\n");
		return -EINVAL;
	}

	if (q->streaming) {
		dprintk(1,"reqbufs: streaming already exists\n");
		return -EBUSY;
	}
	if (!list_empty(&q->stream)) {
		dprintk(1,"reqbufs: stream running\n");
		return -EBUSY;
	}

	mutex_lock(&q->lock);
	/* clamp to what the queue can address, then let the driver
	 * adjust count and tell us the per-buffer size */
	count = req->count;
	if (count > VIDEO_MAX_FRAME)
		count = VIDEO_MAX_FRAME;
	size = 0;
	q->ops->buf_setup(q,&count,&size);
	size = PAGE_ALIGN(size);
	dprintk(1,"reqbufs: bufs=%d, size=0x%x [%d pages total]\n",
		count, size, (count*size)>>PAGE_SHIFT);

	retval = videobuf_mmap_setup(q,count,size,req->memory);
	if (retval < 0) {
		dprintk(1,"reqbufs: mmap setup returned %d\n",retval);
		goto done;
	}

	req->count = count;

 done:
	mutex_unlock(&q->lock);
	return retval;
}
650
651int
652videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
653{
654	if (unlikely(b->type != q->type)) {
655		dprintk(1,"querybuf: Wrong type.\n");
656		return -EINVAL;
657	}
658	if (unlikely(b->index < 0 || b->index >= VIDEO_MAX_FRAME)) {
659		dprintk(1,"querybuf: index out of range.\n");
660		return -EINVAL;
661	}
662	if (unlikely(NULL == q->bufs[b->index])) {
663		dprintk(1,"querybuf: buffer is null.\n");
664		return -EINVAL;
665	}
666	videobuf_status(b,q->bufs[b->index],q->type);
667	return 0;
668}
669
/*
 * VIDIOC_QBUF helper: validate the buffer, adopt the userspace
 * parameters for its memory type, run the driver's buf_prepare and
 * append it to the stream list (queueing it to the driver at once when
 * streaming is already active).
 *
 * Returns 0 on success, -EBUSY while read() I/O is active, -EINVAL on
 * any validation failure, or the buf_prepare error.
 */
int
videobuf_qbuf(struct videobuf_queue *q,
	      struct v4l2_buffer *b)
{
	struct videobuf_buffer *buf;
	enum v4l2_field field;
	unsigned long flags=0;
	int retval;

	mutex_lock(&q->lock);
	retval = -EBUSY;
	if (q->reading) {
		dprintk(1,"qbuf: Reading running...\n");
		goto done;
	}
	retval = -EINVAL;
	if (b->type != q->type) {
		dprintk(1,"qbuf: Wrong type.\n");
		goto done;
	}
	/* b->index is unsigned, so the "< 0" arm is vacuous (harmless) */
	if (b->index < 0 || b->index >= VIDEO_MAX_FRAME) {
		dprintk(1,"qbuf: index out of range.\n");
		goto done;
	}
	buf = q->bufs[b->index];
	if (NULL == buf) {
		dprintk(1,"qbuf: buffer is null.\n");
		goto done;
	}
	MAGIC_CHECK(buf->magic,MAGIC_BUFFER);
	if (buf->memory != b->memory) {
		dprintk(1,"qbuf: memory type is wrong.\n");
		goto done;
	}
	if (buf->state != STATE_NEEDS_INIT && buf->state != STATE_IDLE) {
		dprintk(1,"qbuf: buffer is already queued or active.\n");
		goto done;
	}

	if (b->flags & V4L2_BUF_FLAG_INPUT) {
		if (b->input >= q->inputs) {
			dprintk(1,"qbuf: wrong input.\n");
			goto done;
		}
		buf->input = b->input;
	} else {
		buf->input = UNSET;
	}

	switch (b->memory) {
	case V4L2_MEMORY_MMAP:
		if (0 == buf->baddr) {
			dprintk(1,"qbuf: mmap requested but buffer addr is zero!\n");
			goto done;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		if (b->length < buf->bsize) {
			dprintk(1,"qbuf: buffer length is not enough\n");
			goto done;
		}
		/* userspace moved the buffer: release the old mapping
		 * so buf_prepare re-locks the new address */
		if (STATE_NEEDS_INIT != buf->state && buf->baddr != b->m.userptr)
			q->ops->buf_release(q,buf);
		buf->baddr = b->m.userptr;
		break;
	case V4L2_MEMORY_OVERLAY:
		buf->boff = b->m.offset;
		break;
	default:
		dprintk(1,"qbuf: wrong memory type\n");
		goto done;
	}

	dprintk(1,"qbuf: requesting next field\n");
	field = videobuf_next_field(q);
	retval = q->ops->buf_prepare(q,buf,field);
	if (0 != retval) {
		dprintk(1,"qbuf: buffer_prepare returned %d\n",retval);
		goto done;
	}

	list_add_tail(&buf->stream,&q->stream);
	if (q->streaming) {
		/* hand it to the driver under irqlock, as the IRQ
		 * handler manipulates the same queue */
		if (q->irqlock)
			spin_lock_irqsave(q->irqlock,flags);
		q->ops->buf_queue(q,buf);
		if (q->irqlock)
			spin_unlock_irqrestore(q->irqlock,flags);
	}
	dprintk(1,"qbuf: succeded\n");
	retval = 0;

 done:
	mutex_unlock(&q->lock);
	return retval;
}
766
767int
768videobuf_dqbuf(struct videobuf_queue *q,
769	       struct v4l2_buffer *b, int nonblocking)
770{
771	struct videobuf_buffer *buf;
772	int retval;
773
774	mutex_lock(&q->lock);
775	retval = -EBUSY;
776	if (q->reading) {
777		dprintk(1,"dqbuf: Reading running...\n");
778		goto done;
779	}
780	retval = -EINVAL;
781	if (b->type != q->type) {
782		dprintk(1,"dqbuf: Wrong type.\n");
783		goto done;
784	}
785	if (list_empty(&q->stream)) {
786		dprintk(1,"dqbuf: stream running\n");
787		goto done;
788	}
789	buf = list_entry(q->stream.next, struct videobuf_buffer, stream);
790	retval = videobuf_waiton(buf, nonblocking, 1);
791	if (retval < 0) {
792		dprintk(1,"dqbuf: waiton returned %d\n",retval);
793		goto done;
794	}
795	switch (buf->state) {
796	case STATE_ERROR:
797		dprintk(1,"dqbuf: state is error\n");
798		retval = -EIO;
799		videobuf_dma_sync(q,&buf->dma);
800		buf->state = STATE_IDLE;
801		break;
802	case STATE_DONE:
803		dprintk(1,"dqbuf: state is done\n");
804		videobuf_dma_sync(q,&buf->dma);
805		buf->state = STATE_IDLE;
806		break;
807	default:
808		dprintk(1,"dqbuf: state invalid\n");
809		retval = -EINVAL;
810		goto done;
811	}
812	list_del(&buf->stream);
813	memset(b,0,sizeof(*b));
814	videobuf_status(b,buf,q->type);
815
816 done:
817	mutex_unlock(&q->lock);
818	return retval;
819}
820
821int videobuf_streamon(struct videobuf_queue *q)
822{
823	struct videobuf_buffer *buf;
824	struct list_head *list;
825	unsigned long flags=0;
826	int retval;
827
828	mutex_lock(&q->lock);
829	retval = -EBUSY;
830	if (q->reading)
831		goto done;
832	retval = 0;
833	if (q->streaming)
834		goto done;
835	q->streaming = 1;
836	if (q->irqlock)
837		spin_lock_irqsave(q->irqlock,flags);
838	list_for_each(list,&q->stream) {
839		buf = list_entry(list, struct videobuf_buffer, stream);
840		if (buf->state == STATE_PREPARED)
841			q->ops->buf_queue(q,buf);
842	}
843	if (q->irqlock)
844		spin_unlock_irqrestore(q->irqlock,flags);
845
846 done:
847	mutex_unlock(&q->lock);
848	return retval;
849}
850
851int videobuf_streamoff(struct videobuf_queue *q)
852{
853	int retval = -EINVAL;
854
855	mutex_lock(&q->lock);
856	if (!q->streaming)
857		goto done;
858	videobuf_queue_cancel(q);
859	q->streaming = 0;
860	retval = 0;
861
862 done:
863	mutex_unlock(&q->lock);
864	return retval;
865}
866
/*
 * Capture one frame with DMA going directly into the userspace buffer
 * (zerocopy read path).  Allocates a temporary videobuf for the
 * duration of the capture; buf_release is called on both the success
 * and the prepare-failure path before the buffer is freed.
 * Returns bytes captured or a negative errno.
 */
static ssize_t
videobuf_read_zerocopy(struct videobuf_queue *q, char __user *data,
		       size_t count, loff_t *ppos)
{
	enum v4l2_field field;
	unsigned long flags=0;
	int retval;

	/* setup stuff */
	q->read_buf = videobuf_alloc(q->msize);
	if (NULL == q->read_buf)
		return -ENOMEM;

	q->read_buf->memory = V4L2_MEMORY_USERPTR;
	q->read_buf->baddr  = (unsigned long)data;
	q->read_buf->bsize  = count;
	field = videobuf_next_field(q);
	retval = q->ops->buf_prepare(q,q->read_buf,field);
	if (0 != retval)
		goto done;

	/* start capture & wait */
	if (q->irqlock)
		spin_lock_irqsave(q->irqlock,flags);
	q->ops->buf_queue(q,q->read_buf);
	if (q->irqlock)
		spin_unlock_irqrestore(q->irqlock,flags);
	retval = videobuf_waiton(q->read_buf,0,0);
	if (0 == retval) {
		videobuf_dma_sync(q,&q->read_buf->dma);
		if (STATE_ERROR == q->read_buf->state)
			retval = -EIO;
		else
			retval = q->read_buf->size;
	}

 done:
	/* cleanup */
	q->ops->buf_release(q,q->read_buf);
	kfree(q->read_buf);
	q->read_buf = NULL;
	return retval;
}
910
/*
 * read() helper capturing a single frame.
 *
 * Fast path: when no capture is pending, the request is large enough
 * for a whole frame and we may block, DMA straight into userspace
 * (zerocopy).  Otherwise capture into a kernel bounce buffer and copy
 * it out piecewise, keeping q->read_off across partial reads.
 */
ssize_t videobuf_read_one(struct videobuf_queue *q,
			  char __user *data, size_t count, loff_t *ppos,
			  int nonblocking)
{
	enum v4l2_field field;
	unsigned long flags=0;
	unsigned size, nbufs, bytes;
	int retval;

	mutex_lock(&q->lock);

	/* ask the driver for the frame size to decide on zerocopy */
	nbufs = 1; size = 0;
	q->ops->buf_setup(q,&nbufs,&size);
	if (NULL == q->read_buf  &&
	    count >= size        &&
	    !nonblocking) {
		retval = videobuf_read_zerocopy(q,data,count,ppos);
		if (retval >= 0  ||  retval == -EIO)
			/* ok, all done */
			goto done;
		/* fallback to kernel bounce buffer on failures */
	}

	if (NULL == q->read_buf) {
		/* need to capture a new frame */
		retval = -ENOMEM;
		q->read_buf = videobuf_alloc(q->msize);
		dprintk(1,"video alloc=0x%p\n", q->read_buf);
		if (NULL == q->read_buf)
			goto done;
		q->read_buf->memory = V4L2_MEMORY_USERPTR;
		q->read_buf->bsize = count; /* preferred size */
		field = videobuf_next_field(q);
		retval = q->ops->buf_prepare(q,q->read_buf,field);
		if (0 != retval) {
			kfree (q->read_buf);
			q->read_buf = NULL;
			goto done;
		}
		if (q->irqlock)
			spin_lock_irqsave(q->irqlock,flags);
		q->ops->buf_queue(q,q->read_buf);
		if (q->irqlock)
			spin_unlock_irqrestore(q->irqlock,flags);
		q->read_off = 0;
	}

	/* wait until capture is done */
	retval = videobuf_waiton(q->read_buf, nonblocking, 1);
	if (0 != retval)
		goto done;
	videobuf_dma_sync(q,&q->read_buf->dma);

	if (STATE_ERROR == q->read_buf->state) {
		/* catch I/O errors */
		q->ops->buf_release(q,q->read_buf);
		kfree(q->read_buf);
		q->read_buf = NULL;
		retval = -EIO;
		goto done;
	}

	/* copy to userspace */
	bytes = count;
	if (bytes > q->read_buf->size - q->read_off)
		bytes = q->read_buf->size - q->read_off;
	retval = -EFAULT;
	if (copy_to_user(data, q->read_buf->dma.vmalloc+q->read_off, bytes))
		goto done;

	retval = bytes;
	q->read_off += bytes;
	if (q->read_off == q->read_buf->size) {
		/* all data copied, cleanup */
		q->ops->buf_release(q,q->read_buf);
		kfree(q->read_buf);
		q->read_buf = NULL;
	}

 done:
	mutex_unlock(&q->lock);
	return retval;
}
994
/*
 * Set up and start capture for the read() path: allocate between 2 and
 * VIDEO_MAX_FRAME bounce buffers, prepare and queue them all, and mark
 * the queue as reading.
 *
 * NOTE(review): if buf_prepare fails mid-loop, buffers already added
 * to the stream list are not unwound here -- presumably the caller's
 * error handling / a later videobuf_read_stop covers this; confirm.
 */
int videobuf_read_start(struct videobuf_queue *q)
{
	enum v4l2_field field;
	unsigned long flags=0;
	int count = 0, size = 0;
	int err, i;

	q->ops->buf_setup(q,&count,&size);
	if (count < 2)
		count = 2;
	if (count > VIDEO_MAX_FRAME)
		count = VIDEO_MAX_FRAME;
	size = PAGE_ALIGN(size);

	err = videobuf_mmap_setup(q, count, size, V4L2_MEMORY_USERPTR);
	if (err)
		return err;
	for (i = 0; i < count; i++) {
		field = videobuf_next_field(q);
		err = q->ops->buf_prepare(q,q->bufs[i],field);
		if (err)
			return err;
		list_add_tail(&q->bufs[i]->stream, &q->stream);
	}
	/* queue everything to the driver under irqlock */
	if (q->irqlock)
		spin_lock_irqsave(q->irqlock,flags);
	for (i = 0; i < count; i++)
		q->ops->buf_queue(q,q->bufs[i]);
	if (q->irqlock)
		spin_unlock_irqrestore(q->irqlock,flags);
	q->reading = 1;
	return 0;
}
1028
1029void videobuf_read_stop(struct videobuf_queue *q)
1030{
1031	int i;
1032
1033	videobuf_queue_cancel(q);
1034	videobuf_mmap_free(q);
1035	INIT_LIST_HEAD(&q->stream);
1036	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
1037		if (NULL == q->bufs[i])
1038			continue;
1039		kfree(q->bufs[i]);
1040		q->bufs[i] = NULL;
1041	}
1042	q->read_buf = NULL;
1043	q->reading  = 0;
1044}
1045
/*
 * read() helper for continuous capture: keeps the whole buffer ring
 * queued, copies finished buffers to userspace and immediately
 * requeues them.  With vbihack != 0 the frame counter is patched into
 * the last four bytes of each buffer (legacy VBI userspace contract).
 */
ssize_t videobuf_read_stream(struct videobuf_queue *q,
			     char __user *data, size_t count, loff_t *ppos,
			     int vbihack, int nonblocking)
{
	unsigned int *fc, bytes;
	int err, retval;
	unsigned long flags=0;

	dprintk(2,"%s\n",__FUNCTION__);
	mutex_lock(&q->lock);
	retval = -EBUSY;
	if (q->streaming)
		goto done;
	if (!q->reading) {
		retval = videobuf_read_start(q);
		if (retval < 0)
			goto done;
	}

	retval = 0;
	while (count > 0) {
		/* get / wait for data */
		if (NULL == q->read_buf) {
			q->read_buf = list_entry(q->stream.next,
						 struct videobuf_buffer,
						 stream);
			list_del(&q->read_buf->stream);
			q->read_off = 0;
		}
		err = videobuf_waiton(q->read_buf, nonblocking, 1);
		if (err < 0) {
			/* keep the bytes already copied if any */
			if (0 == retval)
				retval = err;
			break;
		}

		if (q->read_buf->state == STATE_DONE) {
			if (vbihack) {
				/* dirty, undocumented hack -- pass the frame counter
				 * within the last four bytes of each vbi data block.
				 * We need that one to maintain backward compatibility
				 * to all vbi decoding software out there ... */
				fc  = (unsigned int*)q->read_buf->dma.vmalloc;
				fc += (q->read_buf->size>>2) -1;
				*fc = q->read_buf->field_count >> 1;
				dprintk(1,"vbihack: %d\n",*fc);
			}

			/* copy stuff */
			bytes = count;
			if (bytes > q->read_buf->size - q->read_off)
				bytes = q->read_buf->size - q->read_off;
			if (copy_to_user(data + retval,
					 q->read_buf->dma.vmalloc + q->read_off,
					 bytes)) {
				if (0 == retval)
					retval = -EFAULT;
				break;
			}
			count       -= bytes;
			retval      += bytes;
			q->read_off += bytes;
		} else {
			/* some error */
			q->read_off = q->read_buf->size;
			if (0 == retval)
				retval = -EIO;
		}

		/* requeue buffer when done with copying */
		if (q->read_off == q->read_buf->size) {
			list_add_tail(&q->read_buf->stream,
				      &q->stream);
			if (q->irqlock)
				spin_lock_irqsave(q->irqlock,flags);
			q->ops->buf_queue(q,q->read_buf);
			if (q->irqlock)
				spin_unlock_irqrestore(q->irqlock,flags);
			q->read_buf = NULL;
		}
		if (retval < 0)
			break;
	}

 done:
	mutex_unlock(&q->lock);
	return retval;
}
1134
/*
 * poll() helper: pick the buffer userspace would consume next (head of
 * the stream list when streaming, else the current/next read buffer,
 * starting read I/O on demand) and wait on its completion queue.
 * Returns POLLIN|POLLRDNORM when that buffer is done/errored, POLLERR
 * when there is nothing to wait for.
 *
 * NOTE(review): the read branch assumes q->stream is non-empty after a
 * successful read_start (which queues at least 2 buffers) -- confirm.
 */
unsigned int videobuf_poll_stream(struct file *file,
				  struct videobuf_queue *q,
				  poll_table *wait)
{
	struct videobuf_buffer *buf = NULL;
	unsigned int rc = 0;

	mutex_lock(&q->lock);
	if (q->streaming) {
		if (!list_empty(&q->stream))
			buf = list_entry(q->stream.next,
					 struct videobuf_buffer, stream);
	} else {
		if (!q->reading)
			videobuf_read_start(q);
		if (!q->reading) {
			rc = POLLERR;
		} else if (NULL == q->read_buf) {
			q->read_buf = list_entry(q->stream.next,
						 struct videobuf_buffer,
						 stream);
			list_del(&q->read_buf->stream);
			q->read_off = 0;
		}
		buf = q->read_buf;
	}
	if (!buf)
		rc = POLLERR;

	if (0 == rc) {
		poll_wait(file, &buf->done, wait);
		if (buf->state == STATE_DONE ||
		    buf->state == STATE_ERROR)
			rc = POLLIN|POLLRDNORM;
	}
	mutex_unlock(&q->lock);
	return rc;
}
1173
1174/* --------------------------------------------------------------------- */
1175
1176static void
1177videobuf_vm_open(struct vm_area_struct *vma)
1178{
1179	struct videobuf_mapping *map = vma->vm_private_data;
1180
1181	dprintk(2,"vm_open %p [count=%d,vma=%08lx-%08lx]\n",map,
1182		map->count,vma->vm_start,vma->vm_end);
1183	map->count++;
1184}
1185
1186static void
1187videobuf_vm_close(struct vm_area_struct *vma)
1188{
1189	struct videobuf_mapping *map = vma->vm_private_data;
1190	struct videobuf_queue *q = map->q;
1191	int i;
1192
1193	dprintk(2,"vm_close %p [count=%d,vma=%08lx-%08lx]\n",map,
1194		map->count,vma->vm_start,vma->vm_end);
1195
1196	map->count--;
1197	if (0 == map->count) {
1198		dprintk(1,"munmap %p q=%p\n",map,q);
1199		mutex_lock(&q->lock);
1200		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
1201			if (NULL == q->bufs[i])
1202				continue;
1203			if (q->bufs[i])
1204				;
1205			if (q->bufs[i]->map != map)
1206				continue;
1207			q->bufs[i]->map   = NULL;
1208			q->bufs[i]->baddr = 0;
1209			q->ops->buf_release(q,q->bufs[i]);
1210		}
1211		mutex_unlock(&q->lock);
1212		kfree(map);
1213	}
1214	return;
1215}
1216
1217/*
1218 * Get a anonymous page for the mapping.  Make sure we can DMA to that
1219 * memory location with 32bit PCI devices (i.e. don't use highmem for
1220 * now ...).  Bounce buffers don't work very well for the data rates
1221 * video capture has.
1222 */
1223static struct page*
1224videobuf_vm_nopage(struct vm_area_struct *vma, unsigned long vaddr,
1225		   int *type)
1226{
1227	struct page *page;
1228
1229	dprintk(3,"nopage: fault @ %08lx [vma %08lx-%08lx]\n",
1230		vaddr,vma->vm_start,vma->vm_end);
1231	if (vaddr > vma->vm_end)
1232		return NOPAGE_SIGBUS;
1233	page = alloc_page(GFP_USER | __GFP_DMA32);
1234	if (!page)
1235		return NOPAGE_OOM;
1236	clear_user_page(page_address(page), vaddr, page);
1237	if (type)
1238		*type = VM_FAULT_MINOR;
1239	return page;
1240}
1241
/* VMA callbacks for videobuf mmap()ed buffers: reference counting via
 * open/close, on-demand anonymous page allocation via nopage. */
static struct vm_operations_struct videobuf_vm_ops =
{
	.open     = videobuf_vm_open,
	.close    = videobuf_vm_close,
	.nopage   = videobuf_vm_nopage,
};
1248
1249int videobuf_mmap_setup(struct videobuf_queue *q,
1250			unsigned int bcount, unsigned int bsize,
1251			enum v4l2_memory memory)
1252{
1253	unsigned int i;
1254	int err;
1255
1256	err = videobuf_mmap_free(q);
1257	if (0 != err)
1258		return err;
1259
1260	for (i = 0; i < bcount; i++) {
1261		q->bufs[i] = videobuf_alloc(q->msize);
1262		q->bufs[i]->i      = i;
1263		q->bufs[i]->input  = UNSET;
1264		q->bufs[i]->memory = memory;
1265		q->bufs[i]->bsize  = bsize;
1266		switch (memory) {
1267		case V4L2_MEMORY_MMAP:
1268			q->bufs[i]->boff  = bsize * i;
1269			break;
1270		case V4L2_MEMORY_USERPTR:
1271		case V4L2_MEMORY_OVERLAY:
1272			/* nothing */
1273			break;
1274		}
1275	}
1276	dprintk(1,"mmap setup: %d buffers, %d bytes each\n",
1277		bcount,bsize);
1278	return 0;
1279}
1280
1281int videobuf_mmap_free(struct videobuf_queue *q)
1282{
1283	int i;
1284
1285	for (i = 0; i < VIDEO_MAX_FRAME; i++)
1286		if (q->bufs[i] && q->bufs[i]->map)
1287			return -EBUSY;
1288	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
1289		if (NULL == q->bufs[i])
1290			continue;
1291		q->ops->buf_release(q,q->bufs[i]);
1292		kfree(q->bufs[i]);
1293		q->bufs[i] = NULL;
1294	}
1295	return 0;
1296}
1297
/*
 * mmap() handler: map one or more consecutive MMAP-type buffers into the
 * userspace VMA.  The pgoff of the VMA selects the first buffer (matched
 * against boff as set up by videobuf_mmap_setup), and the VMA length must
 * equal the summed bsize of a consecutive run of buffers starting there.
 * Pages are faulted in lazily via videobuf_vm_nopage.
 * Returns 0 on success or a negative errno.
 */
int videobuf_mmap_mapper(struct videobuf_queue *q,
			 struct vm_area_struct *vma)
{
	struct videobuf_mapping *map;
	unsigned int first,last,size,i;
	int retval;

	mutex_lock(&q->lock);
	retval = -EINVAL;
	/* capture buffers are written by DMA and read back, so the app
	 * must ask for a writable, shared mapping */
	if (!(vma->vm_flags & VM_WRITE)) {
		dprintk(1,"mmap app bug: PROT_WRITE please\n");
		goto done;
	}
	if (!(vma->vm_flags & VM_SHARED)) {
		dprintk(1,"mmap app bug: MAP_SHARED please\n");
		goto done;
	}

	/* look for first buffer to map */
	for (first = 0; first < VIDEO_MAX_FRAME; first++) {
		if (NULL == q->bufs[first])
			continue;
		if (V4L2_MEMORY_MMAP != q->bufs[first]->memory)
			continue;
		/* the mmap offset requested by the app must match a
		 * buffer's boff exactly */
		if (q->bufs[first]->boff == (vma->vm_pgoff << PAGE_SHIFT))
			break;
	}
	if (VIDEO_MAX_FRAME == first) {
		dprintk(1,"mmap app bug: offset invalid [offset=0x%lx]\n",
			(vma->vm_pgoff << PAGE_SHIFT));
		goto done;
	}

	/* look for last buffer to map */
	for (size = 0, last = first; last < VIDEO_MAX_FRAME; last++) {
		if (NULL == q->bufs[last])
			continue;
		if (V4L2_MEMORY_MMAP != q->bufs[last]->memory)
			continue;
		/* a buffer in the run that is already mapped blocks the
		 * whole request */
		if (q->bufs[last]->map) {
			retval = -EBUSY;
			goto done;
		}
		size += q->bufs[last]->bsize;
		/* stop when the accumulated buffer sizes fill the VMA */
		if (size == (vma->vm_end - vma->vm_start))
			break;
	}
	if (VIDEO_MAX_FRAME == last) {
		dprintk(1,"mmap app bug: size invalid [size=0x%lx]\n",
			(vma->vm_end - vma->vm_start));
		goto done;
	}

	/* create mapping + update buffer list */
	retval = -ENOMEM;
	map = kmalloc(sizeof(struct videobuf_mapping),GFP_KERNEL);
	if (NULL == map)
		goto done;
	/* point every buffer in the run at the shared mapping and record
	 * its userspace address */
	for (size = 0, i = first; i <= last; size += q->bufs[i++]->bsize) {
		q->bufs[i]->map   = map;
		q->bufs[i]->baddr = vma->vm_start + size;
	}
	map->count    = 1;	/* dropped in videobuf_vm_close */
	map->start    = vma->vm_start;
	map->end      = vma->vm_end;
	map->q        = q;
	vma->vm_ops   = &videobuf_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
	vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */
	vma->vm_private_data = map;
	dprintk(1,"mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n",
		map,q,vma->vm_start,vma->vm_end,vma->vm_pgoff,first,last);
	retval = 0;

 done:
	mutex_unlock(&q->lock);
	return retval;
}
1376
1377/* --------------------------------------------------------------------- */
1378
/* scatter-gather helpers */
EXPORT_SYMBOL_GPL(videobuf_vmalloc_to_sg);

/* DMA buffer lifecycle */
EXPORT_SYMBOL_GPL(videobuf_dma_init);
EXPORT_SYMBOL_GPL(videobuf_dma_init_user);
EXPORT_SYMBOL_GPL(videobuf_dma_init_kernel);
EXPORT_SYMBOL_GPL(videobuf_dma_init_overlay);
EXPORT_SYMBOL_GPL(videobuf_dma_map);
EXPORT_SYMBOL_GPL(videobuf_dma_sync);
EXPORT_SYMBOL_GPL(videobuf_dma_unmap);
EXPORT_SYMBOL_GPL(videobuf_dma_free);

/* PCI-specific map/unmap operations */
EXPORT_SYMBOL_GPL(videobuf_pci_dma_map);
EXPORT_SYMBOL_GPL(videobuf_pci_dma_unmap);

/* buffer helpers */
EXPORT_SYMBOL_GPL(videobuf_alloc);
EXPORT_SYMBOL_GPL(videobuf_waiton);
EXPORT_SYMBOL_GPL(videobuf_iolock);

/* queue management */
EXPORT_SYMBOL_GPL(videobuf_queue_init);
EXPORT_SYMBOL_GPL(videobuf_queue_cancel);
EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);

/* V4L2 ioctl backends */
EXPORT_SYMBOL_GPL(videobuf_next_field);
EXPORT_SYMBOL_GPL(videobuf_status);
EXPORT_SYMBOL_GPL(videobuf_reqbufs);
EXPORT_SYMBOL_GPL(videobuf_querybuf);
EXPORT_SYMBOL_GPL(videobuf_qbuf);
EXPORT_SYMBOL_GPL(videobuf_dqbuf);
EXPORT_SYMBOL_GPL(videobuf_streamon);
EXPORT_SYMBOL_GPL(videobuf_streamoff);

/* read()/poll() emulation on top of the queue */
EXPORT_SYMBOL_GPL(videobuf_read_start);
EXPORT_SYMBOL_GPL(videobuf_read_stop);
EXPORT_SYMBOL_GPL(videobuf_read_stream);
EXPORT_SYMBOL_GPL(videobuf_read_one);
EXPORT_SYMBOL_GPL(videobuf_poll_stream);

/* mmap() support */
EXPORT_SYMBOL_GPL(videobuf_mmap_setup);
EXPORT_SYMBOL_GPL(videobuf_mmap_free);
EXPORT_SYMBOL_GPL(videobuf_mmap_mapper);
1419
1420/*
1421 * Local variables:
1422 * c-basic-offset: 8
1423 * End:
1424 */
1425