• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/media/video/ivtv/
1/* interrupt handling
2    Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
3    Copyright (C) 2004  Chris Kennedy <c@groovy.org>
4    Copyright (C) 2005-2007  Hans Verkuil <hverkuil@xs4all.nl>
5
6    This program is free software; you can redistribute it and/or modify
7    it under the terms of the GNU General Public License as published by
8    the Free Software Foundation; either version 2 of the License, or
9    (at your option) any later version.
10
11    This program is distributed in the hope that it will be useful,
12    but WITHOUT ANY WARRANTY; without even the implied warranty of
13    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14    GNU General Public License for more details.
15
16    You should have received a copy of the GNU General Public License
17    along with this program; if not, write to the Free Software
18    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19 */
20
21#include "ivtv-driver.h"
22#include "ivtv-queue.h"
23#include "ivtv-udma.h"
24#include "ivtv-irq.h"
25#include "ivtv-mailbox.h"
26#include "ivtv-vbi.h"
27#include "ivtv-yuv.h"
28#include <media/v4l2-event.h>
29
30#define DMA_MAGIC_COOKIE 0x000001fe
31
32static void ivtv_dma_dec_start(struct ivtv_stream *s);
33
/* Maps the stream index the firmware reports in the ENC START CAP
   mailbox (data[0]) to the driver's encoder stream type. */
static const int ivtv_stream_map[] = {
	IVTV_ENC_STREAM_TYPE_MPG,
	IVTV_ENC_STREAM_TYPE_YUV,
	IVTV_ENC_STREAM_TYPE_PCM,
	IVTV_ENC_STREAM_TYPE_VBI,
};
40
41
/* Deferred work: perform the actual PIO copy for the current PIO stream.
   Copies each s->sg_processing[] segment from card memory into the
   buffers queued on s->q_dma, then writes IVTV_IRQ_ENC_PIO_COMPLETE to
   register 0x44 to raise the PIO-complete interrupt. */
static void ivtv_pio_work_handler(struct ivtv *itv)
{
	struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
	struct ivtv_buffer *buf;
	int i = 0;

	IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
	/* The stream may have been stopped or switched away from PIO since
	   the work was queued. Still signal completion so that nothing
	   waits forever on this transfer. */
	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
			s->vdev == NULL || !ivtv_use_pio(s)) {
		itv->cur_pio_stream = -1;
		/* trigger PIO complete user interrupt */
		write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
		return;
	}
	IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
	list_for_each_entry(buf, &s->q_dma.list, list) {
		/* only the low 18 bits of the segment descriptor hold the size */
		u32 size = s->sg_processing[i].size & 0x3ffff;

		/* Copy the data from the card to the buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
			memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size);
		}
		else {
			memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size);
		}
		i++;
		if (i == s->sg_processing_size)
			break;
	}
	write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
}
73
74void ivtv_irq_work_handler(struct kthread_work *work)
75{
76	struct ivtv *itv = container_of(work, struct ivtv, irq_work);
77
78	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
79		ivtv_pio_work_handler(itv);
80
81	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
82		ivtv_vbi_work_handler(itv);
83
84	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
85		ivtv_yuv_work_handler(itv);
86}
87
/* Determine the required DMA size, setup enough buffers in the predma queue and
   actually copy the data from the card to the buffers in case a PIO transfer is
   required for this stream.

   'data' holds the mailbox arguments from the firmware; their meaning
   depends on the stream type (see the switch below).

   Returns 0 on success and -1 when the request must be skipped: stream
   not started or not claimed, no VBI data available, unknown stream
   type, or not enough free buffers.
 */
static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf;
	u32 bytes_needed = 0;
	u32 offset, size;
	u32 UVoffset = 0, UVsize = 0;
	int skip_bufs = s->q_predma.buffers;	/* buffers already on q_predma */
	int idx = s->sg_pending_size;		/* append after existing entries */
	int rc;

	/* sanity checks */
	if (s->vdev == NULL) {
		IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
		return -1;
	}
	if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
		IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
		return -1;
	}

	/* determine offset, size and PTS for the various streams */
	switch (s->type) {
		case IVTV_ENC_STREAM_TYPE_MPG:
			offset = data[1];
			size = data[2];
			s->pending_pts = 0;
			break;

		case IVTV_ENC_STREAM_TYPE_YUV:
			offset = data[1];
			size = data[2];
			UVoffset = data[3];
			UVsize = data[4];
			s->pending_pts = ((u64) data[5] << 32) | data[6];
			break;

		case IVTV_ENC_STREAM_TYPE_PCM:
			/* skip the 12-byte header; the PTS words sit just
			   before the data (offset - 12 / offset - 8) */
			offset = data[1] + 12;
			size = data[2] - 12;
			s->pending_pts = read_dec(offset - 8) |
				((u64)(read_dec(offset - 12)) << 32);
			if (itv->has_cx23415)
				offset += IVTV_DECODER_OFFSET;
			break;

		case IVTV_ENC_STREAM_TYPE_VBI:
			/* offset and PTS come from card memory, not from data[] */
			size = itv->vbi.enc_size * itv->vbi.fpi;
			offset = read_enc(itv->vbi.enc_start - 4) + 12;
			if (offset == 12) {
				IVTV_DEBUG_INFO("VBI offset == 0\n");
				return -1;
			}
			s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
			break;

		case IVTV_DEC_STREAM_TYPE_VBI:
			size = read_dec(itv->vbi.dec_start + 4) + 8;
			offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
			s->pending_pts = 0;
			offset += IVTV_DECODER_OFFSET;
			break;
		default:
			/* shouldn't happen */
			return -1;
	}

	/* if this is the start of the DMA then fill in the magic cookie */
	if (s->sg_pending_size == 0 && ivtv_use_dma(s)) {
		/* Save the first word of the data and overwrite it with the
		   magic cookie; dma_post() later uses the cookie to locate
		   the real start of the data and restores the saved word. */
		if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
		    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
			s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
			write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
		}
		else {
			s->pending_backup = read_enc(offset);
			write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
		}
		s->pending_offset = offset;
	}

	bytes_needed = size;
	if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
		/* The size for the Y samples needs to be rounded upwards to a
		   multiple of the buf_size. The UV samples then start in the
		   next buffer. */
		bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
		bytes_needed += UVsize;
	}

	IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
		ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);

	rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
	if (rc < 0) { /* Insufficient buffers */
		IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
				bytes_needed, s->name);
		return -1;
	}
	/* rc > 0: buffers had to be taken ("stolen") from the full queue */
	if (rc && !s->buffers_stolen && test_bit(IVTV_F_S_APPL_IO, &s->s_flags)) {
		IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
		IVTV_WARN("Cause: the application is not reading fast enough.\n");
	}
	s->buffers_stolen = rc;

	/* got the buffers, now fill in sg_pending */
	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
	memset(buf->buf, 0, 128);
	list_for_each_entry(buf, &s->q_predma.list, list) {
		/* skip buffers queued by an earlier append */
		if (skip_bufs-- > 0)
			continue;
		s->sg_pending[idx].dst = buf->dma_handle;
		s->sg_pending[idx].src = offset;
		s->sg_pending[idx].size = s->buf_size;
		buf->bytesused = min(size, s->buf_size);
		buf->dma_xfer_cnt = s->dma_xfer_cnt;

		s->q_predma.bytesused += buf->bytesused;
		size -= buf->bytesused;
		offset += s->buf_size;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);

		if (size == 0) {	/* YUV */
			/* process the UV section */
			offset = UVoffset;
			size = UVsize;
		}
		idx++;
	}
	s->sg_pending_size = idx;
	return 0;
}
226
227static void dma_post(struct ivtv_stream *s)
228{
229	struct ivtv *itv = s->itv;
230	struct ivtv_buffer *buf = NULL;
231	struct list_head *p;
232	u32 offset;
233	__le32 *u32buf;
234	int x = 0;
235
236	IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
237			s->name, s->dma_offset);
238	list_for_each(p, &s->q_dma.list) {
239		buf = list_entry(p, struct ivtv_buffer, list);
240		u32buf = (__le32 *)buf->buf;
241
242		/* Sync Buffer */
243		ivtv_buf_sync_for_cpu(s, buf);
244
245		if (x == 0 && ivtv_use_dma(s)) {
246			offset = s->dma_last_offset;
247			if (u32buf[offset / 4] != DMA_MAGIC_COOKIE)
248			{
249				for (offset = 0; offset < 64; offset++) {
250					if (u32buf[offset] == DMA_MAGIC_COOKIE) {
251						break;
252					}
253				}
254				offset *= 4;
255				if (offset == 256) {
256					IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
257					offset = s->dma_last_offset;
258				}
259				if (s->dma_last_offset != offset)
260					IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
261				s->dma_last_offset = offset;
262			}
263			if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
264						s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
265				write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
266			}
267			else {
268				write_enc_sync(0, s->dma_offset);
269			}
270			if (offset) {
271				buf->bytesused -= offset;
272				memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset);
273			}
274			*u32buf = cpu_to_le32(s->dma_backup);
275		}
276		x++;
277		/* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
278		if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
279		    s->type == IVTV_ENC_STREAM_TYPE_VBI)
280			buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
281	}
282	if (buf)
283		buf->bytesused += s->dma_last_offset;
284	if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
285		list_for_each_entry(buf, &s->q_dma.list, list) {
286			/* Parse and Groom VBI Data */
287			s->q_dma.bytesused -= buf->bytesused;
288			ivtv_process_vbi_data(itv, buf, 0, s->type);
289			s->q_dma.bytesused += buf->bytesused;
290		}
291		if (s->id == -1) {
292			ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
293			return;
294		}
295	}
296	ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
297	if (s->id != -1)
298		wake_up(&s->waitq);
299}
300
/* Build the scatter list for a transfer from the host buffers on
   s->q_predma into decoder memory at 'offset', then start the DMA (or
   mark it pending if one is already active). For YUV the Y and UV
   planes go to different card addresses: an optional blanking block is
   inserted first and the buffer that straddles the Y/UV boundary is
   split into two scatter entries. 'lock' selects whether dma_reg_lock
   is taken here (pass 0 when the caller already holds it). */
void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
{
	struct ivtv *itv = s->itv;
	struct yuv_playback_info *yi = &itv->yuv_info;
	u8 frame = yi->draw_frame;
	struct yuv_frame_info *f = &yi->new_frame_info[frame];
	struct ivtv_buffer *buf;
	/* Y plane: 720 bytes per line, height rounded up to 32 lines */
	u32 y_size = 720 * ((f->src_h + 31) & ~31);
	u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
	int y_done = 0;
	int bytes_written = 0;
	unsigned long flags = 0;
	int idx = 0;

	IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);

	/* Insert buffer block for YUV if needed */
	if (s->type == IVTV_DEC_STREAM_TYPE_YUV && f->offset_y) {
		if (yi->blanking_dmaptr) {
			s->sg_pending[idx].src = yi->blanking_dmaptr;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = 720 * 16;
		}
		/* NOTE(review): idx advances even when blanking_dmaptr is 0,
		   which would leave sg_pending[idx] holding stale data --
		   verify this combination cannot occur in practice. */
		offset += 720 * 16;
		idx++;
	}

	list_for_each_entry(buf, &s->q_predma.list, list) {
		/* YUV UV Offset from Y Buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done &&
				(bytes_written + buf->bytesused) >= y_size) {
			/* this buffer completes the Y plane: one entry for
			   the Y tail... */
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = y_size - bytes_written;
			offset = uv_offset;
			/* ...and, if any bytes remain, a second entry for the
			   start of the UV plane */
			if (s->sg_pending[idx].size != buf->bytesused) {
				idx++;
				s->sg_pending[idx].src =
				  buf->dma_handle + s->sg_pending[idx - 1].size;
				s->sg_pending[idx].dst = offset;
				s->sg_pending[idx].size =
				   buf->bytesused - s->sg_pending[idx - 1].size;
				offset += s->sg_pending[idx].size;
			}
			y_done = 1;
		} else {
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = buf->bytesused;
			offset += buf->bytesused;
		}
		bytes_written += buf->bytesused;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);
		idx++;
	}
	s->sg_pending_size = idx;

	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	if (lock)
		spin_lock_irqsave(&itv->dma_reg_lock, flags);
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		ivtv_dma_dec_start(s);
	}
	else {
		set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
	if (lock)
		spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}
373
/* Program the next pending scatter element into the hardware SG slot
   and kick the encoder DMA engine, arming a 300 ms watchdog timer
   (ivtv_unfinished_dma). Bit 31 is OR-ed into the size word as a
   hardware flag (its exact meaning is not visible in this file). */
static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
	/* setting bit 0x02 in DMAXFER starts the encoder transfer */
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
	add_timer(&itv->dma_timer);
}
389
/* Decoder-side counterpart of ivtv_dma_enc_start_xfer(): program the
   next scatter element, kick the decoder DMA engine (DMAXFER bit 0x01)
   and arm the 300 ms watchdog timer. */
static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
	add_timer(&itv->dma_timer);
}
405
/* Start the encoder DMA (or PIO) for stream 's': move its predma queue
   onto the dma queue, possibly piggy-back pending VBI scatter entries
   onto an MPEG transfer, latch the pending sg list and bookkeeping into
   the active ('processing') state, then either schedule the PIO work
   handler or kick off the first hardware transfer. */
static void ivtv_dma_enc_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
	int i;

	IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);

	/* Extend the last segment by 256 bytes; presumably this covers the
	   region dma_post() scans for the DMA magic cookie -- verify. */
	if (ivtv_use_dma(s))
		s->sg_pending[s->sg_pending_size - 1].size += 256;

	/* If this is an MPEG stream, and VBI data is also pending, then append the
	   VBI DMA to the MPEG DMA and transfer both sets of data at once.

	   VBI DMA is a second class citizen compared to MPEG and mixing them together
	   will confuse the firmware (the end of a VBI DMA is seen as the end of a
	   MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
	   sure we only use the MPEG DMA to transfer the VBI DMA if both are in
	   use. This way no conflicts occur. */
	clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
			s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
		ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
		if (ivtv_use_dma(s_vbi))
			s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
		for (i = 0; i < s_vbi->sg_pending_size; i++) {
			s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
		}
		s_vbi->dma_offset = s_vbi->pending_offset;
		s_vbi->sg_pending_size = 0;
		s_vbi->dma_xfer_cnt++;
		set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
		IVTV_DEBUG_HI_DMA("include DMA for %s\n", s_vbi->name);
	}

	/* latch the pending state into the active ('processing') state */
	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;
	s->dma_offset = s->pending_offset;
	s->dma_backup = s->pending_backup;
	s->dma_pts = s->pending_pts;

	if (ivtv_use_pio(s)) {
		/* defer the actual copy to the kthread work handler */
		set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		set_bit(IVTV_F_I_PIO, &itv->i_flags);
		itv->cur_pio_stream = s->type;
	}
	else {
		itv->dma_retries = 0;
		ivtv_dma_enc_start_xfer(s);
		set_bit(IVTV_F_I_DMA, &itv->i_flags);
		itv->cur_dma_stream = s->type;
	}
}
467
/* Start a DMA transfer from host memory to the decoder for stream 's'
   using the sg list prepared by ivtv_dma_stream_dec_prepare(). */
static void ivtv_dma_dec_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
	/* latch the pending state into the active ('processing') state */
	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;

	IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
	itv->dma_retries = 0;
	ivtv_dma_dec_start_xfer(s);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = s->type;
}
486
/* IRQ: a transfer to the decoder (stream DMA or user-space UDMA)
   finished. For stream DMA: check the status register for errors,
   retry the whole sg list up to 3 times, start the next segment if
   any remain, then notify the firmware of the transfer size and
   recycle the buffers. Finally clear the DMA-in-progress state. */
static void ivtv_irq_dma_read(struct ivtv *itv)
{
	struct ivtv_stream *s = NULL;
	struct ivtv_buffer *buf;
	int hw_stream_type = 0;

	IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");

	del_timer(&itv->dma_timer);

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0)
		return;

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		s = &itv->streams[itv->cur_dma_stream];
		ivtv_stream_sync_for_cpu(s);

		/* status bits 0x14 indicate a failed transfer */
		if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
			IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
					read_reg(IVTV_REG_DMASTATUS),
					s->sg_processed, s->sg_processing_size, itv->dma_retries);
			/* ack by writing back only the low two status bits */
			write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
			if (itv->dma_retries == 3) {
				/* Too many retries, give up on this frame */
				itv->dma_retries = 0;
				s->sg_processed = s->sg_processing_size;
			}
			else {
				/* Retry, starting with the first xfer segment.
				   Just retrying the current segment is not sufficient. */
				s->sg_processed = 0;
				itv->dma_retries++;
			}
		}
		if (s->sg_processed < s->sg_processing_size) {
			/* DMA next buffer */
			ivtv_dma_dec_start_xfer(s);
			return;
		}
		/* firmware stream id: 0 = MPG, 2 = YUV */
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
			hw_stream_type = 2;
		IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);

		/* For some reason must kick the firmware, like PIO mode,
		   I think this tells the firmware we are done and the size
		   of the xfer so it can calculate what we need next.
		   I think we can do this part ourselves but would have to
		   fully calculate xfer info ourselves and not use interrupts
		 */
		ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
				hw_stream_type);

		/* Free last DMA call */
		while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
			ivtv_buf_sync_for_cpu(s, buf);
			ivtv_enqueue(s, buf, &s->q_free);
		}
		wake_up(&s->waitq);
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}
551
/* IRQ: an encoder-to-host DMA segment finished. Check for errors and
   retry the whole transfer (up to 3 attempts), start the next scatter
   segment if any remain, otherwise post-process the completed buffers,
   including any VBI data piggy-backed onto an MPEG transfer. */
static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
	IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);

	del_timer(&itv->dma_timer);

	if (itv->cur_dma_stream < 0)
		return;

	s = &itv->streams[itv->cur_dma_stream];
	ivtv_stream_sync_for_cpu(s);

	/* bits 0x18 in the mailbox status word flag a failed transfer */
	if (data[0] & 0x18) {
		IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
			s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries);
		/* ack by writing back only the low two status bits */
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
		if (itv->dma_retries == 3) {
			/* Too many retries, give up on this frame */
			itv->dma_retries = 0;
			s->sg_processed = s->sg_processing_size;
		}
		else {
			/* Retry, starting with the first xfer segment.
			   Just retrying the current segment is not sufficient. */
			s->sg_processed = 0;
			itv->dma_retries++;
		}
	}
	if (s->sg_processed < s->sg_processing_size) {
		/* DMA next buffer */
		ivtv_dma_enc_start_xfer(s);
		return;
	}
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	dma_post(s);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		/* VBI data rode along with this MPEG transfer */
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	s->sg_processing_size = 0;
	s->sg_processed = 0;
	wake_up(&itv->dma_waitq);
}
600
/* IRQ: a PIO transfer completed (raised from ivtv_pio_work_handler()
   via register 0x44). Post-process the copied buffers and ask the
   firmware to schedule the next transfer for this stream type. */
static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
{
	struct ivtv_stream *s;

	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
		itv->cur_pio_stream = -1;
		return;
	}
	s = &itv->streams[itv->cur_pio_stream];
	IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	itv->cur_pio_stream = -1;
	dma_post(s);
	/* kick the firmware; the last argument selects the hardware stream
	   (0 = MPG, 1 = YUV, 2 = PCM) */
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
	else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
	else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
	/* NOTE(review): IVTV_F_I_PIO was already cleared above; this second
	   clear_bit looks redundant -- confirm nothing can re-set the flag
	   between the two calls. */
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	wake_up(&itv->dma_waitq);
}
627
/* IRQ: the DMA engine reported an error. Clear the error condition,
   then either retry the interrupted stream transfer (its sg list is
   restarted from scratch by the *_start functions), restart a pending
   user-space DMA, or give up and reset the DMA state. */
static void ivtv_irq_dma_err(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];

	del_timer(&itv->dma_timer);
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
	IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
				read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
	/* ack by writing back only the low two status bits */
	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
	    itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
		struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];

		/* retry */
		if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
			ivtv_dma_dec_start(s);
		else
			ivtv_dma_enc_start(s);
		return;
	}
	if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		ivtv_udma_start(itv);
		return;
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}
657
/* IRQ: the encoder captured data and requests a transfer to the host.
   data[0] selects the stream (0 = MPG, 1 = YUV, 2 = PCM, per
   ivtv_stream_map); VBI arrives through its own interrupt. */
static void ivtv_irq_enc_start_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* Get DMA destination and size arguments from card */
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, 7, data);
	IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);

	if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
		IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
				data[0], data[1], data[2]);
		return;
	}
	s = &itv->streams[ivtv_stream_map[data[0]]];
	/* queue the request; the round-robin scheduler at the bottom of
	   ivtv_irq_handler() starts the actual transfer */
	if (!stream_enc_dma_append(s, data)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}
677
/* IRQ: VBI capture data is ready. 'data' is passed uninitialized,
   which is harmless: for the encoder VBI stream type
   stream_enc_dma_append() reads the offset/size from card memory, not
   from data[]. */
static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
	s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];

	if (!stream_enc_dma_append(s, data))
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
}
689
/* IRQ: the decoder has VBI data to re-insert. 'data' is passed
   uninitialized but unread for this stream type (offsets come from
   card memory). Reinserted VBI is always marked for PIO transfer. */
static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];

	IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
	if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
			!stream_enc_dma_append(s, data)) {
		set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
	}
}
701
/* IRQ: the decoder requests more data from the host (MPG or YUV
   playback). Compute the request size and card offset, then either
   flag the stream as starved (not enough data buffered yet) or move
   the data to the predma queue and start the host->decoder DMA. */
static void ivtv_irq_dec_data_req(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* YUV or MPG */

	if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
		ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 2, data);
		/* 1080 bytes per line, height rounded up to 32 lines;
		   presumably 720 Y + 360 UV bytes per line -- verify */
		itv->dma_data_req_size =
				 1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
		itv->dma_data_req_offset = data[1];
		if (atomic_read(&itv->yuv_info.next_dma_frame) >= 0)
			ivtv_yuv_frame_complete(itv);
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
	}
	else {
		ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 3, data);
		/* cap MPG requests at 64 KiB */
		itv->dma_data_req_size = min_t(u32, data[2], 0x10000);
		itv->dma_data_req_offset = data[1];
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
	}
	IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
		       itv->dma_data_req_offset, itv->dma_data_req_size);
	if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
		/* not enough buffered data; remember the decoder is waiting */
		set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
	}
	else {
		if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags))
			ivtv_yuv_setup_stream_frame(itv);
		clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
		ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
		ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
	}
}
737
/* IRQ: decoder vertical sync. Advances the YUV frame double-buffering
   once per frame, emits V4L2 VSYNC events on each new field, schedules
   deferred VBI and YUV register work, and wakes vsync waiters. */
static void ivtv_irq_vsync(struct ivtv *itv)
{
	/* The vsync interrupt is unusual in that it won't clear until
	 * the end of the first line for the current field, at which
	 * point it clears itself. This can result in repeated vsync
	 * interrupts, or a missed vsync. Read some of the registers
	 * to determine the line being displayed and ensure we handle
	 * one vsync per frame.
	 */
	unsigned int frame = read_reg(IVTV_REG_DEC_LINE_FIELD) & 1;
	struct yuv_playback_info *yi = &itv->yuv_info;
	int last_dma_frame = atomic_read(&yi->next_dma_frame);
	/* NOTE(review): if next_dma_frame can still be -1 here this indexes
	 * one element before new_frame_info[] -- verify initialization. */
	struct yuv_frame_info *f = &yi->new_frame_info[last_dma_frame];

	/* deliberately compiled out; change to 1 for verbose vsync tracing */
	if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");

	if (((frame ^ f->sync_field) == 0 &&
		((itv->last_vsync_field & 1) ^ f->sync_field)) ||
			(frame != (itv->last_vsync_field & 1) && !f->interlaced)) {
		int next_dma_frame = last_dma_frame;

		if (!(f->interlaced && f->delay && yi->fields_lapsed < 1)) {
			if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&yi->next_fill_frame)) {
				/* program the next frame's Y and UV offsets
				   (values written in 16-byte units; register
				   semantics per hardware docs, not visible
				   here) */
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
				next_dma_frame = (next_dma_frame + 1) % IVTV_YUV_BUFFERS;
				atomic_set(&yi->next_dma_frame, next_dma_frame);
				yi->fields_lapsed = -1;
				yi->running = 1;
			}
		}
	}
	/* a new field started: deliver events and do per-field housekeeping */
	if (frame != (itv->last_vsync_field & 1)) {
		static const struct v4l2_event evtop = {
			.type = V4L2_EVENT_VSYNC,
			.u.vsync.field = V4L2_FIELD_TOP,
		};
		static const struct v4l2_event evbottom = {
			.type = V4L2_EVENT_VSYNC,
			.u.vsync.field = V4L2_FIELD_BOTTOM,
		};
		struct ivtv_stream *s = ivtv_get_output_stream(itv);

		itv->last_vsync_field += 1;
		if (frame == 0) {
			clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
			clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		else {
			set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
			set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
			wake_up(&itv->event_waitq);
			if (s)
				wake_up(&s->waitq);
		}
		if (s && s->vdev)
			v4l2_event_queue(s->vdev, frame ? &evtop : &evbottom);
		wake_up(&itv->vsync_waitq);

		/* Send VBI to saa7127 */
		if (frame && (itv->output_mode == OUT_PASSTHROUGH ||
			test_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags) ||
			test_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags) ||
			test_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags))) {
			set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
			set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		}

		/* Check if we need to update the yuv registers */
		if (yi->running && (yi->yuv_forced_update || f->update)) {
			if (!f->update) {
				last_dma_frame =
					(u8)(atomic_read(&yi->next_dma_frame) -
						 1) % IVTV_YUV_BUFFERS;
				f = &yi->new_frame_info[last_dma_frame];
			}

			if (f->src_w) {
				yi->update_frame = last_dma_frame;
				f->update = 0;
				yi->yuv_forced_update = 0;
				set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
				set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
			}
		}

		yi->fields_lapsed++;
	}
}
831
832#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ | IVTV_IRQ_DEC_VBI_RE_INSERT)
833
/* Main interrupt handler: reads and clears the IRQ status register,
   emulates a missed vsync if necessary, dispatches every asserted
   interrupt source, and round-robins any pending per-stream DMA/PIO
   requests once the engine is idle. Holds dma_reg_lock throughout. */
irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
	struct ivtv *itv = (struct ivtv *)dev_id;
	u32 combo;
	u32 stat;
	int i;
	u8 vsync_force = 0;

	spin_lock(&itv->dma_reg_lock);
	/* get contents of irq status register */
	stat = read_reg(IVTV_REG_IRQSTATUS);

	/* keep only the interrupt sources that are not masked out */
	combo = ~itv->irqmask & stat;

	/* Clear out IRQ */
	if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);

	if (0 == combo) {
		/* The vsync interrupt is unusual and clears itself. If we
		 * took too long, we may have missed it. Do some checks
		 */
		if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
			/* vsync is enabled, see if we're in a new field */
			if ((itv->last_vsync_field & 1) !=
			    (read_reg(IVTV_REG_DEC_LINE_FIELD) & 1)) {
				/* New field, looks like we missed it */
				IVTV_DEBUG_YUV("VSync interrupt missed %d\n",
				       read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16);
				vsync_force = 1;
			}
		}

		if (!vsync_force) {
			/* No Vsync expected, wasn't for us */
			spin_unlock(&itv->dma_reg_lock);
			return IRQ_NONE;
		}
	}

	/* Exclude interrupts noted below from the output, otherwise the log is flooded with
	   these messages */
	if (combo & ~0xff6d0400)
		IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);

	if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
		/* logged only; nothing to do for this source */
		IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
	}

	if (combo & IVTV_IRQ_DMA_READ) {
		ivtv_irq_dma_read(itv);
	}

	if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
		ivtv_irq_enc_dma_complete(itv);
	}

	if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
		ivtv_irq_enc_pio_complete(itv);
	}

	if (combo & IVTV_IRQ_DMA_ERR) {
		ivtv_irq_dma_err(itv);
	}

	if (combo & IVTV_IRQ_ENC_START_CAP) {
		ivtv_irq_enc_start_cap(itv);
	}

	if (combo & IVTV_IRQ_ENC_VBI_CAP) {
		ivtv_irq_enc_vbi_cap(itv);
	}

	if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
		ivtv_irq_dec_vbi_reinsert(itv);
	}

	if (combo & IVTV_IRQ_ENC_EOS) {
		IVTV_DEBUG_IRQ("ENC EOS\n");
		set_bit(IVTV_F_I_EOS, &itv->i_flags);
		wake_up(&itv->eos_waitq);
	}

	if (combo & IVTV_IRQ_DEC_DATA_REQ) {
		ivtv_irq_dec_data_req(itv);
	}

	/* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
	if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
		ivtv_irq_vsync(itv);
	}

	if (combo & IVTV_IRQ_ENC_VIM_RST) {
		IVTV_DEBUG_IRQ("VIM RST\n");
		/*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
	}

	if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
		IVTV_DEBUG_INFO("Stereo mode changed\n");
	}

	/* A DMA-related interrupt fired and the engine is now idle: start
	   the next pending stream DMA, round-robin so all streams get a
	   fair turn; fall back to a pending user-space DMA. */
	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
				continue;
			if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_dec_start(s);
			else
				ivtv_dma_enc_start(s);
			break;
		}

		if (i == IVTV_MAX_STREAMS &&
		    test_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags))
			ivtv_udma_start(itv);
	}

	/* Same round-robin for pending PIO requests (encoder streams and
	   decoder VBI only). */
	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
				continue;
			if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_enc_start(s);
			break;
		}
	}

	/* hand deferred work to the IRQ kthread worker */
	if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
		queue_kthread_work(&itv->irq_worker, &itv->irq_work);
	}

	spin_unlock(&itv->dma_reg_lock);

	/* If we've just handled a 'forced' vsync, it's safest to say it
	 * wasn't ours. Another device may have triggered it at just
	 * the right time.
	 */
	return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}
980
/* DMA watchdog timer callback (armed for 300 ms by the *_start_xfer
   functions). If a DMA is still marked as in progress, log the timeout,
   ack the status register, clear the DMA state and wake any waiters.
   'arg' is the struct ivtv pointer cast to unsigned long (old timer
   callback API). */
void ivtv_unfinished_dma(unsigned long arg)
{
	struct ivtv *itv = (struct ivtv *)arg;

	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
		return;
	IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);

	/* ack by writing back only the low two status bits */
	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}
995