// SPDX-License-Identifier: GPL-2.0-only
/*
 * Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III
 * flexcop-pci.c - covers the PCI part including DMA transfers
 * see flexcop.c for copyright information
 */

#define FC_LOG_PREFIX "flexcop-pci"
#include "flexcop-common.h"

static int enable_pid_filtering = 1;
module_param(enable_pid_filtering, int, 0444);
MODULE_PARM_DESC(enable_pid_filtering,
	"enable hardware pid filtering: supported values: 0 (fullts), 1");

static int irq_chk_intv = 100;
module_param(irq_chk_intv, int, 0644);
MODULE_PARM_DESC(irq_chk_intv, "set the interval for IRQ streaming watchdog.");

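/* Example usage of the module parameters above (the module name here is an
 * assumption based on the usual in-tree build name for this driver, not
 * taken from this file):
 *
 *   modprobe b2c2-flexcop-pci enable_pid_filtering=0 irq_chk_intv=200
 */
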
#ifdef CONFIG_DVB_B2C2_FLEXCOP_DEBUG
#define dprintk(level, args...) \
	do { if ((debug & (level))) printk(args); } while (0)
#define DEBSTATUS ""
#else
#define dprintk(level, args...) no_printk(args)
#define DEBSTATUS " (debugging is not enabled)"
#endif

#define deb_info(args...) dprintk(0x01, args)
#define deb_reg(args...) dprintk(0x02, args)
#define deb_ts(args...) dprintk(0x04, args)
#define deb_irq(args...) dprintk(0x08, args)
#define deb_chk(args...) dprintk(0x10, args)

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug,
	"set debug level (1=info,2=regs,4=TS,8=irqdma,16=check (|-able))."
	DEBSTATUS);

#define DRIVER_VERSION "0.1"
#define DRIVER_NAME "flexcop-pci"
#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@posteo.de>"

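/* per-card state of the PCI-based FlexCop devices */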
struct flexcop_pci {
	struct pci_dev *pdev;

#define FC_PCI_INIT     0x01
#define FC_PCI_DMA_INIT 0x02
	int init_state;

	void __iomem *io_mem;
	u32 irq;
	/* the buffer size (at least for DMA1) needs to be % 188 == 0
	 * (a multiple of the TS packet size), the logic below relies on it */
#define FC_DEFAULT_DMA1_BUFSIZE (1280 * 188)
#define FC_DEFAULT_DMA2_BUFSIZE (10 * 188)
	struct flexcop_dma dma[2];

	int active_dma1_addr; /* 0 = addr0 of dma1; 1 = addr1 of dma1 */
	u32 last_dma1_cur_pos;
	/* position of the pointer last time the timer/packet irq occurred */
	int count;
	int count_prev;
	int stream_problem;

	spinlock_t irq_lock;
	unsigned long last_irq;

	struct delayed_work irq_check_work;
	struct flexcop_device *fc_dev;
};

static int lastwreg, lastwval, lastrreg, lastrval;

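/* IBI register accessors; the last*reg/last*val variables above make deb_reg()
 * log only accesses that differ from the previous read/write */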
static flexcop_ibi_value flexcop_pci_read_ibi_reg(struct flexcop_device *fc,
		flexcop_ibi_register r)
{
	struct flexcop_pci *fc_pci = fc->bus_specific;
	flexcop_ibi_value v;
	v.raw = readl(fc_pci->io_mem + r);

	if (lastrreg != r || lastrval != v.raw) {
		lastrreg = r; lastrval = v.raw;
		deb_reg("new rd: %3x: %08x\n", r, v.raw);
	}

	return v;
}

static int flexcop_pci_write_ibi_reg(struct flexcop_device *fc,
		flexcop_ibi_register r, flexcop_ibi_value v)
{
	struct flexcop_pci *fc_pci = fc->bus_specific;

	if (lastwreg != r || lastwval != v.raw) {
		lastwreg = r; lastwval = v.raw;
		deb_reg("new wr: %3x: %08x\n", r, v.raw);
	}

	writel(v.raw, fc_pci->io_mem + r);
	return 0;
}

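/* streaming watchdog: when feeds are active but the IRQ counter has not moved
 * since the last run, recover (after a few retries) by toggling every active
 * feed off and on again, which reprograms the hardware PID filter */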
static void flexcop_pci_irq_check_work(struct work_struct *work)
{
	struct flexcop_pci *fc_pci =
		container_of(work, struct flexcop_pci, irq_check_work.work);
	struct flexcop_device *fc = fc_pci->fc_dev;

	if (fc->feedcount) {

		if (fc_pci->count == fc_pci->count_prev) {
			deb_chk("no IRQ since the last check\n");
			if (fc_pci->stream_problem++ == 3) {
				struct dvb_demux_feed *feed;
				deb_info("flexcop-pci: stream problem, resetting pid filter\n");

				spin_lock_irq(&fc->demux.lock);
				list_for_each_entry(feed, &fc->demux.feed_list,
						list_head) {
					flexcop_pid_feed_control(fc, feed, 0);
				}

				list_for_each_entry(feed, &fc->demux.feed_list,
						list_head) {
					flexcop_pid_feed_control(fc, feed, 1);
				}
				spin_unlock_irq(&fc->demux.lock);

				fc_pci->stream_problem = 0;
			}
		} else {
			fc_pci->stream_problem = 0;
			fc_pci->count_prev = fc_pci->count;
		}
	}

	schedule_delayed_work(&fc_pci->irq_check_work,
			msecs_to_jiffies(irq_chk_intv < 100 ? 100 : irq_chk_intv));
}

/* When PID filtering is turned on, we use the timer IRQ, because small amounts
 * of data need to be passed to the user space instantly as well. When PID
 * filtering is turned off, we use the page-change-IRQ */
static irqreturn_t flexcop_pci_isr(int irq, void *dev_id)
{
	struct flexcop_pci *fc_pci = dev_id;
	struct flexcop_device *fc = fc_pci->fc_dev;
	unsigned long flags;
	flexcop_ibi_value v;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock_irqsave(&fc_pci->irq_lock, flags);
	v = fc->read_ibi_reg(fc, irq_20c);

	/* errors */
	if (v.irq_20c.Data_receiver_error)
		deb_chk("data receiver error\n");
	if (v.irq_20c.Continuity_error_flag)
		deb_chk("Continuity error flag is set\n");
	if (v.irq_20c.LLC_SNAP_FLAG_set)
		deb_chk("LLC_SNAP_FLAG_set is set\n");
	if (v.irq_20c.Transport_Error)
		deb_chk("Transport error\n");

	if ((fc_pci->count % 1000) == 0)
		deb_chk("%d valid irq took place so far\n", fc_pci->count);

	if (v.irq_20c.DMA1_IRQ_Status == 1) {
		if (fc_pci->active_dma1_addr == 0)
			flexcop_pass_dmx_packets(fc_pci->fc_dev,
					fc_pci->dma[0].cpu_addr0,
					fc_pci->dma[0].size / 188);
		else
			flexcop_pass_dmx_packets(fc_pci->fc_dev,
					fc_pci->dma[0].cpu_addr1,
					fc_pci->dma[0].size / 188);

		deb_irq("page change to page: %d\n", !fc_pci->active_dma1_addr);
		fc_pci->active_dma1_addr = !fc_pci->active_dma1_addr;
		/* for the timer IRQ we can only use buffer dmx feeding, because
		 * we don't have complete TS packets when reading from the DMA memory */
	} else if (v.irq_20c.DMA1_Timer_Status == 1) {
		dma_addr_t cur_addr =
			fc->read_ibi_reg(fc, dma1_008).dma_0x8.dma_cur_addr << 2;
		u32 cur_pos = cur_addr - fc_pci->dma[0].dma_addr0;
		if (cur_pos > fc_pci->dma[0].size * 2)
			goto error;

		deb_irq("%u irq: %08x cur_addr: %llx: cur_pos: %08x, last_cur_pos: %08x ",
				jiffies_to_usecs(jiffies - fc_pci->last_irq),
				v.raw, (unsigned long long)cur_addr, cur_pos,
				fc_pci->last_dma1_cur_pos);
		fc_pci->last_irq = jiffies;

		/* the buffer end was reached and the DMA pointer wrapped to the
		 * beginning; pass the data from last_cur_pos up to the buffer
		 * end to the demux */
		if (cur_pos < fc_pci->last_dma1_cur_pos) {
			deb_irq(" end was reached: passing %d bytes ",
				(fc_pci->dma[0].size*2 - 1) -
				fc_pci->last_dma1_cur_pos);
			flexcop_pass_dmx_data(fc_pci->fc_dev,
				fc_pci->dma[0].cpu_addr0 +
					fc_pci->last_dma1_cur_pos,
				(fc_pci->dma[0].size*2) -
					fc_pci->last_dma1_cur_pos);
			fc_pci->last_dma1_cur_pos = 0;
		}

		if (cur_pos > fc_pci->last_dma1_cur_pos) {
			deb_irq(" passing %d bytes ",
				cur_pos - fc_pci->last_dma1_cur_pos);
			flexcop_pass_dmx_data(fc_pci->fc_dev,
				fc_pci->dma[0].cpu_addr0 +
					fc_pci->last_dma1_cur_pos,
				cur_pos - fc_pci->last_dma1_cur_pos);
		}
		deb_irq("\n");

		fc_pci->last_dma1_cur_pos = cur_pos;
		fc_pci->count++;
	} else {
		deb_irq("isr for flexcop called, apparently without reason (%08x)\n",
			v.raw);
		ret = IRQ_NONE;
	}

error:
	spin_unlock_irqrestore(&fc_pci->irq_lock, flags);
	return ret;
}

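/* the flexcop core's stream_control callback: on enable it configures both DMA
 * channels and switches on the DMA1 transfer and timer IRQ, on disable it
 * switches both off again */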
static int flexcop_pci_stream_control(struct flexcop_device *fc, int onoff)
{
	struct flexcop_pci *fc_pci = fc->bus_specific;
	if (onoff) {
		flexcop_dma_config(fc, &fc_pci->dma[0], FC_DMA_1);
		flexcop_dma_config(fc, &fc_pci->dma[1], FC_DMA_2);
		flexcop_dma_config_timer(fc, FC_DMA_1, 0);
		flexcop_dma_xfer_control(fc, FC_DMA_1,
				FC_DMA_SUBADDR_0 | FC_DMA_SUBADDR_1, 1);
		deb_irq("DMA xfer enabled\n");

		fc_pci->last_dma1_cur_pos = 0;
		flexcop_dma_control_timer_irq(fc, FC_DMA_1, 1);
		deb_irq("IRQ enabled\n");
		fc_pci->count_prev = fc_pci->count;
	} else {
		flexcop_dma_control_timer_irq(fc, FC_DMA_1, 0);
		deb_irq("IRQ disabled\n");

		flexcop_dma_xfer_control(fc, FC_DMA_1,
			 FC_DMA_SUBADDR_0 | FC_DMA_SUBADDR_1, 0);
		deb_irq("DMA xfer disabled\n");
	}
	return 0;
}

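/* allocate the two DMA buffers and route the SRAM destinations:
 * media/net data to DMA1, CA in/out data to DMA2 */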
static int flexcop_pci_dma_init(struct flexcop_pci *fc_pci)
{
	int ret;
	ret = flexcop_dma_allocate(fc_pci->pdev, &fc_pci->dma[0],
			FC_DEFAULT_DMA1_BUFSIZE);
	if (ret != 0)
		return ret;

	ret = flexcop_dma_allocate(fc_pci->pdev, &fc_pci->dma[1],
			FC_DEFAULT_DMA2_BUFSIZE);
	if (ret != 0) {
		flexcop_dma_free(&fc_pci->dma[0]);
		return ret;
	}

	flexcop_sram_set_dest(fc_pci->fc_dev, FC_SRAM_DEST_MEDIA |
			FC_SRAM_DEST_NET, FC_SRAM_DEST_TARGET_DMA1);
	flexcop_sram_set_dest(fc_pci->fc_dev, FC_SRAM_DEST_CAO |
			FC_SRAM_DEST_CAI, FC_SRAM_DEST_TARGET_DMA2);
	fc_pci->init_state |= FC_PCI_DMA_INIT;
	return ret;
}

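/* free the DMA buffers allocated by flexcop_pci_dma_init() */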
static void flexcop_pci_dma_exit(struct flexcop_pci *fc_pci)
{
	if (fc_pci->init_state & FC_PCI_DMA_INIT) {
		flexcop_dma_free(&fc_pci->dma[0]);
		flexcop_dma_free(&fc_pci->dma[1]);
	}
	fc_pci->init_state &= ~FC_PCI_DMA_INIT;
}

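/* PCI bring-up: enable the device, map 0x800 bytes of register space from
 * BAR 0 and install the (shared) interrupt handler */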
static int flexcop_pci_init(struct flexcop_pci *fc_pci)
{
	int ret;

	info("card revision %x", fc_pci->pdev->revision);

	if ((ret = pci_enable_device(fc_pci->pdev)) != 0)
		return ret;
	pci_set_master(fc_pci->pdev);

	if ((ret = pci_request_regions(fc_pci->pdev, DRIVER_NAME)) != 0)
		goto err_pci_disable_device;

	fc_pci->io_mem = pci_iomap(fc_pci->pdev, 0, 0x800);

	if (!fc_pci->io_mem) {
		err("cannot map io memory\n");
		ret = -EIO;
		goto err_pci_release_regions;
	}

	pci_set_drvdata(fc_pci->pdev, fc_pci);
	spin_lock_init(&fc_pci->irq_lock);
	if ((ret = request_irq(fc_pci->pdev->irq, flexcop_pci_isr,
					IRQF_SHARED, DRIVER_NAME, fc_pci)) != 0)
		goto err_pci_iounmap;

	fc_pci->init_state |= FC_PCI_INIT;
	return ret;

err_pci_iounmap:
	pci_iounmap(fc_pci->pdev, fc_pci->io_mem);
err_pci_release_regions:
	pci_release_regions(fc_pci->pdev);
err_pci_disable_device:
	pci_disable_device(fc_pci->pdev);
	return ret;
}

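/* undo flexcop_pci_init() */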
static void flexcop_pci_exit(struct flexcop_pci *fc_pci)
{
	if (fc_pci->init_state & FC_PCI_INIT) {
		free_irq(fc_pci->pdev->irq, fc_pci);
		pci_iounmap(fc_pci->pdev, fc_pci->io_mem);
		pci_release_regions(fc_pci->pdev);
		pci_disable_device(fc_pci->pdev);
	}
	fc_pci->init_state &= ~FC_PCI_INIT;
}

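/* probe: allocate the flexcop device, hook up the PCI bus ops, bring up the
 * PCI part, the flexcop core and the DMA buffers, then start the watchdog */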
static int flexcop_pci_probe(struct pci_dev *pdev,
		const struct pci_device_id *ent)
{
	struct flexcop_device *fc;
	struct flexcop_pci *fc_pci;
	int ret = -ENOMEM;

	if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_pci))) == NULL) {
		err("out of memory\n");
		return -ENOMEM;
	}

	/* general flexcop init */
	fc_pci = fc->bus_specific;
	fc_pci->fc_dev = fc;

	fc->read_ibi_reg = flexcop_pci_read_ibi_reg;
	fc->write_ibi_reg = flexcop_pci_write_ibi_reg;
	fc->i2c_request = flexcop_i2c_request;
	fc->get_mac_addr = flexcop_eeprom_check_mac_addr;
	fc->stream_control = flexcop_pci_stream_control;

	if (enable_pid_filtering)
		info("will use the HW PID filter.");
	else
		info("will pass the complete TS to the demuxer.");

	fc->pid_filtering = enable_pid_filtering;
	fc->bus_type = FC_PCI;
	fc->dev = &pdev->dev;
	fc->owner = THIS_MODULE;

	/* bus specific part */
	fc_pci->pdev = pdev;
	if ((ret = flexcop_pci_init(fc_pci)) != 0)
		goto err_kfree;

	/* init flexcop */
	if ((ret = flexcop_device_initialize(fc)) != 0)
		goto err_pci_exit;

	/* init dma */
	if ((ret = flexcop_pci_dma_init(fc_pci)) != 0)
		goto err_fc_exit;

	INIT_DELAYED_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work);

	if (irq_chk_intv > 0)
		schedule_delayed_work(&fc_pci->irq_check_work,
				msecs_to_jiffies(irq_chk_intv < 100 ?
					100 :
					irq_chk_intv));
	return ret;

err_fc_exit:
	flexcop_device_exit(fc);
err_pci_exit:
	flexcop_pci_exit(fc_pci);
err_kfree:
	flexcop_device_kfree(fc);
	return ret;
}

/* in theory every _exit function can be called from exactly two places:
 * here and from the bail-out path of the _init function
 */
static void flexcop_pci_remove(struct pci_dev *pdev)
{
	struct flexcop_pci *fc_pci = pci_get_drvdata(pdev);

	if (irq_chk_intv > 0)
		cancel_delayed_work(&fc_pci->irq_check_work);

	flexcop_pci_dma_exit(fc_pci);
	flexcop_device_exit(fc_pci->fc_dev);
	flexcop_pci_exit(fc_pci);
	flexcop_device_kfree(fc_pci->fc_dev);
}

static const struct pci_device_id flexcop_pci_tbl[] = {
	{ PCI_DEVICE(0x13d0, 0x2103) },
	{ },
};

MODULE_DEVICE_TABLE(pci, flexcop_pci_tbl);

static struct pci_driver flexcop_pci_driver = {
	.name     = "b2c2_flexcop_pci",
	.id_table = flexcop_pci_tbl,
	.probe    = flexcop_pci_probe,
	.remove   = flexcop_pci_remove,
};

module_pci_driver(flexcop_pci_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_NAME);
MODULE_LICENSE("GPL");