/*
 * Blackfin Infra-red Driver
 *
 * Copyright 2006-2009 Analog Devices Inc.
 *
 * Enter bugs at http://blackfin.uclinux.org/
 *
 * Licensed under the GPL-2 or later.
 *
 */
#include "bfin_sir.h"

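/*
 * In DMA mode the receiver fills a PAGE_SIZE ring buffer arranged as a
 * 2D transfer: DMA_SIR_RX_XCNT bytes per row, DMA_SIR_RX_YCNT rows.  An
 * interrupt fires per completed row, and a timer flushes any partial row
 * every DMA_SIR_RX_FLUSH_JIFS jiffies (HZ * 4 / 250 is roughly 16 ms).
 */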
#ifdef CONFIG_SIR_BFIN_DMA
#define DMA_SIR_RX_XCNT        10
#define DMA_SIR_RX_YCNT        (PAGE_SIZE / DMA_SIR_RX_XCNT)
#define DMA_SIR_RX_FLUSH_JIFS  (HZ * 4 / 250)
#endif

#if ANOMALY_05000447
static int max_rate = 57600;
#else
static int max_rate = 115200;
#endif

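/*
 * Wait out the IrDA minimum turnaround time (mtt, in microseconds,
 * clamped to at least 10 ms) before transmitting after a reception.
 * Note that the last_jif argument is currently unused: the delay is
 * measured from "now" rather than from the last RX timestamp.
 */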
static void turnaround_delay(unsigned long last_jif, int mtt)
{
	long ticks;

	mtt = mtt < 10000 ? 10000 : mtt;
	ticks = 1 + mtt / (USEC_PER_SEC / HZ);
	schedule_timeout_uninterruptible(ticks);
}

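/*
 * Pull the port's settings out of the platform resources: the UART
 * MMIO base, its IRQ, and (for DMA builds) the RX/TX DMA channels,
 * which the board file packs into the start/end of one DMA resource.
 */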
static void __devinit bfin_sir_init_ports(struct bfin_sir_port *sp, struct platform_device *pdev)
{
	int i;
	struct resource *res;

	for (i = 0; i < pdev->num_resources; i++) {
		res = &pdev->resource[i];
		switch (res->flags) {
		case IORESOURCE_MEM:
			sp->membase   = (void __iomem *)res->start;
			break;
		case IORESOURCE_IRQ:
			sp->irq = res->start;
			break;
		case IORESOURCE_DMA:
			sp->rx_dma_channel = res->start;
			sp->tx_dma_channel = res->end;
			break;
		default:
			break;
		}
	}

	sp->clk = get_sclk();
#ifdef CONFIG_SIR_BFIN_DMA
	sp->tx_done        = 1;
	init_timer(&(sp->rx_dma_timer));
#endif
}

static void bfin_sir_stop_tx(struct bfin_sir_port *port)
{
#ifdef CONFIG_SIR_BFIN_DMA
	disable_dma(port->tx_dma_channel);
#endif

	while (!(SIR_UART_GET_LSR(port) & THRE))
		cpu_relax();

	SIR_UART_STOP_TX(port);
}

static void bfin_sir_enable_tx(struct bfin_sir_port *port)
{
	SIR_UART_ENABLE_TX(port);
}

static void bfin_sir_stop_rx(struct bfin_sir_port *port)
{
	SIR_UART_STOP_RX(port);
}

static void bfin_sir_enable_rx(struct bfin_sir_port *port)
{
	SIR_UART_ENABLE_RX(port);
}

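/*
 * Program the UART divisor for one of the SIR rates.  The divisor is
 * clk / (16 * speed) rounded to the nearest integer by the "+ 8 * speed"
 * term; e.g. with a 133 MHz system clock and 115200 baud the quotient
 * works out to 72.  Before touching the divisor we wait (bounded to ten
 * tries) for the transmitter to drain (TEMT), then briefly drop UCEN to
 * reset the UART state machine.
 */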
static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed)
{
	int ret = -EINVAL;
	unsigned int quot;
	unsigned short val, lsr, lcr;
	static int utime;
	int count = 10;

	lcr = WLS(8);

	switch (speed) {
	case 9600:
	case 19200:
	case 38400:
	case 57600:
	case 115200:

		/*
		 * IrDA is not affected by anomaly 05000230, so there is no
		 * need to tweak the divisor like the UART driver does (which
		 * would slightly speed up the baud rate on us).
		 */
		quot = (port->clk + (8 * speed)) / (16 * speed);

		do {
			udelay(utime);
			lsr = SIR_UART_GET_LSR(port);
		} while (!(lsr & TEMT) && count--);

		/* Microseconds needed to transmit one bit */
		utime = 1000000 / speed + 1;

		/* Clear UCEN bit to reset the UART state machine
		 * and control registers
		 */
		val = SIR_UART_GET_GCTL(port);
		val &= ~UCEN;
		SIR_UART_PUT_GCTL(port, val);

		/* Set DLAB in LCR to access the divisor latch (DLL and DLH) */
		SIR_UART_SET_DLAB(port);
		SSYNC();

		SIR_UART_PUT_DLL(port, quot & 0xFF);
		SIR_UART_PUT_DLH(port, (quot >> 8) & 0xFF);
		SSYNC();

		/* Clear DLAB in LCR */
		SIR_UART_CLEAR_DLAB(port);
		SSYNC();

		SIR_UART_PUT_LCR(port, lcr);

		val = SIR_UART_GET_GCTL(port);
		val |= UCEN;
		SIR_UART_PUT_GCTL(port, val);

		ret = 0;
		break;
	default:
		printk(KERN_WARNING "bfin_sir: Invalid speed %d\n", speed);
		break;
	}

	val = SIR_UART_GET_GCTL(port);
	/* Without RPOLC set we cannot catch the receive interrupt; this
	 * depends on the board layout and the IR transceiver.
	 */
	val |= IREN | RPOLC;
	SIR_UART_PUT_GCTL(port, val);
	return ret;
}

static int bfin_sir_is_receiving(struct net_device *dev)
{
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;

	if (!(SIR_UART_GET_IER(port) & ERBFI))
		return 0;
	return self->rx_buff.state != OUTSIDE_FRAME;
}

#ifdef CONFIG_SIR_BFIN_PIO
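/*
 * PIO mode: one byte is moved per UART interrupt.  TX drains the
 * wrapped frame from tx_buff one character at a time on THRE, while RX
 * feeds each received character through async_unwrap_char(), which
 * rebuilds SIR frames in rx_buff.
 */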
static void bfin_sir_tx_chars(struct net_device *dev)
{
	unsigned int chr;
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;

	if (self->tx_buff.len != 0) {
		chr = *(self->tx_buff.data);
		SIR_UART_PUT_CHAR(port, chr);
		self->tx_buff.data++;
		self->tx_buff.len--;
	} else {
		self->stats.tx_packets++;
		self->stats.tx_bytes += self->tx_buff.data - self->tx_buff.head;
		if (self->newspeed) {
			bfin_sir_set_speed(port, self->newspeed);
			self->speed = self->newspeed;
			self->newspeed = 0;
		}
		bfin_sir_stop_tx(port);
		bfin_sir_enable_rx(port);
		/* Frame done -- tell the stack we can take more data */
		netif_wake_queue(dev);
	}
}

static void bfin_sir_rx_chars(struct net_device *dev)
{
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;
	unsigned char ch;

	SIR_UART_CLEAR_LSR(port);
	ch = SIR_UART_GET_CHAR(port);
	async_unwrap_char(dev, &self->stats, &self->rx_buff, ch);
	dev->last_rx = jiffies;
}

static irqreturn_t bfin_sir_rx_int(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;

	spin_lock(&self->lock);
	while ((SIR_UART_GET_LSR(port) & DR))
		bfin_sir_rx_chars(dev);
	spin_unlock(&self->lock);

	return IRQ_HANDLED;
}

static irqreturn_t bfin_sir_tx_int(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;

	spin_lock(&self->lock);
	if (SIR_UART_GET_LSR(port) & THRE)
		bfin_sir_tx_chars(dev);
	spin_unlock(&self->lock);

	return IRQ_HANDLED;
}
#endif /* CONFIG_SIR_BFIN_PIO */

#ifdef CONFIG_SIR_BFIN_DMA
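/*
 * DMA mode: the whole wrapped frame in tx_buff is handed to the TX
 * channel in one linear transfer (the data cache is flushed first,
 * since the buffer comes from kmalloc).  tx_done guards against
 * restarting the channel while a transfer is still in flight.
 */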
static void bfin_sir_dma_tx_chars(struct net_device *dev)
{
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;

	if (!port->tx_done)
		return;
	port->tx_done = 0;

	if (self->tx_buff.len == 0) {
		self->stats.tx_packets++;
		if (self->newspeed) {
			bfin_sir_set_speed(port, self->newspeed);
			self->speed = self->newspeed;
			self->newspeed = 0;
		}
		bfin_sir_enable_rx(port);
		port->tx_done = 1;
		netif_wake_queue(dev);
		return;
	}

	blackfin_dcache_flush_range((unsigned long)(self->tx_buff.data),
		(unsigned long)(self->tx_buff.data+self->tx_buff.len));
	set_dma_config(port->tx_dma_channel,
		set_bfin_dma_config(DIR_READ, DMA_FLOW_STOP,
			INTR_ON_BUF, DIMENSION_LINEAR, DATA_SIZE_8,
			DMA_SYNC_RESTART));
	set_dma_start_addr(port->tx_dma_channel,
		(unsigned long)(self->tx_buff.data));
	set_dma_x_count(port->tx_dma_channel, self->tx_buff.len);
	set_dma_x_modify(port->tx_dma_channel, 1);
	enable_dma(port->tx_dma_channel);
}

static irqreturn_t bfin_sir_dma_tx_int(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;

	spin_lock(&self->lock);
	if (!(get_dma_curr_irqstat(port->tx_dma_channel) & DMA_RUN)) {
		clear_dma_irqstat(port->tx_dma_channel);
		bfin_sir_stop_tx(port);

		self->stats.tx_packets++;
		self->stats.tx_bytes += self->tx_buff.len;
		self->tx_buff.len = 0;
		if (self->newspeed) {
			bfin_sir_set_speed(port, self->newspeed);
			self->speed = self->newspeed;
			self->newspeed = 0;
		}
		bfin_sir_enable_rx(port);
		/* Frame done -- tell the stack we can take more data */
		netif_wake_queue(dev);
		port->tx_done = 1;
	}
	spin_unlock(&self->lock);

	return IRQ_HANDLED;
}

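/*
 * RX side of DMA mode: head/tail index into the autobuffered 2D ring.
 * The per-row interrupt below advances tail by a full row, while the
 * flush timer (bfin_sir_rx_dma_timeout) computes the DMA engine's exact
 * position so a frame that ends mid-row is not left sitting in the
 * buffer.
 */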
static void bfin_sir_dma_rx_chars(struct net_device *dev)
{
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;
	int i;

	SIR_UART_CLEAR_LSR(port);

	for (i = port->rx_dma_buf.head; i < port->rx_dma_buf.tail; i++)
		async_unwrap_char(dev, &self->stats, &self->rx_buff, port->rx_dma_buf.buf[i]);
}

void bfin_sir_rx_dma_timeout(struct net_device *dev)
{
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;
	int x_pos, pos;
	unsigned long flags;

	spin_lock_irqsave(&self->lock, flags);
	x_pos = DMA_SIR_RX_XCNT - get_dma_curr_xcount(port->rx_dma_channel);
	if (x_pos == DMA_SIR_RX_XCNT)
		x_pos = 0;

	pos = port->rx_dma_nrows * DMA_SIR_RX_XCNT + x_pos;

	if (pos > port->rx_dma_buf.tail) {
		port->rx_dma_buf.tail = pos;
		bfin_sir_dma_rx_chars(dev);
		port->rx_dma_buf.head = port->rx_dma_buf.tail;
	}
	spin_unlock_irqrestore(&self->lock, flags);
}

static irqreturn_t bfin_sir_dma_rx_int(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;
	unsigned short irqstat;

	spin_lock(&self->lock);

	port->rx_dma_nrows++;
	port->rx_dma_buf.tail = DMA_SIR_RX_XCNT * port->rx_dma_nrows;
	bfin_sir_dma_rx_chars(dev);
	if (port->rx_dma_nrows >= DMA_SIR_RX_YCNT) {
		port->rx_dma_nrows = 0;
		port->rx_dma_buf.tail = 0;
	}
	port->rx_dma_buf.head = port->rx_dma_buf.tail;

	irqstat = get_dma_curr_irqstat(port->rx_dma_channel);
	clear_dma_irqstat(port->rx_dma_channel);
	spin_unlock(&self->lock);

	mod_timer(&port->rx_dma_timer, jiffies + DMA_SIR_RX_FLUSH_JIFS);
	return IRQ_HANDLED;
}
#endif /* CONFIG_SIR_BFIN_DMA */

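/*
 * Bring the port up: claim the DMA channels, then in DMA builds set up
 * the autobuffered RX ring and its flush timer, or in PIO builds hook
 * the RX interrupt and the TX interrupt that is assumed to follow it
 * (port->irq + 1).
 */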
static int bfin_sir_startup(struct bfin_sir_port *port, struct net_device *dev)
{
#ifdef CONFIG_SIR_BFIN_DMA
	dma_addr_t dma_handle;
#endif /* CONFIG_SIR_BFIN_DMA */

	if (request_dma(port->rx_dma_channel, "BFIN_UART_RX") < 0) {
		dev_warn(&dev->dev, "Unable to attach SIR RX DMA channel\n");
		return -EBUSY;
	}

	if (request_dma(port->tx_dma_channel, "BFIN_UART_TX") < 0) {
		dev_warn(&dev->dev, "Unable to attach SIR TX DMA channel\n");
		free_dma(port->rx_dma_channel);
		return -EBUSY;
	}

#ifdef CONFIG_SIR_BFIN_DMA

	set_dma_callback(port->rx_dma_channel, bfin_sir_dma_rx_int, dev);
	set_dma_callback(port->tx_dma_channel, bfin_sir_dma_tx_int, dev);

	port->rx_dma_buf.buf = (unsigned char *)dma_alloc_coherent(NULL, PAGE_SIZE, &dma_handle, GFP_DMA);
	port->rx_dma_buf.head = 0;
	port->rx_dma_buf.tail = 0;
	port->rx_dma_nrows = 0;

	set_dma_config(port->rx_dma_channel,
				set_bfin_dma_config(DIR_WRITE, DMA_FLOW_AUTO,
									INTR_ON_ROW, DIMENSION_2D,
									DATA_SIZE_8, DMA_SYNC_RESTART));
	set_dma_x_count(port->rx_dma_channel, DMA_SIR_RX_XCNT);
	set_dma_x_modify(port->rx_dma_channel, 1);
	set_dma_y_count(port->rx_dma_channel, DMA_SIR_RX_YCNT);
	set_dma_y_modify(port->rx_dma_channel, 1);
	set_dma_start_addr(port->rx_dma_channel, (unsigned long)port->rx_dma_buf.buf);
	enable_dma(port->rx_dma_channel);

	port->rx_dma_timer.data = (unsigned long)(dev);
	port->rx_dma_timer.function = (void *)bfin_sir_rx_dma_timeout;

#else

	if (request_irq(port->irq, bfin_sir_rx_int, IRQF_DISABLED, "BFIN_SIR_RX", dev)) {
		dev_warn(&dev->dev, "Unable to attach SIR RX interrupt\n");
		return -EBUSY;
	}

	if (request_irq(port->irq+1, bfin_sir_tx_int, IRQF_DISABLED, "BFIN_SIR_TX", dev)) {
		dev_warn(&dev->dev, "Unable to attach SIR TX interrupt\n");
		free_irq(port->irq, dev);
		return -EBUSY;
	}
#endif

	return 0;
}

static void bfin_sir_shutdown(struct bfin_sir_port *port, struct net_device *dev)
{
	unsigned short val;

	bfin_sir_stop_rx(port);
	SIR_UART_DISABLE_INTS(port);

	val = SIR_UART_GET_GCTL(port);
	val &= ~(UCEN | IREN | RPOLC);
	SIR_UART_PUT_GCTL(port, val);

#ifdef CONFIG_SIR_BFIN_DMA
	disable_dma(port->tx_dma_channel);
	disable_dma(port->rx_dma_channel);
	del_timer(&(port->rx_dma_timer));
	dma_free_coherent(NULL, PAGE_SIZE, port->rx_dma_buf.buf, 0);
#else
	free_irq(port->irq+1, dev);
	free_irq(port->irq, dev);
#endif
	free_dma(port->tx_dma_channel);
	free_dma(port->rx_dma_channel);
}

#ifdef CONFIG_PM
static int bfin_sir_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct bfin_sir_port *sir_port;
	struct net_device *dev;
	struct bfin_sir_self *self;

	sir_port = platform_get_drvdata(pdev);
	if (!sir_port)
		return 0;

	dev = sir_port->dev;
	self = netdev_priv(dev);
	if (self->open) {
		flush_work(&self->work);
		bfin_sir_shutdown(self->sir_port, dev);
		netif_device_detach(dev);
	}

	return 0;
}

static int bfin_sir_resume(struct platform_device *pdev)
{
	struct bfin_sir_port *sir_port;
	struct net_device *dev;
	struct bfin_sir_self *self;
	struct bfin_sir_port *port;

	sir_port = platform_get_drvdata(pdev);
	if (!sir_port)
		return 0;

	dev = sir_port->dev;
	self = netdev_priv(dev);
	port = self->sir_port;
	if (self->open) {
		if (self->newspeed) {
			self->speed = self->newspeed;
			self->newspeed = 0;
		}
		bfin_sir_startup(port, dev);
		bfin_sir_set_speed(port, 9600);
		bfin_sir_enable_rx(port);
		netif_device_attach(dev);
	}
	return 0;
}
#else
#define bfin_sir_suspend   NULL
#define bfin_sir_resume    NULL
#endif

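/*
 * Deferred transmit, run from the shared workqueue.  It waits for any
 * reception in progress to finish (bounded to ten turnaround delays),
 * pulses IREN/RPOLC to reset the IR function, then starts the TX DMA
 * or enables the TX interrupt, depending on the build.
 */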
static void bfin_sir_send_work(struct work_struct *work)
{
	struct bfin_sir_self *self = container_of(work, struct bfin_sir_self, work);
	struct net_device *dev = self->sir_port->dev;
	struct bfin_sir_port *port = self->sir_port;
	unsigned short val;
	int tx_cnt = 10;

	while (bfin_sir_is_receiving(dev) && --tx_cnt)
		turnaround_delay(dev->last_rx, self->mtt);

	bfin_sir_stop_rx(port);

	/* To avoid losing the RX interrupt, reset the IR function before
	 * sending data.  Setting the speed would also work, since that
	 * resets the whole UART.
	 */
	val = SIR_UART_GET_GCTL(port);
	val &= ~(IREN | RPOLC);
	SIR_UART_PUT_GCTL(port, val);
	SSYNC();
	val |= IREN | RPOLC;
	SIR_UART_PUT_GCTL(port, val);
	SSYNC();
	/* bfin_sir_set_speed(port, self->speed); */

#ifdef CONFIG_SIR_BFIN_DMA
	bfin_sir_dma_tx_chars(dev);
#endif
	bfin_sir_enable_tx(port);
	dev->trans_start = jiffies;
}

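/*
 * ndo_start_xmit: wrap the skb into an async SIR frame in tx_buff,
 * record any pending speed change, and defer the actual transmission
 * to bfin_sir_send_work(), since it may sleep in turnaround_delay().
 */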
static int bfin_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bfin_sir_self *self = netdev_priv(dev);
	int speed = irda_get_next_speed(skb);

	netif_stop_queue(dev);

	self->mtt = irda_get_mtt(skb);

	if (speed != self->speed && speed != -1)
		self->newspeed = speed;

	self->tx_buff.data = self->tx_buff.head;
	if (skb->len == 0)
		self->tx_buff.len = 0;
	else
		self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, self->tx_buff.truesize);

	schedule_work(&self->work);
	dev_kfree_skb(skb);

	return 0;
}

static int bfin_sir_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
{
	struct if_irda_req *rq = (struct if_irda_req *)ifreq;
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;
	int ret = 0;

	switch (cmd) {
	case SIOCSBANDWIDTH:
		if (capable(CAP_NET_ADMIN)) {
			if (self->open) {
				ret = bfin_sir_set_speed(port, rq->ifr_baudrate);
				bfin_sir_enable_rx(port);
			} else {
				dev_warn(&dev->dev, "SIOCSBANDWIDTH: !netif_running\n");
				ret = 0;
			}
		}
		break;

	case SIOCSMEDIABUSY:
		ret = -EPERM;
		if (capable(CAP_NET_ADMIN)) {
			irda_device_set_media_busy(dev, TRUE);
			ret = 0;
		}
		break;

	case SIOCGRECEIVING:
		rq->ifr_receiving = bfin_sir_is_receiving(dev);
		break;

	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

static struct net_device_stats *bfin_sir_stats(struct net_device *dev)
{
	struct bfin_sir_self *self = netdev_priv(dev);

	return &self->stats;
}

static int bfin_sir_open(struct net_device *dev)
{
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;
	int err;

	self->newspeed = 0;
	self->speed = 9600;

	spin_lock_init(&self->lock);

	err = bfin_sir_startup(port, dev);
	if (err)
		goto err_startup;

	bfin_sir_set_speed(port, 9600);

	self->irlap = irlap_open(dev, &self->qos, DRIVER_NAME);
	if (!self->irlap) {
		err = -ENOMEM;
		goto err_irlap;
	}

	INIT_WORK(&self->work, bfin_sir_send_work);

	/*
	 * Now enable the interrupt then start the queue
	 */
	self->open = 1;
	bfin_sir_enable_rx(port);

	netif_start_queue(dev);

	return 0;

err_irlap:
	self->open = 0;
	bfin_sir_shutdown(port, dev);
err_startup:
	return err;
}

static int bfin_sir_stop(struct net_device *dev)
{
	struct bfin_sir_self *self = netdev_priv(dev);

	flush_work(&self->work);
	bfin_sir_shutdown(self->sir_port, dev);

	if (self->rxskb) {
		dev_kfree_skb(self->rxskb);
		self->rxskb = NULL;
	}

	/* Stop IrLAP */
	if (self->irlap) {
		irlap_close(self->irlap);
		self->irlap = NULL;
	}

	netif_stop_queue(dev);
	self->open = 0;

	return 0;
}

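/*
 * Allocate one of the driver's wrap/unwrap buffers.  Callers size the
 * RX buffer for the largest unwrapped IrDA payload (IRDA_SKB_MAX_MTU)
 * and the TX buffer for a fully wrapped SIR frame (IRDA_SIR_MAX_FRAME).
 */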
static int bfin_sir_init_iobuf(iobuff_t *io, int size)
{
	io->head = kmalloc(size, GFP_KERNEL);
	if (!io->head)
		return -ENOMEM;
	io->truesize = size;
	io->in_frame = FALSE;
	io->state    = OUTSIDE_FRAME;
	io->data     = io->head;
	return 0;
}

static const struct net_device_ops bfin_sir_ndo = {
	.ndo_open		= bfin_sir_open,
	.ndo_stop		= bfin_sir_stop,
	.ndo_start_xmit		= bfin_sir_hard_xmit,
	.ndo_do_ioctl		= bfin_sir_ioctl,
	.ndo_get_stats		= bfin_sir_stats,
};

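/*
 * Probe: request the UART pins listed in per[pdev->id] (whose fourth
 * entry is expected to record the UART number, hence the pdev->id
 * check), set up the port and iobufs, mask the advertised QoS baud
 * rates down to max_rate, and register the IrDA net device.
 */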
static int __devinit bfin_sir_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct bfin_sir_self *self;
	unsigned int baudrate_mask;
	struct bfin_sir_port *sir_port;
	int err;

	if (pdev->id >= 0 && pdev->id < ARRAY_SIZE(per) &&
				per[pdev->id][3] == pdev->id) {
		err = peripheral_request_list(per[pdev->id], DRIVER_NAME);
		if (err)
			return err;
	} else {
		dev_err(&pdev->dev, "Invalid pdev id, please check board file\n");
		return -ENODEV;
	}

	err = -ENOMEM;
	sir_port = kmalloc(sizeof(*sir_port), GFP_KERNEL);
	if (!sir_port)
		goto err_mem_0;

	bfin_sir_init_ports(sir_port, pdev);

	dev = alloc_irdadev(sizeof(*self));
	if (!dev)
		goto err_mem_1;

	self = netdev_priv(dev);
	self->dev = &pdev->dev;
	self->sir_port = sir_port;
	sir_port->dev = dev;

	err = bfin_sir_init_iobuf(&self->rx_buff, IRDA_SKB_MAX_MTU);
	if (err)
		goto err_mem_2;
	err = bfin_sir_init_iobuf(&self->tx_buff, IRDA_SIR_MAX_FRAME);
	if (err)
		goto err_mem_3;

	dev->netdev_ops = &bfin_sir_ndo;
	dev->irq = sir_port->irq;

	irda_init_max_qos_capabilies(&self->qos);

	baudrate_mask = IR_9600;

	switch (max_rate) {
	case 115200:
		baudrate_mask |= IR_115200;
		/* fall through */
	case 57600:
		baudrate_mask |= IR_57600;
		/* fall through */
	case 38400:
		baudrate_mask |= IR_38400;
		/* fall through */
	case 19200:
		baudrate_mask |= IR_19200;
		/* fall through */
	case 9600:
		break;
	default:
		dev_warn(&pdev->dev, "Invalid maximum baud rate, using 9600\n");
	}

	self->qos.baud_rate.bits &= baudrate_mask;

	self->qos.min_turn_time.bits = 1; /* 10 ms or more */

	irda_qos_bits_to_value(&self->qos);

	err = register_netdev(dev);

	if (err) {
		kfree(self->tx_buff.head);
err_mem_3:
		kfree(self->rx_buff.head);
err_mem_2:
		free_netdev(dev);
err_mem_1:
		kfree(sir_port);
err_mem_0:
		peripheral_free_list(per[pdev->id]);
	} else
		platform_set_drvdata(pdev, sir_port);

	return err;
}

static int __devexit bfin_sir_remove(struct platform_device *pdev)
{
	struct bfin_sir_port *sir_port;
	struct net_device *dev = NULL;
	struct bfin_sir_self *self;

	sir_port = platform_get_drvdata(pdev);
	if (!sir_port)
		return 0;
	dev = sir_port->dev;
	self = netdev_priv(dev);
	unregister_netdev(dev);
	kfree(self->tx_buff.head);
	kfree(self->rx_buff.head);
	free_netdev(dev);
	kfree(sir_port);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver bfin_ir_driver = {
	.probe   = bfin_sir_probe,
	.remove  = __devexit_p(bfin_sir_remove),
	.suspend = bfin_sir_suspend,
	.resume  = bfin_sir_resume,
	.driver  = {
		.name = DRIVER_NAME,
	},
};

static int __init bfin_sir_init(void)
{
	return platform_driver_register(&bfin_ir_driver);
}

static void __exit bfin_sir_exit(void)
{
	platform_driver_unregister(&bfin_ir_driver);
}

module_init(bfin_sir_init);
module_exit(bfin_sir_exit);

module_param(max_rate, int, 0);
MODULE_PARM_DESC(max_rate, "Maximum baud rate (115200, 57600, 38400, 19200, 9600)");

MODULE_AUTHOR("Graf Yang <graf.yang@analog.com>");
MODULE_DESCRIPTION("Blackfin IrDA driver");
MODULE_LICENSE("GPL");