• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/net/irda/
1/*********************************************************************
2 *
3 * Filename:      w83977af_ir.c
4 * Version:       1.0
5 * Description:   FIR driver for the Winbond W83977AF Super I/O chip
6 * Status:        Experimental.
7 * Author:        Paul VanderSpek
8 * Created at:    Wed Nov  4 11:46:16 1998
9 * Modified at:   Fri Jan 28 12:10:59 2000
10 * Modified by:   Dag Brattli <dagb@cs.uit.no>
11 *
12 *     Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
13 *     Copyright (c) 1998-1999 Rebel.com
14 *
15 *     This program is free software; you can redistribute it and/or
16 *     modify it under the terms of the GNU General Public License as
17 *     published by the Free Software Foundation; either version 2 of
18 *     the License, or (at your option) any later version.
19 *
20 *     Neither Paul VanderSpek nor Rebel.com admit liability nor provide
21 *     warranty for any of this software. This material is provided "AS-IS"
22 *     and at no charge.
23 *
 *     If you find bugs in this file, it's very likely that the same bug
25 *     will also be in pc87108.c since the implementations are quite
26 *     similar.
27 *
 *     Notice that all functions that need to access the chip in _any_
29 *     way, must save BSR register on entry, and restore it on exit.
30 *     It is _very_ important to follow this policy!
31 *
32 *         __u8 bank;
33 *
34 *         bank = inb( iobase+BSR);
35 *
36 *         do_your_stuff_here();
37 *
38 *         outb( bank, iobase+BSR);
39 *
40 ********************************************************************/
41
42#include <linux/module.h>
43#include <linux/kernel.h>
44#include <linux/types.h>
45#include <linux/skbuff.h>
46#include <linux/netdevice.h>
47#include <linux/ioport.h>
48#include <linux/delay.h>
49#include <linux/init.h>
50#include <linux/rtnetlink.h>
51#include <linux/dma-mapping.h>
52#include <linux/gfp.h>
53
54#include <asm/io.h>
55#include <asm/dma.h>
56#include <asm/byteorder.h>
57
58#include <net/irda/irda.h>
59#include <net/irda/wrapper.h>
60#include <net/irda/irda_device.h>
61#include "w83977af.h"
62#include "w83977af_ir.h"
63
64#ifdef  CONFIG_ARCH_NETWINDER            /* Adjust to NetWinder differences */
65#undef  CONFIG_NETWINDER_TX_DMA_PROBLEMS /* Not needed */
66#define CONFIG_NETWINDER_RX_DMA_PROBLEMS /* Must have this one! */
67#endif
68#define CONFIG_USE_W977_PNP        /* Currently needed */
69#define PIO_MAX_SPEED       115200
70
71static char *driver_name = "w83977af_ir";
72static int  qos_mtt_bits = 0x07;   /* 1 ms or more */
73
74#define CHIP_IO_EXTENT 8
75
76static unsigned int io[] = { 0x180, ~0, ~0, ~0 };
77#ifdef CONFIG_ARCH_NETWINDER             /* Adjust to NetWinder differences */
78static unsigned int irq[] = { 6, 0, 0, 0 };
79#else
80static unsigned int irq[] = { 11, 0, 0, 0 };
81#endif
82static unsigned int dma[] = { 1, 0, 0, 0 };
83static unsigned int efbase[] = { W977_EFIO_BASE, W977_EFIO2_BASE };
84static unsigned int efio = W977_EFIO_BASE;
85
86static struct w83977af_ir *dev_self[] = { NULL, NULL, NULL, NULL};
87
88/* Some prototypes */
89static int  w83977af_open(int i, unsigned int iobase, unsigned int irq,
90                          unsigned int dma);
91static int  w83977af_close(struct w83977af_ir *self);
92static int  w83977af_probe(int iobase, int irq, int dma);
93static int  w83977af_dma_receive(struct w83977af_ir *self);
94static int  w83977af_dma_receive_complete(struct w83977af_ir *self);
95static netdev_tx_t  w83977af_hard_xmit(struct sk_buff *skb,
96					     struct net_device *dev);
97static int  w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
98static void w83977af_dma_write(struct w83977af_ir *self, int iobase);
99static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed);
100static int  w83977af_is_receiving(struct w83977af_ir *self);
101
102static int  w83977af_net_open(struct net_device *dev);
103static int  w83977af_net_close(struct net_device *dev);
104static int  w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
105
106/*
107 * Function w83977af_init ()
108 *
109 *    Initialize chip. Just try to find out how many chips we are dealing with
110 *    and where they are
111 */
112static int __init w83977af_init(void)
113{
114        int i;
115
116	IRDA_DEBUG(0, "%s()\n", __func__ );
117
118	for (i=0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) {
119		if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
120			return 0;
121	}
122	return -ENODEV;
123}
124
125/*
126 * Function w83977af_cleanup ()
127 *
128 *    Close all configured chips
129 *
130 */
131static void __exit w83977af_cleanup(void)
132{
133	int i;
134
135        IRDA_DEBUG(4, "%s()\n", __func__ );
136
137	for (i=0; i < ARRAY_SIZE(dev_self); i++) {
138		if (dev_self[i])
139			w83977af_close(dev_self[i]);
140	}
141}
142
/* net_device callbacks: open/stop bring the chip and the IrLAP layer
 * up/down, start_xmit queues one frame, ioctl serves IrDA requests. */
static const struct net_device_ops w83977_netdev_ops = {
	.ndo_open       = w83977af_net_open,
	.ndo_stop       = w83977af_net_close,
	.ndo_start_xmit = w83977af_hard_xmit,
	.ndo_do_ioctl   = w83977af_net_ioctl,
};
149
150/*
151 * Function w83977af_open (iobase, irq)
152 *
153 *    Open driver instance
154 *
155 */
156static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
157			 unsigned int dma)
158{
159	struct net_device *dev;
160        struct w83977af_ir *self;
161	int err;
162
163	IRDA_DEBUG(0, "%s()\n", __func__ );
164
165	/* Lock the port that we need */
166	if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
167		IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
168		      __func__ , iobase);
169		return -ENODEV;
170	}
171
172	if (w83977af_probe(iobase, irq, dma) == -1) {
173		err = -1;
174		goto err_out;
175	}
176	/*
177	 *  Allocate new instance of the driver
178	 */
179	dev = alloc_irdadev(sizeof(struct w83977af_ir));
180	if (dev == NULL) {
181		printk( KERN_ERR "IrDA: Can't allocate memory for "
182			"IrDA control block!\n");
183		err = -ENOMEM;
184		goto err_out;
185	}
186
187	self = netdev_priv(dev);
188	spin_lock_init(&self->lock);
189
190
191	/* Initialize IO */
192	self->io.fir_base   = iobase;
193        self->io.irq       = irq;
194        self->io.fir_ext   = CHIP_IO_EXTENT;
195        self->io.dma       = dma;
196        self->io.fifo_size = 32;
197
198	/* Initialize QoS for this device */
199	irda_init_max_qos_capabilies(&self->qos);
200
201	/* The only value we must override it the baudrate */
202
203	self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
204		IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);
205
206	/* The HP HDLS-1100 needs 1 ms according to the specs */
207	self->qos.min_turn_time.bits = qos_mtt_bits;
208	irda_qos_bits_to_value(&self->qos);
209
210	/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
211	self->rx_buff.truesize = 14384;
212	self->tx_buff.truesize = 4000;
213
214	/* Allocate memory if needed */
215	self->rx_buff.head =
216		dma_alloc_coherent(NULL, self->rx_buff.truesize,
217				   &self->rx_buff_dma, GFP_KERNEL);
218	if (self->rx_buff.head == NULL) {
219		err = -ENOMEM;
220		goto err_out1;
221	}
222
223	memset(self->rx_buff.head, 0, self->rx_buff.truesize);
224
225	self->tx_buff.head =
226		dma_alloc_coherent(NULL, self->tx_buff.truesize,
227				   &self->tx_buff_dma, GFP_KERNEL);
228	if (self->tx_buff.head == NULL) {
229		err = -ENOMEM;
230		goto err_out2;
231	}
232	memset(self->tx_buff.head, 0, self->tx_buff.truesize);
233
234	self->rx_buff.in_frame = FALSE;
235	self->rx_buff.state = OUTSIDE_FRAME;
236	self->tx_buff.data = self->tx_buff.head;
237	self->rx_buff.data = self->rx_buff.head;
238	self->netdev = dev;
239
240	dev->netdev_ops	= &w83977_netdev_ops;
241
242	err = register_netdev(dev);
243	if (err) {
244		IRDA_ERROR("%s(), register_netdevice() failed!\n", __func__);
245		goto err_out3;
246	}
247	IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
248
249	/* Need to store self somewhere */
250	dev_self[i] = self;
251
252	return 0;
253err_out3:
254	dma_free_coherent(NULL, self->tx_buff.truesize,
255			  self->tx_buff.head, self->tx_buff_dma);
256err_out2:
257	dma_free_coherent(NULL, self->rx_buff.truesize,
258			  self->rx_buff.head, self->rx_buff_dma);
259err_out1:
260	free_netdev(dev);
261err_out:
262	release_region(iobase, CHIP_IO_EXTENT);
263	return err;
264}
265
266/*
267 * Function w83977af_close (self)
268 *
269 *    Close driver instance
270 *
271 */
272static int w83977af_close(struct w83977af_ir *self)
273{
274	int iobase;
275
276	IRDA_DEBUG(0, "%s()\n", __func__ );
277
278        iobase = self->io.fir_base;
279
280#ifdef CONFIG_USE_W977_PNP
281	/* enter PnP configuration mode */
282	w977_efm_enter(efio);
283
284	w977_select_device(W977_DEVICE_IR, efio);
285
286	/* Deactivate device */
287	w977_write_reg(0x30, 0x00, efio);
288
289	w977_efm_exit(efio);
290#endif /* CONFIG_USE_W977_PNP */
291
292	/* Remove netdevice */
293	unregister_netdev(self->netdev);
294
295	/* Release the PORT that this driver is using */
296	IRDA_DEBUG(0 , "%s(), Releasing Region %03x\n",
297	      __func__ , self->io.fir_base);
298	release_region(self->io.fir_base, self->io.fir_ext);
299
300	if (self->tx_buff.head)
301		dma_free_coherent(NULL, self->tx_buff.truesize,
302				  self->tx_buff.head, self->tx_buff_dma);
303
304	if (self->rx_buff.head)
305		dma_free_coherent(NULL, self->rx_buff.truesize,
306				  self->rx_buff.head, self->rx_buff_dma);
307
308	free_netdev(self->netdev);
309
310	return 0;
311}
312
313static int w83977af_probe(int iobase, int irq, int dma)
314{
315  	int version;
316	int i;
317
318 	for (i=0; i < 2; i++) {
319		IRDA_DEBUG( 0, "%s()\n", __func__ );
320#ifdef CONFIG_USE_W977_PNP
321 		/* Enter PnP configuration mode */
322		w977_efm_enter(efbase[i]);
323
324 		w977_select_device(W977_DEVICE_IR, efbase[i]);
325
326 		/* Configure PnP port, IRQ, and DMA channel */
327 		w977_write_reg(0x60, (iobase >> 8) & 0xff, efbase[i]);
328 		w977_write_reg(0x61, (iobase) & 0xff, efbase[i]);
329
330 		w977_write_reg(0x70, irq, efbase[i]);
331#ifdef CONFIG_ARCH_NETWINDER
332		/* Netwinder uses 1 higher than Linux */
333 		w977_write_reg(0x74, dma+1, efbase[i]);
334#else
335 		w977_write_reg(0x74, dma, efbase[i]);
336#endif /*CONFIG_ARCH_NETWINDER */
337 		w977_write_reg(0x75, 0x04, efbase[i]);  /* Disable Tx DMA */
338
339 		/* Set append hardware CRC, enable IR bank selection */
340 		w977_write_reg(0xf0, APEDCRC|ENBNKSEL, efbase[i]);
341
342 		/* Activate device */
343 		w977_write_reg(0x30, 0x01, efbase[i]);
344
345 		w977_efm_exit(efbase[i]);
346#endif /* CONFIG_USE_W977_PNP */
347  		/* Disable Advanced mode */
348  		switch_bank(iobase, SET2);
349  		outb(iobase+2, 0x00);
350
351 		/* Turn on UART (global) interrupts */
352 		switch_bank(iobase, SET0);
353  		outb(HCR_EN_IRQ, iobase+HCR);
354
355  		/* Switch to advanced mode */
356  		switch_bank(iobase, SET2);
357  		outb(inb(iobase+ADCR1) | ADCR1_ADV_SL, iobase+ADCR1);
358
359  		/* Set default IR-mode */
360  		switch_bank(iobase, SET0);
361  		outb(HCR_SIR, iobase+HCR);
362
363  		/* Read the Advanced IR ID */
364  		switch_bank(iobase, SET3);
365  		version = inb(iobase+AUID);
366
367  		/* Should be 0x1? */
368  		if (0x10 == (version & 0xf0)) {
369 			efio = efbase[i];
370
371 			/* Set FIFO size to 32 */
372 			switch_bank(iobase, SET2);
373 			outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);
374
375 			/* Set FIFO threshold to TX17, RX16 */
376 			switch_bank(iobase, SET0);
377 			outb(UFR_RXTL|UFR_TXTL|UFR_TXF_RST|UFR_RXF_RST|
378			     UFR_EN_FIFO,iobase+UFR);
379
380 			/* Receiver frame length */
381 			switch_bank(iobase, SET4);
382			outb(2048 & 0xff, iobase+6);
383			outb((2048 >> 8) & 0x1f, iobase+7);
384
385			/*
386			 * Init HP HSDL-1100 transceiver.
387			 *
388			 * Set IRX_MSL since we have 2 * receive paths IRRX,
389			 * and IRRXH. Clear IRSL0D since we want IRSL0 * to
390			 * be a input pin used for IRRXH
391			 *
392			 *   IRRX  pin 37 connected to receiver
393			 *   IRTX  pin 38 connected to transmitter
394			 *   FIRRX pin 39 connected to receiver      (IRSL0)
395			 *   CIRRX pin 40 connected to pin 37
396			 */
397			switch_bank(iobase, SET7);
398			outb(0x40, iobase+7);
399
400			IRDA_MESSAGE("W83977AF (IR) driver loaded. "
401				     "Version: 0x%02x\n", version);
402
403			return 0;
404		} else {
405			/* Try next extented function register address */
406			IRDA_DEBUG( 0, "%s(), Wrong chip version", __func__ );
407		}
408  	}
409	return -1;
410}
411
/*
 * Function w83977af_change_speed (self, speed)
 *
 *    Change the speed of the device: program the baud-rate divisor for
 *    SIR rates (9600..115200), or select MIR/FIR mode for 576k/1152k/4M.
 *    Saves and restores the caller's register bank (SSR) and re-arms the
 *    receive interrupts appropriate for the new speed.
 */
static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
{
	int ir_mode = HCR_SIR;
	int iobase;
	__u8 set;

	iobase = self->io.fir_base;

	/* Update accounting for new speed */
	self->io.speed = speed;

	/* Save current bank */
	set = inb(iobase+SSR);

	/* Disable interrupts */
	switch_bank(iobase, SET0);
	outb(0, iobase+ICR);

	/* Select Set 2 */
	switch_bank(iobase, SET2);
	outb(0x00, iobase+ABHL);

	/* SIR rates: divisor low byte (value == 115200 / speed) */
	switch (speed) {
	case 9600:   outb(0x0c, iobase+ABLL); break;
	case 19200:  outb(0x06, iobase+ABLL); break;
	case 38400:  outb(0x03, iobase+ABLL); break;
	case 57600:  outb(0x02, iobase+ABLL); break;
	case 115200: outb(0x01, iobase+ABLL); break;
	case 576000:
		ir_mode = HCR_MIR_576;
		IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__ );
		break;
	case 1152000:
		ir_mode = HCR_MIR_1152;
		IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __func__ );
		break;
	case 4000000:
		ir_mode = HCR_FIR;
		IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __func__ );
		break;
	default:
		/* Unknown rates fall back to FIR mode */
		ir_mode = HCR_FIR;
		IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", __func__ , speed);
		break;
	}

	/* Set speed mode */
	switch_bank(iobase, SET0);
	outb(ir_mode, iobase+HCR);

	/* set FIFO size to 32 */
	switch_bank(iobase, SET2);
	outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);

	/* set FIFO threshold to TX17, RX16 */
	switch_bank(iobase, SET0);
	outb(0x00, iobase+UFR);        /* Reset */
	outb(UFR_EN_FIFO, iobase+UFR); /* First we must enable FIFO */
	outb(0xa7, iobase+UFR);

	netif_wake_queue(self->netdev);

	/* Enable some interrupts so we can receive frames */
	switch_bank(iobase, SET0);
	if (speed > PIO_MAX_SPEED) {
		/* FIR/MIR: status-FIFO interrupt plus DMA receive */
		outb(ICR_EFSFI, iobase+ICR);
		w83977af_dma_receive(self);
	} else
		outb(ICR_ERBRI, iobase+ICR);

	/* Restore SSR */
	outb(set, iobase+SSR);
}
485
486/*
487 * Function w83977af_hard_xmit (skb, dev)
488 *
489 *    Sets up a DMA transfer to send the current frame.
490 *
491 */
492static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
493					    struct net_device *dev)
494{
495	struct w83977af_ir *self;
496	__s32 speed;
497	int iobase;
498	__u8 set;
499	int mtt;
500
501	self = netdev_priv(dev);
502
503	iobase = self->io.fir_base;
504
505	IRDA_DEBUG(4, "%s(%ld), skb->len=%d\n", __func__ , jiffies,
506		   (int) skb->len);
507
508	/* Lock transmit buffer */
509	netif_stop_queue(dev);
510
511	/* Check if we need to change the speed */
512	speed = irda_get_next_speed(skb);
513	if ((speed != self->io.speed) && (speed != -1)) {
514		/* Check for empty frame */
515		if (!skb->len) {
516			w83977af_change_speed(self, speed);
517			dev_kfree_skb(skb);
518			return NETDEV_TX_OK;
519		} else
520			self->new_speed = speed;
521	}
522
523	/* Save current set */
524	set = inb(iobase+SSR);
525
526	/* Decide if we should use PIO or DMA transfer */
527	if (self->io.speed > PIO_MAX_SPEED) {
528		self->tx_buff.data = self->tx_buff.head;
529		skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len);
530		self->tx_buff.len = skb->len;
531
532		mtt = irda_get_mtt(skb);
533			IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __func__ , jiffies, mtt);
534			if (mtt)
535				udelay(mtt);
536
537			/* Enable DMA interrupt */
538			switch_bank(iobase, SET0);
539	 		outb(ICR_EDMAI, iobase+ICR);
540	     		w83977af_dma_write(self, iobase);
541	} else {
542		self->tx_buff.data = self->tx_buff.head;
543		self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
544						   self->tx_buff.truesize);
545
546		/* Add interrupt on tx low level (will fire immediately) */
547		switch_bank(iobase, SET0);
548		outb(ICR_ETXTHI, iobase+ICR);
549	}
550	dev_kfree_skb(skb);
551
552	/* Restore set register */
553	outb(set, iobase+SSR);
554
555	return NETDEV_TX_OK;
556}
557
558/*
559 * Function w83977af_dma_write (self, iobase)
560 *
561 *    Send frame using DMA
562 *
563 */
static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
{
	__u8 set;
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
	unsigned long flags;
	__u8 hcr;
#endif
        IRDA_DEBUG(4, "%s(), len=%d\n", __func__ , self->tx_buff.len);

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable DMA before reprogramming the channel */
	switch_bank(iobase, SET0);
	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);

	/* Choose transmit DMA channel  */
	switch_bank(iobase, SET2);
	outb(ADCR1_D_CHSW|/*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase+ADCR1);
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
	/* NetWinder workaround: program the legacy ISA DMA controller
	 * directly while holding the instance lock */
	spin_lock_irqsave(&self->lock, flags);

	disable_dma(self->io.dma);
	clear_dma_ff(self->io.dma);
	/* NOTE(review): DMA_MODE_READ looks suspicious for a transmit —
	 * the generic path below uses DMA_MODE_WRITE.  This branch is
	 * compiled out by the #undef at the top of the file; confirm
	 * before ever enabling CONFIG_NETWINDER_TX_DMA_PROBLEMS. */
	set_dma_mode(self->io.dma, DMA_MODE_READ);
	set_dma_addr(self->io.dma, self->tx_buff_dma);
	set_dma_count(self->io.dma, self->tx_buff.len);
#else
	irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
		       DMA_MODE_WRITE);
#endif
	self->io.direction = IO_XMIT;

	/* Enable DMA */
 	switch_bank(iobase, SET0);
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
	hcr = inb(iobase+HCR);
	outb(hcr | HCR_EN_DMA, iobase+HCR);
	enable_dma(self->io.dma);
	spin_unlock_irqrestore(&self->lock, flags);
#else
	outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR);
#endif

	/* Restore set register */
	outb(set, iobase+SSR);
}
611
612/*
613 * Function w83977af_pio_write (iobase, buf, len, fifo_size)
614 *
615 *
616 *
617 */
618static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
619{
620	int actual = 0;
621	__u8 set;
622
623	IRDA_DEBUG(4, "%s()\n", __func__ );
624
625	/* Save current bank */
626	set = inb(iobase+SSR);
627
628	switch_bank(iobase, SET0);
629	if (!(inb_p(iobase+USR) & USR_TSRE)) {
630		IRDA_DEBUG(4,
631			   "%s(), warning, FIFO not empty yet!\n", __func__  );
632
633		fifo_size -= 17;
634		IRDA_DEBUG(4, "%s(), %d bytes left in tx fifo\n",
635			   __func__ , fifo_size);
636	}
637
638	/* Fill FIFO with current frame */
639	while ((fifo_size-- > 0) && (actual < len)) {
640		/* Transmit next byte */
641		outb(buf[actual++], iobase+TBR);
642	}
643
644	IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n",
645		   __func__ , fifo_size, actual, len);
646
647	/* Restore bank */
648	outb(set, iobase+SSR);
649
650	return actual;
651}
652
653/*
654 * Function w83977af_dma_xmit_complete (self)
655 *
656 *    The transfer of a frame in finished. So do the necessary things
657 *
658 *
659 */
static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
{
	int iobase;
	__u8 set;

	IRDA_DEBUG(4, "%s(%ld)\n", __func__ , jiffies);

	IRDA_ASSERT(self != NULL, return;);

	iobase = self->io.fir_base;

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable DMA */
	switch_bank(iobase, SET0);
	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);

	/* Check for underrun! */
	if (inb(iobase+AUDR) & AUDR_UNDR) {
		IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __func__ );

		self->netdev->stats.tx_errors++;
		self->netdev->stats.tx_fifo_errors++;

		/* Clear bit, by writing 1 to it */
		outb(AUDR_UNDR, iobase+AUDR);
	} else
		self->netdev->stats.tx_packets++;

	/* Apply a speed change deferred by w83977af_hard_xmit() while this
	 * frame was still in flight */
	if (self->new_speed) {
		w83977af_change_speed(self, self->new_speed);
		self->new_speed = 0;
	}

	/* Unlock tx_buff and request another frame */
	/* Tell the network layer, that we want more frames */
	netif_wake_queue(self->netdev);

	/* Restore set */
	outb(set, iobase+SSR);
}
703
704/*
705 * Function w83977af_dma_receive (self)
706 *
707 *    Get ready for receiving a frame. The device will initiate a DMA
708 *    if it starts to receive a frame.
709 *
710 */
static int w83977af_dma_receive(struct w83977af_ir *self)
{
	int iobase;
	__u8 set;
#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
	unsigned long flags;
	__u8 hcr;
#endif
	IRDA_ASSERT(self != NULL, return -1;);

	IRDA_DEBUG(4, "%s\n", __func__ );

	iobase= self->io.fir_base;

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable DMA before reprogramming the channel */
	switch_bank(iobase, SET0);
	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);

	/* Choose DMA Rx, DMA Fairness, and Advanced mode */
	switch_bank(iobase, SET2);
	outb((inb(iobase+ADCR1) & ~ADCR1_D_CHSW)/*|ADCR1_DMA_F*/|ADCR1_ADV_SL,
	     iobase+ADCR1);

	self->io.direction = IO_RECV;
	self->rx_buff.data = self->rx_buff.head;

#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
	/* NetWinder workaround: program the legacy ISA DMA controller
	 * directly while holding the instance lock */
	spin_lock_irqsave(&self->lock, flags);

	disable_dma(self->io.dma);
	clear_dma_ff(self->io.dma);
	set_dma_mode(self->io.dma, DMA_MODE_READ);
	set_dma_addr(self->io.dma, self->rx_buff_dma);
	set_dma_count(self->io.dma, self->rx_buff.truesize);
#else
	irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
		       DMA_MODE_READ);
#endif
	/*
	 * Reset Rx FIFO. This will also flush the ST_FIFO, it's very
	 * important that we don't reset the Tx FIFO since it might not
	 * be finished transmitting yet
	 */
	switch_bank(iobase, SET0);
	outb(UFR_RXTL|UFR_TXTL|UFR_RXF_RST|UFR_EN_FIFO, iobase+UFR);
	self->st_fifo.len = self->st_fifo.tail = self->st_fifo.head = 0;

	/* Enable DMA */
	switch_bank(iobase, SET0);
#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
	hcr = inb(iobase+HCR);
	outb(hcr | HCR_EN_DMA, iobase+HCR);
	enable_dma(self->io.dma);
	spin_unlock_irqrestore(&self->lock, flags);
#else
	outb(inb(iobase+HCR) | HCR_EN_DMA, iobase+HCR);
#endif
	/* Restore set */
	outb(set, iobase+SSR);

	return 0;
}
776
777/*
778 * Function w83977af_receive_complete (self)
779 *
780 *    Finished with receiving a frame
781 *
782 */
783static int w83977af_dma_receive_complete(struct w83977af_ir *self)
784{
785	struct sk_buff *skb;
786	struct st_fifo *st_fifo;
787	int len;
788	int iobase;
789	__u8 set;
790	__u8 status;
791
792	IRDA_DEBUG(4, "%s\n", __func__ );
793
794	st_fifo = &self->st_fifo;
795
796	iobase = self->io.fir_base;
797
798	/* Save current set */
799	set = inb(iobase+SSR);
800
801	iobase = self->io.fir_base;
802
803	/* Read status FIFO */
804	switch_bank(iobase, SET5);
805	while ((status = inb(iobase+FS_FO)) & FS_FO_FSFDR) {
806		st_fifo->entries[st_fifo->tail].status = status;
807
808		st_fifo->entries[st_fifo->tail].len  = inb(iobase+RFLFL);
809		st_fifo->entries[st_fifo->tail].len |= inb(iobase+RFLFH) << 8;
810
811		st_fifo->tail++;
812		st_fifo->len++;
813	}
814
815	while (st_fifo->len) {
816		/* Get first entry */
817		status = st_fifo->entries[st_fifo->head].status;
818		len    = st_fifo->entries[st_fifo->head].len;
819		st_fifo->head++;
820		st_fifo->len--;
821
822		/* Check for errors */
823		if (status & FS_FO_ERR_MSK) {
824			if (status & FS_FO_LST_FR) {
825				/* Add number of lost frames to stats */
826				self->netdev->stats.rx_errors += len;
827			} else {
828				/* Skip frame */
829				self->netdev->stats.rx_errors++;
830
831				self->rx_buff.data += len;
832
833				if (status & FS_FO_MX_LEX)
834					self->netdev->stats.rx_length_errors++;
835
836				if (status & FS_FO_PHY_ERR)
837					self->netdev->stats.rx_frame_errors++;
838
839				if (status & FS_FO_CRC_ERR)
840					self->netdev->stats.rx_crc_errors++;
841			}
842			/* The errors below can be reported in both cases */
843			if (status & FS_FO_RX_OV)
844				self->netdev->stats.rx_fifo_errors++;
845
846			if (status & FS_FO_FSF_OV)
847				self->netdev->stats.rx_fifo_errors++;
848
849		} else {
850			/* Check if we have transferred all data to memory */
851			switch_bank(iobase, SET0);
852			if (inb(iobase+USR) & USR_RDR) {
853				udelay(80); /* Should be enough!? */
854			}
855
856			skb = dev_alloc_skb(len+1);
857			if (skb == NULL)  {
858				printk(KERN_INFO
859				       "%s(), memory squeeze, dropping frame.\n", __func__);
860				/* Restore set register */
861				outb(set, iobase+SSR);
862
863				return FALSE;
864			}
865
866			/*  Align to 20 bytes */
867			skb_reserve(skb, 1);
868
869			/* Copy frame without CRC */
870			if (self->io.speed < 4000000) {
871				skb_put(skb, len-2);
872				skb_copy_to_linear_data(skb,
873							self->rx_buff.data,
874							len - 2);
875			} else {
876				skb_put(skb, len-4);
877				skb_copy_to_linear_data(skb,
878							self->rx_buff.data,
879							len - 4);
880			}
881
882			/* Move to next frame */
883			self->rx_buff.data += len;
884			self->netdev->stats.rx_packets++;
885
886			skb->dev = self->netdev;
887			skb_reset_mac_header(skb);
888			skb->protocol = htons(ETH_P_IRDA);
889			netif_rx(skb);
890		}
891	}
892	/* Restore set register */
893	outb(set, iobase+SSR);
894
895	return TRUE;
896}
897
898/*
 * Function w83977af_pio_receive (self)
900 *
901 *    Receive all data in receiver FIFO
902 *
903 */
904static void w83977af_pio_receive(struct w83977af_ir *self)
905{
906	__u8 byte = 0x00;
907	int iobase;
908
909	IRDA_DEBUG(4, "%s()\n", __func__ );
910
911	IRDA_ASSERT(self != NULL, return;);
912
913	iobase = self->io.fir_base;
914
915	/*  Receive all characters in Rx FIFO */
916	do {
917		byte = inb(iobase+RBR);
918		async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
919				  byte);
920	} while (inb(iobase+USR) & USR_RDR); /* Data available */
921}
922
923/*
924 * Function w83977af_sir_interrupt (self, eir)
925 *
926 *    Handle SIR interrupt
927 *
928 */
static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
{
	int actual;
	__u8 new_icr = 0;
	__u8 set;
	int iobase;

	IRDA_DEBUG(4, "%s(), isr=%#x\n", __func__ , isr);

	iobase = self->io.fir_base;
	/* Transmit FIFO low on data */
	if (isr & ISR_TXTH_I) {
		/* Write data left in transmit buffer */
		actual = w83977af_pio_write(self->io.fir_base,
					    self->tx_buff.data,
					    self->tx_buff.len,
					    self->io.fifo_size);

		self->tx_buff.data += actual;
		self->tx_buff.len  -= actual;

		self->io.direction = IO_XMIT;

		/* Check if finished */
		if (self->tx_buff.len > 0) {
			/* More to send: keep the Tx-threshold IRQ armed */
			new_icr |= ICR_ETXTHI;
		} else {
			/* Frame complete: signal end-of-frame (AUDR_SFEND),
			 * preserving the caller's register bank */
			set = inb(iobase+SSR);
			switch_bank(iobase, SET0);
			outb(AUDR_SFEND, iobase+AUDR);
			outb(set, iobase+SSR);

			self->netdev->stats.tx_packets++;

			/* Feed me more packets */
			netif_wake_queue(self->netdev);
			new_icr |= ICR_ETBREI;
		}
	}
	/* Check if transmission has completed */
	if (isr & ISR_TXEMP_I) {
		/* Check if we need to change the speed? */
		if (self->new_speed) {
			IRDA_DEBUG(2,
				   "%s(), Changing speed!\n", __func__ );
			w83977af_change_speed(self, self->new_speed);
			self->new_speed = 0;
		}

		/* Turn around and get ready to receive some data */
		self->io.direction = IO_RECV;
		new_icr |= ICR_ERBRI;
	}

	/* Rx FIFO threshold or timeout */
	if (isr & ISR_RXTH_I) {
		w83977af_pio_receive(self);

		/* Keep receiving */
		new_icr |= ICR_ERBRI;
	}
	/* Returned mask is written to ICR by w83977af_interrupt() */
	return new_icr;
}
991}
992
993/*
 * Function w83977af_fir_interrupt (self, isr)
995 *
996 *    Handle MIR/FIR interrupt
997 *
998 */
static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
{
	__u8 new_icr = 0;
	__u8 set;
	int iobase;

	iobase = self->io.fir_base;
	/* Save the caller's register bank; restored before returning */
	set = inb(iobase+SSR);

	/* End of frame detected in FIFO */
	if (isr & (ISR_FEND_I|ISR_FSF_I)) {
		if (w83977af_dma_receive_complete(self)) {

			/* Wait for next status FIFO interrupt */
			new_icr |= ICR_EFSFI;
		} else {
			/* DMA not finished yet: poll again via a 1 ms
			 * hardware timer interrupt */

			/* Set timer value, resolution 1 ms */
			switch_bank(iobase, SET4);
			outb(0x01, iobase+TMRL); /* 1 ms */
			outb(0x00, iobase+TMRH);

			/* Start timer */
			outb(IR_MSL_EN_TMR, iobase+IR_MSL);

			new_icr |= ICR_ETMRI;
		}
	}
	/* Timer finished */
	if (isr & ISR_TMR_I) {
		/* Disable timer */
		switch_bank(iobase, SET4);
		outb(0, iobase+IR_MSL);

		/* Clear timer event */
		/* switch_bank(iobase, SET0); */
/* 		outb(ASCR_CTE, iobase+ASCR); */

		/* Check if this is a TX timer interrupt */
		if (self->io.direction == IO_XMIT) {
			w83977af_dma_write(self, iobase);

			new_icr |= ICR_EDMAI;
		} else {
			/* Check if DMA has now finished */
			w83977af_dma_receive_complete(self);

			new_icr |= ICR_EFSFI;
		}
	}
	/* Finished with DMA */
	if (isr & ISR_DMA_I) {
		w83977af_dma_xmit_complete(self);

		/* Check if there are more frames to be transmitted */
		/* if (irda_device_txqueue_empty(self)) { */

		/* Prepare for receive
		 *
		 * ** Netwinder Tx DMA likes that we do this anyway **
		 */
		w83977af_dma_receive(self);
		new_icr = ICR_EFSFI;
	       /* } */
	}

	/* Restore set */
	outb(set, iobase+SSR);

	/* New interrupt-enable mask; written to ICR by the caller */
	return new_icr;
}
1071
1072/*
1073 * Function w83977af_interrupt (irq, dev_id, regs)
1074 *
1075 *    An interrupt from the chip has arrived. Time to do some work
1076 *
1077 */
static irqreturn_t w83977af_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct w83977af_ir *self;
	__u8 set, icr, isr;
	int iobase;

	self = netdev_priv(dev);

	iobase = self->io.fir_base;

	/* Save current bank */
	set = inb(iobase+SSR);
	switch_bank(iobase, SET0);

	icr = inb(iobase+ICR);
	isr = inb(iobase+ISR) & icr; /* Mask out the interesting ones */

	outb(0, iobase+ICR); /* Disable interrupts */

	if (isr) {
		/* Dispatch interrupt handler for the current speed; each
		 * handler returns the interrupt-enable mask to re-arm */
		if (self->io.speed > PIO_MAX_SPEED )
			icr = w83977af_fir_interrupt(self, isr);
		else
			icr = w83977af_sir_interrupt(self, isr);
	}

	outb(icr, iobase+ICR);    /* Restore (new) interrupts */
	outb(set, iobase+SSR);    /* Restore bank register */
	return IRQ_RETVAL(isr);
}
1110
1111/*
1112 * Function w83977af_is_receiving (self)
1113 *
1114 *    Return TRUE is we are currently receiving a frame
1115 *
1116 */
1117static int w83977af_is_receiving(struct w83977af_ir *self)
1118{
1119	int status = FALSE;
1120	int iobase;
1121	__u8 set;
1122
1123	IRDA_ASSERT(self != NULL, return FALSE;);
1124
1125	if (self->io.speed > 115200) {
1126		iobase = self->io.fir_base;
1127
1128		/* Check if rx FIFO is not empty */
1129		set = inb(iobase+SSR);
1130		switch_bank(iobase, SET2);
1131		if ((inb(iobase+RXFDTH) & 0x3f) != 0) {
1132			/* We are receiving something */
1133			status =  TRUE;
1134		}
1135		outb(set, iobase+SSR);
1136	} else
1137		status = (self->rx_buff.state != OUTSIDE_FRAME);
1138
1139	return status;
1140}
1141
1142/*
1143 * Function w83977af_net_open (dev)
1144 *
1145 *    Start the device
1146 *
1147 */
1148static int w83977af_net_open(struct net_device *dev)
1149{
1150	struct w83977af_ir *self;
1151	int iobase;
1152	char hwname[32];
1153	__u8 set;
1154
1155	IRDA_DEBUG(0, "%s()\n", __func__ );
1156
1157	IRDA_ASSERT(dev != NULL, return -1;);
1158	self = netdev_priv(dev);
1159
1160	IRDA_ASSERT(self != NULL, return 0;);
1161
1162	iobase = self->io.fir_base;
1163
1164	if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name,
1165			(void *) dev)) {
1166		return -EAGAIN;
1167	}
1168	/*
1169	 * Always allocate the DMA channel after the IRQ,
1170	 * and clean up on failure.
1171	 */
1172	if (request_dma(self->io.dma, dev->name)) {
1173		free_irq(self->io.irq, self);
1174		return -EAGAIN;
1175	}
1176
1177	/* Save current set */
1178	set = inb(iobase+SSR);
1179
1180 	/* Enable some interrupts so we can receive frames again */
1181 	switch_bank(iobase, SET0);
1182 	if (self->io.speed > 115200) {
1183 		outb(ICR_EFSFI, iobase+ICR);
1184 		w83977af_dma_receive(self);
1185 	} else
1186 		outb(ICR_ERBRI, iobase+ICR);
1187
1188	/* Restore bank register */
1189	outb(set, iobase+SSR);
1190
1191	/* Ready to play! */
1192	netif_start_queue(dev);
1193
1194	/* Give self a hardware name */
1195	sprintf(hwname, "w83977af @ 0x%03x", self->io.fir_base);
1196
1197	/*
1198	 * Open new IrLAP layer instance, now that everything should be
1199	 * initialized properly
1200	 */
1201	self->irlap = irlap_open(dev, &self->qos, hwname);
1202
1203	return 0;
1204}
1205
1206/*
1207 * Function w83977af_net_close (dev)
1208 *
1209 *    Stop the device
1210 *
1211 */
1212static int w83977af_net_close(struct net_device *dev)
1213{
1214	struct w83977af_ir *self;
1215	int iobase;
1216	__u8 set;
1217
1218	IRDA_DEBUG(0, "%s()\n", __func__ );
1219
1220	IRDA_ASSERT(dev != NULL, return -1;);
1221
1222	self = netdev_priv(dev);
1223
1224	IRDA_ASSERT(self != NULL, return 0;);
1225
1226	iobase = self->io.fir_base;
1227
1228	/* Stop device */
1229	netif_stop_queue(dev);
1230
1231	/* Stop and remove instance of IrLAP */
1232	if (self->irlap)
1233		irlap_close(self->irlap);
1234	self->irlap = NULL;
1235
1236	disable_dma(self->io.dma);
1237
1238	/* Save current set */
1239	set = inb(iobase+SSR);
1240
1241	/* Disable interrupts */
1242	switch_bank(iobase, SET0);
1243	outb(0, iobase+ICR);
1244
1245	free_irq(self->io.irq, dev);
1246	free_dma(self->io.dma);
1247
1248	/* Restore bank register */
1249	outb(set, iobase+SSR);
1250
1251	return 0;
1252}
1253
1254/*
1255 * Function w83977af_net_ioctl (dev, rq, cmd)
1256 *
1257 *    Process IOCTL commands for this device
1258 *
1259 */
1260static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1261{
1262	struct if_irda_req *irq = (struct if_irda_req *) rq;
1263	struct w83977af_ir *self;
1264	unsigned long flags;
1265	int ret = 0;
1266
1267	IRDA_ASSERT(dev != NULL, return -1;);
1268
1269	self = netdev_priv(dev);
1270
1271	IRDA_ASSERT(self != NULL, return -1;);
1272
1273	IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd);
1274
1275	spin_lock_irqsave(&self->lock, flags);
1276
1277	switch (cmd) {
1278	case SIOCSBANDWIDTH: /* Set bandwidth */
1279		if (!capable(CAP_NET_ADMIN)) {
1280			ret = -EPERM;
1281			goto out;
1282		}
1283		w83977af_change_speed(self, irq->ifr_baudrate);
1284		break;
1285	case SIOCSMEDIABUSY: /* Set media busy */
1286		if (!capable(CAP_NET_ADMIN)) {
1287			ret = -EPERM;
1288			goto out;
1289		}
1290		irda_device_set_media_busy(self->netdev, TRUE);
1291		break;
1292	case SIOCGRECEIVING: /* Check if we are receiving right now */
1293		irq->ifr_receiving = w83977af_is_receiving(self);
1294		break;
1295	default:
1296		ret = -EOPNOTSUPP;
1297	}
1298out:
1299	spin_unlock_irqrestore(&self->lock, flags);
1300	return ret;
1301}
1302
1303MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
1304MODULE_DESCRIPTION("Winbond W83977AF IrDA Device Driver");
1305MODULE_LICENSE("GPL");
1306
1307
1308module_param(qos_mtt_bits, int, 0);
1309MODULE_PARM_DESC(qos_mtt_bits, "Mimimum Turn Time");
1310module_param_array(io, int, NULL, 0);
1311MODULE_PARM_DESC(io, "Base I/O addresses");
1312module_param_array(irq, int, NULL, 0);
1313MODULE_PARM_DESC(irq, "IRQ lines");
1314
1315/*
1316 * Function init_module (void)
1317 *
1318 *
1319 *
1320 */
1321module_init(w83977af_init);
1322
1323/*
1324 * Function cleanup_module (void)
1325 *
1326 *
1327 *
1328 */
1329module_exit(w83977af_cleanup);
1330