/*********************************************************************
 *
 * Filename:      w83977af_ir.c
 * Version:       1.0
 * Description:   FIR driver for the Winbond W83977AF Super I/O chip
 * Status:        Experimental.
 * Author:        Paul VanderSpek
 * Created at:    Wed Nov  4 11:46:16 1998
 * Modified at:   Fri Jan 28 12:10:59 2000
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 *
 *     Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
 *     Copyright (c) 1998-1999 Rebel.com
 *
 *     This program is free software; you can redistribute it and/or
 *     modify it under the terms of the GNU General Public License as
 *     published by the Free Software Foundation; either version 2 of
 *     the License, or (at your option) any later version.
 *
 *     Neither Paul VanderSpek nor Rebel.com admit liability nor provide
 *     warranty for any of this software. This material is provided "AS-IS"
 *     and at no charge.
 *
 *     If you find bugs in this file, it's very likely that the same bug
 *     will also be in pc87108.c since the implementations are quite
 *     similar.
 *
 *     Notice that all functions that need to access the chip in _any_
 *     way must save the BSR register on entry and restore it on exit.
 *     It is _very_ important to follow this policy!
 *
 *         __u8 bank;
 *
 *         bank = inb( iobase+BSR);
 *
 *         do_your_stuff_here();
 *
 *         outb( bank, iobase+BSR);
 *
 ********************************************************************/

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>

#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>
#include "w83977af.h"
#include "w83977af_ir.h"

#ifdef  CONFIG_ARCH_NETWINDER            /* Adjust to NetWinder differences */
#undef  CONFIG_NETWINDER_TX_DMA_PROBLEMS /* Not needed */
#define CONFIG_NETWINDER_RX_DMA_PROBLEMS /* Must have this one! */
#endif
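/*
 * The two defines above only change how the DMA controller is set up below:
 * with the *_DMA_PROBLEMS variants defined, the channel is programmed by hand
 * (disable_dma/clear_dma_ff/set_dma_mode/.../enable_dma) under self->lock
 * instead of going through irda_setup_dma().
 */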
#undef  CONFIG_USE_INTERNAL_TIMER  /* Just cannot make that timer work */
#define CONFIG_USE_W977_PNP        /* Currently needed */
#define PIO_MAX_SPEED       115200
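/* Speeds above PIO_MAX_SPEED use the MIR/FIR DMA paths; at or below it the
 * driver transfers data with PIO in SIR mode. */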

static char *driver_name = "w83977af_ir";
static int  qos_mtt_bits = 0x07;   /* 1 ms or more */
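/* 0x07 sets the three lowest bits of the IrLAP min-turn-time field, i.e. the
 * 10 ms, 5 ms and 1 ms values; the peer must allow at least 1 ms turnaround
 * (assumed mapping, resolved by irda_qos_bits_to_value() below). */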

#define CHIP_IO_EXTENT 8

static unsigned int io[] = { 0x180, ~0, ~0, ~0 };
#ifdef CONFIG_ARCH_NETWINDER             /* Adjust to NetWinder differences */
static unsigned int irq[] = { 6, 0, 0, 0 };
#else
static unsigned int irq[] = { 11, 0, 0, 0 };
#endif
static unsigned int dma[] = { 1, 0, 0, 0 };
static unsigned int efbase[] = { W977_EFIO_BASE, W977_EFIO2_BASE };
static unsigned int efio = W977_EFIO_BASE;
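/* efbase[] holds the two candidate Extended Function (configuration) register
 * bases of the chip; w83977af_probe() tries both and remembers the one that
 * answered in efio. */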

static struct w83977af_ir *dev_self[] = { NULL, NULL, NULL, NULL};

/* Some prototypes */
static int  w83977af_open(int i, unsigned int iobase, unsigned int irq,
                          unsigned int dma);
static int  w83977af_close(struct w83977af_ir *self);
static int  w83977af_probe(int iobase, int irq, int dma);
static int  w83977af_dma_receive(struct w83977af_ir *self);
static int  w83977af_dma_receive_complete(struct w83977af_ir *self);
static int  w83977af_hard_xmit(struct sk_buff *skb, struct net_device *dev);
static int  w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
static void w83977af_dma_write(struct w83977af_ir *self, int iobase);
static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed);
static int  w83977af_is_receiving(struct w83977af_ir *self);

static int  w83977af_net_open(struct net_device *dev);
static int  w83977af_net_close(struct net_device *dev);
static int  w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static struct net_device_stats *w83977af_net_get_stats(struct net_device *dev);

/*
 * Function w83977af_init ()
 *
 *    Initialize chip. Just try to find out how many chips we are dealing with
 *    and where they are
 */
static int __init w83977af_init(void)
{
	int i;

	IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );

	for (i=0; (io[i] < 2000) && (i < ARRAY_SIZE(dev_self)); i++) {
		if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
			return 0;
	}
	return -ENODEV;
}

/*
 * Function w83977af_cleanup ()
 *
 *    Close all configured chips
 *
 */
static void __exit w83977af_cleanup(void)
{
	int i;

	IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );

	for (i=0; i < ARRAY_SIZE(dev_self); i++) {
		if (dev_self[i])
			w83977af_close(dev_self[i]);
	}
}

/*
 * Function w83977af_open (i, iobase, irq, dma)
 *
 *    Open driver instance
 *
 */
int w83977af_open(int i, unsigned int iobase, unsigned int irq,
		  unsigned int dma)
{
	struct net_device *dev;
	struct w83977af_ir *self;
	int err;

	IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );

	/* Lock the port that we need */
	if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
		IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
		      __FUNCTION__ , iobase);
		return -ENODEV;
	}

	if (w83977af_probe(iobase, irq, dma) == -1) {
		err = -1;
		goto err_out;
	}
	/*
	 *  Allocate new instance of the driver
	 */
	dev = alloc_irdadev(sizeof(struct w83977af_ir));
	if (dev == NULL) {
		printk( KERN_ERR "IrDA: Can't allocate memory for "
			"IrDA control block!\n");
		err = -ENOMEM;
		goto err_out;
	}

	self = dev->priv;
	spin_lock_init(&self->lock);


	/* Initialize IO */
	self->io.fir_base  = iobase;
	self->io.irq       = irq;
	self->io.fir_ext   = CHIP_IO_EXTENT;
	self->io.dma       = dma;
	self->io.fifo_size = 32;

	/* Initialize QoS for this device */
	irda_init_max_qos_capabilies(&self->qos);

	/* The only value we must override is the baud rate */

	self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
		IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);
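	/* The baud_rate bit field is 16 bits wide and IR_4000000 lives in the
	 * high byte, hence the << 8 (assumption based on the IR_* layout). */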

	/* The HP HSDL-1100 needs 1 ms according to the specs */
	self->qos.min_turn_time.bits = qos_mtt_bits;
	irda_qos_bits_to_value(&self->qos);

	/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
	self->rx_buff.truesize = 14384;
	self->tx_buff.truesize = 4000;
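	/* With data_size = 2048 and window_size = 7 the formula above gives
	 * (2048 + 6) * 7 + 6 = 14384 bytes for the receive buffer. */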

	/* Allocate memory if needed */
	self->rx_buff.head =
		dma_alloc_coherent(NULL, self->rx_buff.truesize,
				   &self->rx_buff_dma, GFP_KERNEL);
	if (self->rx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out1;
	}

	memset(self->rx_buff.head, 0, self->rx_buff.truesize);

	self->tx_buff.head =
		dma_alloc_coherent(NULL, self->tx_buff.truesize,
				   &self->tx_buff_dma, GFP_KERNEL);
	if (self->tx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out2;
	}
	memset(self->tx_buff.head, 0, self->tx_buff.truesize);

	self->rx_buff.in_frame = FALSE;
	self->rx_buff.state = OUTSIDE_FRAME;
	self->tx_buff.data = self->tx_buff.head;
	self->rx_buff.data = self->rx_buff.head;
	self->netdev = dev;

	/* Keep track of module usage */
	SET_MODULE_OWNER(dev);

	/* Override the network functions we need to use */
	dev->hard_start_xmit = w83977af_hard_xmit;
	dev->open            = w83977af_net_open;
	dev->stop            = w83977af_net_close;
	dev->do_ioctl        = w83977af_net_ioctl;
	dev->get_stats	     = w83977af_net_get_stats;

	err = register_netdev(dev);
	if (err) {
		IRDA_ERROR("%s(), register_netdev() failed!\n", __FUNCTION__);
		goto err_out3;
	}
	IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);

	/* Need to store self somewhere */
	dev_self[i] = self;

	return 0;
err_out3:
	dma_free_coherent(NULL, self->tx_buff.truesize,
			  self->tx_buff.head, self->tx_buff_dma);
err_out2:
	dma_free_coherent(NULL, self->rx_buff.truesize,
			  self->rx_buff.head, self->rx_buff_dma);
err_out1:
	free_netdev(dev);
err_out:
	release_region(iobase, CHIP_IO_EXTENT);
	return err;
}

/*
 * Function w83977af_close (self)
 *
 *    Close driver instance
 *
 */
static int w83977af_close(struct w83977af_ir *self)
{
	int iobase;

	IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );

	iobase = self->io.fir_base;

#ifdef CONFIG_USE_W977_PNP
	/* enter PnP configuration mode */
	w977_efm_enter(efio);

	w977_select_device(W977_DEVICE_IR, efio);

	/* Deactivate device */
	w977_write_reg(0x30, 0x00, efio);

	w977_efm_exit(efio);
#endif /* CONFIG_USE_W977_PNP */

	/* Remove netdevice */
	unregister_netdev(self->netdev);

	/* Release the PORT that this driver is using */
	IRDA_DEBUG(0 , "%s(), Releasing Region %03x\n",
	      __FUNCTION__ , self->io.fir_base);
	release_region(self->io.fir_base, self->io.fir_ext);

	if (self->tx_buff.head)
		dma_free_coherent(NULL, self->tx_buff.truesize,
				  self->tx_buff.head, self->tx_buff_dma);

	if (self->rx_buff.head)
		dma_free_coherent(NULL, self->rx_buff.truesize,
				  self->rx_buff.head, self->rx_buff_dma);

	free_netdev(self->netdev);

	return 0;
}

int w83977af_probe(int iobase, int irq, int dma)
{
	int version;
	int i;

	for (i=0; i < 2; i++) {
		IRDA_DEBUG( 0, "%s()\n", __FUNCTION__ );
#ifdef CONFIG_USE_W977_PNP
		/* Enter PnP configuration mode */
		w977_efm_enter(efbase[i]);

		w977_select_device(W977_DEVICE_IR, efbase[i]);

		/* Configure PnP port, IRQ, and DMA channel */
		w977_write_reg(0x60, (iobase >> 8) & 0xff, efbase[i]);
		w977_write_reg(0x61, (iobase) & 0xff, efbase[i]);
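		/* CR60/CR61 take the high and low byte of the device I/O base,
		 * CR70 the IRQ and CR74 the DMA channel, per the usual Winbond
		 * Super I/O configuration register layout. */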

		w977_write_reg(0x70, irq, efbase[i]);
#ifdef CONFIG_ARCH_NETWINDER
		/* Netwinder uses 1 higher than Linux */
		w977_write_reg(0x74, dma+1, efbase[i]);
#else
		w977_write_reg(0x74, dma, efbase[i]);
#endif /* CONFIG_ARCH_NETWINDER */
		w977_write_reg(0x75, 0x04, efbase[i]);  /* Disable Tx DMA */

		/* Set append hardware CRC, enable IR bank selection */
		w977_write_reg(0xf0, APEDCRC|ENBNKSEL, efbase[i]);

		/* Activate device */
		w977_write_reg(0x30, 0x01, efbase[i]);

		w977_efm_exit(efbase[i]);
#endif /* CONFIG_USE_W977_PNP */
		/* Disable Advanced mode */
		switch_bank(iobase, SET2);
		outb(0x00, iobase+2);

		/* Turn on UART (global) interrupts */
		switch_bank(iobase, SET0);
		outb(HCR_EN_IRQ, iobase+HCR);

		/* Switch to advanced mode */
		switch_bank(iobase, SET2);
		outb(inb(iobase+ADCR1) | ADCR1_ADV_SL, iobase+ADCR1);

		/* Set default IR-mode */
		switch_bank(iobase, SET0);
		outb(HCR_SIR, iobase+HCR);

		/* Read the Advanced IR ID */
		switch_bank(iobase, SET3);
		version = inb(iobase+AUID);

		/* The high nibble of the chip version should read 0x1 */
		if (0x10 == (version & 0xf0)) {
			efio = efbase[i];

			/* Set FIFO size to 32 */
			switch_bank(iobase, SET2);
			outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);

			/* Set FIFO threshold to TX17, RX16 */
			switch_bank(iobase, SET0);
			outb(UFR_RXTL|UFR_TXTL|UFR_TXF_RST|UFR_RXF_RST|
			     UFR_EN_FIFO, iobase+UFR);

			/* Receiver frame length */
			switch_bank(iobase, SET4);
			outb(2048 & 0xff, iobase+6);
			outb((2048 >> 8) & 0x1f, iobase+7);

			/*
			 * Init HP HSDL-1100 transceiver.
			 *
			 * Set IRX_MSL since we have 2 receive paths, IRRX
			 * and IRRXH. Clear IRSL0D since we want IRSL0 to
			 * be an input pin used for IRRXH
			 *
			 *   IRRX  pin 37 connected to receiver
			 *   IRTX  pin 38 connected to transmitter
			 *   FIRRX pin 39 connected to receiver      (IRSL0)
			 *   CIRRX pin 40 connected to pin 37
			 */
			switch_bank(iobase, SET7);
			outb(0x40, iobase+7);

			IRDA_MESSAGE("W83977AF (IR) driver loaded. "
				     "Version: 0x%02x\n", version);

			return 0;
		} else {
			/* Try next extended function register address */
			IRDA_DEBUG( 0, "%s(), Wrong chip version\n", __FUNCTION__ );
		}
	}
	return -1;
}

void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
{
	int ir_mode = HCR_SIR;
	int iobase;
	__u8 set;

	iobase = self->io.fir_base;

	/* Update accounting for new speed */
	self->io.speed = speed;

	/* Save current bank */
	set = inb(iobase+SSR);

	/* Disable interrupts */
	switch_bank(iobase, SET0);
	outb(0, iobase+ICR);

	/* Select Set 2 */
	switch_bank(iobase, SET2);
	outb(0x00, iobase+ABHL);

	switch (speed) {
	case 9600:   outb(0x0c, iobase+ABLL); break;
	case 19200:  outb(0x06, iobase+ABLL); break;
	case 38400:  outb(0x03, iobase+ABLL); break;
	case 57600:  outb(0x02, iobase+ABLL); break;
	case 115200: outb(0x01, iobase+ABLL); break;
	case 576000:
		ir_mode = HCR_MIR_576;
		IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __FUNCTION__ );
		break;
	case 1152000:
		ir_mode = HCR_MIR_1152;
		IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __FUNCTION__ );
		break;
	case 4000000:
		ir_mode = HCR_FIR;
		IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __FUNCTION__ );
		break;
	default:
		ir_mode = HCR_FIR;
		IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", __FUNCTION__ , speed);
		break;
	}

	/* Set speed mode */
	switch_bank(iobase, SET0);
	outb(ir_mode, iobase+HCR);

	/* set FIFO size to 32 */
	switch_bank(iobase, SET2);
	outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);

	/* set FIFO threshold to TX17, RX16 */
	switch_bank(iobase, SET0);
	outb(0x00, iobase+UFR);        /* Reset */
	outb(UFR_EN_FIFO, iobase+UFR); /* First we must enable FIFO */
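	/* 0xa7 is presumably the same UFR_RXTL|UFR_TXTL|UFR_TXF_RST|
	 * UFR_RXF_RST|UFR_EN_FIFO combination that w83977af_probe() writes;
	 * it is kept as a raw constant here. */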
	outb(0xa7, iobase+UFR);

	netif_wake_queue(self->netdev);

	/* Enable some interrupts so we can receive frames */
	switch_bank(iobase, SET0);
	if (speed > PIO_MAX_SPEED) {
		outb(ICR_EFSFI, iobase+ICR);
		w83977af_dma_receive(self);
	} else
		outb(ICR_ERBRI, iobase+ICR);

	/* Restore SSR */
	outb(set, iobase+SSR);
}

/*
 * Function w83977af_hard_xmit (skb, dev)
 *
 *    Sets up a DMA transfer to send the current frame.
 *
 */
int w83977af_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct w83977af_ir *self;
	__s32 speed;
	int iobase;
	__u8 set;
	int mtt;

	self = (struct w83977af_ir *) dev->priv;

	iobase = self->io.fir_base;

	IRDA_DEBUG(4, "%s(%ld), skb->len=%d\n", __FUNCTION__ , jiffies,
		   (int) skb->len);

	/* Lock transmit buffer */
	netif_stop_queue(dev);

	/* Check if we need to change the speed */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		/* Check for empty frame */
		if (!skb->len) {
			w83977af_change_speed(self, speed);
			dev->trans_start = jiffies;
			dev_kfree_skb(skb);
			return 0;
		} else
			self->new_speed = speed;
	}

	/* Save current set */
	set = inb(iobase+SSR);

	/* Decide if we should use PIO or DMA transfer */
	if (self->io.speed > PIO_MAX_SPEED) {
		self->tx_buff.data = self->tx_buff.head;
		skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len);
		self->tx_buff.len = skb->len;

		mtt = irda_get_mtt(skb);
#ifdef CONFIG_USE_INTERNAL_TIMER
		if (mtt > 50) {
			/* Adjust for timer resolution */
			mtt /= 1000+1;

			/* Setup timer */
			switch_bank(iobase, SET4);
			outb(mtt & 0xff, iobase+TMRL);
			outb((mtt >> 8) & 0x0f, iobase+TMRH);

			/* Start timer */
			outb(IR_MSL_EN_TMR, iobase+IR_MSL);
			self->io.direction = IO_XMIT;

			/* Enable timer interrupt */
			switch_bank(iobase, SET0);
			outb(ICR_ETMRI, iobase+ICR);
		} else {
#endif
			IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __FUNCTION__ , jiffies, mtt);
			if (mtt)
				udelay(mtt);

			/* Enable DMA interrupt */
			switch_bank(iobase, SET0);
			outb(ICR_EDMAI, iobase+ICR);
			w83977af_dma_write(self, iobase);
#ifdef CONFIG_USE_INTERNAL_TIMER
		}
#endif
	} else {
		self->tx_buff.data = self->tx_buff.head;
		self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
						   self->tx_buff.truesize);

		/* Add interrupt on tx low level (will fire immediately) */
		switch_bank(iobase, SET0);
		outb(ICR_ETXTHI, iobase+ICR);
	}
	dev->trans_start = jiffies;
	dev_kfree_skb(skb);

	/* Restore set register */
	outb(set, iobase+SSR);

	return 0;
}

/*
 * Function w83977af_dma_write (self, iobase)
 *
 *    Send frame using DMA
 *
 */
static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
{
	__u8 set;
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
	unsigned long flags;
	__u8 hcr;
#endif
	IRDA_DEBUG(4, "%s(), len=%d\n", __FUNCTION__ , self->tx_buff.len);

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable DMA */
	switch_bank(iobase, SET0);
	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);

	/* Choose transmit DMA channel  */
	switch_bank(iobase, SET2);
	outb(ADCR1_D_CHSW|/*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase+ADCR1);
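	/* ADCR1_D_CHSW is set here before transmitting and cleared again in
	 * w83977af_dma_receive(), switching the shared DMA channel between
	 * the two directions. */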
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
	spin_lock_irqsave(&self->lock, flags);

	disable_dma(self->io.dma);
	clear_dma_ff(self->io.dma);
	set_dma_mode(self->io.dma, DMA_MODE_READ);
	set_dma_addr(self->io.dma, self->tx_buff_dma);
	set_dma_count(self->io.dma, self->tx_buff.len);
#else
	irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
		       DMA_MODE_WRITE);
#endif
	self->io.direction = IO_XMIT;

	/* Enable DMA */
	switch_bank(iobase, SET0);
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
	hcr = inb(iobase+HCR);
	outb(hcr | HCR_EN_DMA, iobase+HCR);
	enable_dma(self->io.dma);
	spin_unlock_irqrestore(&self->lock, flags);
#else
	outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR);
#endif

	/* Restore set register */
	outb(set, iobase+SSR);
}

/*
 * Function w83977af_pio_write (iobase, buf, len, fifo_size)
 *
 *    Fill the transmit FIFO with data using PIO
 *
 */
static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
{
	int actual = 0;
	__u8 set;

	IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );

	/* Save current bank */
	set = inb(iobase+SSR);

	switch_bank(iobase, SET0);
	if (!(inb_p(iobase+USR) & USR_TSRE)) {
		IRDA_DEBUG(4,
			   "%s(), warning, FIFO not empty yet!\n", __FUNCTION__  );

		fifo_size -= 17;
		IRDA_DEBUG(4, "%s(), %d bytes left in tx fifo\n",
			   __FUNCTION__ , fifo_size);
	}

	/* Fill FIFO with current frame */
	while ((fifo_size-- > 0) && (actual < len)) {
		/* Transmit next byte */
		outb(buf[actual++], iobase+TBR);
	}

	IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n",
		   __FUNCTION__ , fifo_size, actual, len);

	/* Restore bank */
	outb(set, iobase+SSR);

	return actual;
}

/*
 * Function w83977af_dma_xmit_complete (self)
 *
 *    The transfer of a frame is finished. So do the necessary things
 *
 *
 */
static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
{
	int iobase;
	__u8 set;

	IRDA_DEBUG(4, "%s(%ld)\n", __FUNCTION__ , jiffies);

	IRDA_ASSERT(self != NULL, return;);

	iobase = self->io.fir_base;

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable DMA */
	switch_bank(iobase, SET0);
	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);

	/* Check for underrun! */
	if (inb(iobase+AUDR) & AUDR_UNDR) {
		IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __FUNCTION__ );

		self->stats.tx_errors++;
		self->stats.tx_fifo_errors++;

		/* Clear bit, by writing 1 to it */
		outb(AUDR_UNDR, iobase+AUDR);
	} else
		self->stats.tx_packets++;


	if (self->new_speed) {
		w83977af_change_speed(self, self->new_speed);
		self->new_speed = 0;
	}

	/* Unlock tx_buff and request another frame */
	/* Tell the network layer, that we want more frames */
	netif_wake_queue(self->netdev);

	/* Restore set */
	outb(set, iobase+SSR);
}

/*
 * Function w83977af_dma_receive (self)
 *
 *    Get ready for receiving a frame. The device will initiate a DMA
 *    if it starts to receive a frame.
 *
 */
int w83977af_dma_receive(struct w83977af_ir *self)
{
	int iobase;
	__u8 set;
#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
	unsigned long flags;
	__u8 hcr;
#endif
	IRDA_ASSERT(self != NULL, return -1;);

	IRDA_DEBUG(4, "%s\n", __FUNCTION__ );

	iobase = self->io.fir_base;

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable DMA */
	switch_bank(iobase, SET0);
	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);

	/* Choose DMA Rx, DMA Fairness, and Advanced mode */
	switch_bank(iobase, SET2);
	outb((inb(iobase+ADCR1) & ~ADCR1_D_CHSW)/*|ADCR1_DMA_F*/|ADCR1_ADV_SL,
	     iobase+ADCR1);

	self->io.direction = IO_RECV;
	self->rx_buff.data = self->rx_buff.head;

#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
	spin_lock_irqsave(&self->lock, flags);

	disable_dma(self->io.dma);
	clear_dma_ff(self->io.dma);
	set_dma_mode(self->io.dma, DMA_MODE_READ);
	set_dma_addr(self->io.dma, self->rx_buff_dma);
	set_dma_count(self->io.dma, self->rx_buff.truesize);
#else
	irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
		       DMA_MODE_READ);
#endif
	/*
	 * Reset Rx FIFO. This will also flush the ST_FIFO, it's very
	 * important that we don't reset the Tx FIFO since it might not
	 * be finished transmitting yet
	 */
	switch_bank(iobase, SET0);
	outb(UFR_RXTL|UFR_TXTL|UFR_RXF_RST|UFR_EN_FIFO, iobase+UFR);
	self->st_fifo.len = self->st_fifo.tail = self->st_fifo.head = 0;
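	/* The chip pushes one status/length pair per received frame into its
	 * status FIFO; st_fifo mirrors that queue in software and is drained
	 * by w83977af_dma_receive_complete(). */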

	/* Enable DMA */
	switch_bank(iobase, SET0);
#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
	hcr = inb(iobase+HCR);
	outb(hcr | HCR_EN_DMA, iobase+HCR);
	enable_dma(self->io.dma);
	spin_unlock_irqrestore(&self->lock, flags);
#else
	outb(inb(iobase+HCR) | HCR_EN_DMA, iobase+HCR);
#endif
	/* Restore set */
	outb(set, iobase+SSR);

	return 0;
}

/*
 * Function w83977af_dma_receive_complete (self)
 *
 *    Finished with receiving a frame
 *
 */
int w83977af_dma_receive_complete(struct w83977af_ir *self)
{
	struct sk_buff *skb;
	struct st_fifo *st_fifo;
	int len;
	int iobase;
	__u8 set;
	__u8 status;

	IRDA_DEBUG(4, "%s\n", __FUNCTION__ );

	st_fifo = &self->st_fifo;

	iobase = self->io.fir_base;

	/* Save current set */
	set = inb(iobase+SSR);

	/* Read status FIFO */
	switch_bank(iobase, SET5);
	while ((status = inb(iobase+FS_FO)) & FS_FO_FSFDR) {
		st_fifo->entries[st_fifo->tail].status = status;

		st_fifo->entries[st_fifo->tail].len  = inb(iobase+RFLFL);
		st_fifo->entries[st_fifo->tail].len |= inb(iobase+RFLFH) << 8;

		st_fifo->tail++;
		st_fifo->len++;
	}

	while (st_fifo->len) {
		/* Get first entry */
		status = st_fifo->entries[st_fifo->head].status;
		len    = st_fifo->entries[st_fifo->head].len;
		st_fifo->head++;
		st_fifo->len--;

		/* Check for errors */
		if (status & FS_FO_ERR_MSK) {
			if (status & FS_FO_LST_FR) {
				/* Add number of lost frames to stats */
				self->stats.rx_errors += len;
			} else {
				/* Skip frame */
				self->stats.rx_errors++;

				self->rx_buff.data += len;

				if (status & FS_FO_MX_LEX)
					self->stats.rx_length_errors++;

				if (status & FS_FO_PHY_ERR)
					self->stats.rx_frame_errors++;

				if (status & FS_FO_CRC_ERR)
					self->stats.rx_crc_errors++;
			}
			/* The errors below can be reported in both cases */
			if (status & FS_FO_RX_OV)
				self->stats.rx_fifo_errors++;

			if (status & FS_FO_FSF_OV)
				self->stats.rx_fifo_errors++;

		} else {
			/* Check if we have transferred all data to memory */
			switch_bank(iobase, SET0);
			if (inb(iobase+USR) & USR_RDR) {
#ifdef CONFIG_USE_INTERNAL_TIMER
				/* Put this entry back in fifo */
				st_fifo->head--;
				st_fifo->len++;
				st_fifo->entries[st_fifo->head].status = status;
				st_fifo->entries[st_fifo->head].len = len;

				/* Restore set register */
				outb(set, iobase+SSR);

				return FALSE; 	/* I'll be back! */
#else
				udelay(80); /* Should be enough!? */
#endif
			}

			skb = dev_alloc_skb(len+1);
			if (skb == NULL)  {
				printk(KERN_INFO
				       "%s(), memory squeeze, dropping frame.\n", __FUNCTION__);
				/* Restore set register */
				outb(set, iobase+SSR);

				return FALSE;
			}

			/*  Align to 20 bytes */
			skb_reserve(skb, 1);

			/* Copy frame without CRC */
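			/* MIR frames (576 kb/s and 1.152 Mb/s) carry a 16-bit
			 * CRC, FIR frames (4 Mb/s) a 32-bit CRC, hence the 2
			 * or 4 trailing bytes dropped here. */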
			if (self->io.speed < 4000000) {
				skb_put(skb, len-2);
				skb_copy_to_linear_data(skb,
							self->rx_buff.data,
							len - 2);
			} else {
				skb_put(skb, len-4);
				skb_copy_to_linear_data(skb,
							self->rx_buff.data,
							len - 4);
			}

			/* Move to next frame */
			self->rx_buff.data += len;
			self->stats.rx_packets++;

			skb->dev = self->netdev;
			skb_reset_mac_header(skb);
			skb->protocol = htons(ETH_P_IRDA);
			netif_rx(skb);
			self->netdev->last_rx = jiffies;
		}
	}
	/* Restore set register */
	outb(set, iobase+SSR);

	return TRUE;
}

/*
 * Function w83977af_pio_receive (self)
 *
 *    Receive all data in receiver FIFO
 *
 */
static void w83977af_pio_receive(struct w83977af_ir *self)
{
	__u8 byte = 0x00;
	int iobase;

	IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );

	IRDA_ASSERT(self != NULL, return;);

	iobase = self->io.fir_base;

	/*  Receive all characters in Rx FIFO */
	do {
		byte = inb(iobase+RBR);
		async_unwrap_char(self->netdev, &self->stats, &self->rx_buff,
				  byte);
	} while (inb(iobase+USR) & USR_RDR); /* Data available */
}

/*
 * Function w83977af_sir_interrupt (self, isr)
 *
 *    Handle SIR interrupt
 *
 */
static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
{
	int actual;
	__u8 new_icr = 0;
	__u8 set;
	int iobase;

	IRDA_DEBUG(4, "%s(), isr=%#x\n", __FUNCTION__ , isr);

	iobase = self->io.fir_base;
	/* Transmit FIFO low on data */
	if (isr & ISR_TXTH_I) {
		/* Write data left in transmit buffer */
		actual = w83977af_pio_write(self->io.fir_base,
					    self->tx_buff.data,
					    self->tx_buff.len,
					    self->io.fifo_size);

		self->tx_buff.data += actual;
		self->tx_buff.len  -= actual;

		self->io.direction = IO_XMIT;

		/* Check if finished */
		if (self->tx_buff.len > 0) {
			new_icr |= ICR_ETXTHI;
		} else {
			set = inb(iobase+SSR);
			switch_bank(iobase, SET0);
			outb(AUDR_SFEND, iobase+AUDR);
			outb(set, iobase+SSR);

			self->stats.tx_packets++;

			/* Feed me more packets */
			netif_wake_queue(self->netdev);
			new_icr |= ICR_ETBREI;
		}
	}
	/* Check if transmission has completed */
	if (isr & ISR_TXEMP_I) {
		/* Check if we need to change the speed */
		if (self->new_speed) {
			IRDA_DEBUG(2,
				   "%s(), Changing speed!\n", __FUNCTION__ );
			w83977af_change_speed(self, self->new_speed);
			self->new_speed = 0;
		}

		/* Turn around and get ready to receive some data */
		self->io.direction = IO_RECV;
		new_icr |= ICR_ERBRI;
	}

	/* Rx FIFO threshold or timeout */
	if (isr & ISR_RXTH_I) {
		w83977af_pio_receive(self);

		/* Keep receiving */
		new_icr |= ICR_ERBRI;
	}
	return new_icr;
}

/*
 * Function w83977af_fir_interrupt (self, isr)
 *
 *    Handle MIR/FIR interrupt
 *
 */
static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
{
	__u8 new_icr = 0;
	__u8 set;
	int iobase;

	iobase = self->io.fir_base;
	set = inb(iobase+SSR);

	/* End of frame detected in FIFO */
	if (isr & (ISR_FEND_I|ISR_FSF_I)) {
		if (w83977af_dma_receive_complete(self)) {

			/* Wait for next status FIFO interrupt */
			new_icr |= ICR_EFSFI;
		} else {
			/* DMA not finished yet */

			/* Set timer value, resolution 1 ms */
			switch_bank(iobase, SET4);
			outb(0x01, iobase+TMRL); /* 1 ms */
			outb(0x00, iobase+TMRH);

			/* Start timer */
			outb(IR_MSL_EN_TMR, iobase+IR_MSL);

			new_icr |= ICR_ETMRI;
		}
	}
	/* Timer finished */
	if (isr & ISR_TMR_I) {
		/* Disable timer */
		switch_bank(iobase, SET4);
		outb(0, iobase+IR_MSL);

		/* Clear timer event */
		/* switch_bank(iobase, SET0); */
		/* outb(ASCR_CTE, iobase+ASCR); */

		/* Check if this is a TX timer interrupt */
		if (self->io.direction == IO_XMIT) {
			w83977af_dma_write(self, iobase);

			new_icr |= ICR_EDMAI;
		} else {
			/* Check if DMA has now finished */
			w83977af_dma_receive_complete(self);

			new_icr |= ICR_EFSFI;
		}
	}
	/* Finished with DMA */
	if (isr & ISR_DMA_I) {
		w83977af_dma_xmit_complete(self);

		/* Check if there are more frames to be transmitted */
		/* if (irda_device_txqueue_empty(self)) { */

		/* Prepare for receive
		 *
		 * ** Netwinder Tx DMA likes that we do this anyway **
		 */
		w83977af_dma_receive(self);
		new_icr = ICR_EFSFI;
		/* } */
	}

	/* Restore set */
	outb(set, iobase+SSR);

	return new_icr;
}

/*
 * Function w83977af_interrupt (irq, dev_id)
 *
 *    An interrupt from the chip has arrived. Time to do some work
 *
 */
static irqreturn_t w83977af_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct w83977af_ir *self;
	__u8 set, icr, isr;
	int iobase;

	self = dev->priv;

	iobase = self->io.fir_base;

	/* Save current bank */
	set = inb(iobase+SSR);
	switch_bank(iobase, SET0);

	icr = inb(iobase+ICR);
	isr = inb(iobase+ISR) & icr; /* Mask out the uninteresting ones */

	outb(0, iobase+ICR); /* Disable interrupts */

	if (isr) {
		/* Dispatch interrupt handler for the current speed */
		if (self->io.speed > PIO_MAX_SPEED)
			icr = w83977af_fir_interrupt(self, isr);
		else
			icr = w83977af_sir_interrupt(self, isr);
	}

	outb(icr, iobase+ICR);    /* Restore (new) interrupts */
	outb(set, iobase+SSR);    /* Restore bank register */
	return IRQ_RETVAL(isr);
}

/*
 * Function w83977af_is_receiving (self)
 *
 *    Return TRUE if we are currently receiving a frame
 *
 */
static int w83977af_is_receiving(struct w83977af_ir *self)
{
	int status = FALSE;
	int iobase;
	__u8 set;

	IRDA_ASSERT(self != NULL, return FALSE;);

	if (self->io.speed > 115200) {
		iobase = self->io.fir_base;

		/* Check if rx FIFO is not empty */
		set = inb(iobase+SSR);
		switch_bank(iobase, SET2);
		if ((inb(iobase+RXFDTH) & 0x3f) != 0) {
			/* We are receiving something */
			status = TRUE;
		}
		outb(set, iobase+SSR);
	} else
		status = (self->rx_buff.state != OUTSIDE_FRAME);

	return status;
}

/*
 * Function w83977af_net_open (dev)
 *
 *    Start the device
 *
 */
static int w83977af_net_open(struct net_device *dev)
{
	struct w83977af_ir *self;
	int iobase;
	char hwname[32];
	__u8 set;

	IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );

	IRDA_ASSERT(dev != NULL, return -1;);
	self = (struct w83977af_ir *) dev->priv;

	IRDA_ASSERT(self != NULL, return 0;);

	iobase = self->io.fir_base;

	if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name,
			(void *) dev)) {
		return -EAGAIN;
	}
	/*
	 * Always allocate the DMA channel after the IRQ,
	 * and clean up on failure.
	 */
	if (request_dma(self->io.dma, dev->name)) {
		free_irq(self->io.irq, dev);
		return -EAGAIN;
	}

	/* Save current set */
	set = inb(iobase+SSR);

	/* Enable some interrupts so we can receive frames again */
	switch_bank(iobase, SET0);
	if (self->io.speed > 115200) {
		outb(ICR_EFSFI, iobase+ICR);
		w83977af_dma_receive(self);
	} else
		outb(ICR_ERBRI, iobase+ICR);

	/* Restore bank register */
	outb(set, iobase+SSR);

	/* Ready to play! */
	netif_start_queue(dev);

	/* Give self a hardware name */
	sprintf(hwname, "w83977af @ 0x%03x", self->io.fir_base);

	/*
	 * Open new IrLAP layer instance, now that everything should be
	 * initialized properly
	 */
	self->irlap = irlap_open(dev, &self->qos, hwname);

	return 0;
}

/*
 * Function w83977af_net_close (dev)
 *
 *    Stop the device
 *
 */
static int w83977af_net_close(struct net_device *dev)
{
	struct w83977af_ir *self;
	int iobase;
	__u8 set;

	IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );

	IRDA_ASSERT(dev != NULL, return -1;);

	self = (struct w83977af_ir *) dev->priv;

	IRDA_ASSERT(self != NULL, return 0;);

	iobase = self->io.fir_base;

	/* Stop device */
	netif_stop_queue(dev);

	/* Stop and remove instance of IrLAP */
	if (self->irlap)
		irlap_close(self->irlap);
	self->irlap = NULL;

	disable_dma(self->io.dma);

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable interrupts */
	switch_bank(iobase, SET0);
	outb(0, iobase+ICR);

	free_irq(self->io.irq, dev);
	free_dma(self->io.dma);

	/* Restore bank register */
	outb(set, iobase+SSR);

	return 0;
}

/*
 * Function w83977af_net_ioctl (dev, rq, cmd)
 *
 *    Process IOCTL commands for this device
 *
 */
static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct if_irda_req *irq = (struct if_irda_req *) rq;
	struct w83977af_ir *self;
	unsigned long flags;
	int ret = 0;

	IRDA_ASSERT(dev != NULL, return -1;);

	self = dev->priv;

	IRDA_ASSERT(self != NULL, return -1;);

	IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__ , dev->name, cmd);

	spin_lock_irqsave(&self->lock, flags);

	switch (cmd) {
	case SIOCSBANDWIDTH: /* Set bandwidth */
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		w83977af_change_speed(self, irq->ifr_baudrate);
		break;
	case SIOCSMEDIABUSY: /* Set media busy */
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		irda_device_set_media_busy(self->netdev, TRUE);
		break;
	case SIOCGRECEIVING: /* Check if we are receiving right now */
		irq->ifr_receiving = w83977af_is_receiving(self);
		break;
	default:
		ret = -EOPNOTSUPP;
	}
out:
	spin_unlock_irqrestore(&self->lock, flags);
	return ret;
}

static struct net_device_stats *w83977af_net_get_stats(struct net_device *dev)
{
	struct w83977af_ir *self = (struct w83977af_ir *) dev->priv;

	return &self->stats;
}

MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
MODULE_DESCRIPTION("Winbond W83977AF IrDA Device Driver");
MODULE_LICENSE("GPL");


module_param(qos_mtt_bits, int, 0);
MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
module_param_array(io, int, NULL, 0);
MODULE_PARM_DESC(io, "Base I/O addresses");
module_param_array(irq, int, NULL, 0);
MODULE_PARM_DESC(irq, "IRQ lines");

/*
 * Function init_module (void)
 *
 *    Module entry point; module_init() maps it to w83977af_init()
 *
 */
module_init(w83977af_init);

/*
 * Function cleanup_module (void)
 *
 *    Module exit point; module_exit() maps it to w83977af_cleanup()
 *
 */
module_exit(w83977af_cleanup);
