• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/net/irda/
1/********************************************************************
2 Filename:      via-ircc.c
3 Version:       1.0
4 Description:   Driver for the VIA VT8231/VT8233 IrDA chipsets
5 Author:        VIA Technologies,inc
6 Date  :	08/06/2003
7
8Copyright (c) 1998-2003 VIA Technologies, Inc.
9
10This program is free software; you can redistribute it and/or modify it under
11the terms of the GNU General Public License as published by the Free Software
12Foundation; either version 2, or (at your option) any later version.
13
14This program is distributed in the hope that it will be useful, but WITHOUT
15ANY WARRANTIES OR REPRESENTATIONS; without even the implied warranty of
16MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
17See the GNU General Public License for more details.
18
19You should have received a copy of the GNU General Public License along with
20this program; if not, write to the Free Software Foundation, Inc.,
2159 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22
23F01 Oct/02/02: Modify code for V0.11(move out back to back transfer)
24F02 Oct/28/02: Add SB device ID for 3147 and 3177.
25 Comment :
26       jul/09/2002 : only implement two kind of dongle currently.
27       Oct/02/2002 : work on VT8231 and VT8233 .
28       Aug/06/2003 : change driver format to pci driver .
29
302004-02-16: <sda@bdit.de>
31- Removed unneeded 'legacy' pci stuff.
32- Make sure SIR mode is set (hw_init()) before calling mode-dependant stuff.
33- On speed change from core, don't send SIR frame with new speed.
34  Use current speed and change speeds later.
35- Make module-param dongle_id actually work.
36- New dongle_id 17 (0x11): TDFS4500. Single-ended SIR only.
37  Tested with home-grown PCB on EPIA boards.
38- Code cleanup.
39
40 ********************************************************************/
41#include <linux/module.h>
42#include <linux/kernel.h>
43#include <linux/types.h>
44#include <linux/skbuff.h>
45#include <linux/netdevice.h>
46#include <linux/ioport.h>
47#include <linux/delay.h>
48#include <linux/init.h>
49#include <linux/rtnetlink.h>
50#include <linux/pci.h>
51#include <linux/dma-mapping.h>
52#include <linux/gfp.h>
53
54#include <asm/io.h>
55#include <asm/dma.h>
56#include <asm/byteorder.h>
57
58#include <linux/pm.h>
59
60#include <net/irda/wrapper.h>
61#include <net/irda/irda.h>
62#include <net/irda/irda_device.h>
63
64#include "via-ircc.h"
65
66#define VIA_MODULE_NAME "via-ircc"
67#define CHIP_IO_EXTENT 0x40
68
69static char *driver_name = VIA_MODULE_NAME;
70
71/* Module parameters */
72static int qos_mtt_bits = 0x07;	/* 1 ms or more */
73static int dongle_id = 0;	/* default: probe */
74
75/* We can't guess the type of connected dongle, user *must* supply it. */
76module_param(dongle_id, int, 0);
77
78/* Max 4 instances for now */
79static struct via_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL };
80
81/* Some prototypes */
82static int via_ircc_open(int i, chipio_t * info, unsigned int id);
83static int via_ircc_close(struct via_ircc_cb *self);
84static int via_ircc_dma_receive(struct via_ircc_cb *self);
85static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
86					 int iobase);
87static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
88						struct net_device *dev);
89static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
90						struct net_device *dev);
91static void via_hw_init(struct via_ircc_cb *self);
92static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 baud);
93static irqreturn_t via_ircc_interrupt(int irq, void *dev_id);
94static int via_ircc_is_receiving(struct via_ircc_cb *self);
95static int via_ircc_read_dongle_id(int iobase);
96
97static int via_ircc_net_open(struct net_device *dev);
98static int via_ircc_net_close(struct net_device *dev);
99static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
100			      int cmd);
101static void via_ircc_change_dongle_speed(int iobase, int speed,
102					 int dongle_id);
103static int RxTimerHandler(struct via_ircc_cb *self, int iobase);
104static void hwreset(struct via_ircc_cb *self);
105static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase);
106static int upload_rxdata(struct via_ircc_cb *self, int iobase);
107static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_device_id *id);
108static void __devexit via_remove_one (struct pci_dev *pdev);
109
/*
 * Pace hardware accesses by issuing dummy reads of I/O port 0x80 (the
 * traditional POST-code port). Each inb() costs roughly a microsecond
 * on legacy buses, so @udelay approximates a microsecond busy-wait.
 */
static void iodelay(int udelay)
{
	int count;

	for (count = udelay; count > 0; count--)
		(void) inb(0x80);
}
119
/*
 * PCI devices this driver binds to - all VIA south bridges whose
 * LPC/ISA-bridge function carries the IrCC IrDA controller.
 * The last initializer (driver_data) is simply the table index.
 */
static DEFINE_PCI_DEVICE_TABLE(via_pci_tbl) = {
	{ PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID,0,0,0 },
	{ PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID,0,0,1 },
	{ PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID,0,0,2 },
	{ PCI_VENDOR_ID_VIA, 0x3147, PCI_ANY_ID, PCI_ANY_ID,0,0,3 },
	{ PCI_VENDOR_ID_VIA, 0x3177, PCI_ANY_ID, PCI_ANY_ID,0,0,4 },
	{ 0, }
};
128
129MODULE_DEVICE_TABLE(pci,via_pci_tbl);
130
131
/* PCI driver glue: probe/remove entry points and the ID table above. */
static struct pci_driver via_driver = {
	.name		= VIA_MODULE_NAME,
	.id_table	= via_pci_tbl,
	.probe		= via_init_one,
	.remove		= __devexit_p(via_remove_one),
};
138
139
140/*
141 * Function via_ircc_init ()
142 *
143 *    Initialize chip. Just find out chip type and resource.
144 */
145static int __init via_ircc_init(void)
146{
147	int rc;
148
149	IRDA_DEBUG(3, "%s()\n", __func__);
150
151	rc = pci_register_driver(&via_driver);
152	if (rc < 0) {
153		IRDA_DEBUG(0, "%s(): error rc = %d, returning  -ENODEV...\n",
154			   __func__, rc);
155		return -ENODEV;
156	}
157	return 0;
158}
159
160static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_device_id *id)
161{
162	int rc;
163        u8 temp,oldPCI_40,oldPCI_44,bTmp,bTmp1;
164	u16 Chipset,FirDRQ1,FirDRQ0,FirIRQ,FirIOBase;
165	chipio_t info;
166
167	IRDA_DEBUG(2, "%s(): Device ID=(0X%X)\n", __func__, id->device);
168
169	rc = pci_enable_device (pcidev);
170	if (rc) {
171		IRDA_DEBUG(0, "%s(): error rc = %d\n", __func__, rc);
172		return -ENODEV;
173	}
174
175	// South Bridge exist
176        if ( ReadLPCReg(0x20) != 0x3C )
177		Chipset=0x3096;
178	else
179		Chipset=0x3076;
180
181	if (Chipset==0x3076) {
182		IRDA_DEBUG(2, "%s(): Chipset = 3076\n", __func__);
183
184		WriteLPCReg(7,0x0c );
185		temp=ReadLPCReg(0x30);//check if BIOS Enable Fir
186		if((temp&0x01)==1) {   // BIOS close or no FIR
187			WriteLPCReg(0x1d, 0x82 );
188			WriteLPCReg(0x23,0x18);
189			temp=ReadLPCReg(0xF0);
190			if((temp&0x01)==0) {
191				temp=(ReadLPCReg(0x74)&0x03);    //DMA
192				FirDRQ0=temp + 4;
193				temp=(ReadLPCReg(0x74)&0x0C) >> 2;
194				FirDRQ1=temp + 4;
195			} else {
196				temp=(ReadLPCReg(0x74)&0x0C) >> 2;    //DMA
197				FirDRQ0=temp + 4;
198				FirDRQ1=FirDRQ0;
199			}
200			FirIRQ=(ReadLPCReg(0x70)&0x0f);		//IRQ
201			FirIOBase=ReadLPCReg(0x60 ) << 8;	//IO Space :high byte
202			FirIOBase=FirIOBase| ReadLPCReg(0x61) ;	//low byte
203			FirIOBase=FirIOBase  ;
204			info.fir_base=FirIOBase;
205			info.irq=FirIRQ;
206			info.dma=FirDRQ1;
207			info.dma2=FirDRQ0;
208			pci_read_config_byte(pcidev,0x40,&bTmp);
209			pci_write_config_byte(pcidev,0x40,((bTmp | 0x08) & 0xfe));
210			pci_read_config_byte(pcidev,0x42,&bTmp);
211			pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0));
212			pci_write_config_byte(pcidev,0x5a,0xc0);
213			WriteLPCReg(0x28, 0x70 );
214			if (via_ircc_open(0, &info,0x3076) == 0)
215				rc=0;
216		} else
217			rc = -ENODEV; //IR not turn on
218	} else { //Not VT1211
219		IRDA_DEBUG(2, "%s(): Chipset = 3096\n", __func__);
220
221		pci_read_config_byte(pcidev,0x67,&bTmp);//check if BIOS Enable Fir
222		if((bTmp&0x01)==1) {  // BIOS enable FIR
223			//Enable Double DMA clock
224			pci_read_config_byte(pcidev,0x42,&oldPCI_40);
225			pci_write_config_byte(pcidev,0x42,oldPCI_40 | 0x80);
226			pci_read_config_byte(pcidev,0x40,&oldPCI_40);
227			pci_write_config_byte(pcidev,0x40,oldPCI_40 & 0xf7);
228			pci_read_config_byte(pcidev,0x44,&oldPCI_44);
229			pci_write_config_byte(pcidev,0x44,0x4e);
230  //---------- read configuration from Function0 of south bridge
231			if((bTmp&0x02)==0) {
232				pci_read_config_byte(pcidev,0x44,&bTmp1); //DMA
233				FirDRQ0 = (bTmp1 & 0x30) >> 4;
234				pci_read_config_byte(pcidev,0x44,&bTmp1);
235				FirDRQ1 = (bTmp1 & 0xc0) >> 6;
236			} else  {
237				pci_read_config_byte(pcidev,0x44,&bTmp1);    //DMA
238				FirDRQ0 = (bTmp1 & 0x30) >> 4 ;
239				FirDRQ1=0;
240			}
241			pci_read_config_byte(pcidev,0x47,&bTmp1);  //IRQ
242			FirIRQ = bTmp1 & 0x0f;
243
244			pci_read_config_byte(pcidev,0x69,&bTmp);
245			FirIOBase = bTmp << 8;//hight byte
246			pci_read_config_byte(pcidev,0x68,&bTmp);
247			FirIOBase = (FirIOBase | bTmp ) & 0xfff0;
248  //-------------------------
249			info.fir_base=FirIOBase;
250			info.irq=FirIRQ;
251			info.dma=FirDRQ1;
252			info.dma2=FirDRQ0;
253			if (via_ircc_open(0, &info,0x3096) == 0)
254				rc=0;
255		} else
256			rc = -ENODEV; //IR not turn on !!!!!
257	}//Not VT1211
258
259	IRDA_DEBUG(2, "%s(): End - rc = %d\n", __func__, rc);
260	return rc;
261}
262
263/*
264 * Function via_ircc_clean ()
265 *
266 *    Close all configured chips
267 *
268 */
269static void via_ircc_clean(void)
270{
271	int i;
272
273	IRDA_DEBUG(3, "%s()\n", __func__);
274
275	for (i=0; i < ARRAY_SIZE(dev_self); i++) {
276		if (dev_self[i])
277			via_ircc_close(dev_self[i]);
278	}
279}
280
/*
 * PCI remove: tear down driver instances, then disable the device.
 * NOTE(review): via_ircc_clean() closes *all* instances, not only the
 * one bound to @pdev - acceptable while only instance 0 is ever opened.
 */
static void __devexit via_remove_one (struct pci_dev *pdev)
{
	IRDA_DEBUG(3, "%s()\n", __func__);

	via_ircc_clean();

	pci_disable_device(pdev);
}
289
/*
 * Module exit: close all instances first, then unregister the PCI
 * driver (which also runs via_remove_one for still-bound devices).
 */
static void __exit via_ircc_cleanup(void)
{
	IRDA_DEBUG(3, "%s()\n", __func__);

	via_ircc_clean();

	/* Cleanup all instances of the driver */
	pci_unregister_driver (&via_driver);
}
299
300static const struct net_device_ops via_ircc_sir_ops = {
301	.ndo_start_xmit = via_ircc_hard_xmit_sir,
302	.ndo_open = via_ircc_net_open,
303	.ndo_stop = via_ircc_net_close,
304	.ndo_do_ioctl = via_ircc_net_ioctl,
305};
306static const struct net_device_ops via_ircc_fir_ops = {
307	.ndo_start_xmit = via_ircc_hard_xmit_fir,
308	.ndo_open = via_ircc_net_open,
309	.ndo_stop = via_ircc_net_close,
310	.ndo_do_ioctl = via_ircc_net_ioctl,
311};
312
/*
 * Function via_ircc_open (i, info, id)
 *
 *    Open driver instance number @i with the chip resources in @info
 *    (I/O base, IRQ, DMA channels) for chip type @id (0x3076/0x3096):
 *    allocate the netdev and coherent DMA buffers, register the
 *    network device and bring the hardware up in SIR mode at 9600 bps.
 *
 *    Returns 0 on success or a negative errno; on failure all
 *    partially acquired resources are released via the goto chain.
 */
static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
{
	struct net_device *dev;
	struct via_ircc_cb *self;
	int err;

	IRDA_DEBUG(3, "%s()\n", __func__);

	if (i >= ARRAY_SIZE(dev_self))
		return -ENOMEM;

	/* Allocate new instance of the driver */
	dev = alloc_irdadev(sizeof(struct via_ircc_cb));
	if (dev == NULL)
		return -ENOMEM;

	self = netdev_priv(dev);
	self->netdev = dev;
	spin_lock_init(&self->lock);

	/* Need to store self somewhere */
	dev_self[i] = self;
	self->index = i;
	/* Initialize Resource */
	self->io.cfg_base = info->cfg_base;
	self->io.fir_base = info->fir_base;
	self->io.irq = info->irq;
	self->io.fir_ext = CHIP_IO_EXTENT;
	self->io.dma = info->dma;
	self->io.dma2 = info->dma2;
	self->io.fifo_size = 32;
	self->chip_id = id;
	self->st_fifo.len = 0;
	self->RxDataReady = 0;

	/* Reserve the ioports that we need */
	if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) {
		IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
			   __func__, self->io.fir_base);
		err = -ENODEV;
		goto err_out1;
	}

	/* Initialize QoS for this device */
	irda_init_max_qos_capabilies(&self->qos);

	/* Check if user has supplied the dongle id or not */
	/* NOTE(review): this writes the default back into the module
	 * parameter, so a probed id is reused by later instances. */
	if (!dongle_id)
		dongle_id = via_ircc_read_dongle_id(self->io.fir_base);
	self->io.dongle_id = dongle_id;

	/* The only value we must override is the baudrate. */
	/* Maximum speeds and capabilities are dongle-dependent. */
	switch( self->io.dongle_id ){
	case 0x0d:
		self->qos.baud_rate.bits =
		    IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200 |
		    IR_576000 | IR_1152000 | (IR_4000000 << 8);
		break;
	default:
		/* Unknown dongles are limited to SIR rates. */
		self->qos.baud_rate.bits =
		    IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200;
		break;
	}

	/* Following was used for testing:
	 *
	 *   self->qos.baud_rate.bits = IR_9600;
	 *
	 * It is no good, as it prohibits (error-prone) speed-changes.
	 */

	self->qos.min_turn_time.bits = qos_mtt_bits;
	irda_qos_bits_to_value(&self->qos);

	/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
	self->rx_buff.truesize = 14384 + 2048;
	self->tx_buff.truesize = 14384 + 2048;

	/* Allocate memory if needed */
	self->rx_buff.head =
		dma_alloc_coherent(NULL, self->rx_buff.truesize,
				   &self->rx_buff_dma, GFP_KERNEL);
	if (self->rx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out2;
	}
	memset(self->rx_buff.head, 0, self->rx_buff.truesize);

	self->tx_buff.head =
		dma_alloc_coherent(NULL, self->tx_buff.truesize,
				   &self->tx_buff_dma, GFP_KERNEL);
	if (self->tx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out3;
	}
	memset(self->tx_buff.head, 0, self->tx_buff.truesize);

	self->rx_buff.in_frame = FALSE;
	self->rx_buff.state = OUTSIDE_FRAME;
	self->tx_buff.data = self->tx_buff.head;
	self->rx_buff.data = self->rx_buff.head;

	/* Reset Tx queue info */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;

	/* Override the network functions we need to use.
	 * Start in SIR mode; via_ircc_change_speed() swaps to the FIR
	 * ops when a speed above 115200 is selected. */
	dev->netdev_ops = &via_ircc_sir_ops;

	err = register_netdev(dev);
	if (err)
		goto err_out4;

	IRDA_MESSAGE("IrDA: Registered device %s (via-ircc)\n", dev->name);

	/* Initialise the hardware..
	*/
	self->io.speed = 9600;
	via_hw_init(self);
	return 0;
 err_out4:
	dma_free_coherent(NULL, self->tx_buff.truesize,
			  self->tx_buff.head, self->tx_buff_dma);
 err_out3:
	dma_free_coherent(NULL, self->rx_buff.truesize,
			  self->rx_buff.head, self->rx_buff_dma);
 err_out2:
	release_region(self->io.fir_base, self->io.fir_ext);
 err_out1:
	free_netdev(dev);
	dev_self[i] = NULL;
	return err;
}
453
/*
 * Function via_ircc_close (self)
 *
 *    Close driver instance
 *
 *    Resets the chip, unregisters the netdev, then releases the I/O
 *    region and the coherent DMA buffers allocated in via_ircc_open().
 *    Returns 0 (-1 only when @self is NULL, via IRDA_ASSERT).
 */
static int via_ircc_close(struct via_ircc_cb *self)
{
	int iobase;

	IRDA_DEBUG(3, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return -1;);

	iobase = self->io.fir_base;

	ResetChip(iobase, 5);	//hardware reset.
	/* Remove netdevice */
	unregister_netdev(self->netdev);

	/* Release the PORT that this driver is using */
	IRDA_DEBUG(2, "%s(), Releasing Region %03x\n",
		   __func__, self->io.fir_base);
	release_region(self->io.fir_base, self->io.fir_ext);
	if (self->tx_buff.head)
		dma_free_coherent(NULL, self->tx_buff.truesize,
				  self->tx_buff.head, self->tx_buff_dma);
	if (self->rx_buff.head)
		dma_free_coherent(NULL, self->rx_buff.truesize,
				  self->rx_buff.head, self->rx_buff_dma);
	dev_self[self->index] = NULL;

	/* Free the netdev last: @self lives inside its private area. */
	free_netdev(self->netdev);

	return 0;
}
490
/*
 * Function via_hw_init(self)
 *
 *    Returns non-negative on success.
 *
 *    Bring the controller into a known state: set max Rx packet size,
 *    mask the FIFO interrupts, reset the chip, then configure SIR mode
 *    at 9600 baud and inform the dongle.
 *
 * Formerly via_ircc_setup
 */
static void via_hw_init(struct via_ircc_cb *self)
{
	int iobase = self->io.fir_base;

	IRDA_DEBUG(3, "%s()\n", __func__);

	SetMaxRxPacketSize(iobase, 0x0fff);	//set to max:4095
	// FIFO Init
	EnRXFIFOReadyInt(iobase, OFF);
	EnRXFIFOHalfLevelInt(iobase, OFF);
	EnTXFIFOHalfLevelInt(iobase, OFF);
	EnTXFIFOUnderrunEOMInt(iobase, ON);
	EnTXFIFOReadyInt(iobase, OFF);
	InvertTX(iobase, OFF);
	InvertRX(iobase, OFF);

	if (ReadLPCReg(0x20) == 0x3c)
		WriteLPCReg(0xF0, 0);	// for VT1211
	/* Int Init */
	EnRXSpecInt(iobase, ON);

	/* The following is basically hwreset */
	/* If this is the case, why not just call hwreset() ? Jean II */
	ResetChip(iobase, 5);
	EnableDMA(iobase, OFF);
	EnableTX(iobase, OFF);
	EnableRX(iobase, OFF);
	EnRXDMA(iobase, OFF);
	EnTXDMA(iobase, OFF);
	RXStart(iobase, OFF);
	TXStart(iobase, OFF);
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	SetSIR(iobase, ON);	/* mode-dependent code below relies on SIR */
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x00);	/* keep chip off while configuring */
	SetBaudRate(iobase, 9600);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);

	self->io.speed = 9600;
	self->st_fifo.len = 0;

	via_ircc_change_dongle_speed(iobase, self->io.speed,
				     self->io.dongle_id);

	WriteReg(iobase, I_ST_CT_0, 0x80);	/* re-enable the chip */
}
548
/*
 * Function via_ircc_read_dongle_id (iobase)
 *
 *    Hardware probing of the attached dongle is not implemented; warn
 *    the user and fall back to id 9 (IBM31T1100 / Temic TFDS6x00).
 */
static int via_ircc_read_dongle_id(int iobase)
{
	IRDA_ERROR("via-ircc: dongle probing not supported, please specify dongle_id module parameter.\n");
	return 9;	/* Default to IBM */
}
560
/*
 * Function via_ircc_change_dongle_speed (iobase, speed, dongle_id)
 *    Change speed of the attached dongle.
 *    Only a few dongle types are implemented; the current controller
 *    mode (IsSIROn/IsMIROn/IsFIROn) - not @speed - selects the pulse
 *    sequence sent to the dongle.
 */
static void via_ircc_change_dongle_speed(int iobase, int speed,
					 int dongle_id)
{
	u8 mode = 0;

	/* speed is unused, as we use IsSIROn()/IsMIROn() */
	speed = speed;

	IRDA_DEBUG(1, "%s(): change_dongle_speed to %d for 0x%x, %d\n",
		   __func__, speed, iobase, dongle_id);

	switch (dongle_id) {

		/* Note: The dongle_id's listed here are derived from
		 * nsc-ircc.c */

	case 0x08:		/* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
		UseOneRX(iobase, ON);	// use one RX pin   RX1,RX2
		InvertTX(iobase, OFF);
		InvertRX(iobase, OFF);

		EnRX2(iobase, ON);	//sir to rx2
		EnGPIOtoRX2(iobase, OFF);

		if (IsSIROn(iobase)) {	//sir
			// Mode select Off
			SlowIRRXLowActive(iobase, ON);
			udelay(1000);
			SlowIRRXLowActive(iobase, OFF);
		} else {
			if (IsMIROn(iobase)) {	//mir
				// Mode select On
				SlowIRRXLowActive(iobase, OFF);
				udelay(20);
			} else {	// fir
				if (IsFIROn(iobase)) {	//fir
					// Mode select On
					SlowIRRXLowActive(iobase, OFF);
					udelay(20);
				}
			}
		}
		break;

	case 0x09:		/* IBM31T1100 or Temic TFDS6000/TFDS6500 */
		UseOneRX(iobase, ON);	//use ONE RX....RX1
		InvertTX(iobase, OFF);
		InvertRX(iobase, OFF);	// invert RX pin

		EnRX2(iobase, ON);
		EnGPIOtoRX2(iobase, OFF);
		if (IsSIROn(iobase)) {	//sir
			// Mode select On
			SlowIRRXLowActive(iobase, ON);
			udelay(20);
			// Mode select Off
			SlowIRRXLowActive(iobase, OFF);
		}
		if (IsMIROn(iobase)) {	//mir
			// Mode select On
			SlowIRRXLowActive(iobase, OFF);
			udelay(20);
			// Mode select Off
			SlowIRRXLowActive(iobase, ON);
		} else {	// fir
			if (IsFIROn(iobase)) {	//fir
				// Mode select On
				SlowIRRXLowActive(iobase, OFF);
				// TX On
				WriteTX(iobase, ON);
				udelay(20);
				// Mode select OFF
				SlowIRRXLowActive(iobase, ON);
				udelay(20);
				// TX Off
				WriteTX(iobase, OFF);
			}
		}
		break;

	case 0x0d:
		UseOneRX(iobase, OFF);	// use two RX pin   RX1,RX2
		InvertTX(iobase, OFF);
		InvertRX(iobase, OFF);
		SlowIRRXLowActive(iobase, OFF);
		if (IsSIROn(iobase)) {	//sir
			EnGPIOtoRX2(iobase, OFF);
			WriteGIO(iobase, OFF);
			EnRX2(iobase, OFF);	//sir to rx2
		} else {	// fir mir
			EnGPIOtoRX2(iobase, OFF);
			WriteGIO(iobase, OFF);
			EnRX2(iobase, OFF);	//fir to rx
		}
		break;

	case 0x11:		/* Temic TFDS4500 */

		IRDA_DEBUG(2, "%s: Temic TFDS4500: One RX pin, TX normal, RX inverted.\n", __func__);

		UseOneRX(iobase, ON);	//use ONE RX....RX1
		InvertTX(iobase, OFF);
		InvertRX(iobase, ON);	// invert RX pin

		EnRX2(iobase, ON);	//sir to rx2
		EnGPIOtoRX2(iobase, OFF);

		if( IsSIROn(iobase) ){	//sir

			// Mode select On
			SlowIRRXLowActive(iobase, ON);
			udelay(20);
			// Mode select Off
			SlowIRRXLowActive(iobase, OFF);

		} else{
			/* This dongle is single-ended SIR only. */
			IRDA_DEBUG(0, "%s: Warning: TFDS4500 not running in SIR mode !\n", __func__);
		}
		break;

	case 0x0ff:		/* Vishay */
		if (IsSIROn(iobase))
			mode = 0;
		else if (IsMIROn(iobase))
			mode = 1;
		else if (IsFIROn(iobase))
			mode = 2;
		else if (IsVFIROn(iobase))
			mode = 5;	//VFIR-16
		SI_SetMode(iobase, mode);
		break;

	default:
		IRDA_ERROR("%s: Error: dongle_id %d unsupported !\n",
			   __func__, dongle_id);
	}
}
703
/*
 * Function via_ircc_change_speed (self, baud)
 *
 *    Change the speed of the device: select the controller mode
 *    (SIR/MIR/FIR/VFIR), program the baud divisor, notify the dongle
 *    and install the matching netdev_ops (SIR vs. FIR xmit handler).
 *
 */
static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed)
{
	struct net_device *dev = self->netdev;
	u16 iobase;
	u8 value = 0, bTmp;

	iobase = self->io.fir_base;
	/* Update accounting for new speed */
	self->io.speed = speed;
	IRDA_DEBUG(1, "%s: change_speed to %d bps.\n", __func__, speed);

	/* Disable the chip while reconfiguring. */
	WriteReg(iobase, I_ST_CT_0, 0x0);

	/* Controller mode selection */
	switch (speed) {
	case 2400:
	case 9600:
	case 19200:
	case 38400:
	case 57600:
	case 115200:
		/* SIR: value is the divisor from the 115200 base rate */
		value = (115200/speed)-1;
		SetSIR(iobase, ON);
		CRC16(iobase, ON);
		break;
	case 576000:
		value = 0;
		SetSIR(iobase, ON);
		CRC16(iobase, ON);
		break;
	case 1152000:
		value = 0;
		SetMIR(iobase, ON);
		break;
	case 4000000:
		value = 0;
		SetFIR(iobase, ON);
		SetPulseWidth(iobase, 0);
		SetSendPreambleCount(iobase, 14);
		CRC16(iobase, OFF);	/* FIR uses CRC-32 */
		EnTXCRC(iobase, ON);
		break;
	case 16000000:
		value = 0;
		SetVFIR(iobase, ON);
		break;
	default:
		value = 0;
		break;
	}

	/* Set baudrate to 0x19[2..7] */
	bTmp = (ReadReg(iobase, I_CF_H_1) & 0x03);
	bTmp |= value << 2;
	WriteReg(iobase, I_CF_H_1, bTmp);

	/* Some dongles may need to be informed about speed changes. */
	via_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id);

	/* Set FIFO size to 64 */
	SetFIFO(iobase, 64);

	/* Enable IR */
	WriteReg(iobase, I_ST_CT_0, 0x80);

	// EnTXFIFOHalfLevelInt(iobase,ON);

	/* Enable some interrupts so we can receive frames */
	//EnAllInt(iobase,ON);

	if (IsSIROn(iobase)) {
		SIRFilter(iobase, ON);
		SIRRecvAny(iobase, ON);
	} else {
		SIRFilter(iobase, OFF);
		SIRRecvAny(iobase, OFF);
	}

	if (speed > 115200) {
		/* Install FIR xmit handler */
		dev->netdev_ops = &via_ircc_fir_ops;
		via_ircc_dma_receive(self);
	} else {
		/* Install SIR xmit handler */
		dev->netdev_ops = &via_ircc_sir_ops;
	}
	netif_wake_queue(dev);
}
798
/*
 * Function via_ircc_hard_xmit (skb, dev)
 *
 *    Transmit the frame!
 *
 *    SIR-mode transmit path: async-wrap the skb into the Tx DMA buffer
 *    and kick off a single DMA transfer. If the stack requested a new
 *    speed, an empty frame triggers the change immediately; otherwise
 *    the frame is sent at the *current* speed and the change is
 *    deferred to via_ircc_dma_xmit_complete() (see changelog note).
 */
static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
						struct net_device *dev)
{
	struct via_ircc_cb *self;
	unsigned long flags;
	u16 iobase;
	__u32 speed;

	self = netdev_priv(dev);
	IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
	iobase = self->io.fir_base;

	netif_stop_queue(dev);
	/* Check if we need to change the speed */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		/* Check for empty frame */
		if (!skb->len) {
			via_ircc_change_speed(self, speed);
			dev->trans_start = jiffies;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		} else
			self->new_speed = speed;
	}
	/* Re-initialize the chip for SIR before every frame. */
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	SetSIR(iobase, ON);
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x00);

	spin_lock_irqsave(&self->lock, flags);
	self->tx_buff.data = self->tx_buff.head;
	/* Wrap the frame with SIR start/stop flags and byte stuffing. */
	self->tx_buff.len =
	    async_wrap_skb(skb, self->tx_buff.data,
			   self->tx_buff.truesize);

	dev->stats.tx_bytes += self->tx_buff.len;
	/* Send this frame with old speed */
	SetBaudRate(iobase, self->io.speed);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x80);

	EnableTX(iobase, ON);
	EnableRX(iobase, OFF);

	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);

	EnAllInt(iobase, ON);
	EnTXDMA(iobase, ON);
	EnRXDMA(iobase, OFF);

	irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
		       DMA_TX_MODE);

	SetSendByte(iobase, self->tx_buff.len);
	RXStart(iobase, OFF);
	TXStart(iobase, ON);

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&self->lock, flags);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
876
/*
 * FIR-mode transmit path: queue the frame into the Tx FIFO ring and
 * start the DMA transfer. Speed-change requests are handled the same
 * way as in the SIR path (empty frame = change now, else deferred).
 */
static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
						struct net_device *dev)
{
	struct via_ircc_cb *self;
	u16 iobase;
	__u32 speed;
	unsigned long flags;

	self = netdev_priv(dev);
	iobase = self->io.fir_base;

	/* Drop (accept but ignore) while a receive window is pending. */
	if (self->st_fifo.len)
		return NETDEV_TX_OK;
	/* Settle time before touching the chip; the 3076 is paced with
	 * port-0x80 reads instead of udelay(). */
	if (self->chip_id == 0x3076)
		iodelay(1500);
	else
		udelay(1500);
	netif_stop_queue(dev);
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		if (!skb->len) {
			via_ircc_change_speed(self, speed);
			dev->trans_start = jiffies;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		} else
			self->new_speed = speed;
	}
	spin_lock_irqsave(&self->lock, flags);
	/* Append the frame to the Tx FIFO ring inside tx_buff. */
	self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
	self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;

	self->tx_fifo.tail += skb->len;
	dev->stats.tx_bytes += skb->len;
	skb_copy_from_linear_data(skb,
		      self->tx_fifo.queue[self->tx_fifo.free].start, skb->len);
	self->tx_fifo.len++;
	self->tx_fifo.free++;
//F01   if (self->tx_fifo.len == 1) {
	via_ircc_dma_xmit(self, iobase);
//F01   }
//F01   if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) netif_wake_queue(self->netdev);
	dev->trans_start = jiffies;
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&self->lock, flags);
	return NETDEV_TX_OK;

}
925
/*
 * Start a DMA transmit of the Tx FIFO entry at tx_fifo.ptr. Disables
 * reception, resets the chip state machines, programs the DMA channel
 * with the entry's offset inside tx_buff and starts transmission.
 * Always returns 0.
 */
static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
{
	EnTXDMA(iobase, OFF);
	self->io.direction = IO_XMIT;
	EnPhys(iobase, ON);
	EnableTX(iobase, ON);
	EnableRX(iobase, OFF);
	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);
	EnAllInt(iobase, ON);
	EnTXDMA(iobase, ON);
	EnRXDMA(iobase, OFF);
	/* Translate the entry's kernel address into a bus address by
	 * adding its offset within tx_buff to the buffer's DMA handle. */
	irda_setup_dma(self->io.dma,
		       ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
			self->tx_buff.head) + self->tx_buff_dma,
		       self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE);
	IRDA_DEBUG(1, "%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n",
		   __func__, self->tx_fifo.ptr,
		   self->tx_fifo.queue[self->tx_fifo.ptr].len,
		   self->tx_fifo.len);

	SetSendByte(iobase, self->tx_fifo.queue[self->tx_fifo.ptr].len);
	RXStart(iobase, OFF);
	TXStart(iobase, ON);
	return 0;

}
956
/*
 * Function via_ircc_dma_xmit_complete (self)
 *
 *    The transfer of a frame is finished. This function will only be
 *    called by the interrupt handler.
 *
 *    Updates Tx statistics, resets the chip after an underrun, applies
 *    a deferred speed change, resets the Tx FIFO bookkeeping and wakes
 *    the netdev queue. Returns TRUE (back-to-back transmission is
 *    disabled - see the F01 comments).
 */
static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
{
	int iobase;
	int ret = TRUE;
	u8 Tx_status;

	IRDA_DEBUG(3, "%s()\n", __func__);

	iobase = self->io.fir_base;
	/* Disable DMA */
//      DisableDmaChannel(self->io.dma);
	/* Check for underrrun! */
	/* Clear bit, by writing 1 into it */
	Tx_status = GetTXStatus(iobase);
	if (Tx_status & 0x08) {
		/* Tx FIFO underran: count it and recover via full reset. */
		self->netdev->stats.tx_errors++;
		self->netdev->stats.tx_fifo_errors++;
		hwreset(self);
// how to clear underrrun ?
	} else {
		self->netdev->stats.tx_packets++;
		ResetChip(iobase, 3);
		ResetChip(iobase, 4);
	}
	/* Check if we need to change the speed */
	if (self->new_speed) {
		via_ircc_change_speed(self, self->new_speed);
		self->new_speed = 0;
	}

	/* Finished with this frame, so prepare for next */
	if (IsFIROn(iobase)) {
		if (self->tx_fifo.len) {
			self->tx_fifo.len--;
			self->tx_fifo.ptr++;
		}
	}
	IRDA_DEBUG(1,
		   "%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n",
		   __func__,
		   self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free);
/* F01_S
	// Any frames to be sent back-to-back?
	if (self->tx_fifo.len) {
		// Not finished yet!
	  	via_ircc_dma_xmit(self, iobase);
		ret = FALSE;
	} else {
F01_E*/
	// Reset Tx FIFO info
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;
//F01   }

	// Make sure we have room for more frames
//F01   if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) {
	// Not busy transmitting anymore
	// Tell the network layer, that we can accept more frames
	netif_wake_queue(self->netdev);
//F01   }
	return ret;
}
1026
/*
 * Function via_ircc_dma_receive (self)
 *
 *    Set configuration for receiving a frame: reset the Tx/status FIFO
 *    bookkeeping, reset the chip, arm the Rx DMA channel over the whole
 *    rx_buff and start reception. Always returns 0.
 *
 */
static int via_ircc_dma_receive(struct via_ircc_cb *self)
{
	int iobase;

	iobase = self->io.fir_base;

	IRDA_DEBUG(3, "%s()\n", __func__);

	/* Clear Tx-side bookkeeping; Rx and Tx never run concurrently. */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;
	self->RxDataReady = 0;
	self->io.direction = IO_RECV;
	self->rx_buff.data = self->rx_buff.head;
	self->st_fifo.len = self->st_fifo.pending_bytes = 0;
	self->st_fifo.tail = self->st_fifo.head = 0;

	EnPhys(iobase, ON);
	EnableTX(iobase, OFF);
	EnableRX(iobase, ON);

	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);

	EnAllInt(iobase, ON);
	EnTXDMA(iobase, OFF);
	EnRXDMA(iobase, ON);
	irda_setup_dma(self->io.dma2, self->rx_buff_dma,
		  self->rx_buff.truesize, DMA_RX_MODE);
	TXStart(iobase, OFF);
	RXStart(iobase, ON);

	return 0;
}
1069
1070/*
1071 * Function via_ircc_dma_receive_complete (self)
1072 *
1073 *    Controller Finished with receiving frames,
1074 *    and this routine is call by ISR
1075 *
1076 */
1077static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
1078					 int iobase)
1079{
1080	struct st_fifo *st_fifo;
1081	struct sk_buff *skb;
1082	int len, i;
1083	u8 status = 0;
1084
1085	iobase = self->io.fir_base;
1086	st_fifo = &self->st_fifo;
1087
1088	if (self->io.speed < 4000000) {	//Speed below FIR
1089		len = GetRecvByte(iobase, self);
1090		skb = dev_alloc_skb(len + 1);
1091		if (skb == NULL)
1092			return FALSE;
1093		// Make sure IP header gets aligned
1094		skb_reserve(skb, 1);
1095		skb_put(skb, len - 2);
1096		if (self->chip_id == 0x3076) {
1097			for (i = 0; i < len - 2; i++)
1098				skb->data[i] = self->rx_buff.data[i * 2];
1099		} else {
1100			if (self->chip_id == 0x3096) {
1101				for (i = 0; i < len - 2; i++)
1102					skb->data[i] =
1103					    self->rx_buff.data[i];
1104			}
1105		}
1106		// Move to next frame
1107		self->rx_buff.data += len;
1108		self->netdev->stats.rx_bytes += len;
1109		self->netdev->stats.rx_packets++;
1110		skb->dev = self->netdev;
1111		skb_reset_mac_header(skb);
1112		skb->protocol = htons(ETH_P_IRDA);
1113		netif_rx(skb);
1114		return TRUE;
1115	}
1116
1117	else {			//FIR mode
1118		len = GetRecvByte(iobase, self);
1119		if (len == 0)
1120			return TRUE;	//interrupt only, data maybe move by RxT
1121		if (((len - 4) < 2) || ((len - 4) > 2048)) {
1122			IRDA_DEBUG(1, "%s(): Trouble:len=%x,CurCount=%x,LastCount=%x..\n",
1123				   __func__, len, RxCurCount(iobase, self),
1124				   self->RxLastCount);
1125			hwreset(self);
1126			return FALSE;
1127		}
1128		IRDA_DEBUG(2, "%s(): fifo.len=%x,len=%x,CurCount=%x..\n",
1129			   __func__,
1130			   st_fifo->len, len - 4, RxCurCount(iobase, self));
1131
1132		st_fifo->entries[st_fifo->tail].status = status;
1133		st_fifo->entries[st_fifo->tail].len = len;
1134		st_fifo->pending_bytes += len;
1135		st_fifo->tail++;
1136		st_fifo->len++;
1137		if (st_fifo->tail > MAX_RX_WINDOW)
1138			st_fifo->tail = 0;
1139		self->RxDataReady = 0;
1140
1141		// It maybe have MAX_RX_WINDOW package receive by
1142		// receive_complete before Timer IRQ
1143/* F01_S
1144          if (st_fifo->len < (MAX_RX_WINDOW+2 )) {
1145		  RXStart(iobase,ON);
1146	  	  SetTimer(iobase,4);
1147	  }
1148	  else	  {
1149F01_E */
1150		EnableRX(iobase, OFF);
1151		EnRXDMA(iobase, OFF);
1152		RXStart(iobase, OFF);
1153//F01_S
1154		// Put this entry back in fifo
1155		if (st_fifo->head > MAX_RX_WINDOW)
1156			st_fifo->head = 0;
1157		status = st_fifo->entries[st_fifo->head].status;
1158		len = st_fifo->entries[st_fifo->head].len;
1159		st_fifo->head++;
1160		st_fifo->len--;
1161
1162		skb = dev_alloc_skb(len + 1 - 4);
1163		/*
1164		 * if frame size,data ptr,or skb ptr are wrong ,the get next
1165		 * entry.
1166		 */
1167		if ((skb == NULL) || (skb->data == NULL) ||
1168		    (self->rx_buff.data == NULL) || (len < 6)) {
1169			self->netdev->stats.rx_dropped++;
1170			return TRUE;
1171		}
1172		skb_reserve(skb, 1);
1173		skb_put(skb, len - 4);
1174
1175		skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
1176		IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __func__,
1177			   len - 4, self->rx_buff.data);
1178
1179		// Move to next frame
1180		self->rx_buff.data += len;
1181		self->netdev->stats.rx_bytes += len;
1182		self->netdev->stats.rx_packets++;
1183		skb->dev = self->netdev;
1184		skb_reset_mac_header(skb);
1185		skb->protocol = htons(ETH_P_IRDA);
1186		netif_rx(skb);
1187
1188//F01_E
1189	}			//FIR
1190	return TRUE;
1191
1192}
1193
1194/*
1195 * if frame is received , but no INT ,then use this routine to upload frame.
1196 */
1197static int upload_rxdata(struct via_ircc_cb *self, int iobase)
1198{
1199	struct sk_buff *skb;
1200	int len;
1201	struct st_fifo *st_fifo;
1202	st_fifo = &self->st_fifo;
1203
1204	len = GetRecvByte(iobase, self);
1205
1206	IRDA_DEBUG(2, "%s(): len=%x\n", __func__, len);
1207
1208	if ((len - 4) < 2) {
1209		self->netdev->stats.rx_dropped++;
1210		return FALSE;
1211	}
1212
1213	skb = dev_alloc_skb(len + 1);
1214	if (skb == NULL) {
1215		self->netdev->stats.rx_dropped++;
1216		return FALSE;
1217	}
1218	skb_reserve(skb, 1);
1219	skb_put(skb, len - 4 + 1);
1220	skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4 + 1);
1221	st_fifo->tail++;
1222	st_fifo->len++;
1223	if (st_fifo->tail > MAX_RX_WINDOW)
1224		st_fifo->tail = 0;
1225	// Move to next frame
1226	self->rx_buff.data += len;
1227	self->netdev->stats.rx_bytes += len;
1228	self->netdev->stats.rx_packets++;
1229	skb->dev = self->netdev;
1230	skb_reset_mac_header(skb);
1231	skb->protocol = htons(ETH_P_IRDA);
1232	netif_rx(skb);
1233	if (st_fifo->len < (MAX_RX_WINDOW + 2)) {
1234		RXStart(iobase, ON);
1235	} else {
1236		EnableRX(iobase, OFF);
1237		EnRXDMA(iobase, OFF);
1238		RXStart(iobase, OFF);
1239	}
1240	return TRUE;
1241}
1242
1243/*
1244 * Implement back to back receive , use this routine to upload data.
1245 */
1246
1247static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
1248{
1249	struct st_fifo *st_fifo;
1250	struct sk_buff *skb;
1251	int len;
1252	u8 status;
1253
1254	st_fifo = &self->st_fifo;
1255
1256	if (CkRxRecv(iobase, self)) {
1257		// if still receiving ,then return ,don't upload frame
1258		self->RetryCount = 0;
1259		SetTimer(iobase, 20);
1260		self->RxDataReady++;
1261		return FALSE;
1262	} else
1263		self->RetryCount++;
1264
1265	if ((self->RetryCount >= 1) ||
1266	    ((st_fifo->pending_bytes + 2048) > self->rx_buff.truesize) ||
1267	    (st_fifo->len >= (MAX_RX_WINDOW))) {
1268		while (st_fifo->len > 0) {	//upload frame
1269			// Put this entry back in fifo
1270			if (st_fifo->head > MAX_RX_WINDOW)
1271				st_fifo->head = 0;
1272			status = st_fifo->entries[st_fifo->head].status;
1273			len = st_fifo->entries[st_fifo->head].len;
1274			st_fifo->head++;
1275			st_fifo->len--;
1276
1277			skb = dev_alloc_skb(len + 1 - 4);
1278			/*
1279			 * if frame size, data ptr, or skb ptr are wrong,
1280			 * then get next entry.
1281			 */
1282			if ((skb == NULL) || (skb->data == NULL) ||
1283			    (self->rx_buff.data == NULL) || (len < 6)) {
1284				self->netdev->stats.rx_dropped++;
1285				continue;
1286			}
1287			skb_reserve(skb, 1);
1288			skb_put(skb, len - 4);
1289			skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
1290
1291			IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __func__,
1292				   len - 4, st_fifo->head);
1293
1294			// Move to next frame
1295			self->rx_buff.data += len;
1296			self->netdev->stats.rx_bytes += len;
1297			self->netdev->stats.rx_packets++;
1298			skb->dev = self->netdev;
1299			skb_reset_mac_header(skb);
1300			skb->protocol = htons(ETH_P_IRDA);
1301			netif_rx(skb);
1302		}		//while
1303		self->RetryCount = 0;
1304
1305		IRDA_DEBUG(2,
1306			   "%s(): End of upload HostStatus=%x,RxStatus=%x\n",
1307			   __func__,
1308			   GetHostStatus(iobase), GetRXStatus(iobase));
1309
1310		/*
1311		 * if frame is receive complete at this routine ,then upload
1312		 * frame.
1313		 */
1314		if ((GetRXStatus(iobase) & 0x10) &&
1315		    (RxCurCount(iobase, self) != self->RxLastCount)) {
1316			upload_rxdata(self, iobase);
1317			if (irda_device_txqueue_empty(self->netdev))
1318				via_ircc_dma_receive(self);
1319		}
1320	}			// timer detect complete
1321	else
1322		SetTimer(iobase, 4);
1323	return TRUE;
1324
1325}
1326
1327
1328
/*
 * Function via_ircc_interrupt (irq, dev_id)
 *
 *    Interrupt service routine.  Reads the host interrupt status once
 *    and dispatches to the timer, transmit and receive handlers in
 *    turn.  dev_id is the net_device registered with request_irq();
 *    the first argument is unused (hence "dummy").
 *
 */
static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct via_ircc_cb *self = netdev_priv(dev);
	int iobase;
	u8 iHostIntType, iRxIntType, iTxIntType;

	iobase = self->io.fir_base;
	/* Serialize against xmit/ioctl paths (they take the same lock). */
	spin_lock(&self->lock);
	iHostIntType = GetHostStatus(iobase);

	IRDA_DEBUG(4, "%s(): iHostIntType %02x:  %s %s %s  %02x\n",
		   __func__, iHostIntType,
		   (iHostIntType & 0x40) ? "Timer" : "",
		   (iHostIntType & 0x20) ? "Tx" : "",
		   (iHostIntType & 0x10) ? "Rx" : "",
		   (iHostIntType & 0x0e) >> 1);

	if ((iHostIntType & 0x40) != 0) {	//Timer Event
		self->EventFlag.TimeOut++;
		ClearTimerInt(iobase, 1);
		if (self->io.direction == IO_XMIT) {
			/* Timer while transmitting: kick the next DMA xmit. */
			via_ircc_dma_xmit(self, iobase);
		}
		if (self->io.direction == IO_RECV) {
			/*
			 * frame ready hold too long, must reset.
			 */
			if (self->RxDataReady > 30) {
				hwreset(self);
				if (irda_device_txqueue_empty(self->netdev)) {
					via_ircc_dma_receive(self);
				}
			} else {	// call this to upload frame.
				RxTimerHandler(self, iobase);
			}
		}		//RECV
	}			//Timer Event
	if ((iHostIntType & 0x20) != 0) {	//Tx Event
		iTxIntType = GetTXStatus(iobase);

		IRDA_DEBUG(4, "%s(): iTxIntType %02x:  %s %s %s %s\n",
			   __func__, iTxIntType,
			   (iTxIntType & 0x08) ? "FIFO underr." : "",
			   (iTxIntType & 0x04) ? "EOM" : "",
			   (iTxIntType & 0x02) ? "FIFO ready" : "",
			   (iTxIntType & 0x01) ? "Early EOM" : "");

		if (iTxIntType & 0x4) {
			self->EventFlag.EOMessage++;	// read and will auto clean
			/* Frame fully sent: switch back to receive if the
			 * IrDA tx queue has drained. */
			if (via_ircc_dma_xmit_complete(self)) {
				if (irda_device_txqueue_empty
				    (self->netdev)) {
					via_ircc_dma_receive(self);
				}
			} else {
				self->EventFlag.Unknown++;
			}
		}		//EOP
	}			//Tx Event
	//----------------------------------------
	if ((iHostIntType & 0x10) != 0) {	//Rx Event
		/* Check if DMA has finished */
		iRxIntType = GetRXStatus(iobase);

		IRDA_DEBUG(4, "%s(): iRxIntType %02x:  %s %s %s %s %s %s %s\n",
			   __func__, iRxIntType,
			   (iRxIntType & 0x80) ? "PHY err."	: "",
			   (iRxIntType & 0x40) ? "CRC err"	: "",
			   (iRxIntType & 0x20) ? "FIFO overr."	: "",
			   (iRxIntType & 0x10) ? "EOF"		: "",
			   (iRxIntType & 0x08) ? "RxData"	: "",
			   (iRxIntType & 0x02) ? "RxMaxLen"	: "",
			   (iRxIntType & 0x01) ? "SIR bad"	: "");
		if (!iRxIntType)
			IRDA_DEBUG(3, "%s(): RxIRQ =0\n", __func__);

		if (iRxIntType & 0x10) {
			/* EOF: a complete frame is in the DMA buffer. */
			if (via_ircc_dma_receive_complete(self, iobase)) {
//F01       if(!(IsFIROn(iobase)))  via_ircc_dma_receive(self);
				via_ircc_dma_receive(self);
			}
		}		// No ERR
		else {		//ERR
			IRDA_DEBUG(4, "%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n",
				   __func__, iRxIntType, iHostIntType,
				   RxCurCount(iobase, self),
				   self->RxLastCount);

			if (iRxIntType & 0x20) {	//FIFO OverRun ERR
				ResetChip(iobase, 0);
				ResetChip(iobase, 1);
			} else {	//PHY,CRC ERR

				if (iRxIntType != 0x08)
					hwreset(self);	//F01
			}
			/* Re-arm the receiver after the error recovery. */
			via_ircc_dma_receive(self);
		}		//ERR

	}			//Rx Event
	spin_unlock(&self->lock);
	return IRQ_RETVAL(iHostIntType);
}
1439
/*
 * Reset and re-initialize the chip after an error condition.
 * Stops DMA/TX/RX, re-runs the init sequence back to 9600-baud SIR,
 * then restores the speed the stack last requested.  The register
 * write order below is the documented bring-up sequence — do not
 * reorder.
 */
static void hwreset(struct via_ircc_cb *self)
{
	int iobase;
	iobase = self->io.fir_base;

	IRDA_DEBUG(3, "%s()\n", __func__);

	ResetChip(iobase, 5);
	/* Quiesce everything before re-initializing. */
	EnableDMA(iobase, OFF);
	EnableTX(iobase, OFF);
	EnableRX(iobase, OFF);
	EnRXDMA(iobase, OFF);
	EnTXDMA(iobase, OFF);
	RXStart(iobase, OFF);
	TXStart(iobase, OFF);
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	SetSIR(iobase, ON);
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	/* Gate I_ST_CT_0 off around the rate/pulse programming. */
	WriteReg(iobase, I_ST_CT_0, 0x00);
	SetBaudRate(iobase, 9600);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x80);

	/* Restore speed. */
	via_ircc_change_speed(self, self->io.speed);

	/* Any queued rx status entries are stale after the reset. */
	self->st_fifo.len = 0;
}
1472
1473/*
1474 * Function via_ircc_is_receiving (self)
1475 *
1476 *    Return TRUE is we are currently receiving a frame
1477 *
1478 */
1479static int via_ircc_is_receiving(struct via_ircc_cb *self)
1480{
1481	int status = FALSE;
1482	int iobase;
1483
1484	IRDA_ASSERT(self != NULL, return FALSE;);
1485
1486	iobase = self->io.fir_base;
1487	if (CkRxRecv(iobase, self))
1488		status = TRUE;
1489
1490	IRDA_DEBUG(2, "%s(): status=%x....\n", __func__, status);
1491
1492	return status;
1493}
1494
1495
1496/*
1497 * Function via_ircc_net_open (dev)
1498 *
1499 *    Start the device
1500 *
1501 */
1502static int via_ircc_net_open(struct net_device *dev)
1503{
1504	struct via_ircc_cb *self;
1505	int iobase;
1506	char hwname[32];
1507
1508	IRDA_DEBUG(3, "%s()\n", __func__);
1509
1510	IRDA_ASSERT(dev != NULL, return -1;);
1511	self = netdev_priv(dev);
1512	dev->stats.rx_packets = 0;
1513	IRDA_ASSERT(self != NULL, return 0;);
1514	iobase = self->io.fir_base;
1515	if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) {
1516		IRDA_WARNING("%s, unable to allocate irq=%d\n", driver_name,
1517			     self->io.irq);
1518		return -EAGAIN;
1519	}
1520	/*
1521	 * Always allocate the DMA channel after the IRQ, and clean up on
1522	 * failure.
1523	 */
1524	if (request_dma(self->io.dma, dev->name)) {
1525		IRDA_WARNING("%s, unable to allocate dma=%d\n", driver_name,
1526			     self->io.dma);
1527		free_irq(self->io.irq, self);
1528		return -EAGAIN;
1529	}
1530	if (self->io.dma2 != self->io.dma) {
1531		if (request_dma(self->io.dma2, dev->name)) {
1532			IRDA_WARNING("%s, unable to allocate dma2=%d\n",
1533				     driver_name, self->io.dma2);
1534			free_irq(self->io.irq, self);
1535			free_dma(self->io.dma);
1536			return -EAGAIN;
1537		}
1538	}
1539
1540
1541	/* turn on interrupts */
1542	EnAllInt(iobase, ON);
1543	EnInternalLoop(iobase, OFF);
1544	EnExternalLoop(iobase, OFF);
1545
1546	/* */
1547	via_ircc_dma_receive(self);
1548
1549	/* Ready to play! */
1550	netif_start_queue(dev);
1551
1552	/*
1553	 * Open new IrLAP layer instance, now that everything should be
1554	 * initialized properly
1555	 */
1556	sprintf(hwname, "VIA @ 0x%x", iobase);
1557	self->irlap = irlap_open(dev, &self->qos, hwname);
1558
1559	self->RxLastCount = 0;
1560
1561	return 0;
1562}
1563
/*
 * Function via_ircc_net_close (dev)
 *
 *    Stop the device: halt the queue, close IrLAP, stop DMA, mask
 *    interrupts and release the IRQ and DMA channel(s) claimed in
 *    via_ircc_net_open().  Always returns 0.
 *
 */
static int via_ircc_net_close(struct net_device *dev)
{
	struct via_ircc_cb *self;
	int iobase;

	IRDA_DEBUG(3, "%s()\n", __func__);

	IRDA_ASSERT(dev != NULL, return -1;);
	self = netdev_priv(dev);
	IRDA_ASSERT(self != NULL, return 0;);

	/* Stop device */
	netif_stop_queue(dev);
	/* Stop and remove instance of IrLAP */
	if (self->irlap)
		irlap_close(self->irlap);
	self->irlap = NULL;
	iobase = self->io.fir_base;
	/* Quiesce DMA before masking interrupts and freeing channels. */
	EnTXDMA(iobase, OFF);
	EnRXDMA(iobase, OFF);
	DisableDmaChannel(self->io.dma);

	/* Disable interrupts */
	EnAllInt(iobase, OFF);
	free_irq(self->io.irq, dev);
	free_dma(self->io.dma);
	/* dma2 is a separate channel only on some boards. */
	if (self->io.dma2 != self->io.dma)
		free_dma(self->io.dma2);

	return 0;
}
1601
1602/*
1603 * Function via_ircc_net_ioctl (dev, rq, cmd)
1604 *
1605 *    Process IOCTL commands for this device
1606 *
1607 */
1608static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
1609			      int cmd)
1610{
1611	struct if_irda_req *irq = (struct if_irda_req *) rq;
1612	struct via_ircc_cb *self;
1613	unsigned long flags;
1614	int ret = 0;
1615
1616	IRDA_ASSERT(dev != NULL, return -1;);
1617	self = netdev_priv(dev);
1618	IRDA_ASSERT(self != NULL, return -1;);
1619	IRDA_DEBUG(1, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name,
1620		   cmd);
1621	/* Disable interrupts & save flags */
1622	spin_lock_irqsave(&self->lock, flags);
1623	switch (cmd) {
1624	case SIOCSBANDWIDTH:	/* Set bandwidth */
1625		if (!capable(CAP_NET_ADMIN)) {
1626			ret = -EPERM;
1627			goto out;
1628		}
1629		via_ircc_change_speed(self, irq->ifr_baudrate);
1630		break;
1631	case SIOCSMEDIABUSY:	/* Set media busy */
1632		if (!capable(CAP_NET_ADMIN)) {
1633			ret = -EPERM;
1634			goto out;
1635		}
1636		irda_device_set_media_busy(self->netdev, TRUE);
1637		break;
1638	case SIOCGRECEIVING:	/* Check if we are receiving right now */
1639		irq->ifr_receiving = via_ircc_is_receiving(self);
1640		break;
1641	default:
1642		ret = -EOPNOTSUPP;
1643	}
1644      out:
1645	spin_unlock_irqrestore(&self->lock, flags);
1646	return ret;
1647}
1648
MODULE_AUTHOR("VIA Technologies,inc");
MODULE_DESCRIPTION("VIA IrDA Device Driver");
MODULE_LICENSE("GPL");

/* Module entry/exit points; the referenced functions are defined
 * earlier in this file. */
module_init(via_ircc_init);
module_exit(via_ircc_cleanup);
1655