1/********************************************************************* 2 * 3 * vlsi_ir.c: VLSI82C147 PCI IrDA controller driver for Linux 4 * 5 * Copyright (c) 2001-2003 Martin Diehl 6 * 7 * This program is free software; you can redistribute it and/or 8 * modify it under the terms of the GNU General Public License as 9 * published by the Free Software Foundation; either version 2 of 10 * the License, or (at your option) any later version. 11 * 12 * This program is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * GNU General Public License for more details. 16 * 17 * You should have received a copy of the GNU General Public License 18 * along with this program; if not, write to the Free Software 19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, 20 * MA 02111-1307 USA 21 * 22 ********************************************************************/ 23 24#include <linux/module.h> 25 26#define DRIVER_NAME "vlsi_ir" 27#define DRIVER_VERSION "v0.5" 28#define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147" 29#define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>" 30 31MODULE_DESCRIPTION(DRIVER_DESCRIPTION); 32MODULE_AUTHOR(DRIVER_AUTHOR); 33MODULE_LICENSE("GPL"); 34 35/********************************************************/ 36 37#include <linux/kernel.h> 38#include <linux/init.h> 39#include <linux/pci.h> 40#include <linux/slab.h> 41#include <linux/netdevice.h> 42#include <linux/skbuff.h> 43#include <linux/delay.h> 44#include <linux/time.h> 45#include <linux/proc_fs.h> 46#include <linux/seq_file.h> 47#include <asm/uaccess.h> 48#include <asm/byteorder.h> 49 50#include <net/irda/irda.h> 51#include <net/irda/irda_device.h> 52#include <net/irda/wrapper.h> 53#include <net/irda/crc.h> 54 55#include "vlsi_ir.h" 56 57/********************************************************/ 58 59static /* const */ char drivername[] = DRIVER_NAME; 60 61static struct pci_device_id vlsi_irda_table [] = { 62 { 63 .class = PCI_CLASS_WIRELESS_IRDA << 8, 64 .class_mask = PCI_CLASS_SUBCLASS_MASK << 8, 65 .vendor = PCI_VENDOR_ID_VLSI, 66 .device = PCI_DEVICE_ID_VLSI_82C147, 67 .subvendor = PCI_ANY_ID, 68 .subdevice = PCI_ANY_ID, 69 }, 70 { /* all zeroes */ } 71}; 72 73MODULE_DEVICE_TABLE(pci, vlsi_irda_table); 74 75/********************************************************/ 76 77/* clksrc: which clock source to be used 78 * 0: auto - try PLL, fallback to 40MHz XCLK 79 * 1: on-chip 48MHz PLL 80 * 2: external 48MHz XCLK 81 * 3: external 40MHz XCLK (HP OB-800) 82 */ 83 84static int clksrc = 0; /* default is 0(auto) */ 85module_param(clksrc, int, 0); 86MODULE_PARM_DESC(clksrc, "clock input source selection"); 87 88/* ringsize: size of the tx and rx descriptor rings 89 * independent for tx and rx 90 * specify as ringsize=tx[,rx] 91 * allowed values: 4, 8, 16, 32, 64 92 * Due to the IrDA 1.x max. allowed window size=7, 93 * there should be no gain when using rings larger than 8 94 */ 95 96static int ringsize[] = {8,8}; /* default is tx=8 / rx=8 */ 97module_param_array(ringsize, int, NULL, 0); 98MODULE_PARM_DESC(ringsize, "TX, RX ring descriptor size"); 99 100/* sirpulse: tuning of the SIR pulse width within IrPHY 1.3 limits 101 * 0: very short, 1.5us (exception: 6us at 2.4 kbaud) 102 * 1: nominal 3/16 bittime width 103 * note: IrDA compliant peer devices should be happy regardless 104 * which one is used. Primary goal is to save some power 105 * on the sender's side - at 9.6kbaud for example the short 106 * pulse width saves more than 90% of the transmitted IR power. 107 */ 108 109static int sirpulse = 1; /* default is 3/16 bittime */ 110module_param(sirpulse, int, 0); 111MODULE_PARM_DESC(sirpulse, "SIR pulse width tuning"); 112 113/* qos_mtt_bits: encoded min-turn-time value we require the peer device 114 * to use before transmitting to us. "Type 1" (per-station) 115 * bitfield according to IrLAP definition (section 6.6.8) 116 * Don't know which transceiver is used by my OB800 - the 117 * pretty common HP HDLS-1100 requires 1 msec - so lets use this. 118 */ 119 120static int qos_mtt_bits = 0x07; /* default is 1 ms or more */ 121module_param(qos_mtt_bits, int, 0); 122MODULE_PARM_DESC(qos_mtt_bits, "IrLAP bitfield representing min-turn-time"); 123 124/********************************************************/ 125 126static void vlsi_reg_debug(unsigned iobase, const char *s) 127{ 128 int i; 129 130 printk(KERN_DEBUG "%s: ", s); 131 for (i = 0; i < 0x20; i++) 132 printk("%02x", (unsigned)inb((iobase+i))); 133 printk("\n"); 134} 135 136static void vlsi_ring_debug(struct vlsi_ring *r) 137{ 138 struct ring_descr *rd; 139 unsigned i; 140 141 printk(KERN_DEBUG "%s - ring %p / size %u / mask 0x%04x / len %u / dir %d / hw %p\n", 142 __FUNCTION__, r, r->size, r->mask, r->len, r->dir, r->rd[0].hw); 143 printk(KERN_DEBUG "%s - head = %d / tail = %d\n", __FUNCTION__, 144 atomic_read(&r->head) & r->mask, atomic_read(&r->tail) & r->mask); 145 for (i = 0; i < r->size; i++) { 146 rd = &r->rd[i]; 147 printk(KERN_DEBUG "%s - ring descr %u: ", __FUNCTION__, i); 148 printk("skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw); 149 printk(KERN_DEBUG "%s - hw: status=%02x count=%u addr=0x%08x\n", 150 __FUNCTION__, (unsigned) rd_get_status(rd), 151 (unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd)); 152 } 153} 154 155/********************************************************/ 156 157/* needed regardless of CONFIG_PROC_FS */ 158static struct proc_dir_entry *vlsi_proc_root = NULL; 159 160#ifdef CONFIG_PROC_FS 161 162static void vlsi_proc_pdev(struct seq_file *seq, struct pci_dev *pdev) 163{ 164 unsigned iobase = pci_resource_start(pdev, 0); 165 unsigned i; 166 167 seq_printf(seq, "\n%s (vid/did: %04x/%04x)\n", 168 pci_name(pdev), (int)pdev->vendor, (int)pdev->device); 169 seq_printf(seq, "pci-power-state: %u\n", (unsigned) pdev->current_state); 170 seq_printf(seq, "resources: irq=%u / io=0x%04x / dma_mask=0x%016Lx\n", 171 pdev->irq, (unsigned)pci_resource_start(pdev, 0), (unsigned long long)pdev->dma_mask); 172 seq_printf(seq, "hw registers: "); 173 for (i = 0; i < 0x20; i++) 174 seq_printf(seq, "%02x", (unsigned)inb((iobase+i))); 175 seq_printf(seq, "\n"); 176} 177 178static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev) 179{ 180 vlsi_irda_dev_t *idev = ndev->priv; 181 u8 byte; 182 u16 word; 183 unsigned delta1, delta2; 184 struct timeval now; 185 unsigned iobase = ndev->base_addr; 186 187 seq_printf(seq, "\n%s link state: %s / %s / %s / %s\n", ndev->name, 188 netif_device_present(ndev) ? "attached" : "detached", 189 netif_running(ndev) ? "running" : "not running", 190 netif_carrier_ok(ndev) ? "carrier ok" : "no carrier", 191 netif_queue_stopped(ndev) ? "queue stopped" : "queue running"); 192 193 if (!netif_running(ndev)) 194 return; 195 196 seq_printf(seq, "\nhw-state:\n"); 197 pci_read_config_byte(idev->pdev, VLSI_PCI_IRMISC, &byte); 198 seq_printf(seq, "IRMISC:%s%s%s uart%s", 199 (byte&IRMISC_IRRAIL) ? " irrail" : "", 200 (byte&IRMISC_IRPD) ? " irpd" : "", 201 (byte&IRMISC_UARTTST) ? " uarttest" : "", 202 (byte&IRMISC_UARTEN) ? "@" : " disabled\n"); 203 if (byte&IRMISC_UARTEN) { 204 seq_printf(seq, "0x%s\n", 205 (byte&2) ? ((byte&1) ? "3e8" : "2e8") 206 : ((byte&1) ? "3f8" : "2f8")); 207 } 208 pci_read_config_byte(idev->pdev, VLSI_PCI_CLKCTL, &byte); 209 seq_printf(seq, "CLKCTL: PLL %s%s%s / clock %s / wakeup %s\n", 210 (byte&CLKCTL_PD_INV) ? "powered" : "down", 211 (byte&CLKCTL_LOCK) ? " locked" : "", 212 (byte&CLKCTL_EXTCLK) ? ((byte&CLKCTL_XCKSEL)?" / 40 MHz XCLK":" / 48 MHz XCLK") : "", 213 (byte&CLKCTL_CLKSTP) ? "stopped" : "running", 214 (byte&CLKCTL_WAKE) ? "enabled" : "disabled"); 215 pci_read_config_byte(idev->pdev, VLSI_PCI_MSTRPAGE, &byte); 216 seq_printf(seq, "MSTRPAGE: 0x%02x\n", (unsigned)byte); 217 218 byte = inb(iobase+VLSI_PIO_IRINTR); 219 seq_printf(seq, "IRINTR:%s%s%s%s%s%s%s%s\n", 220 (byte&IRINTR_ACTEN) ? " ACTEN" : "", 221 (byte&IRINTR_RPKTEN) ? " RPKTEN" : "", 222 (byte&IRINTR_TPKTEN) ? " TPKTEN" : "", 223 (byte&IRINTR_OE_EN) ? " OE_EN" : "", 224 (byte&IRINTR_ACTIVITY) ? " ACTIVITY" : "", 225 (byte&IRINTR_RPKTINT) ? " RPKTINT" : "", 226 (byte&IRINTR_TPKTINT) ? " TPKTINT" : "", 227 (byte&IRINTR_OE_INT) ? " OE_INT" : ""); 228 word = inw(iobase+VLSI_PIO_RINGPTR); 229 seq_printf(seq, "RINGPTR: rx=%u / tx=%u\n", RINGPTR_GET_RX(word), RINGPTR_GET_TX(word)); 230 word = inw(iobase+VLSI_PIO_RINGBASE); 231 seq_printf(seq, "RINGBASE: busmap=0x%08x\n", 232 ((unsigned)word << 10)|(MSTRPAGE_VALUE<<24)); 233 word = inw(iobase+VLSI_PIO_RINGSIZE); 234 seq_printf(seq, "RINGSIZE: rx=%u / tx=%u\n", RINGSIZE_TO_RXSIZE(word), 235 RINGSIZE_TO_TXSIZE(word)); 236 237 word = inw(iobase+VLSI_PIO_IRCFG); 238 seq_printf(seq, "IRCFG:%s%s%s%s%s%s%s%s%s%s%s%s%s\n", 239 (word&IRCFG_LOOP) ? " LOOP" : "", 240 (word&IRCFG_ENTX) ? " ENTX" : "", 241 (word&IRCFG_ENRX) ? " ENRX" : "", 242 (word&IRCFG_MSTR) ? " MSTR" : "", 243 (word&IRCFG_RXANY) ? " RXANY" : "", 244 (word&IRCFG_CRC16) ? " CRC16" : "", 245 (word&IRCFG_FIR) ? " FIR" : "", 246 (word&IRCFG_MIR) ? " MIR" : "", 247 (word&IRCFG_SIR) ? " SIR" : "", 248 (word&IRCFG_SIRFILT) ? " SIRFILT" : "", 249 (word&IRCFG_SIRTEST) ? " SIRTEST" : "", 250 (word&IRCFG_TXPOL) ? " TXPOL" : "", 251 (word&IRCFG_RXPOL) ? " RXPOL" : ""); 252 word = inw(iobase+VLSI_PIO_IRENABLE); 253 seq_printf(seq, "IRENABLE:%s%s%s%s%s%s%s%s\n", 254 (word&IRENABLE_PHYANDCLOCK) ? " PHYANDCLOCK" : "", 255 (word&IRENABLE_CFGER) ? " CFGERR" : "", 256 (word&IRENABLE_FIR_ON) ? " FIR_ON" : "", 257 (word&IRENABLE_MIR_ON) ? " MIR_ON" : "", 258 (word&IRENABLE_SIR_ON) ? " SIR_ON" : "", 259 (word&IRENABLE_ENTXST) ? " ENTXST" : "", 260 (word&IRENABLE_ENRXST) ? " ENRXST" : "", 261 (word&IRENABLE_CRC16_ON) ? " CRC16_ON" : ""); 262 word = inw(iobase+VLSI_PIO_PHYCTL); 263 seq_printf(seq, "PHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n", 264 (unsigned)PHYCTL_TO_BAUD(word), 265 (unsigned)PHYCTL_TO_PLSWID(word), 266 (unsigned)PHYCTL_TO_PREAMB(word)); 267 word = inw(iobase+VLSI_PIO_NPHYCTL); 268 seq_printf(seq, "NPHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n", 269 (unsigned)PHYCTL_TO_BAUD(word), 270 (unsigned)PHYCTL_TO_PLSWID(word), 271 (unsigned)PHYCTL_TO_PREAMB(word)); 272 word = inw(iobase+VLSI_PIO_MAXPKT); 273 seq_printf(seq, "MAXPKT: max. rx packet size = %u\n", word); 274 word = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 275 seq_printf(seq, "RCVBCNT: rx-fifo filling level = %u\n", word); 276 277 seq_printf(seq, "\nsw-state:\n"); 278 seq_printf(seq, "IrPHY setup: %d baud - %s encoding\n", idev->baud, 279 (idev->mode==IFF_SIR)?"SIR":((idev->mode==IFF_MIR)?"MIR":"FIR")); 280 do_gettimeofday(&now); 281 if (now.tv_usec >= idev->last_rx.tv_usec) { 282 delta2 = now.tv_usec - idev->last_rx.tv_usec; 283 delta1 = 0; 284 } 285 else { 286 delta2 = 1000000 + now.tv_usec - idev->last_rx.tv_usec; 287 delta1 = 1; 288 } 289 seq_printf(seq, "last rx: %lu.%06u sec\n", 290 now.tv_sec - idev->last_rx.tv_sec - delta1, delta2); 291 292 seq_printf(seq, "RX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu", 293 idev->stats.rx_packets, idev->stats.rx_bytes, idev->stats.rx_errors, 294 idev->stats.rx_dropped); 295 seq_printf(seq, " / overrun=%lu / length=%lu / frame=%lu / crc=%lu\n", 296 idev->stats.rx_over_errors, idev->stats.rx_length_errors, 297 idev->stats.rx_frame_errors, idev->stats.rx_crc_errors); 298 seq_printf(seq, "TX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu / fifo=%lu\n", 299 idev->stats.tx_packets, idev->stats.tx_bytes, idev->stats.tx_errors, 300 idev->stats.tx_dropped, idev->stats.tx_fifo_errors); 301 302} 303 304static void vlsi_proc_ring(struct seq_file *seq, struct vlsi_ring *r) 305{ 306 struct ring_descr *rd; 307 unsigned i, j; 308 int h, t; 309 310 seq_printf(seq, "size %u / mask 0x%04x / len %u / dir %d / hw %p\n", 311 r->size, r->mask, r->len, r->dir, r->rd[0].hw); 312 h = atomic_read(&r->head) & r->mask; 313 t = atomic_read(&r->tail) & r->mask; 314 seq_printf(seq, "head = %d / tail = %d ", h, t); 315 if (h == t) 316 seq_printf(seq, "(empty)\n"); 317 else { 318 if (((t+1)&r->mask) == h) 319 seq_printf(seq, "(full)\n"); 320 else 321 seq_printf(seq, "(level = %d)\n", ((unsigned)(t-h) & r->mask)); 322 rd = &r->rd[h]; 323 j = (unsigned) rd_get_count(rd); 324 seq_printf(seq, "current: rd = %d / status = %02x / len = %u\n", 325 h, (unsigned)rd_get_status(rd), j); 326 if (j > 0) { 327 seq_printf(seq, " data:"); 328 if (j > 20) 329 j = 20; 330 for (i = 0; i < j; i++) 331 seq_printf(seq, " %02x", (unsigned)((unsigned char *)rd->buf)[i]); 332 seq_printf(seq, "\n"); 333 } 334 } 335 for (i = 0; i < r->size; i++) { 336 rd = &r->rd[i]; 337 seq_printf(seq, "> ring descr %u: ", i); 338 seq_printf(seq, "skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw); 339 seq_printf(seq, " hw: status=%02x count=%u busaddr=0x%08x\n", 340 (unsigned) rd_get_status(rd), 341 (unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd)); 342 } 343} 344 345static int vlsi_seq_show(struct seq_file *seq, void *v) 346{ 347 struct net_device *ndev = seq->private; 348 vlsi_irda_dev_t *idev = ndev->priv; 349 unsigned long flags; 350 351 seq_printf(seq, "\n%s %s\n\n", DRIVER_NAME, DRIVER_VERSION); 352 seq_printf(seq, "clksrc: %s\n", 353 (clksrc>=2) ? ((clksrc==3)?"40MHz XCLK":"48MHz XCLK") 354 : ((clksrc==1)?"48MHz PLL":"autodetect")); 355 seq_printf(seq, "ringsize: tx=%d / rx=%d\n", 356 ringsize[0], ringsize[1]); 357 seq_printf(seq, "sirpulse: %s\n", (sirpulse)?"3/16 bittime":"short"); 358 seq_printf(seq, "qos_mtt_bits: 0x%02x\n", (unsigned)qos_mtt_bits); 359 360 spin_lock_irqsave(&idev->lock, flags); 361 if (idev->pdev != NULL) { 362 vlsi_proc_pdev(seq, idev->pdev); 363 364 if (idev->pdev->current_state == 0) 365 vlsi_proc_ndev(seq, ndev); 366 else 367 seq_printf(seq, "\nPCI controller down - resume_ok = %d\n", 368 idev->resume_ok); 369 if (netif_running(ndev) && idev->rx_ring && idev->tx_ring) { 370 seq_printf(seq, "\n--------- RX ring -----------\n\n"); 371 vlsi_proc_ring(seq, idev->rx_ring); 372 seq_printf(seq, "\n--------- TX ring -----------\n\n"); 373 vlsi_proc_ring(seq, idev->tx_ring); 374 } 375 } 376 seq_printf(seq, "\n"); 377 spin_unlock_irqrestore(&idev->lock, flags); 378 379 return 0; 380} 381 382static int vlsi_seq_open(struct inode *inode, struct file *file) 383{ 384 return single_open(file, vlsi_seq_show, PDE(inode)->data); 385} 386 387static const struct file_operations vlsi_proc_fops = { 388 .owner = THIS_MODULE, 389 .open = vlsi_seq_open, 390 .read = seq_read, 391 .llseek = seq_lseek, 392 .release = single_release, 393}; 394 395#define VLSI_PROC_FOPS (&vlsi_proc_fops) 396 397#else /* CONFIG_PROC_FS */ 398#define VLSI_PROC_FOPS NULL 399#endif 400 401/********************************************************/ 402 403static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr_hw *hwmap, 404 unsigned size, unsigned len, int dir) 405{ 406 struct vlsi_ring *r; 407 struct ring_descr *rd; 408 unsigned i, j; 409 dma_addr_t busaddr; 410 411 if (!size || ((size-1)&size)!=0) /* must be >0 and power of 2 */ 412 return NULL; 413 414 r = kmalloc(sizeof(*r) + size * sizeof(struct ring_descr), GFP_KERNEL); 415 if (!r) 416 return NULL; 417 memset(r, 0, sizeof(*r)); 418 419 r->pdev = pdev; 420 r->dir = dir; 421 r->len = len; 422 r->rd = (struct ring_descr *)(r+1); 423 r->mask = size - 1; 424 r->size = size; 425 atomic_set(&r->head, 0); 426 atomic_set(&r->tail, 0); 427 428 for (i = 0; i < size; i++) { 429 rd = r->rd + i; 430 memset(rd, 0, sizeof(*rd)); 431 rd->hw = hwmap + i; 432 rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA); 433 if (rd->buf == NULL 434 || !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) { 435 if (rd->buf) { 436 IRDA_ERROR("%s: failed to create PCI-MAP for %p", 437 __FUNCTION__, rd->buf); 438 kfree(rd->buf); 439 rd->buf = NULL; 440 } 441 for (j = 0; j < i; j++) { 442 rd = r->rd + j; 443 busaddr = rd_get_addr(rd); 444 rd_set_addr_status(rd, 0, 0); 445 if (busaddr) 446 pci_unmap_single(pdev, busaddr, len, dir); 447 kfree(rd->buf); 448 rd->buf = NULL; 449 } 450 kfree(r); 451 return NULL; 452 } 453 rd_set_addr_status(rd, busaddr, 0); 454 /* initially, the dma buffer is owned by the CPU */ 455 rd->skb = NULL; 456 } 457 return r; 458} 459 460static int vlsi_free_ring(struct vlsi_ring *r) 461{ 462 struct ring_descr *rd; 463 unsigned i; 464 dma_addr_t busaddr; 465 466 for (i = 0; i < r->size; i++) { 467 rd = r->rd + i; 468 if (rd->skb) 469 dev_kfree_skb_any(rd->skb); 470 busaddr = rd_get_addr(rd); 471 rd_set_addr_status(rd, 0, 0); 472 if (busaddr) 473 pci_unmap_single(r->pdev, busaddr, r->len, r->dir); 474 kfree(rd->buf); 475 } 476 kfree(r); 477 return 0; 478} 479 480static int vlsi_create_hwif(vlsi_irda_dev_t *idev) 481{ 482 char *ringarea; 483 struct ring_descr_hw *hwmap; 484 485 idev->virtaddr = NULL; 486 idev->busaddr = 0; 487 488 ringarea = pci_alloc_consistent(idev->pdev, HW_RING_AREA_SIZE, &idev->busaddr); 489 if (!ringarea) { 490 IRDA_ERROR("%s: insufficient memory for descriptor rings\n", 491 __FUNCTION__); 492 goto out; 493 } 494 memset(ringarea, 0, HW_RING_AREA_SIZE); 495 496 hwmap = (struct ring_descr_hw *)ringarea; 497 idev->rx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[1], 498 XFER_BUF_SIZE, PCI_DMA_FROMDEVICE); 499 if (idev->rx_ring == NULL) 500 goto out_unmap; 501 502 hwmap += MAX_RING_DESCR; 503 idev->tx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[0], 504 XFER_BUF_SIZE, PCI_DMA_TODEVICE); 505 if (idev->tx_ring == NULL) 506 goto out_free_rx; 507 508 idev->virtaddr = ringarea; 509 return 0; 510 511out_free_rx: 512 vlsi_free_ring(idev->rx_ring); 513out_unmap: 514 idev->rx_ring = idev->tx_ring = NULL; 515 pci_free_consistent(idev->pdev, HW_RING_AREA_SIZE, ringarea, idev->busaddr); 516 idev->busaddr = 0; 517out: 518 return -ENOMEM; 519} 520 521static int vlsi_destroy_hwif(vlsi_irda_dev_t *idev) 522{ 523 vlsi_free_ring(idev->rx_ring); 524 vlsi_free_ring(idev->tx_ring); 525 idev->rx_ring = idev->tx_ring = NULL; 526 527 if (idev->busaddr) 528 pci_free_consistent(idev->pdev,HW_RING_AREA_SIZE,idev->virtaddr,idev->busaddr); 529 530 idev->virtaddr = NULL; 531 idev->busaddr = 0; 532 533 return 0; 534} 535 536/********************************************************/ 537 538static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd) 539{ 540 u16 status; 541 int crclen, len = 0; 542 struct sk_buff *skb; 543 int ret = 0; 544 struct net_device *ndev = (struct net_device *)pci_get_drvdata(r->pdev); 545 vlsi_irda_dev_t *idev = ndev->priv; 546 547 pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir); 548 /* dma buffer now owned by the CPU */ 549 status = rd_get_status(rd); 550 if (status & RD_RX_ERROR) { 551 if (status & RD_RX_OVER) 552 ret |= VLSI_RX_OVER; 553 if (status & RD_RX_LENGTH) 554 ret |= VLSI_RX_LENGTH; 555 if (status & RD_RX_PHYERR) 556 ret |= VLSI_RX_FRAME; 557 if (status & RD_RX_CRCERR) 558 ret |= VLSI_RX_CRC; 559 goto done; 560 } 561 562 len = rd_get_count(rd); 563 crclen = (idev->mode==IFF_FIR) ? sizeof(u32) : sizeof(u16); 564 len -= crclen; /* remove trailing CRC */ 565 if (len <= 0) { 566 IRDA_DEBUG(0, "%s: strange frame (len=%d)\n", __FUNCTION__, len); 567 ret |= VLSI_RX_DROP; 568 goto done; 569 } 570 571 if (idev->mode == IFF_SIR) { /* hw checks CRC in MIR, FIR mode */ 572 573 /* rd->buf is a streaming PCI_DMA_FROMDEVICE map. Doing the 574 * endian-adjustment there just in place will dirty a cache line 575 * which belongs to the map and thus we must be sure it will 576 * get flushed before giving the buffer back to hardware. 577 * vlsi_fill_rx() will do this anyway - but here we rely on. 578 */ 579 le16_to_cpus(rd->buf+len); 580 if (irda_calc_crc16(INIT_FCS,rd->buf,len+crclen) != GOOD_FCS) { 581 IRDA_DEBUG(0, "%s: crc error\n", __FUNCTION__); 582 ret |= VLSI_RX_CRC; 583 goto done; 584 } 585 } 586 587 if (!rd->skb) { 588 IRDA_WARNING("%s: rx packet lost\n", __FUNCTION__); 589 ret |= VLSI_RX_DROP; 590 goto done; 591 } 592 593 skb = rd->skb; 594 rd->skb = NULL; 595 skb->dev = ndev; 596 memcpy(skb_put(skb,len), rd->buf, len); 597 skb_reset_mac_header(skb); 598 if (in_interrupt()) 599 netif_rx(skb); 600 else 601 netif_rx_ni(skb); 602 ndev->last_rx = jiffies; 603 604done: 605 rd_set_status(rd, 0); 606 rd_set_count(rd, 0); 607 /* buffer still owned by CPU */ 608 609 return (ret) ? -ret : len; 610} 611 612static void vlsi_fill_rx(struct vlsi_ring *r) 613{ 614 struct ring_descr *rd; 615 616 for (rd = ring_last(r); rd != NULL; rd = ring_put(r)) { 617 if (rd_is_active(rd)) { 618 IRDA_WARNING("%s: driver bug: rx descr race with hw\n", 619 __FUNCTION__); 620 vlsi_ring_debug(r); 621 break; 622 } 623 if (!rd->skb) { 624 rd->skb = dev_alloc_skb(IRLAP_SKB_ALLOCSIZE); 625 if (rd->skb) { 626 skb_reserve(rd->skb,1); 627 rd->skb->protocol = htons(ETH_P_IRDA); 628 } 629 else 630 break; /* probably not worth logging? */ 631 } 632 /* give dma buffer back to busmaster */ 633 pci_dma_sync_single_for_device(r->pdev, rd_get_addr(rd), r->len, r->dir); 634 rd_activate(rd); 635 } 636} 637 638static void vlsi_rx_interrupt(struct net_device *ndev) 639{ 640 vlsi_irda_dev_t *idev = ndev->priv; 641 struct vlsi_ring *r = idev->rx_ring; 642 struct ring_descr *rd; 643 int ret; 644 645 for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) { 646 647 if (rd_is_active(rd)) 648 break; 649 650 ret = vlsi_process_rx(r, rd); 651 652 if (ret < 0) { 653 ret = -ret; 654 idev->stats.rx_errors++; 655 if (ret & VLSI_RX_DROP) 656 idev->stats.rx_dropped++; 657 if (ret & VLSI_RX_OVER) 658 idev->stats.rx_over_errors++; 659 if (ret & VLSI_RX_LENGTH) 660 idev->stats.rx_length_errors++; 661 if (ret & VLSI_RX_FRAME) 662 idev->stats.rx_frame_errors++; 663 if (ret & VLSI_RX_CRC) 664 idev->stats.rx_crc_errors++; 665 } 666 else if (ret > 0) { 667 idev->stats.rx_packets++; 668 idev->stats.rx_bytes += ret; 669 } 670 } 671 672 do_gettimeofday(&idev->last_rx); /* remember "now" for later mtt delay */ 673 674 vlsi_fill_rx(r); 675 676 if (ring_first(r) == NULL) { 677 /* we are in big trouble, if this should ever happen */ 678 IRDA_ERROR("%s: rx ring exhausted!\n", __FUNCTION__); 679 vlsi_ring_debug(r); 680 } 681 else 682 outw(0, ndev->base_addr+VLSI_PIO_PROMPT); 683} 684 685/* caller must have stopped the controller from busmastering */ 686 687static void vlsi_unarm_rx(vlsi_irda_dev_t *idev) 688{ 689 struct vlsi_ring *r = idev->rx_ring; 690 struct ring_descr *rd; 691 int ret; 692 693 for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) { 694 695 ret = 0; 696 if (rd_is_active(rd)) { 697 rd_set_status(rd, 0); 698 if (rd_get_count(rd)) { 699 IRDA_DEBUG(0, "%s - dropping rx packet\n", __FUNCTION__); 700 ret = -VLSI_RX_DROP; 701 } 702 rd_set_count(rd, 0); 703 pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir); 704 if (rd->skb) { 705 dev_kfree_skb_any(rd->skb); 706 rd->skb = NULL; 707 } 708 } 709 else 710 ret = vlsi_process_rx(r, rd); 711 712 if (ret < 0) { 713 ret = -ret; 714 idev->stats.rx_errors++; 715 if (ret & VLSI_RX_DROP) 716 idev->stats.rx_dropped++; 717 if (ret & VLSI_RX_OVER) 718 idev->stats.rx_over_errors++; 719 if (ret & VLSI_RX_LENGTH) 720 idev->stats.rx_length_errors++; 721 if (ret & VLSI_RX_FRAME) 722 idev->stats.rx_frame_errors++; 723 if (ret & VLSI_RX_CRC) 724 idev->stats.rx_crc_errors++; 725 } 726 else if (ret > 0) { 727 idev->stats.rx_packets++; 728 idev->stats.rx_bytes += ret; 729 } 730 } 731} 732 733/********************************************************/ 734 735static int vlsi_process_tx(struct vlsi_ring *r, struct ring_descr *rd) 736{ 737 u16 status; 738 int len; 739 int ret; 740 741 pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir); 742 /* dma buffer now owned by the CPU */ 743 status = rd_get_status(rd); 744 if (status & RD_TX_UNDRN) 745 ret = VLSI_TX_FIFO; 746 else 747 ret = 0; 748 rd_set_status(rd, 0); 749 750 if (rd->skb) { 751 len = rd->skb->len; 752 dev_kfree_skb_any(rd->skb); 753 rd->skb = NULL; 754 } 755 else /* tx-skb already freed? - should never happen */ 756 len = rd_get_count(rd); /* incorrect for SIR! (due to wrapping) */ 757 758 rd_set_count(rd, 0); 759 /* dma buffer still owned by the CPU */ 760 761 return (ret) ? -ret : len; 762} 763 764static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase) 765{ 766 u16 nphyctl; 767 u16 config; 768 unsigned mode; 769 int ret; 770 int baudrate; 771 int fifocnt; 772 773 baudrate = idev->new_baud; 774 IRDA_DEBUG(2, "%s: %d -> %d\n", __FUNCTION__, idev->baud, idev->new_baud); 775 if (baudrate == 4000000) { 776 mode = IFF_FIR; 777 config = IRCFG_FIR; 778 nphyctl = PHYCTL_FIR; 779 } 780 else if (baudrate == 1152000) { 781 mode = IFF_MIR; 782 config = IRCFG_MIR | IRCFG_CRC16; 783 nphyctl = PHYCTL_MIR(clksrc==3); 784 } 785 else { 786 mode = IFF_SIR; 787 config = IRCFG_SIR | IRCFG_SIRFILT | IRCFG_RXANY; 788 switch(baudrate) { 789 default: 790 IRDA_WARNING("%s: undefined baudrate %d - fallback to 9600!\n", 791 __FUNCTION__, baudrate); 792 baudrate = 9600; 793 /* fallthru */ 794 case 2400: 795 case 9600: 796 case 19200: 797 case 38400: 798 case 57600: 799 case 115200: 800 nphyctl = PHYCTL_SIR(baudrate,sirpulse,clksrc==3); 801 break; 802 } 803 } 804 config |= IRCFG_MSTR | IRCFG_ENRX; 805 806 fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 807 if (fifocnt != 0) { 808 IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __FUNCTION__, fifocnt); 809 } 810 811 outw(0, iobase+VLSI_PIO_IRENABLE); 812 outw(config, iobase+VLSI_PIO_IRCFG); 813 outw(nphyctl, iobase+VLSI_PIO_NPHYCTL); 814 wmb(); 815 outw(IRENABLE_PHYANDCLOCK, iobase+VLSI_PIO_IRENABLE); 816 mb(); 817 818 udelay(1); /* chip applies IRCFG on next rising edge of its 8MHz clock */ 819 820 /* read back settings for validation */ 821 822 config = inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_MASK; 823 824 if (mode == IFF_FIR) 825 config ^= IRENABLE_FIR_ON; 826 else if (mode == IFF_MIR) 827 config ^= (IRENABLE_MIR_ON|IRENABLE_CRC16_ON); 828 else 829 config ^= IRENABLE_SIR_ON; 830 831 if (config != (IRENABLE_PHYANDCLOCK|IRENABLE_ENRXST)) { 832 IRDA_WARNING("%s: failed to set %s mode!\n", __FUNCTION__, 833 (mode==IFF_SIR)?"SIR":((mode==IFF_MIR)?"MIR":"FIR")); 834 ret = -1; 835 } 836 else { 837 if (inw(iobase+VLSI_PIO_PHYCTL) != nphyctl) { 838 IRDA_WARNING("%s: failed to apply baudrate %d\n", 839 __FUNCTION__, baudrate); 840 ret = -1; 841 } 842 else { 843 idev->mode = mode; 844 idev->baud = baudrate; 845 idev->new_baud = 0; 846 ret = 0; 847 } 848 } 849 850 if (ret) 851 vlsi_reg_debug(iobase,__FUNCTION__); 852 853 return ret; 854} 855 856static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev) 857{ 858 vlsi_irda_dev_t *idev = ndev->priv; 859 struct vlsi_ring *r = idev->tx_ring; 860 struct ring_descr *rd; 861 unsigned long flags; 862 unsigned iobase = ndev->base_addr; 863 u8 status; 864 u16 config; 865 int mtt; 866 int len, speed; 867 struct timeval now, ready; 868 char *msg = NULL; 869 870 speed = irda_get_next_speed(skb); 871 spin_lock_irqsave(&idev->lock, flags); 872 if (speed != -1 && speed != idev->baud) { 873 netif_stop_queue(ndev); 874 idev->new_baud = speed; 875 status = RD_TX_CLRENTX; /* stop tx-ring after this frame */ 876 } 877 else 878 status = 0; 879 880 if (skb->len == 0) { 881 /* handle zero packets - should be speed change */ 882 if (status == 0) { 883 msg = "bogus zero-length packet"; 884 goto drop_unlock; 885 } 886 887 /* due to the completely asynch tx operation we might have 888 * IrLAP racing with the hardware here, f.e. if the controller 889 * is just sending the last packet with current speed while 890 * the LAP is already switching the speed using synchronous 891 * len=0 packet. Immediate execution would lead to hw lockup 892 * requiring a powercycle to reset. Good candidate to trigger 893 * this is the final UA:RSP packet after receiving a DISC:CMD 894 * when getting the LAP down. 895 * Note that we are not protected by the queue_stop approach 896 * because the final UA:RSP arrives _without_ request to apply 897 * new-speed-after-this-packet - hence the driver doesn't know 898 * this was the last packet and doesn't stop the queue. So the 899 * forced switch to default speed from LAP gets through as fast 900 * as only some 10 usec later while the UA:RSP is still processed 901 * by the hardware and we would get screwed. 902 */ 903 904 if (ring_first(idev->tx_ring) == NULL) { 905 /* no race - tx-ring already empty */ 906 vlsi_set_baud(idev, iobase); 907 netif_wake_queue(ndev); 908 } 909 else 910 ; 911 /* keep the speed change pending like it would 912 * for any len>0 packet. tx completion interrupt 913 * will apply it when the tx ring becomes empty. 914 */ 915 spin_unlock_irqrestore(&idev->lock, flags); 916 dev_kfree_skb_any(skb); 917 return 0; 918 } 919 920 /* sanity checks - simply drop the packet */ 921 922 rd = ring_last(r); 923 if (!rd) { 924 msg = "ring full, but queue wasn't stopped"; 925 goto drop_unlock; 926 } 927 928 if (rd_is_active(rd)) { 929 msg = "entry still owned by hw"; 930 goto drop_unlock; 931 } 932 933 if (!rd->buf) { 934 msg = "tx ring entry without pci buffer"; 935 goto drop_unlock; 936 } 937 938 if (rd->skb) { 939 msg = "ring entry with old skb still attached"; 940 goto drop_unlock; 941 } 942 943 /* no need for serialization or interrupt disable during mtt */ 944 spin_unlock_irqrestore(&idev->lock, flags); 945 946 if ((mtt = irda_get_mtt(skb)) > 0) { 947 948 ready.tv_usec = idev->last_rx.tv_usec + mtt; 949 ready.tv_sec = idev->last_rx.tv_sec; 950 if (ready.tv_usec >= 1000000) { 951 ready.tv_usec -= 1000000; 952 ready.tv_sec++; /* IrLAP 1.1: mtt always < 1 sec */ 953 } 954 for(;;) { 955 do_gettimeofday(&now); 956 if (now.tv_sec > ready.tv_sec 957 || (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec)) 958 break; 959 udelay(100); 960 /* must not sleep here - called under netif_tx_lock! */ 961 } 962 } 963 964 /* tx buffer already owned by CPU due to pci_dma_sync_single_for_cpu() 965 * after subsequent tx-completion 966 */ 967 968 if (idev->mode == IFF_SIR) { 969 status |= RD_TX_DISCRC; /* no hw-crc creation */ 970 len = async_wrap_skb(skb, rd->buf, r->len); 971 972 /* Some rare worst case situation in SIR mode might lead to 973 * potential buffer overflow. The wrapper detects this, returns 974 * with a shortened frame (without FCS/EOF) but doesn't provide 975 * any error indication about the invalid packet which we are 976 * going to transmit. 977 * Therefore we log if the buffer got filled to the point, where the 978 * wrapper would abort, i.e. when there are less than 5 bytes left to 979 * allow appending the FCS/EOF. 980 */ 981 982 if (len >= r->len-5) 983 IRDA_WARNING("%s: possible buffer overflow with SIR wrapping!\n", 984 __FUNCTION__); 985 } 986 else { 987 /* hw deals with MIR/FIR mode wrapping */ 988 status |= RD_TX_PULSE; /* send 2 us highspeed indication pulse */ 989 len = skb->len; 990 if (len > r->len) { 991 msg = "frame exceeds tx buffer length"; 992 goto drop; 993 } 994 else 995 skb_copy_from_linear_data(skb, rd->buf, len); 996 } 997 998 rd->skb = skb; /* remember skb for tx-complete stats */ 999 1000 rd_set_count(rd, len); 1001 rd_set_status(rd, status); /* not yet active! */ 1002 1003 /* give dma buffer back to busmaster-hw (flush caches to make 1004 * CPU-driven changes visible from the pci bus). 1005 */ 1006 1007 pci_dma_sync_single_for_device(r->pdev, rd_get_addr(rd), r->len, r->dir); 1008 1009/* Switching to TX mode here races with the controller 1010 * which may stop TX at any time when fetching an inactive descriptor 1011 * or one with CLR_ENTX set. So we switch on TX only, if TX was not running 1012 * _after_ the new descriptor was activated on the ring. This ensures 1013 * we will either find TX already stopped or we can be sure, there 1014 * will be a TX-complete interrupt even if the chip stopped doing 1015 * TX just after we found it still running. The ISR will then find 1016 * the non-empty ring and restart TX processing. The enclosing 1017 * spinlock provides the correct serialization to prevent race with isr. 1018 */ 1019 1020 spin_lock_irqsave(&idev->lock,flags); 1021 1022 rd_activate(rd); 1023 1024 if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) { 1025 int fifocnt; 1026 1027 fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 1028 if (fifocnt != 0) { 1029 IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __FUNCTION__, fifocnt); 1030 } 1031 1032 config = inw(iobase+VLSI_PIO_IRCFG); 1033 mb(); 1034 outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG); 1035 wmb(); 1036 outw(0, iobase+VLSI_PIO_PROMPT); 1037 } 1038 ndev->trans_start = jiffies; 1039 1040 if (ring_put(r) == NULL) { 1041 netif_stop_queue(ndev); 1042 IRDA_DEBUG(3, "%s: tx ring full - queue stopped\n", __FUNCTION__); 1043 } 1044 spin_unlock_irqrestore(&idev->lock, flags); 1045 1046 return 0; 1047 1048drop_unlock: 1049 spin_unlock_irqrestore(&idev->lock, flags); 1050drop: 1051 IRDA_WARNING("%s: dropping packet - %s\n", __FUNCTION__, msg); 1052 dev_kfree_skb_any(skb); 1053 idev->stats.tx_errors++; 1054 idev->stats.tx_dropped++; 1055 /* Don't even think about returning NET_XMIT_DROP (=1) here! 1056 * In fact any retval!=0 causes the packet scheduler to requeue the 1057 * packet for later retry of transmission - which isn't exactly 1058 * what we want after we've just called dev_kfree_skb_any ;-) 1059 */ 1060 return 0; 1061} 1062 1063static void vlsi_tx_interrupt(struct net_device *ndev) 1064{ 1065 vlsi_irda_dev_t *idev = ndev->priv; 1066 struct vlsi_ring *r = idev->tx_ring; 1067 struct ring_descr *rd; 1068 unsigned iobase; 1069 int ret; 1070 u16 config; 1071 1072 for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) { 1073 1074 if (rd_is_active(rd)) 1075 break; 1076 1077 ret = vlsi_process_tx(r, rd); 1078 1079 if (ret < 0) { 1080 ret = -ret; 1081 idev->stats.tx_errors++; 1082 if (ret & VLSI_TX_DROP) 1083 idev->stats.tx_dropped++; 1084 if (ret & VLSI_TX_FIFO) 1085 idev->stats.tx_fifo_errors++; 1086 } 1087 else if (ret > 0){ 1088 idev->stats.tx_packets++; 1089 idev->stats.tx_bytes += ret; 1090 } 1091 } 1092 1093 iobase = ndev->base_addr; 1094 1095 if (idev->new_baud && rd == NULL) /* tx ring empty and speed change pending */ 1096 vlsi_set_baud(idev, iobase); 1097 1098 config = inw(iobase+VLSI_PIO_IRCFG); 1099 if (rd == NULL) /* tx ring empty: re-enable rx */ 1100 outw((config & ~IRCFG_ENTX) | IRCFG_ENRX, iobase+VLSI_PIO_IRCFG); 1101 1102 else if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) { 1103 int fifocnt; 1104 1105 fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 1106 if (fifocnt != 0) { 1107 IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", 1108 __FUNCTION__, fifocnt); 1109 } 1110 outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG); 1111 } 1112 1113 outw(0, iobase+VLSI_PIO_PROMPT); 1114 1115 if (netif_queue_stopped(ndev) && !idev->new_baud) { 1116 netif_wake_queue(ndev); 1117 IRDA_DEBUG(3, "%s: queue awoken\n", __FUNCTION__); 1118 } 1119} 1120 1121/* caller must have stopped the controller from busmastering */ 1122 1123static void vlsi_unarm_tx(vlsi_irda_dev_t *idev) 1124{ 1125 struct vlsi_ring *r = idev->tx_ring; 1126 struct ring_descr *rd; 1127 int ret; 1128 1129 for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) { 1130 1131 ret = 0; 1132 if (rd_is_active(rd)) { 1133 rd_set_status(rd, 0); 1134 rd_set_count(rd, 0); 1135 pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir); 1136 if (rd->skb) { 1137 dev_kfree_skb_any(rd->skb); 1138 rd->skb = NULL; 1139 } 1140 IRDA_DEBUG(0, "%s - dropping tx packet\n", __FUNCTION__); 1141 ret = -VLSI_TX_DROP; 1142 } 1143 else 1144 ret = vlsi_process_tx(r, rd); 1145 1146 if (ret < 0) { 1147 ret = -ret; 1148 idev->stats.tx_errors++; 1149 if (ret & VLSI_TX_DROP) 1150 idev->stats.tx_dropped++; 1151 if (ret & VLSI_TX_FIFO) 1152 idev->stats.tx_fifo_errors++; 1153 } 1154 else if (ret > 0){ 1155 idev->stats.tx_packets++; 1156 idev->stats.tx_bytes += ret; 1157 } 1158 } 1159 1160} 1161 1162/********************************************************/ 1163 1164static int vlsi_start_clock(struct pci_dev *pdev) 1165{ 1166 u8 clkctl, lock; 1167 int i, count; 1168 1169 if (clksrc < 2) { /* auto or PLL: try PLL */ 1170 clkctl = CLKCTL_PD_INV | CLKCTL_CLKSTP; 1171 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1172 1173 /* procedure to detect PLL lock synchronisation: 1174 * after 0.5 msec initial delay we expect to find 3 PLL lock 1175 * indications within 10 msec for successful PLL detection. 1176 */ 1177 udelay(500); 1178 count = 0; 1179 for (i = 500; i <= 10000; i += 50) { /* max 10 msec */ 1180 pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &lock); 1181 if (lock&CLKCTL_LOCK) { 1182 if (++count >= 3) 1183 break; 1184 } 1185 udelay(50); 1186 } 1187 if (count < 3) { 1188 if (clksrc == 1) { /* explicitly asked for PLL hence bail out */ 1189 IRDA_ERROR("%s: no PLL or failed to lock!\n", 1190 __FUNCTION__); 1191 clkctl = CLKCTL_CLKSTP; 1192 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1193 return -1; 1194 } 1195 else /* was: clksrc=0(auto) */ 1196 clksrc = 3; /* fallback to 40MHz XCLK (OB800) */ 1197 1198 IRDA_DEBUG(0, "%s: PLL not locked, fallback to clksrc=%d\n", 1199 __FUNCTION__, clksrc); 1200 } 1201 else 1202 clksrc = 1; /* got successful PLL lock */ 1203 } 1204 1205 if (clksrc != 1) { 1206 /* we get here if either no PLL detected in auto-mode or 1207 an external clock source was explicitly specified */ 1208 1209 clkctl = CLKCTL_EXTCLK | CLKCTL_CLKSTP; 1210 if (clksrc == 3) 1211 clkctl |= CLKCTL_XCKSEL; 1212 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1213 1214 /* no way to test for working XCLK */ 1215 } 1216 else 1217 pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl); 1218 1219 /* ok, now going to connect the chip with the clock source */ 1220 1221 clkctl &= ~CLKCTL_CLKSTP; 1222 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1223 1224 return 0; 1225} 1226 1227static void vlsi_stop_clock(struct pci_dev *pdev) 1228{ 1229 u8 clkctl; 1230 1231 /* disconnect chip from clock source */ 1232 pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl); 1233 clkctl |= CLKCTL_CLKSTP; 1234 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1235 1236 /* disable all clock sources */ 1237 clkctl &= ~(CLKCTL_EXTCLK | CLKCTL_PD_INV); 1238 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1239} 1240 1241/********************************************************/ 1242 1243/* writing all-zero to the VLSI PCI IO register area seems to prevent 1244 * some occasional situations where the hardware fails (symptoms are 1245 * what appears as stalled tx/rx state machines, i.e. everything ok for 1246 * receive or transmit but hw makes no progress or is unable to access 1247 * the bus memory locations). 1248 * Best place to call this is immediately after/before the internal clock 1249 * gets started/stopped. 1250 */ 1251 1252static inline void vlsi_clear_regs(unsigned iobase) 1253{ 1254 unsigned i; 1255 const unsigned chip_io_extent = 32; 1256 1257 for (i = 0; i < chip_io_extent; i += sizeof(u16)) 1258 outw(0, iobase + i); 1259} 1260 1261static int vlsi_init_chip(struct pci_dev *pdev) 1262{ 1263 struct net_device *ndev = pci_get_drvdata(pdev); 1264 vlsi_irda_dev_t *idev = ndev->priv; 1265 unsigned iobase; 1266 u16 ptr; 1267 1268 /* start the clock and clean the registers */ 1269 1270 if (vlsi_start_clock(pdev)) { 1271 IRDA_ERROR("%s: no valid clock source\n", __FUNCTION__); 1272 return -1; 1273 } 1274 iobase = ndev->base_addr; 1275 vlsi_clear_regs(iobase); 1276 1277 outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* w/c pending IRQ, disable all INT */ 1278 1279 outw(0, iobase+VLSI_PIO_IRENABLE); /* disable IrPHY-interface */ 1280 1281 /* disable everything, particularly IRCFG_MSTR - (also resetting the RING_PTR) */ 1282 1283 outw(0, iobase+VLSI_PIO_IRCFG); 1284 wmb(); 1285 1286 outw(MAX_PACKET_LENGTH, iobase+VLSI_PIO_MAXPKT); /* max possible value=0x0fff */ 1287 1288 outw(BUS_TO_RINGBASE(idev->busaddr), iobase+VLSI_PIO_RINGBASE); 1289 1290 outw(TX_RX_TO_RINGSIZE(idev->tx_ring->size, idev->rx_ring->size), 1291 iobase+VLSI_PIO_RINGSIZE); 1292 1293 ptr = inw(iobase+VLSI_PIO_RINGPTR); 1294 atomic_set(&idev->rx_ring->head, RINGPTR_GET_RX(ptr)); 1295 atomic_set(&idev->rx_ring->tail, RINGPTR_GET_RX(ptr)); 1296 atomic_set(&idev->tx_ring->head, RINGPTR_GET_TX(ptr)); 1297 atomic_set(&idev->tx_ring->tail, RINGPTR_GET_TX(ptr)); 1298 1299 vlsi_set_baud(idev, iobase); /* idev->new_baud used as provided by caller */ 1300 1301 outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* just in case - w/c pending IRQ's */ 1302 wmb(); 1303 1304 /* DO NOT BLINDLY ENABLE IRINTR_ACTEN! 1305 * basically every received pulse fires an ACTIVITY-INT 1306 * leading to >>1000 INT's per second instead of few 10 1307 */ 1308 1309 outb(IRINTR_RPKTEN|IRINTR_TPKTEN, iobase+VLSI_PIO_IRINTR); 1310 1311 return 0; 1312} 1313 1314static int vlsi_start_hw(vlsi_irda_dev_t *idev) 1315{ 1316 struct pci_dev *pdev = idev->pdev; 1317 struct net_device *ndev = pci_get_drvdata(pdev); 1318 unsigned iobase = ndev->base_addr; 1319 u8 byte; 1320 1321 /* we don't use the legacy UART, disable its address decoding */ 1322 1323 pci_read_config_byte(pdev, VLSI_PCI_IRMISC, &byte); 1324 byte &= ~(IRMISC_UARTEN | IRMISC_UARTTST); 1325 pci_write_config_byte(pdev, VLSI_PCI_IRMISC, byte); 1326 1327 /* enable PCI busmaster access to our 16MB page */ 1328 1329 pci_write_config_byte(pdev, VLSI_PCI_MSTRPAGE, MSTRPAGE_VALUE); 1330 pci_set_master(pdev); 1331 1332 if (vlsi_init_chip(pdev) < 0) { 1333 pci_disable_device(pdev); 1334 return -1; 1335 } 1336 1337 vlsi_fill_rx(idev->rx_ring); 1338 1339 do_gettimeofday(&idev->last_rx); /* first mtt may start from now on */ 1340 1341 outw(0, iobase+VLSI_PIO_PROMPT); /* kick hw state machine */ 1342 1343 return 0; 1344} 1345 1346static int vlsi_stop_hw(vlsi_irda_dev_t *idev) 1347{ 1348 struct pci_dev *pdev = idev->pdev; 1349 struct net_device *ndev = pci_get_drvdata(pdev); 1350 unsigned iobase = ndev->base_addr; 1351 unsigned long flags; 1352 1353 spin_lock_irqsave(&idev->lock,flags); 1354 outw(0, iobase+VLSI_PIO_IRENABLE); 1355 outw(0, iobase+VLSI_PIO_IRCFG); /* disable everything */ 1356 1357 /* disable and w/c irqs */ 1358 outb(0, iobase+VLSI_PIO_IRINTR); 1359 wmb(); 1360 outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); 1361 spin_unlock_irqrestore(&idev->lock,flags); 1362 1363 vlsi_unarm_tx(idev); 1364 vlsi_unarm_rx(idev); 1365 1366 vlsi_clear_regs(iobase); 1367 vlsi_stop_clock(pdev); 1368 1369 pci_disable_device(pdev); 1370 1371 return 0; 1372} 1373 1374/**************************************************************/ 1375 1376static struct net_device_stats * vlsi_get_stats(struct net_device *ndev) 1377{ 1378 vlsi_irda_dev_t *idev = ndev->priv; 1379 1380 return &idev->stats; 1381} 1382 1383static void vlsi_tx_timeout(struct net_device *ndev) 1384{ 1385 vlsi_irda_dev_t *idev = ndev->priv; 1386 1387 1388 vlsi_reg_debug(ndev->base_addr, __FUNCTION__); 1389 vlsi_ring_debug(idev->tx_ring); 1390 1391 if (netif_running(ndev)) 1392 netif_stop_queue(ndev); 1393 1394 vlsi_stop_hw(idev); 1395 1396 /* now simply restart the whole thing */ 1397 1398 if (!idev->new_baud) 1399 idev->new_baud = idev->baud; /* keep current baudrate */ 1400 1401 if (vlsi_start_hw(idev)) 1402 IRDA_ERROR("%s: failed to restart hw - %s(%s) unusable!\n", 1403 __FUNCTION__, pci_name(idev->pdev), ndev->name); 1404 else 1405 netif_start_queue(ndev); 1406} 1407 1408static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) 1409{ 1410 vlsi_irda_dev_t *idev = ndev->priv; 1411 struct if_irda_req *irq = (struct if_irda_req *) rq; 1412 unsigned long flags; 1413 u16 fifocnt; 1414 int ret = 0; 1415 1416 switch (cmd) { 1417 case SIOCSBANDWIDTH: 1418 if (!capable(CAP_NET_ADMIN)) { 1419 ret = -EPERM; 1420 break; 1421 } 1422 spin_lock_irqsave(&idev->lock, flags); 1423 idev->new_baud = irq->ifr_baudrate; 1424 /* when called from userland there might be a minor race window here 1425 * if the stack tries to change speed concurrently - which would be 1426 * pretty strange anyway with the userland having full control... 1427 */ 1428 vlsi_set_baud(idev, ndev->base_addr); 1429 spin_unlock_irqrestore(&idev->lock, flags); 1430 break; 1431 case SIOCSMEDIABUSY: 1432 if (!capable(CAP_NET_ADMIN)) { 1433 ret = -EPERM; 1434 break; 1435 } 1436 irda_device_set_media_busy(ndev, TRUE); 1437 break; 1438 case SIOCGRECEIVING: 1439 /* the best we can do: check whether there are any bytes in rx fifo. 1440 * The trustable window (in case some data arrives just afterwards) 1441 * may be as short as 1usec or so at 4Mbps. 1442 */ 1443 fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 1444 irq->ifr_receiving = (fifocnt!=0) ? 1 : 0; 1445 break; 1446 default: 1447 IRDA_WARNING("%s: notsupp - cmd=%04x\n", 1448 __FUNCTION__, cmd); 1449 ret = -EOPNOTSUPP; 1450 } 1451 1452 return ret; 1453} 1454 1455/********************************************************/ 1456 1457static irqreturn_t vlsi_interrupt(int irq, void *dev_instance) 1458{ 1459 struct net_device *ndev = dev_instance; 1460 vlsi_irda_dev_t *idev = ndev->priv; 1461 unsigned iobase; 1462 u8 irintr; 1463 int boguscount = 5; 1464 unsigned long flags; 1465 int handled = 0; 1466 1467 iobase = ndev->base_addr; 1468 spin_lock_irqsave(&idev->lock,flags); 1469 do { 1470 irintr = inb(iobase+VLSI_PIO_IRINTR); 1471 mb(); 1472 outb(irintr, iobase+VLSI_PIO_IRINTR); /* acknowledge asap */ 1473 1474 if (!(irintr&=IRINTR_INT_MASK)) /* not our INT - probably shared */ 1475 break; 1476 1477 handled = 1; 1478 1479 if (unlikely(!(irintr & ~IRINTR_ACTIVITY))) 1480 break; /* nothing todo if only activity */ 1481 1482 if (irintr&IRINTR_RPKTINT) 1483 vlsi_rx_interrupt(ndev); 1484 1485 if (irintr&IRINTR_TPKTINT) 1486 vlsi_tx_interrupt(ndev); 1487 1488 } while (--boguscount > 0); 1489 spin_unlock_irqrestore(&idev->lock,flags); 1490 1491 if (boguscount <= 0) 1492 IRDA_MESSAGE("%s: too much work in interrupt!\n", 1493 __FUNCTION__); 1494 return IRQ_RETVAL(handled); 1495} 1496 1497/********************************************************/ 1498 1499static int vlsi_open(struct net_device *ndev) 1500{ 1501 vlsi_irda_dev_t *idev = ndev->priv; 1502 int err = -EAGAIN; 1503 char hwname[32]; 1504 1505 if (pci_request_regions(idev->pdev, drivername)) { 1506 IRDA_WARNING("%s: io resource busy\n", __FUNCTION__); 1507 goto errout; 1508 } 1509 ndev->base_addr = pci_resource_start(idev->pdev,0); 1510 ndev->irq = idev->pdev->irq; 1511 1512 /* under some rare occasions the chip apparently comes up with 1513 * IRQ's pending. We better w/c pending IRQ and disable them all 1514 */ 1515 1516 outb(IRINTR_INT_MASK, ndev->base_addr+VLSI_PIO_IRINTR); 1517 1518 if (request_irq(ndev->irq, vlsi_interrupt, IRQF_SHARED, 1519 drivername, ndev)) { 1520 IRDA_WARNING("%s: couldn't get IRQ: %d\n", 1521 __FUNCTION__, ndev->irq); 1522 goto errout_io; 1523 } 1524 1525 if ((err = vlsi_create_hwif(idev)) != 0) 1526 goto errout_irq; 1527 1528 sprintf(hwname, "VLSI-FIR @ 0x%04x", (unsigned)ndev->base_addr); 1529 idev->irlap = irlap_open(ndev,&idev->qos,hwname); 1530 if (!idev->irlap) 1531 goto errout_free_ring; 1532 1533 do_gettimeofday(&idev->last_rx); /* first mtt may start from now on */ 1534 1535 idev->new_baud = 9600; /* start with IrPHY using 9600(SIR) mode */ 1536 1537 if ((err = vlsi_start_hw(idev)) != 0) 1538 goto errout_close_irlap; 1539 1540 netif_start_queue(ndev); 1541 1542 IRDA_MESSAGE("%s: device %s operational\n", __FUNCTION__, ndev->name); 1543 1544 return 0; 1545 1546errout_close_irlap: 1547 irlap_close(idev->irlap); 1548errout_free_ring: 1549 vlsi_destroy_hwif(idev); 1550errout_irq: 1551 free_irq(ndev->irq,ndev); 1552errout_io: 1553 pci_release_regions(idev->pdev); 1554errout: 1555 return err; 1556} 1557 1558static int vlsi_close(struct net_device *ndev) 1559{ 1560 vlsi_irda_dev_t *idev = ndev->priv; 1561 1562 netif_stop_queue(ndev); 1563 1564 if (idev->irlap) 1565 irlap_close(idev->irlap); 1566 idev->irlap = NULL; 1567 1568 vlsi_stop_hw(idev); 1569 1570 vlsi_destroy_hwif(idev); 1571 1572 free_irq(ndev->irq,ndev); 1573 1574 pci_release_regions(idev->pdev); 1575 1576 IRDA_MESSAGE("%s: device %s stopped\n", __FUNCTION__, ndev->name); 1577 1578 return 0; 1579} 1580 1581static int vlsi_irda_init(struct net_device *ndev) 1582{ 1583 vlsi_irda_dev_t *idev = ndev->priv; 1584 struct pci_dev *pdev = idev->pdev; 1585 1586 SET_MODULE_OWNER(ndev); 1587 1588 ndev->irq = pdev->irq; 1589 ndev->base_addr = pci_resource_start(pdev,0); 1590 1591 /* PCI busmastering 1592 * see include file for details why we need these 2 masks, in this order! 1593 */ 1594 1595 if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW) 1596 || pci_set_dma_mask(pdev,DMA_MASK_MSTRPAGE)) { 1597 IRDA_ERROR("%s: aborting due to PCI BM-DMA address limitations\n", __FUNCTION__); 1598 return -1; 1599 } 1600 1601 irda_init_max_qos_capabilies(&idev->qos); 1602 1603 /* the VLSI82C147 does not support 576000! */ 1604 1605 idev->qos.baud_rate.bits = IR_2400 | IR_9600 1606 | IR_19200 | IR_38400 | IR_57600 | IR_115200 1607 | IR_1152000 | (IR_4000000 << 8); 1608 1609 idev->qos.min_turn_time.bits = qos_mtt_bits; 1610 1611 irda_qos_bits_to_value(&idev->qos); 1612 1613 /* currently no public media definitions for IrDA */ 1614 1615 ndev->flags |= IFF_PORTSEL | IFF_AUTOMEDIA; 1616 ndev->if_port = IF_PORT_UNKNOWN; 1617 1618 ndev->open = vlsi_open; 1619 ndev->stop = vlsi_close; 1620 ndev->get_stats = vlsi_get_stats; 1621 ndev->hard_start_xmit = vlsi_hard_start_xmit; 1622 ndev->do_ioctl = vlsi_ioctl; 1623 ndev->tx_timeout = vlsi_tx_timeout; 1624 ndev->watchdog_timeo = 500*HZ/1000; /* max. allowed turn time for IrLAP */ 1625 1626 SET_NETDEV_DEV(ndev, &pdev->dev); 1627 1628 return 0; 1629} 1630 1631/**************************************************************/ 1632 1633static int __devinit 1634vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id) 1635{ 1636 struct net_device *ndev; 1637 vlsi_irda_dev_t *idev; 1638 1639 if (pci_enable_device(pdev)) 1640 goto out; 1641 else 1642 pdev->current_state = 0; /* hw must be running now */ 1643 1644 IRDA_MESSAGE("%s: IrDA PCI controller %s detected\n", 1645 drivername, pci_name(pdev)); 1646 1647 if ( !pci_resource_start(pdev,0) 1648 || !(pci_resource_flags(pdev,0) & IORESOURCE_IO) ) { 1649 IRDA_ERROR("%s: bar 0 invalid", __FUNCTION__); 1650 goto out_disable; 1651 } 1652 1653 ndev = alloc_irdadev(sizeof(*idev)); 1654 if (ndev==NULL) { 1655 IRDA_ERROR("%s: Unable to allocate device memory.\n", 1656 __FUNCTION__); 1657 goto out_disable; 1658 } 1659 1660 idev = ndev->priv; 1661 1662 spin_lock_init(&idev->lock); 1663 init_MUTEX(&idev->sem); 1664 down(&idev->sem); 1665 idev->pdev = pdev; 1666 1667 if (vlsi_irda_init(ndev) < 0) 1668 goto out_freedev; 1669 1670 if (register_netdev(ndev) < 0) { 1671 IRDA_ERROR("%s: register_netdev failed\n", __FUNCTION__); 1672 goto out_freedev; 1673 } 1674 1675 if (vlsi_proc_root != NULL) { 1676 struct proc_dir_entry *ent; 1677 1678 ent = create_proc_entry(ndev->name, S_IFREG|S_IRUGO, vlsi_proc_root); 1679 if (!ent) { 1680 IRDA_WARNING("%s: failed to create proc entry\n", 1681 __FUNCTION__); 1682 } else { 1683 ent->data = ndev; 1684 ent->proc_fops = VLSI_PROC_FOPS; 1685 ent->size = 0; 1686 } 1687 idev->proc_entry = ent; 1688 } 1689 IRDA_MESSAGE("%s: registered device %s\n", drivername, ndev->name); 1690 1691 pci_set_drvdata(pdev, ndev); 1692 up(&idev->sem); 1693 1694 return 0; 1695 1696out_freedev: 1697 up(&idev->sem); 1698 free_netdev(ndev); 1699out_disable: 1700 pci_disable_device(pdev); 1701out: 1702 pci_set_drvdata(pdev, NULL); 1703 return -ENODEV; 1704} 1705 1706static void __devexit vlsi_irda_remove(struct pci_dev *pdev) 1707{ 1708 struct net_device *ndev = pci_get_drvdata(pdev); 1709 vlsi_irda_dev_t *idev; 1710 1711 if (!ndev) { 1712 IRDA_ERROR("%s: lost netdevice?\n", drivername); 1713 return; 1714 } 1715 1716 unregister_netdev(ndev); 1717 1718 idev = ndev->priv; 1719 down(&idev->sem); 1720 if (idev->proc_entry) { 1721 remove_proc_entry(ndev->name, vlsi_proc_root); 1722 idev->proc_entry = NULL; 1723 } 1724 up(&idev->sem); 1725 1726 free_netdev(ndev); 1727 1728 pci_set_drvdata(pdev, NULL); 1729 1730 IRDA_MESSAGE("%s: %s removed\n", drivername, pci_name(pdev)); 1731} 1732 1733#ifdef CONFIG_PM 1734 1735/* The Controller doesn't provide PCI PM capabilities as defined by PCI specs. 1736 * Some of the Linux PCI-PM code however depends on this, for example in 1737 * pci_set_power_state(). So we have to take care to perform the required 1738 * operations on our own (particularly reflecting the pdev->current_state) 1739 * otherwise we might get cheated by pci-pm. 1740 */ 1741 1742 1743static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state) 1744{ 1745 struct net_device *ndev = pci_get_drvdata(pdev); 1746 vlsi_irda_dev_t *idev; 1747 1748 if (!ndev) { 1749 IRDA_ERROR("%s - %s: no netdevice \n", 1750 __FUNCTION__, pci_name(pdev)); 1751 return 0; 1752 } 1753 idev = ndev->priv; 1754 down(&idev->sem); 1755 if (pdev->current_state != 0) { /* already suspended */ 1756 if (state.event > pdev->current_state) { /* simply go deeper */ 1757 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 1758 pdev->current_state = state.event; 1759 } 1760 else 1761 IRDA_ERROR("%s - %s: invalid suspend request %u -> %u\n", __FUNCTION__, pci_name(pdev), pdev->current_state, state.event); 1762 up(&idev->sem); 1763 return 0; 1764 } 1765 1766 if (netif_running(ndev)) { 1767 netif_device_detach(ndev); 1768 vlsi_stop_hw(idev); 1769 pci_save_state(pdev); 1770 if (!idev->new_baud) 1771 /* remember speed settings to restore on resume */ 1772 idev->new_baud = idev->baud; 1773 } 1774 1775 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 1776 pdev->current_state = state.event; 1777 idev->resume_ok = 1; 1778 up(&idev->sem); 1779 return 0; 1780} 1781 1782static int vlsi_irda_resume(struct pci_dev *pdev) 1783{ 1784 struct net_device *ndev = pci_get_drvdata(pdev); 1785 vlsi_irda_dev_t *idev; 1786 1787 if (!ndev) { 1788 IRDA_ERROR("%s - %s: no netdevice \n", 1789 __FUNCTION__, pci_name(pdev)); 1790 return 0; 1791 } 1792 idev = ndev->priv; 1793 down(&idev->sem); 1794 if (pdev->current_state == 0) { 1795 up(&idev->sem); 1796 IRDA_WARNING("%s - %s: already resumed\n", 1797 __FUNCTION__, pci_name(pdev)); 1798 return 0; 1799 } 1800 1801 pci_set_power_state(pdev, PCI_D0); 1802 pdev->current_state = PM_EVENT_ON; 1803 1804 if (!idev->resume_ok) { 1805 /* should be obsolete now - but used to happen due to: 1806 * - pci layer initially setting pdev->current_state = 4 (unknown) 1807 * - pci layer did not walk the save_state-tree (might be APM problem) 1808 * so we could not refuse to suspend from undefined state 1809 * - vlsi_irda_suspend detected invalid state and refused to save 1810 * configuration for resume - but was too late to stop suspending 1811 * - vlsi_irda_resume got screwed when trying to resume from garbage 1812 * 1813 * now we explicitly set pdev->current_state = 0 after enabling the 1814 * device and independently resume_ok should catch any garbage config. 1815 */ 1816 IRDA_WARNING("%s - hm, nothing to resume?\n", __FUNCTION__); 1817 up(&idev->sem); 1818 return 0; 1819 } 1820 1821 if (netif_running(ndev)) { 1822 pci_restore_state(pdev); 1823 vlsi_start_hw(idev); 1824 netif_device_attach(ndev); 1825 } 1826 idev->resume_ok = 0; 1827 up(&idev->sem); 1828 return 0; 1829} 1830 1831#endif /* CONFIG_PM */ 1832 1833/*********************************************************/ 1834 1835static struct pci_driver vlsi_irda_driver = { 1836 .name = drivername, 1837 .id_table = vlsi_irda_table, 1838 .probe = vlsi_irda_probe, 1839 .remove = __devexit_p(vlsi_irda_remove), 1840#ifdef CONFIG_PM 1841 .suspend = vlsi_irda_suspend, 1842 .resume = vlsi_irda_resume, 1843#endif 1844}; 1845 1846#define PROC_DIR ("driver/" DRIVER_NAME) 1847 1848static int __init vlsi_mod_init(void) 1849{ 1850 int i, ret; 1851 1852 if (clksrc < 0 || clksrc > 3) { 1853 IRDA_ERROR("%s: invalid clksrc=%d\n", drivername, clksrc); 1854 return -1; 1855 } 1856 1857 for (i = 0; i < 2; i++) { 1858 switch(ringsize[i]) { 1859 case 4: 1860 case 8: 1861 case 16: 1862 case 32: 1863 case 64: 1864 break; 1865 default: 1866 IRDA_WARNING("%s: invalid %s ringsize %d, using default=8", drivername, (i)?"rx":"tx", ringsize[i]); 1867 ringsize[i] = 8; 1868 break; 1869 } 1870 } 1871 1872 sirpulse = !!sirpulse; 1873 1874 /* proc_mkdir returns NULL if !CONFIG_PROC_FS. 1875 * Failure to create the procfs entry is handled like running 1876 * without procfs - it's not required for the driver to work. 1877 */ 1878 vlsi_proc_root = proc_mkdir(PROC_DIR, NULL); 1879 if (vlsi_proc_root) { 1880 /* protect registered procdir against module removal. 1881 * Because we are in the module init path there's no race 1882 * window after create_proc_entry (and no barrier needed). 1883 */ 1884 vlsi_proc_root->owner = THIS_MODULE; 1885 } 1886 1887 ret = pci_register_driver(&vlsi_irda_driver); 1888 1889 if (ret && vlsi_proc_root) 1890 remove_proc_entry(PROC_DIR, NULL); 1891 return ret; 1892 1893} 1894 1895static void __exit vlsi_mod_exit(void) 1896{ 1897 pci_unregister_driver(&vlsi_irda_driver); 1898 if (vlsi_proc_root) 1899 remove_proc_entry(PROC_DIR, NULL); 1900} 1901 1902module_init(vlsi_mod_init); 1903module_exit(vlsi_mod_exit); 1904