/*********************************************************************
 *
 *	vlsi_ir.c:	VLSI82C147 PCI IrDA controller driver for Linux
 *
 *	Copyright (c) 2001-2003 Martin Diehl
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License as
 *	published by the Free Software Foundation; either version 2 of
 *	the License, or (at your option) any later version.
 *
 *	This program is distributed in the hope that it will be useful,
 *	but WITHOUT ANY WARRANTY; without even the implied warranty of
 *	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *	GNU General Public License for more details.
 *
 *	You should have received a copy of the GNU General Public License
 *	along with this program; if not, write to the Free Software
 *	Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 *	MA 02111-1307 USA
 *
 ********************************************************************/

#include <linux/module.h>

#define DRIVER_NAME		"vlsi_ir"
#define DRIVER_VERSION		"v0.5"
#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"

MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

/********************************************************/

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>

#include <net/irda/irda.h>
#include <net/irda/irda_device.h>
#include <net/irda/wrapper.h>
#include <net/irda/crc.h>

#include "vlsi_ir.h"

/********************************************************/

static /* const */ char drivername[] = DRIVER_NAME;

static DEFINE_PCI_DEVICE_TABLE(vlsi_irda_table) = {
	{
		.class =	PCI_CLASS_WIRELESS_IRDA << 8,
		.class_mask =	PCI_CLASS_SUBCLASS_MASK << 8,
		.vendor =	PCI_VENDOR_ID_VLSI,
		.device =	PCI_DEVICE_ID_VLSI_82C147,
		.subvendor =	PCI_ANY_ID,
		.subdevice =	PCI_ANY_ID,
	},
	{ /* all zeroes */ }
};

MODULE_DEVICE_TABLE(pci, vlsi_irda_table);

/********************************************************/

/*	clksrc: which clock source to be used
 *		0: auto - try PLL, fallback to 40MHz XCLK
 *		1: on-chip 48MHz PLL
 *		2: external 48MHz XCLK
 *		3: external 40MHz XCLK (HP OB-800)
 */

static int clksrc = 0;			/* default is 0(auto) */
module_param(clksrc, int, 0);
MODULE_PARM_DESC(clksrc, "clock input source selection");

/*	ringsize: size of the tx and rx descriptor rings
 *		independent for tx and rx
 *		specify as ringsize=tx[,rx]
 *		allowed values: 4, 8, 16, 32, 64
 *		Due to the IrDA 1.x max. allowed window size=7,
 *		there should be no gain when using rings larger than 8
 */

static int ringsize[] = {8,8};		/* default is tx=8 / rx=8 */
module_param_array(ringsize, int, NULL, 0);
MODULE_PARM_DESC(ringsize, "TX, RX ring descriptor size");
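/* For reference, a hypothetical invocation overriding the four module
 * parameters defined in this section (the values shown here are just
 * the defaults):
 *
 *	modprobe vlsi_ir clksrc=0 ringsize=8,8 sirpulse=1 qos_mtt_bits=0x07
 */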
/*	sirpulse: tuning of the SIR pulse width within IrPHY 1.3 limits
 *		0: very short, 1.5us (exception: 6us at 2.4 kbaud)
 *		1: nominal 3/16 bittime width
 *	note: IrDA compliant peer devices should be happy regardless
 *		which one is used. The primary goal is to save some power
 *		on the sender's side - at 9.6 kbaud for example the short
 *		pulse width saves more than 90% of the transmitted IR power.
 */

static int sirpulse = 1;		/* default is 3/16 bittime */
module_param(sirpulse, int, 0);
MODULE_PARM_DESC(sirpulse, "SIR pulse width tuning");

/*	qos_mtt_bits: encoded min-turn-time value we require the peer device
 *		to use before transmitting to us. "Type 1" (per-station)
 *		bitfield according to IrLAP definition (section 6.6.8)
 *		Don't know which transceiver is used by my OB800 - the
 *		pretty common HP HDLS-1100 requires 1 msec - so let's use that.
 */

static int qos_mtt_bits = 0x07;		/* default is 1 ms or more */
module_param(qos_mtt_bits, int, 0);
MODULE_PARM_DESC(qos_mtt_bits, "IrLAP bitfield representing min-turn-time");

/********************************************************/

static void vlsi_reg_debug(unsigned iobase, const char *s)
{
	int	i;

	printk(KERN_DEBUG "%s: ", s);
	for (i = 0; i < 0x20; i++)
		printk("%02x", (unsigned)inb((iobase+i)));
	printk("\n");
}

static void vlsi_ring_debug(struct vlsi_ring *r)
{
	struct ring_descr *rd;
	unsigned i;

	printk(KERN_DEBUG "%s - ring %p / size %u / mask 0x%04x / len %u / dir %d / hw %p\n",
		__func__, r, r->size, r->mask, r->len, r->dir, r->rd[0].hw);
	printk(KERN_DEBUG "%s - head = %d / tail = %d\n", __func__,
		atomic_read(&r->head) & r->mask, atomic_read(&r->tail) & r->mask);
	for (i = 0; i < r->size; i++) {
		rd = &r->rd[i];
		printk(KERN_DEBUG "%s - ring descr %u: ", __func__, i);
		printk("skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw);
		printk(KERN_DEBUG "%s - hw: status=%02x count=%u addr=0x%08x\n",
			__func__, (unsigned) rd_get_status(rd),
			(unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd));
	}
}

/********************************************************/

/* needed regardless of CONFIG_PROC_FS */
static struct proc_dir_entry *vlsi_proc_root = NULL;

#ifdef CONFIG_PROC_FS

static void vlsi_proc_pdev(struct seq_file *seq, struct pci_dev *pdev)
{
	unsigned iobase = pci_resource_start(pdev, 0);
	unsigned i;

	seq_printf(seq, "\n%s (vid/did: [%04x:%04x])\n",
		   pci_name(pdev), (int)pdev->vendor, (int)pdev->device);
	seq_printf(seq, "pci-power-state: %u\n", (unsigned) pdev->current_state);
	seq_printf(seq, "resources: irq=%u / io=0x%04x / dma_mask=0x%016Lx\n",
		   pdev->irq, (unsigned)pci_resource_start(pdev, 0), (unsigned long long)pdev->dma_mask);
	seq_printf(seq, "hw registers: ");
	for (i = 0; i < 0x20; i++)
		seq_printf(seq, "%02x", (unsigned)inb((iobase+i)));
	seq_printf(seq, "\n");
}

static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev)
{
	vlsi_irda_dev_t *idev = netdev_priv(ndev);
	u8 byte;
	u16 word;
	unsigned delta1, delta2;
	struct timeval now;
	unsigned iobase = ndev->base_addr;

	seq_printf(seq, "\n%s link state: %s / %s / %s / %s\n", ndev->name,
		   netif_device_present(ndev) ? "attached" : "detached",
		   netif_running(ndev) ? "running" : "not running",
		   netif_carrier_ok(ndev) ? "carrier ok" : "no carrier",
		   netif_queue_stopped(ndev) ? "queue stopped" : "queue running");
"queue stopped" : "queue running"); 193 194 if (!netif_running(ndev)) 195 return; 196 197 seq_printf(seq, "\nhw-state:\n"); 198 pci_read_config_byte(idev->pdev, VLSI_PCI_IRMISC, &byte); 199 seq_printf(seq, "IRMISC:%s%s%s uart%s", 200 (byte&IRMISC_IRRAIL) ? " irrail" : "", 201 (byte&IRMISC_IRPD) ? " irpd" : "", 202 (byte&IRMISC_UARTTST) ? " uarttest" : "", 203 (byte&IRMISC_UARTEN) ? "@" : " disabled\n"); 204 if (byte&IRMISC_UARTEN) { 205 seq_printf(seq, "0x%s\n", 206 (byte&2) ? ((byte&1) ? "3e8" : "2e8") 207 : ((byte&1) ? "3f8" : "2f8")); 208 } 209 pci_read_config_byte(idev->pdev, VLSI_PCI_CLKCTL, &byte); 210 seq_printf(seq, "CLKCTL: PLL %s%s%s / clock %s / wakeup %s\n", 211 (byte&CLKCTL_PD_INV) ? "powered" : "down", 212 (byte&CLKCTL_LOCK) ? " locked" : "", 213 (byte&CLKCTL_EXTCLK) ? ((byte&CLKCTL_XCKSEL)?" / 40 MHz XCLK":" / 48 MHz XCLK") : "", 214 (byte&CLKCTL_CLKSTP) ? "stopped" : "running", 215 (byte&CLKCTL_WAKE) ? "enabled" : "disabled"); 216 pci_read_config_byte(idev->pdev, VLSI_PCI_MSTRPAGE, &byte); 217 seq_printf(seq, "MSTRPAGE: 0x%02x\n", (unsigned)byte); 218 219 byte = inb(iobase+VLSI_PIO_IRINTR); 220 seq_printf(seq, "IRINTR:%s%s%s%s%s%s%s%s\n", 221 (byte&IRINTR_ACTEN) ? " ACTEN" : "", 222 (byte&IRINTR_RPKTEN) ? " RPKTEN" : "", 223 (byte&IRINTR_TPKTEN) ? " TPKTEN" : "", 224 (byte&IRINTR_OE_EN) ? " OE_EN" : "", 225 (byte&IRINTR_ACTIVITY) ? " ACTIVITY" : "", 226 (byte&IRINTR_RPKTINT) ? " RPKTINT" : "", 227 (byte&IRINTR_TPKTINT) ? " TPKTINT" : "", 228 (byte&IRINTR_OE_INT) ? " OE_INT" : ""); 229 word = inw(iobase+VLSI_PIO_RINGPTR); 230 seq_printf(seq, "RINGPTR: rx=%u / tx=%u\n", RINGPTR_GET_RX(word), RINGPTR_GET_TX(word)); 231 word = inw(iobase+VLSI_PIO_RINGBASE); 232 seq_printf(seq, "RINGBASE: busmap=0x%08x\n", 233 ((unsigned)word << 10)|(MSTRPAGE_VALUE<<24)); 234 word = inw(iobase+VLSI_PIO_RINGSIZE); 235 seq_printf(seq, "RINGSIZE: rx=%u / tx=%u\n", RINGSIZE_TO_RXSIZE(word), 236 RINGSIZE_TO_TXSIZE(word)); 237 238 word = inw(iobase+VLSI_PIO_IRCFG); 239 seq_printf(seq, "IRCFG:%s%s%s%s%s%s%s%s%s%s%s%s%s\n", 240 (word&IRCFG_LOOP) ? " LOOP" : "", 241 (word&IRCFG_ENTX) ? " ENTX" : "", 242 (word&IRCFG_ENRX) ? " ENRX" : "", 243 (word&IRCFG_MSTR) ? " MSTR" : "", 244 (word&IRCFG_RXANY) ? " RXANY" : "", 245 (word&IRCFG_CRC16) ? " CRC16" : "", 246 (word&IRCFG_FIR) ? " FIR" : "", 247 (word&IRCFG_MIR) ? " MIR" : "", 248 (word&IRCFG_SIR) ? " SIR" : "", 249 (word&IRCFG_SIRFILT) ? " SIRFILT" : "", 250 (word&IRCFG_SIRTEST) ? " SIRTEST" : "", 251 (word&IRCFG_TXPOL) ? " TXPOL" : "", 252 (word&IRCFG_RXPOL) ? " RXPOL" : ""); 253 word = inw(iobase+VLSI_PIO_IRENABLE); 254 seq_printf(seq, "IRENABLE:%s%s%s%s%s%s%s%s\n", 255 (word&IRENABLE_PHYANDCLOCK) ? " PHYANDCLOCK" : "", 256 (word&IRENABLE_CFGER) ? " CFGERR" : "", 257 (word&IRENABLE_FIR_ON) ? " FIR_ON" : "", 258 (word&IRENABLE_MIR_ON) ? " MIR_ON" : "", 259 (word&IRENABLE_SIR_ON) ? " SIR_ON" : "", 260 (word&IRENABLE_ENTXST) ? " ENTXST" : "", 261 (word&IRENABLE_ENRXST) ? " ENRXST" : "", 262 (word&IRENABLE_CRC16_ON) ? 
" CRC16_ON" : ""); 263 word = inw(iobase+VLSI_PIO_PHYCTL); 264 seq_printf(seq, "PHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n", 265 (unsigned)PHYCTL_TO_BAUD(word), 266 (unsigned)PHYCTL_TO_PLSWID(word), 267 (unsigned)PHYCTL_TO_PREAMB(word)); 268 word = inw(iobase+VLSI_PIO_NPHYCTL); 269 seq_printf(seq, "NPHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n", 270 (unsigned)PHYCTL_TO_BAUD(word), 271 (unsigned)PHYCTL_TO_PLSWID(word), 272 (unsigned)PHYCTL_TO_PREAMB(word)); 273 word = inw(iobase+VLSI_PIO_MAXPKT); 274 seq_printf(seq, "MAXPKT: max. rx packet size = %u\n", word); 275 word = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 276 seq_printf(seq, "RCVBCNT: rx-fifo filling level = %u\n", word); 277 278 seq_printf(seq, "\nsw-state:\n"); 279 seq_printf(seq, "IrPHY setup: %d baud - %s encoding\n", idev->baud, 280 (idev->mode==IFF_SIR)?"SIR":((idev->mode==IFF_MIR)?"MIR":"FIR")); 281 do_gettimeofday(&now); 282 if (now.tv_usec >= idev->last_rx.tv_usec) { 283 delta2 = now.tv_usec - idev->last_rx.tv_usec; 284 delta1 = 0; 285 } 286 else { 287 delta2 = 1000000 + now.tv_usec - idev->last_rx.tv_usec; 288 delta1 = 1; 289 } 290 seq_printf(seq, "last rx: %lu.%06u sec\n", 291 now.tv_sec - idev->last_rx.tv_sec - delta1, delta2); 292 293 seq_printf(seq, "RX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu", 294 ndev->stats.rx_packets, ndev->stats.rx_bytes, ndev->stats.rx_errors, 295 ndev->stats.rx_dropped); 296 seq_printf(seq, " / overrun=%lu / length=%lu / frame=%lu / crc=%lu\n", 297 ndev->stats.rx_over_errors, ndev->stats.rx_length_errors, 298 ndev->stats.rx_frame_errors, ndev->stats.rx_crc_errors); 299 seq_printf(seq, "TX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu / fifo=%lu\n", 300 ndev->stats.tx_packets, ndev->stats.tx_bytes, ndev->stats.tx_errors, 301 ndev->stats.tx_dropped, ndev->stats.tx_fifo_errors); 302 303} 304 305static void vlsi_proc_ring(struct seq_file *seq, struct vlsi_ring *r) 306{ 307 struct ring_descr *rd; 308 unsigned i, j; 309 int h, t; 310 311 seq_printf(seq, "size %u / mask 0x%04x / len %u / dir %d / hw %p\n", 312 r->size, r->mask, r->len, r->dir, r->rd[0].hw); 313 h = atomic_read(&r->head) & r->mask; 314 t = atomic_read(&r->tail) & r->mask; 315 seq_printf(seq, "head = %d / tail = %d ", h, t); 316 if (h == t) 317 seq_printf(seq, "(empty)\n"); 318 else { 319 if (((t+1)&r->mask) == h) 320 seq_printf(seq, "(full)\n"); 321 else 322 seq_printf(seq, "(level = %d)\n", ((unsigned)(t-h) & r->mask)); 323 rd = &r->rd[h]; 324 j = (unsigned) rd_get_count(rd); 325 seq_printf(seq, "current: rd = %d / status = %02x / len = %u\n", 326 h, (unsigned)rd_get_status(rd), j); 327 if (j > 0) { 328 seq_printf(seq, " data:"); 329 if (j > 20) 330 j = 20; 331 for (i = 0; i < j; i++) 332 seq_printf(seq, " %02x", (unsigned)((unsigned char *)rd->buf)[i]); 333 seq_printf(seq, "\n"); 334 } 335 } 336 for (i = 0; i < r->size; i++) { 337 rd = &r->rd[i]; 338 seq_printf(seq, "> ring descr %u: ", i); 339 seq_printf(seq, "skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw); 340 seq_printf(seq, " hw: status=%02x count=%u busaddr=0x%08x\n", 341 (unsigned) rd_get_status(rd), 342 (unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd)); 343 } 344} 345 346static int vlsi_seq_show(struct seq_file *seq, void *v) 347{ 348 struct net_device *ndev = seq->private; 349 vlsi_irda_dev_t *idev = netdev_priv(ndev); 350 unsigned long flags; 351 352 seq_printf(seq, "\n%s %s\n\n", DRIVER_NAME, DRIVER_VERSION); 353 seq_printf(seq, "clksrc: %s\n", 354 (clksrc>=2) ? 
((clksrc==3)?"40MHz XCLK":"48MHz XCLK") 355 : ((clksrc==1)?"48MHz PLL":"autodetect")); 356 seq_printf(seq, "ringsize: tx=%d / rx=%d\n", 357 ringsize[0], ringsize[1]); 358 seq_printf(seq, "sirpulse: %s\n", (sirpulse)?"3/16 bittime":"short"); 359 seq_printf(seq, "qos_mtt_bits: 0x%02x\n", (unsigned)qos_mtt_bits); 360 361 spin_lock_irqsave(&idev->lock, flags); 362 if (idev->pdev != NULL) { 363 vlsi_proc_pdev(seq, idev->pdev); 364 365 if (idev->pdev->current_state == 0) 366 vlsi_proc_ndev(seq, ndev); 367 else 368 seq_printf(seq, "\nPCI controller down - resume_ok = %d\n", 369 idev->resume_ok); 370 if (netif_running(ndev) && idev->rx_ring && idev->tx_ring) { 371 seq_printf(seq, "\n--------- RX ring -----------\n\n"); 372 vlsi_proc_ring(seq, idev->rx_ring); 373 seq_printf(seq, "\n--------- TX ring -----------\n\n"); 374 vlsi_proc_ring(seq, idev->tx_ring); 375 } 376 } 377 seq_printf(seq, "\n"); 378 spin_unlock_irqrestore(&idev->lock, flags); 379 380 return 0; 381} 382 383static int vlsi_seq_open(struct inode *inode, struct file *file) 384{ 385 return single_open(file, vlsi_seq_show, PDE(inode)->data); 386} 387 388static const struct file_operations vlsi_proc_fops = { 389 .owner = THIS_MODULE, 390 .open = vlsi_seq_open, 391 .read = seq_read, 392 .llseek = seq_lseek, 393 .release = single_release, 394}; 395 396#define VLSI_PROC_FOPS (&vlsi_proc_fops) 397 398#else /* CONFIG_PROC_FS */ 399#define VLSI_PROC_FOPS NULL 400#endif 401 402/********************************************************/ 403 404static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr_hw *hwmap, 405 unsigned size, unsigned len, int dir) 406{ 407 struct vlsi_ring *r; 408 struct ring_descr *rd; 409 unsigned i, j; 410 dma_addr_t busaddr; 411 412 if (!size || ((size-1)&size)!=0) /* must be >0 and power of 2 */ 413 return NULL; 414 415 r = kmalloc(sizeof(*r) + size * sizeof(struct ring_descr), GFP_KERNEL); 416 if (!r) 417 return NULL; 418 memset(r, 0, sizeof(*r)); 419 420 r->pdev = pdev; 421 r->dir = dir; 422 r->len = len; 423 r->rd = (struct ring_descr *)(r+1); 424 r->mask = size - 1; 425 r->size = size; 426 atomic_set(&r->head, 0); 427 atomic_set(&r->tail, 0); 428 429 for (i = 0; i < size; i++) { 430 rd = r->rd + i; 431 memset(rd, 0, sizeof(*rd)); 432 rd->hw = hwmap + i; 433 rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA); 434 if (rd->buf == NULL || 435 !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) { 436 if (rd->buf) { 437 IRDA_ERROR("%s: failed to create PCI-MAP for %p", 438 __func__, rd->buf); 439 kfree(rd->buf); 440 rd->buf = NULL; 441 } 442 for (j = 0; j < i; j++) { 443 rd = r->rd + j; 444 busaddr = rd_get_addr(rd); 445 rd_set_addr_status(rd, 0, 0); 446 if (busaddr) 447 pci_unmap_single(pdev, busaddr, len, dir); 448 kfree(rd->buf); 449 rd->buf = NULL; 450 } 451 kfree(r); 452 return NULL; 453 } 454 rd_set_addr_status(rd, busaddr, 0); 455 /* initially, the dma buffer is owned by the CPU */ 456 rd->skb = NULL; 457 } 458 return r; 459} 460 461static int vlsi_free_ring(struct vlsi_ring *r) 462{ 463 struct ring_descr *rd; 464 unsigned i; 465 dma_addr_t busaddr; 466 467 for (i = 0; i < r->size; i++) { 468 rd = r->rd + i; 469 if (rd->skb) 470 dev_kfree_skb_any(rd->skb); 471 busaddr = rd_get_addr(rd); 472 rd_set_addr_status(rd, 0, 0); 473 if (busaddr) 474 pci_unmap_single(r->pdev, busaddr, r->len, r->dir); 475 kfree(rd->buf); 476 } 477 kfree(r); 478 return 0; 479} 480 481static int vlsi_create_hwif(vlsi_irda_dev_t *idev) 482{ 483 char *ringarea; 484 struct ring_descr_hw *hwmap; 485 486 idev->virtaddr = NULL; 487 
static int vlsi_create_hwif(vlsi_irda_dev_t *idev)
{
	char			*ringarea;
	struct ring_descr_hw	*hwmap;

	idev->virtaddr = NULL;
	idev->busaddr = 0;

	ringarea = pci_alloc_consistent(idev->pdev, HW_RING_AREA_SIZE, &idev->busaddr);
	if (!ringarea) {
		IRDA_ERROR("%s: insufficient memory for descriptor rings\n",
			   __func__);
		goto out;
	}
	memset(ringarea, 0, HW_RING_AREA_SIZE);

	hwmap = (struct ring_descr_hw *)ringarea;
	idev->rx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[1],
					XFER_BUF_SIZE, PCI_DMA_FROMDEVICE);
	if (idev->rx_ring == NULL)
		goto out_unmap;

	hwmap += MAX_RING_DESCR;
	idev->tx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[0],
					XFER_BUF_SIZE, PCI_DMA_TODEVICE);
	if (idev->tx_ring == NULL)
		goto out_free_rx;

	idev->virtaddr = ringarea;
	return 0;

out_free_rx:
	vlsi_free_ring(idev->rx_ring);
out_unmap:
	idev->rx_ring = idev->tx_ring = NULL;
	pci_free_consistent(idev->pdev, HW_RING_AREA_SIZE, ringarea, idev->busaddr);
	idev->busaddr = 0;
out:
	return -ENOMEM;
}

static int vlsi_destroy_hwif(vlsi_irda_dev_t *idev)
{
	vlsi_free_ring(idev->rx_ring);
	vlsi_free_ring(idev->tx_ring);
	idev->rx_ring = idev->tx_ring = NULL;

	if (idev->busaddr)
		pci_free_consistent(idev->pdev,HW_RING_AREA_SIZE,idev->virtaddr,idev->busaddr);

	idev->virtaddr = NULL;
	idev->busaddr = 0;

	return 0;
}

/********************************************************/

static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
{
	u16		status;
	int		crclen, len = 0;
	struct sk_buff	*skb;
	int		ret = 0;
	struct net_device *ndev = (struct net_device *)pci_get_drvdata(r->pdev);
	vlsi_irda_dev_t *idev = netdev_priv(ndev);

	pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
	/* dma buffer now owned by the CPU */
	status = rd_get_status(rd);
	if (status & RD_RX_ERROR) {
		if (status & RD_RX_OVER)
			ret |= VLSI_RX_OVER;
		if (status & RD_RX_LENGTH)
			ret |= VLSI_RX_LENGTH;
		if (status & RD_RX_PHYERR)
			ret |= VLSI_RX_FRAME;
		if (status & RD_RX_CRCERR)
			ret |= VLSI_RX_CRC;
		goto done;
	}

	len = rd_get_count(rd);
	crclen = (idev->mode==IFF_FIR) ? sizeof(u32) : sizeof(u16);
	len -= crclen;		/* remove trailing CRC */
	if (len <= 0) {
		IRDA_DEBUG(0, "%s: strange frame (len=%d)\n", __func__, len);
		ret |= VLSI_RX_DROP;
		goto done;
	}

	if (idev->mode == IFF_SIR) {	/* hw checks CRC in MIR, FIR mode */

		/* rd->buf is a streaming PCI_DMA_FROMDEVICE map. Doing the
		 * endian-adjustment there just in place will dirty a cache line
		 * which belongs to the map and thus we must be sure it will
		 * get flushed before giving the buffer back to hardware.
		 * vlsi_fill_rx() will do this anyway - and we rely on that here.
		 */
		le16_to_cpus(rd->buf+len);
		if (irda_calc_crc16(INIT_FCS,rd->buf,len+crclen) != GOOD_FCS) {
			IRDA_DEBUG(0, "%s: crc error\n", __func__);
			ret |= VLSI_RX_CRC;
			goto done;
		}
	}

	if (!rd->skb) {
		IRDA_WARNING("%s: rx packet lost\n", __func__);
		ret |= VLSI_RX_DROP;
		goto done;
	}

	skb = rd->skb;
	rd->skb = NULL;
	skb->dev = ndev;
	memcpy(skb_put(skb,len), rd->buf, len);
	skb_reset_mac_header(skb);
	if (in_interrupt())
		netif_rx(skb);
	else
		netif_rx_ni(skb);

done:
	rd_set_status(rd, 0);
	rd_set_count(rd, 0);
	/* buffer still owned by CPU */

	return (ret) ? -ret : len;
}
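/* Note on the convention used by vlsi_process_rx() (and vlsi_process_tx()
 * below): a positive return value is the payload length of a good frame,
 * while a negative one carries the VLSI_RX_* / VLSI_TX_* error bits, negated.
 * The callers therefore negate it back and test individual bits, roughly:
 *
 *	ret = vlsi_process_rx(r, rd);
 *	if (ret < 0) {
 *		ret = -ret;			// recover error bitmask
 *		if (ret & VLSI_RX_CRC)
 *			ndev->stats.rx_crc_errors++;
 *		// ... and so on for the other bits
 *	}
 */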
static void vlsi_fill_rx(struct vlsi_ring *r)
{
	struct ring_descr *rd;

	for (rd = ring_last(r); rd != NULL; rd = ring_put(r)) {
		if (rd_is_active(rd)) {
			IRDA_WARNING("%s: driver bug: rx descr race with hw\n",
				     __func__);
			vlsi_ring_debug(r);
			break;
		}
		if (!rd->skb) {
			rd->skb = dev_alloc_skb(IRLAP_SKB_ALLOCSIZE);
			if (rd->skb) {
				skb_reserve(rd->skb,1);
				rd->skb->protocol = htons(ETH_P_IRDA);
			}
			else
				break;	/* probably not worth logging? */
		}
		/* give dma buffer back to busmaster */
		pci_dma_sync_single_for_device(r->pdev, rd_get_addr(rd), r->len, r->dir);
		rd_activate(rd);
	}
}

static void vlsi_rx_interrupt(struct net_device *ndev)
{
	vlsi_irda_dev_t *idev = netdev_priv(ndev);
	struct vlsi_ring *r = idev->rx_ring;
	struct ring_descr *rd;
	int ret;

	for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {

		if (rd_is_active(rd))
			break;

		ret = vlsi_process_rx(r, rd);

		if (ret < 0) {
			ret = -ret;
			ndev->stats.rx_errors++;
			if (ret & VLSI_RX_DROP)
				ndev->stats.rx_dropped++;
			if (ret & VLSI_RX_OVER)
				ndev->stats.rx_over_errors++;
			if (ret & VLSI_RX_LENGTH)
				ndev->stats.rx_length_errors++;
			if (ret & VLSI_RX_FRAME)
				ndev->stats.rx_frame_errors++;
			if (ret & VLSI_RX_CRC)
				ndev->stats.rx_crc_errors++;
		}
		else if (ret > 0) {
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += ret;
		}
	}

	do_gettimeofday(&idev->last_rx); /* remember "now" for later mtt delay */

	vlsi_fill_rx(r);

	if (ring_first(r) == NULL) {
		/* we are in big trouble if this ever happens */
		IRDA_ERROR("%s: rx ring exhausted!\n", __func__);
		vlsi_ring_debug(r);
	}
	else
		outw(0, ndev->base_addr+VLSI_PIO_PROMPT);
}

/* caller must have stopped the controller from busmastering */

static void vlsi_unarm_rx(vlsi_irda_dev_t *idev)
{
	struct net_device *ndev = pci_get_drvdata(idev->pdev);
	struct vlsi_ring *r = idev->rx_ring;
	struct ring_descr *rd;
	int ret;

	for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {

		ret = 0;
		if (rd_is_active(rd)) {
			rd_set_status(rd, 0);
			if (rd_get_count(rd)) {
				IRDA_DEBUG(0, "%s - dropping rx packet\n", __func__);
				ret = -VLSI_RX_DROP;
			}
			rd_set_count(rd, 0);
			pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
			if (rd->skb) {
				dev_kfree_skb_any(rd->skb);
				rd->skb = NULL;
			}
		}
		else
			ret = vlsi_process_rx(r, rd);

		if (ret < 0) {
			ret = -ret;
			ndev->stats.rx_errors++;
			if (ret & VLSI_RX_DROP)
				ndev->stats.rx_dropped++;
			if (ret & VLSI_RX_OVER)
				ndev->stats.rx_over_errors++;
			if (ret & VLSI_RX_LENGTH)
				ndev->stats.rx_length_errors++;
			if (ret & VLSI_RX_FRAME)
				ndev->stats.rx_frame_errors++;
			if (ret & VLSI_RX_CRC)
				ndev->stats.rx_crc_errors++;
		}
		else if (ret > 0) {
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += ret;
		}
	}
}

/********************************************************/
static int vlsi_process_tx(struct vlsi_ring *r, struct ring_descr *rd)
{
	u16		status;
	int		len;
	int		ret;

	pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
	/* dma buffer now owned by the CPU */
	status = rd_get_status(rd);
	if (status & RD_TX_UNDRN)
		ret = VLSI_TX_FIFO;
	else
		ret = 0;
	rd_set_status(rd, 0);

	if (rd->skb) {
		len = rd->skb->len;
		dev_kfree_skb_any(rd->skb);
		rd->skb = NULL;
	}
	else	/* tx-skb already freed? - should never happen */
		len = rd_get_count(rd);		/* incorrect for SIR! (due to wrapping) */

	rd_set_count(rd, 0);
	/* dma buffer still owned by the CPU */

	return (ret) ? -ret : len;
}

static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
{
	u16 nphyctl;
	u16 config;
	unsigned mode;
	int	ret;
	int	baudrate;
	int	fifocnt;

	baudrate = idev->new_baud;
	IRDA_DEBUG(2, "%s: %d -> %d\n", __func__, idev->baud, idev->new_baud);
	if (baudrate == 4000000) {
		mode = IFF_FIR;
		config = IRCFG_FIR;
		nphyctl = PHYCTL_FIR;
	}
	else if (baudrate == 1152000) {
		mode = IFF_MIR;
		config = IRCFG_MIR | IRCFG_CRC16;
		nphyctl = PHYCTL_MIR(clksrc==3);
	}
	else {
		mode = IFF_SIR;
		config = IRCFG_SIR | IRCFG_SIRFILT | IRCFG_RXANY;
		switch(baudrate) {
			default:
				IRDA_WARNING("%s: undefined baudrate %d - fallback to 9600!\n",
					     __func__, baudrate);
				baudrate = 9600;
				/* fallthru */
			case 2400:
			case 9600:
			case 19200:
			case 38400:
			case 57600:
			case 115200:
				nphyctl = PHYCTL_SIR(baudrate,sirpulse,clksrc==3);
				break;
		}
	}
	config |= IRCFG_MSTR | IRCFG_ENRX;

	fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
	if (fifocnt != 0) {
		IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __func__, fifocnt);
	}

	outw(0, iobase+VLSI_PIO_IRENABLE);
	outw(config, iobase+VLSI_PIO_IRCFG);
	outw(nphyctl, iobase+VLSI_PIO_NPHYCTL);
	wmb();
	outw(IRENABLE_PHYANDCLOCK, iobase+VLSI_PIO_IRENABLE);
	mb();

	udelay(1);	/* chip applies IRCFG on next rising edge of its 8MHz clock */

	/* read back settings for validation */

	config = inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_MASK;

	if (mode == IFF_FIR)
		config ^= IRENABLE_FIR_ON;
	else if (mode == IFF_MIR)
		config ^= (IRENABLE_MIR_ON|IRENABLE_CRC16_ON);
	else
		config ^= IRENABLE_SIR_ON;

	if (config != (IRENABLE_PHYANDCLOCK|IRENABLE_ENRXST)) {
		IRDA_WARNING("%s: failed to set %s mode!\n", __func__,
			(mode==IFF_SIR)?"SIR":((mode==IFF_MIR)?"MIR":"FIR"));
		ret = -1;
	}
	else {
		if (inw(iobase+VLSI_PIO_PHYCTL) != nphyctl) {
			IRDA_WARNING("%s: failed to apply baudrate %d\n",
				     __func__, baudrate);
			ret = -1;
		}
		else {
			idev->mode = mode;
			idev->baud = baudrate;
			idev->new_baud = 0;
			ret = 0;
		}
	}

	if (ret)
		vlsi_reg_debug(iobase,__func__);

	return ret;
}
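/* The readback check in vlsi_set_baud() above works by XOR-ing out the
 * mode bit we expect to see: if the chip really switched to the requested
 * mode, what remains must be exactly PHYANDCLOCK plus the rx-enable status.
 * A hypothetical trace for a successful switch to SIR would be:
 *
 *	inw(IRENABLE) & IRENABLE_MASK	== PHYANDCLOCK | SIR_ON | ENRXST
 *	config ^= IRENABLE_SIR_ON	-> PHYANDCLOCK | ENRXST   (check passes)
 */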
static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
					struct net_device *ndev)
{
	vlsi_irda_dev_t *idev = netdev_priv(ndev);
	struct vlsi_ring	*r = idev->tx_ring;
	struct ring_descr *rd;
	unsigned long flags;
	unsigned iobase = ndev->base_addr;
	u8 status;
	u16 config;
	int mtt;
	int len, speed;
	struct timeval  now, ready;
	char *msg = NULL;

	speed = irda_get_next_speed(skb);
	spin_lock_irqsave(&idev->lock, flags);
	if (speed != -1  &&  speed != idev->baud) {
		netif_stop_queue(ndev);
		idev->new_baud = speed;
		status = RD_TX_CLRENTX;  /* stop tx-ring after this frame */
	}
	else
		status = 0;

	if (skb->len == 0) {
		/* handle zero packets - should be speed change */
		if (status == 0) {
			msg = "bogus zero-length packet";
			goto drop_unlock;
		}

		/* due to the completely asynch tx operation we might have
		 * IrLAP racing with the hardware here, e.g. if the controller
		 * is just sending the last packet with current speed while
		 * the LAP is already switching the speed using a synchronous
		 * len=0 packet. Immediate execution would lead to hw lockup
		 * requiring a powercycle to reset. Good candidate to trigger
		 * this is the final UA:RSP packet after receiving a DISC:CMD
		 * when getting the LAP down.
		 * Note that we are not protected by the queue_stop approach
		 * because the final UA:RSP arrives _without_ request to apply
		 * new-speed-after-this-packet - hence the driver doesn't know
		 * this was the last packet and doesn't stop the queue. So the
		 * forced switch to default speed from LAP gets through as
		 * little as some 10 usec later, while the UA:RSP is still
		 * processed by the hardware - and we would get screwed.
		 */

		if (ring_first(idev->tx_ring) == NULL) {
			/* no race - tx-ring already empty */
			vlsi_set_baud(idev, iobase);
			netif_wake_queue(ndev);
		}
		else
			;
			/* keep the speed change pending like it would
			 * for any len>0 packet. tx completion interrupt
			 * will apply it when the tx ring becomes empty.
			 */
		spin_unlock_irqrestore(&idev->lock, flags);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* sanity checks - simply drop the packet */

	rd = ring_last(r);
	if (!rd) {
		msg = "ring full, but queue wasn't stopped";
		goto drop_unlock;
	}

	if (rd_is_active(rd)) {
		msg = "entry still owned by hw";
		goto drop_unlock;
	}

	if (!rd->buf) {
		msg = "tx ring entry without pci buffer";
		goto drop_unlock;
	}

	if (rd->skb) {
		msg = "ring entry with old skb still attached";
		goto drop_unlock;
	}

	/* no need for serialization or interrupt disable during mtt */
	spin_unlock_irqrestore(&idev->lock, flags);

	if ((mtt = irda_get_mtt(skb)) > 0) {
		ready.tv_usec = idev->last_rx.tv_usec + mtt;
		ready.tv_sec = idev->last_rx.tv_sec;
		if (ready.tv_usec >= 1000000) {
			ready.tv_usec -= 1000000;
			ready.tv_sec++;		/* IrLAP 1.1: mtt always < 1 sec */
		}
		for (;;) {
			do_gettimeofday(&now);
			if (now.tv_sec > ready.tv_sec ||
			    (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
				break;
			udelay(100);
			/* must not sleep here - called under netif_tx_lock! */
		}
	}
	/* tx buffer already owned by CPU due to pci_dma_sync_single_for_cpu()
	 * after subsequent tx-completion
	 */

	if (idev->mode == IFF_SIR) {
		status |= RD_TX_DISCRC;		/* no hw-crc creation */
		len = async_wrap_skb(skb, rd->buf, r->len);

		/* Some rare worst case situation in SIR mode might lead to
		 * potential buffer overflow. The wrapper detects this, returns
		 * with a shortened frame (without FCS/EOF) but doesn't provide
		 * any error indication about the invalid packet which we are
		 * going to transmit.
		 * Therefore we log if the buffer got filled to the point where
		 * the wrapper would abort, i.e. when there are less than 5 bytes
		 * left to allow appending the FCS/EOF.
		 */

		if (len >= r->len-5)
			IRDA_WARNING("%s: possible buffer overflow with SIR wrapping!\n",
				     __func__);
	}
	else {
		/* hw deals with MIR/FIR mode wrapping */
		status |= RD_TX_PULSE;		/* send 2 us highspeed indication pulse */
		len = skb->len;
		if (len > r->len) {
			msg = "frame exceeds tx buffer length";
			goto drop;
		}
		else
			skb_copy_from_linear_data(skb, rd->buf, len);
	}

	rd->skb = skb;			/* remember skb for tx-complete stats */

	rd_set_count(rd, len);
	rd_set_status(rd, status);	/* not yet active! */

	/* give dma buffer back to busmaster-hw (flush caches to make
	 * CPU-driven changes visible from the pci bus).
	 */

	pci_dma_sync_single_for_device(r->pdev, rd_get_addr(rd), r->len, r->dir);

/*	Switching to TX mode here races with the controller
 *	which may stop TX at any time when fetching an inactive descriptor
 *	or one with CLR_ENTX set. So we switch on TX only, if TX was not running
 *	_after_ the new descriptor was activated on the ring. This ensures
 *	we will either find TX already stopped or we can be sure there
 *	will be a TX-complete interrupt even if the chip stopped doing
 *	TX just after we found it still running. The ISR will then find
 *	the non-empty ring and restart TX processing. The enclosing
 *	spinlock provides the correct serialization to prevent race with isr.
 */

	spin_lock_irqsave(&idev->lock,flags);

	rd_activate(rd);

	if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) {
		int fifocnt;

		fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
		if (fifocnt != 0) {
			IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __func__, fifocnt);
		}

		config = inw(iobase+VLSI_PIO_IRCFG);
		mb();
		outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
		wmb();
		outw(0, iobase+VLSI_PIO_PROMPT);
	}

	if (ring_put(r) == NULL) {
		netif_stop_queue(ndev);
		IRDA_DEBUG(3, "%s: tx ring full - queue stopped\n", __func__);
	}
	spin_unlock_irqrestore(&idev->lock, flags);

	return NETDEV_TX_OK;

drop_unlock:
	spin_unlock_irqrestore(&idev->lock, flags);
drop:
	IRDA_WARNING("%s: dropping packet - %s\n", __func__, msg);
	dev_kfree_skb_any(skb);
	ndev->stats.tx_errors++;
	ndev->stats.tx_dropped++;
	/* Don't even think about returning NET_XMIT_DROP (=1) here!
	 * In fact any retval!=0 causes the packet scheduler to requeue the
	 * packet for later retry of transmission - which isn't exactly
	 * what we want after we've just called dev_kfree_skb_any ;-)
	 */
	return NETDEV_TX_OK;
}

static void vlsi_tx_interrupt(struct net_device *ndev)
{
	vlsi_irda_dev_t	*idev = netdev_priv(ndev);
	struct vlsi_ring	*r = idev->tx_ring;
	struct ring_descr	*rd;
	unsigned	iobase;
	int	ret;
	u16	config;

	for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {

		if (rd_is_active(rd))
			break;

		ret = vlsi_process_tx(r, rd);

		if (ret < 0) {
			ret = -ret;
			ndev->stats.tx_errors++;
			if (ret & VLSI_TX_DROP)
				ndev->stats.tx_dropped++;
			if (ret & VLSI_TX_FIFO)
				ndev->stats.tx_fifo_errors++;
		}
		else if (ret > 0) {
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += ret;
		}
	}

	iobase = ndev->base_addr;

	if (idev->new_baud  &&  rd == NULL)	/* tx ring empty and speed change pending */
		vlsi_set_baud(idev, iobase);

	config = inw(iobase+VLSI_PIO_IRCFG);
	if (rd == NULL)			/* tx ring empty: re-enable rx */
		outw((config & ~IRCFG_ENTX) | IRCFG_ENRX, iobase+VLSI_PIO_IRCFG);

	else if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) {
		int fifocnt;

		fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
		if (fifocnt != 0) {
			IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n",
				   __func__, fifocnt);
		}
		outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
	}

	outw(0, iobase+VLSI_PIO_PROMPT);

	if (netif_queue_stopped(ndev)  &&  !idev->new_baud) {
		netif_wake_queue(ndev);
		IRDA_DEBUG(3, "%s: queue awoken\n", __func__);
	}
}

/* caller must have stopped the controller from busmastering */

static void vlsi_unarm_tx(vlsi_irda_dev_t *idev)
{
	struct net_device *ndev = pci_get_drvdata(idev->pdev);
	struct vlsi_ring *r = idev->tx_ring;
	struct ring_descr *rd;
	int ret;

	for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {

		ret = 0;
		if (rd_is_active(rd)) {
			rd_set_status(rd, 0);
			rd_set_count(rd, 0);
			pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
			if (rd->skb) {
				dev_kfree_skb_any(rd->skb);
				rd->skb = NULL;
			}
			IRDA_DEBUG(0, "%s - dropping tx packet\n", __func__);
			ret = -VLSI_TX_DROP;
		}
		else
			ret = vlsi_process_tx(r, rd);

		if (ret < 0) {
			ret = -ret;
			ndev->stats.tx_errors++;
			if (ret & VLSI_TX_DROP)
				ndev->stats.tx_dropped++;
			if (ret & VLSI_TX_FIFO)
				ndev->stats.tx_fifo_errors++;
		}
		else if (ret > 0) {
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += ret;
		}
	}
}

/********************************************************/
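/* A short sketch of the CLKCTL programming sequence used below: the
 * desired source is always programmed with CLKCTL_CLKSTP held, so the
 * chip stays disconnected from the clock until the source is stable;
 * only then is CLKSTP cleared. For the PLL case this reads roughly:
 *
 *	pci_write_config_byte(pdev, VLSI_PCI_CLKCTL,
 *			      CLKCTL_PD_INV | CLKCTL_CLKSTP); // power PLL, clock stopped
 *	// ... poll CLKCTL_LOCK until the PLL reports lock ...
 *	clkctl &= ~CLKCTL_CLKSTP;			      // connect chip to clock
 *	pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
 */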
static int vlsi_start_clock(struct pci_dev *pdev)
{
	u8	clkctl, lock;
	int	i, count;

	if (clksrc < 2)  {  /* auto or PLL: try PLL */
		clkctl = CLKCTL_PD_INV | CLKCTL_CLKSTP;
		pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);

		/* procedure to detect PLL lock synchronisation:
		 * after 0.5 msec initial delay we expect to find 3 PLL lock
		 * indications within 10 msec for successful PLL detection.
		 */
		udelay(500);
		count = 0;
		for (i = 500; i <= 10000; i += 50) { /* max 10 msec */
			pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &lock);
			if (lock&CLKCTL_LOCK) {
				if (++count >= 3)
					break;
			}
			udelay(50);
		}
		if (count < 3) {
			if (clksrc == 1) { /* explicitly asked for PLL hence bail out */
				IRDA_ERROR("%s: no PLL or failed to lock!\n",
					   __func__);
				clkctl = CLKCTL_CLKSTP;
				pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
				return -1;
			}
			else			/* was: clksrc=0(auto) */
				clksrc = 3;	/* fallback to 40MHz XCLK (OB800) */

			IRDA_DEBUG(0, "%s: PLL not locked, fallback to clksrc=%d\n",
				   __func__, clksrc);
		}
		else
			clksrc = 1;	/* got successful PLL lock */
	}

	if (clksrc != 1) {
		/* we get here if either no PLL detected in auto-mode or
		 * an external clock source was explicitly specified */

		clkctl = CLKCTL_EXTCLK | CLKCTL_CLKSTP;
		if (clksrc == 3)
			clkctl |= CLKCTL_XCKSEL;
		pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);

		/* no way to test for working XCLK */
	}
	else
		pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);

	/* ok, now going to connect the chip with the clock source */

	clkctl &= ~CLKCTL_CLKSTP;
	pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);

	return 0;
}

static void vlsi_stop_clock(struct pci_dev *pdev)
{
	u8 clkctl;

	/* disconnect chip from clock source */
	pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);
	clkctl |= CLKCTL_CLKSTP;
	pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);

	/* disable all clock sources */
	clkctl &= ~(CLKCTL_EXTCLK | CLKCTL_PD_INV);
	pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
}

/********************************************************/
/* writing all-zero to the VLSI PCI IO register area seems to prevent
 * some occasional situations where the hardware fails (symptoms are
 * what appears as stalled tx/rx state machines, i.e. everything ok for
 * receive or transmit but hw makes no progress or is unable to access
 * the bus memory locations).
 * Best place to call this is immediately after/before the internal clock
 * gets started/stopped.
 */

static inline void vlsi_clear_regs(unsigned iobase)
{
	unsigned	i;
	const unsigned	chip_io_extent = 32;

	for (i = 0; i < chip_io_extent; i += sizeof(u16))
		outw(0, iobase + i);
}

static int vlsi_init_chip(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	vlsi_irda_dev_t *idev = netdev_priv(ndev);
	unsigned	iobase;
	u16 ptr;

	/* start the clock and clean the registers */

	if (vlsi_start_clock(pdev)) {
		IRDA_ERROR("%s: no valid clock source\n", __func__);
		return -1;
	}
	iobase = ndev->base_addr;
	vlsi_clear_regs(iobase);

	outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR);	/* w/c pending IRQ, disable all INT */

	outw(0, iobase+VLSI_PIO_IRENABLE);	/* disable IrPHY-interface */

	/* disable everything, particularly IRCFG_MSTR - (also resetting the RING_PTR) */

	outw(0, iobase+VLSI_PIO_IRCFG);
	wmb();

	outw(MAX_PACKET_LENGTH, iobase+VLSI_PIO_MAXPKT);  /* max possible value=0x0fff */

	outw(BUS_TO_RINGBASE(idev->busaddr), iobase+VLSI_PIO_RINGBASE);

	outw(TX_RX_TO_RINGSIZE(idev->tx_ring->size, idev->rx_ring->size),
		iobase+VLSI_PIO_RINGSIZE);

	ptr = inw(iobase+VLSI_PIO_RINGPTR);
	atomic_set(&idev->rx_ring->head, RINGPTR_GET_RX(ptr));
	atomic_set(&idev->rx_ring->tail, RINGPTR_GET_RX(ptr));
	atomic_set(&idev->tx_ring->head, RINGPTR_GET_TX(ptr));
	atomic_set(&idev->tx_ring->tail, RINGPTR_GET_TX(ptr));

	vlsi_set_baud(idev, iobase);	/* idev->new_baud used as provided by caller */

	outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR);	/* just in case - w/c pending IRQ's */
	wmb();
	/* DO NOT BLINDLY ENABLE IRINTR_ACTEN!
	 * basically every received pulse fires an ACTIVITY-INT,
	 * leading to >>1000 INT's per second instead of a few tens
	 */

	outb(IRINTR_RPKTEN|IRINTR_TPKTEN, iobase+VLSI_PIO_IRINTR);

	return 0;
}

static int vlsi_start_hw(vlsi_irda_dev_t *idev)
{
	struct pci_dev *pdev = idev->pdev;
	struct net_device *ndev = pci_get_drvdata(pdev);
	unsigned iobase = ndev->base_addr;
	u8 byte;

	/* we don't use the legacy UART, disable its address decoding */

	pci_read_config_byte(pdev, VLSI_PCI_IRMISC, &byte);
	byte &= ~(IRMISC_UARTEN | IRMISC_UARTTST);
	pci_write_config_byte(pdev, VLSI_PCI_IRMISC, byte);

	/* enable PCI busmaster access to our 16MB page */

	pci_write_config_byte(pdev, VLSI_PCI_MSTRPAGE, MSTRPAGE_VALUE);
	pci_set_master(pdev);

	if (vlsi_init_chip(pdev) < 0) {
		pci_disable_device(pdev);
		return -1;
	}

	vlsi_fill_rx(idev->rx_ring);

	do_gettimeofday(&idev->last_rx);	/* first mtt may start from now on */

	outw(0, iobase+VLSI_PIO_PROMPT);	/* kick hw state machine */

	return 0;
}

static int vlsi_stop_hw(vlsi_irda_dev_t *idev)
{
	struct pci_dev *pdev = idev->pdev;
	struct net_device *ndev = pci_get_drvdata(pdev);
	unsigned iobase = ndev->base_addr;
	unsigned long flags;

	spin_lock_irqsave(&idev->lock,flags);
	outw(0, iobase+VLSI_PIO_IRENABLE);
	outw(0, iobase+VLSI_PIO_IRCFG);			/* disable everything */

	/* disable and w/c irqs */
	outb(0, iobase+VLSI_PIO_IRINTR);
	wmb();
	outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR);
	spin_unlock_irqrestore(&idev->lock,flags);

	vlsi_unarm_tx(idev);
	vlsi_unarm_rx(idev);

	vlsi_clear_regs(iobase);
	vlsi_stop_clock(pdev);

	pci_disable_device(pdev);

	return 0;
}

/**************************************************************/

static void vlsi_tx_timeout(struct net_device *ndev)
{
	vlsi_irda_dev_t *idev = netdev_priv(ndev);

	vlsi_reg_debug(ndev->base_addr, __func__);
	vlsi_ring_debug(idev->tx_ring);

	if (netif_running(ndev))
		netif_stop_queue(ndev);

	vlsi_stop_hw(idev);

	/* now simply restart the whole thing */

	if (!idev->new_baud)
		idev->new_baud = idev->baud;	/* keep current baudrate */

	if (vlsi_start_hw(idev))
		IRDA_ERROR("%s: failed to restart hw - %s(%s) unusable!\n",
			   __func__, pci_name(idev->pdev), ndev->name);
	else
		netif_start_queue(ndev);
}
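/* The ioctl handler below services the three requests the IrDA stack
 * issues against a device: SIOCSBANDWIDTH (set speed, CAP_NET_ADMIN only),
 * SIOCSMEDIABUSY (mark the media busy) and SIOCGRECEIVING (poll whether a
 * frame is currently arriving - approximated here by a nonzero rx fifo).
 */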
static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	vlsi_irda_dev_t *idev = netdev_priv(ndev);
	struct if_irda_req *irq = (struct if_irda_req *) rq;
	unsigned long flags;
	u16 fifocnt;
	int ret = 0;

	switch (cmd) {
		case SIOCSBANDWIDTH:
			if (!capable(CAP_NET_ADMIN)) {
				ret = -EPERM;
				break;
			}
			spin_lock_irqsave(&idev->lock, flags);
			idev->new_baud = irq->ifr_baudrate;
			/* when called from userland there might be a minor race window here
			 * if the stack tries to change speed concurrently - which would be
			 * pretty strange anyway with the userland having full control...
			 */
			vlsi_set_baud(idev, ndev->base_addr);
			spin_unlock_irqrestore(&idev->lock, flags);
			break;
		case SIOCSMEDIABUSY:
			if (!capable(CAP_NET_ADMIN)) {
				ret = -EPERM;
				break;
			}
			irda_device_set_media_busy(ndev, TRUE);
			break;
		case SIOCGRECEIVING:
			/* the best we can do: check whether there are any bytes in rx fifo.
			 * The trustable window (in case some data arrives just afterwards)
			 * may be as short as 1usec or so at 4Mbps.
			 */
			fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
			irq->ifr_receiving = (fifocnt!=0) ? 1 : 0;
			break;
		default:
			IRDA_WARNING("%s: notsupp - cmd=%04x\n",
				     __func__, cmd);
			ret = -EOPNOTSUPP;
	}

	return ret;
}

/********************************************************/

static irqreturn_t vlsi_interrupt(int irq, void *dev_instance)
{
	struct net_device *ndev = dev_instance;
	vlsi_irda_dev_t *idev = netdev_priv(ndev);
	unsigned	iobase;
	u8		irintr;
	int		boguscount = 5;
	unsigned long	flags;
	int		handled = 0;

	iobase = ndev->base_addr;
	spin_lock_irqsave(&idev->lock,flags);
	do {
		irintr = inb(iobase+VLSI_PIO_IRINTR);
		mb();
		outb(irintr, iobase+VLSI_PIO_IRINTR);	/* acknowledge asap */

		if (!(irintr&=IRINTR_INT_MASK))		/* not our INT - probably shared */
			break;

		handled = 1;

		if (unlikely(!(irintr & ~IRINTR_ACTIVITY)))
			break;		/* nothing to do if only activity */

		if (irintr&IRINTR_RPKTINT)
			vlsi_rx_interrupt(ndev);

		if (irintr&IRINTR_TPKTINT)
			vlsi_tx_interrupt(ndev);

	} while (--boguscount > 0);
	spin_unlock_irqrestore(&idev->lock,flags);

	if (boguscount <= 0)
		IRDA_MESSAGE("%s: too much work in interrupt!\n",
			     __func__);
	return IRQ_RETVAL(handled);
}

/********************************************************/
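/* Since the IRQ line may be shared (we request it with IRQF_SHARED below),
 * the handler above follows the usual pattern for shared PCI interrupts:
 * read the status, write it back to acknowledge ("w/c" = write-to-clear),
 * and bail out early with handled=0 when none of our IRINTR bits are set,
 * so the other devices on the line get their turn. The boguscount limit
 * merely guards against a stuck interrupt source.
 */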
static int vlsi_open(struct net_device *ndev)
{
	vlsi_irda_dev_t *idev = netdev_priv(ndev);
	int	err = -EAGAIN;
	char	hwname[32];

	if (pci_request_regions(idev->pdev, drivername)) {
		IRDA_WARNING("%s: io resource busy\n", __func__);
		goto errout;
	}
	ndev->base_addr = pci_resource_start(idev->pdev,0);
	ndev->irq = idev->pdev->irq;

	/* on some rare occasions the chip apparently comes up with
	 * IRQ's pending. We better w/c pending IRQ and disable them all
	 */

	outb(IRINTR_INT_MASK, ndev->base_addr+VLSI_PIO_IRINTR);

	if (request_irq(ndev->irq, vlsi_interrupt, IRQF_SHARED,
			drivername, ndev)) {
		IRDA_WARNING("%s: couldn't get IRQ: %d\n",
			     __func__, ndev->irq);
		goto errout_io;
	}

	if ((err = vlsi_create_hwif(idev)) != 0)
		goto errout_irq;

	sprintf(hwname, "VLSI-FIR @ 0x%04x", (unsigned)ndev->base_addr);
	idev->irlap = irlap_open(ndev,&idev->qos,hwname);
	if (!idev->irlap)
		goto errout_free_ring;

	do_gettimeofday(&idev->last_rx);  /* first mtt may start from now on */

	idev->new_baud = 9600;		/* start with IrPHY using 9600(SIR) mode */

	if ((err = vlsi_start_hw(idev)) != 0)
		goto errout_close_irlap;

	netif_start_queue(ndev);

	IRDA_MESSAGE("%s: device %s operational\n", __func__, ndev->name);

	return 0;

errout_close_irlap:
	irlap_close(idev->irlap);
errout_free_ring:
	vlsi_destroy_hwif(idev);
errout_irq:
	free_irq(ndev->irq,ndev);
errout_io:
	pci_release_regions(idev->pdev);
errout:
	return err;
}

static int vlsi_close(struct net_device *ndev)
{
	vlsi_irda_dev_t *idev = netdev_priv(ndev);

	netif_stop_queue(ndev);

	if (idev->irlap)
		irlap_close(idev->irlap);
	idev->irlap = NULL;

	vlsi_stop_hw(idev);

	vlsi_destroy_hwif(idev);

	free_irq(ndev->irq,ndev);

	pci_release_regions(idev->pdev);

	IRDA_MESSAGE("%s: device %s stopped\n", __func__, ndev->name);

	return 0;
}

static const struct net_device_ops vlsi_netdev_ops = {
	.ndo_open	= vlsi_open,
	.ndo_stop	= vlsi_close,
	.ndo_start_xmit	= vlsi_hard_start_xmit,
	.ndo_do_ioctl	= vlsi_ioctl,
	.ndo_tx_timeout	= vlsi_tx_timeout,
};
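/* vlsi_irda_init() below advertises the supported speeds as an IrLAP
 * capability bitfield. Note the encoding of 4 Mbps: the IR_* speed
 * constants span two bytes of baud_rate.bits, so IR_4000000 has to land
 * in the second byte - which is what the "<< 8" in the code expresses.
 */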
static int vlsi_irda_init(struct net_device *ndev)
{
	vlsi_irda_dev_t *idev = netdev_priv(ndev);
	struct pci_dev *pdev = idev->pdev;

	ndev->irq = pdev->irq;
	ndev->base_addr = pci_resource_start(pdev,0);

	/* PCI busmastering
	 * see include file for details why we need these 2 masks, in this order!
	 */

	if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW) ||
	    pci_set_dma_mask(pdev,DMA_MASK_MSTRPAGE)) {
		IRDA_ERROR("%s: aborting due to PCI BM-DMA address limitations\n", __func__);
		return -1;
	}

	irda_init_max_qos_capabilies(&idev->qos);

	/* the VLSI82C147 does not support 576000! */

	idev->qos.baud_rate.bits = IR_2400 | IR_9600
		| IR_19200 | IR_38400 | IR_57600 | IR_115200
		| IR_1152000 | (IR_4000000 << 8);

	idev->qos.min_turn_time.bits = qos_mtt_bits;

	irda_qos_bits_to_value(&idev->qos);

	/* currently no public media definitions for IrDA */

	ndev->flags |= IFF_PORTSEL | IFF_AUTOMEDIA;
	ndev->if_port = IF_PORT_UNKNOWN;

	ndev->netdev_ops = &vlsi_netdev_ops;
	ndev->watchdog_timeo = 500*HZ/1000;	/* max. allowed turn time for IrLAP */

	SET_NETDEV_DEV(ndev, &pdev->dev);

	return 0;
}

/**************************************************************/

static int __devinit
vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net_device	*ndev;
	vlsi_irda_dev_t		*idev;

	if (pci_enable_device(pdev))
		goto out;
	else
		pdev->current_state = 0; /* hw must be running now */

	IRDA_MESSAGE("%s: IrDA PCI controller %s detected\n",
		     drivername, pci_name(pdev));

	if ( !pci_resource_start(pdev,0) ||
	     !(pci_resource_flags(pdev,0) & IORESOURCE_IO) ) {
		IRDA_ERROR("%s: bar 0 invalid\n", __func__);
		goto out_disable;
	}

	ndev = alloc_irdadev(sizeof(*idev));
	if (ndev==NULL) {
		IRDA_ERROR("%s: Unable to allocate device memory.\n",
			   __func__);
		goto out_disable;
	}

	idev = netdev_priv(ndev);

	spin_lock_init(&idev->lock);
	mutex_init(&idev->mtx);
	mutex_lock(&idev->mtx);
	idev->pdev = pdev;

	if (vlsi_irda_init(ndev) < 0)
		goto out_freedev;

	if (register_netdev(ndev) < 0) {
		IRDA_ERROR("%s: register_netdev failed\n", __func__);
		goto out_freedev;
	}

	if (vlsi_proc_root != NULL) {
		struct proc_dir_entry *ent;

		ent = proc_create_data(ndev->name, S_IFREG|S_IRUGO,
				       vlsi_proc_root, VLSI_PROC_FOPS, ndev);
		if (!ent) {
			IRDA_WARNING("%s: failed to create proc entry\n",
				     __func__);
		} else {
			ent->size = 0;
		}
		idev->proc_entry = ent;
	}
	IRDA_MESSAGE("%s: registered device %s\n", drivername, ndev->name);

	pci_set_drvdata(pdev, ndev);
	mutex_unlock(&idev->mtx);

	return 0;

out_freedev:
	mutex_unlock(&idev->mtx);
	free_netdev(ndev);
out_disable:
	pci_disable_device(pdev);
out:
	pci_set_drvdata(pdev, NULL);
	return -ENODEV;
}

static void __devexit vlsi_irda_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	vlsi_irda_dev_t *idev;

	if (!ndev) {
		IRDA_ERROR("%s: lost netdevice?\n", drivername);
		return;
	}

	unregister_netdev(ndev);

	idev = netdev_priv(ndev);
	mutex_lock(&idev->mtx);
	if (idev->proc_entry) {
		remove_proc_entry(ndev->name, vlsi_proc_root);
		idev->proc_entry = NULL;
	}
	mutex_unlock(&idev->mtx);

	free_netdev(ndev);

	pci_set_drvdata(pdev, NULL);

	IRDA_MESSAGE("%s: %s removed\n", drivername, pci_name(pdev));
}
#ifdef CONFIG_PM

/* The Controller doesn't provide PCI PM capabilities as defined by PCI specs.
 * Some of the Linux PCI-PM code however depends on this, for example in
 * pci_set_power_state(). So we have to take care to perform the required
 * operations on our own (particularly reflecting the pdev->current_state)
 * otherwise we might get cheated by pci-pm.
 */

static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	vlsi_irda_dev_t *idev;

	if (!ndev) {
		IRDA_ERROR("%s - %s: no netdevice\n",
			   __func__, pci_name(pdev));
		return 0;
	}
	idev = netdev_priv(ndev);
	mutex_lock(&idev->mtx);
	if (pdev->current_state != 0) {			/* already suspended */
		if (state.event > pdev->current_state) {	/* simply go deeper */
			pci_set_power_state(pdev, pci_choose_state(pdev, state));
			pdev->current_state = state.event;
		}
		else
			IRDA_ERROR("%s - %s: invalid suspend request %u -> %u\n",
				   __func__, pci_name(pdev), pdev->current_state, state.event);
		mutex_unlock(&idev->mtx);
		return 0;
	}

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		vlsi_stop_hw(idev);
		pci_save_state(pdev);
		if (!idev->new_baud)
			/* remember speed settings to restore on resume */
			idev->new_baud = idev->baud;
	}

	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	pdev->current_state = state.event;
	idev->resume_ok = 1;
	mutex_unlock(&idev->mtx);
	return 0;
}
static int vlsi_irda_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	vlsi_irda_dev_t	*idev;

	if (!ndev) {
		IRDA_ERROR("%s - %s: no netdevice\n",
			   __func__, pci_name(pdev));
		return 0;
	}
	idev = netdev_priv(ndev);
	mutex_lock(&idev->mtx);
	if (pdev->current_state == 0) {
		mutex_unlock(&idev->mtx);
		IRDA_WARNING("%s - %s: already resumed\n",
			     __func__, pci_name(pdev));
		return 0;
	}

	pci_set_power_state(pdev, PCI_D0);
	pdev->current_state = PM_EVENT_ON;

	if (!idev->resume_ok) {
		/* should be obsolete now - but used to happen due to:
		 * - pci layer initially setting pdev->current_state = 4 (unknown)
		 * - pci layer did not walk the save_state-tree (might be APM problem)
		 *   so we could not refuse to suspend from undefined state
		 * - vlsi_irda_suspend detected invalid state and refused to save
		 *   configuration for resume - but was too late to stop suspending
		 * - vlsi_irda_resume got screwed when trying to resume from garbage
		 *
		 * now we explicitly set pdev->current_state = 0 after enabling the
		 * device and independently resume_ok should catch any garbage config.
		 */
		IRDA_WARNING("%s - hm, nothing to resume?\n", __func__);
		mutex_unlock(&idev->mtx);
		return 0;
	}

	if (netif_running(ndev)) {
		pci_restore_state(pdev);
		vlsi_start_hw(idev);
		netif_device_attach(ndev);
	}
	idev->resume_ok = 0;
	mutex_unlock(&idev->mtx);
	return 0;
}

#endif /* CONFIG_PM */

/*********************************************************/

static struct pci_driver vlsi_irda_driver = {
	.name		= drivername,
	.id_table	= vlsi_irda_table,
	.probe		= vlsi_irda_probe,
	.remove		= __devexit_p(vlsi_irda_remove),
#ifdef CONFIG_PM
	.suspend	= vlsi_irda_suspend,
	.resume		= vlsi_irda_resume,
#endif
};

#define PROC_DIR ("driver/" DRIVER_NAME)

static int __init vlsi_mod_init(void)
{
	int	i, ret;

	if (clksrc < 0  ||  clksrc > 3) {
		IRDA_ERROR("%s: invalid clksrc=%d\n", drivername, clksrc);
		return -1;
	}

	for (i = 0; i < 2; i++) {
		switch(ringsize[i]) {
			case 4:
			case 8:
			case 16:
			case 32:
			case 64:
				break;
			default:
				IRDA_WARNING("%s: invalid %s ringsize %d, using default=8\n",
					     drivername, (i)?"rx":"tx", ringsize[i]);
				ringsize[i] = 8;
				break;
		}
	}

	sirpulse = !!sirpulse;

	/* proc_mkdir returns NULL if !CONFIG_PROC_FS.
	 * Failure to create the procfs entry is handled like running
	 * without procfs - it's not required for the driver to work.
	 */
	vlsi_proc_root = proc_mkdir(PROC_DIR, NULL);

	ret = pci_register_driver(&vlsi_irda_driver);

	if (ret && vlsi_proc_root)
		remove_proc_entry(PROC_DIR, NULL);
	return ret;
}

static void __exit vlsi_mod_exit(void)
{
	pci_unregister_driver(&vlsi_irda_driver);
	if (vlsi_proc_root)
		remove_proc_entry(PROC_DIR, NULL);
}

module_init(vlsi_mod_init);
module_exit(vlsi_mod_exit);
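/* Build note: in the kernel tree this driver is normally built through the
 * IrDA device drivers Kconfig/Makefile; assuming the usual config symbol
 * (CONFIG_VLSI_FIR), the Makefile hook is simply:
 *
 *	obj-$(CONFIG_VLSI_FIR) += vlsi_ir.o
 */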