/*
 * drivers/net/wan/dscc4/dscc4_main.c: a DSCC4 HDLC driver for Linux
 *
 * This software may be used and distributed according to the terms of the
 * GNU General Public License.
 *
 * The author may be reached as romieu@cogenit.fr.
 * Specific bug reports/asian food will be welcome.
 *
 * Special thanks to the nice people at CS-Telecom for the hardware and the
 * access to the test/measure tools.
 *
 *
 * Theory of Operation
 *
 * I. Board Compatibility
 *
 * This device driver is designed for the Siemens PEB20534 4-port serial
 * controller as found on Etinc PCISYNC cards. The documentation for the
 * chipset is available at http://www.infineon.com:
 * - Data Sheet "DSCC4, DMA Supported Serial Communication Controller with
 *   4 Channels, PEB 20534 Version 2.1, PEF 20534 Version 2.1";
 * - Application Hint "Management of DSCC4 on-chip FIFO resources".
 * Jens David has built an adapter based on the same chipset. Take a look
 * at http://www.afthd.tu-darmstadt.de/~dg1kjd/pciscc4 for a specific
 * driver.
 * Sample code (2 revisions) is available at Infineon.
 *
 * II. Board-specific settings
 *
 * Pcisync can transmit a clock signal to the outside world on the
 * *first two* ports provided you put a quartz and a line driver on them
 * and remove the jumpers. The operation is described on the Etinc web
 * site. If you go DCE on these ports, don't forget to use an adequate
 * cable.
 *
 * Sharing of the PCI interrupt line for this board is possible.
 *
 * III. Driver operation
 *
 * The rx/tx operations are based on a linked list of descriptors. I haven't
 * tried the start/stop descriptor method as this one looks like the cheapest
 * in terms of PCI manipulation.
 *
 * Tx direction
 * Once the data section of the current descriptor is processed, the next
 * linked descriptor is loaded if the HOLD bit isn't set in the current
 * descriptor. If HOLD is met, the transmission is stopped until the host
 * unsets it and signals the change via TxPOLL.
 * When the tx ring is full, the xmit routine issues a call to netdev_stop.
 * The device is supposed to be enabled again during an ALLS irq (we could
 * use HI but as it's easy to lose events, it's fscked).
 *
 * Rx direction
 * The received frames aren't supposed to span over multiple receiving areas.
 * I may implement it some day but it isn't the highest ranked item.
 *
 * IV. Notes
 * The chipset is buggy. Typically, under some specific load patterns (I
 * wouldn't call them "high"), the irq queues and the descriptors look like
 * some event has been lost. Even assuming some fancy PCI feature, it won't
 * explain the reproducible missing "C" bit in the descriptors. Faking an
 * irq in the periodic timer isn't really elegant but at least it seems
 * reliable.
 * The current error (XDU, RFO) recovery code is untested.
 * So far, RDO takes its RX channel down and the right sequence to enable it
 * again is still a mystery. If RDO happens, plan a reboot. More details
 * in the code (NB: as this happens, TX still works).
 * Don't mess with the cables during operation, especially on DTE ports. I
 * don't suggest it for DCE either but at least one can get some messages
 * instead of a complete instant freeze.
 * Tests are done on Rev. 20 of the silicon. The RDO handling changes with
 * the documentation/chipset releases. An on-line errata would be welcome.
 *
 * TODO:
 * - some trivial errors lurk,
 * - the stats are fscked,
 * - use polling at high irq/s,
 * - performance analysis,
 * - endianness.
 *
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>

#include <linux/init.h>
#include <linux/string.h>

#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <net/syncppp.h>
#include <linux/hdlc.h>

/* Version */
static const char version[] = "$Id: dscc4.c,v 1.1.1.1 2008/10/15 03:26:45 james26_jang Exp $\n";
static int debug;


/* Module parameters */
MODULE_AUTHOR("Maintainer: Francois Romieu <romieu@cogenit.fr>");
MODULE_DESCRIPTION("Siemens PEB20534 PCI Controller");
MODULE_LICENSE("GPL");
MODULE_PARM(debug,"i");

/* Structures */
struct TxFD {
	u32 state;
	u32 next;
	u32 data;
	u32 complete;
	u32 jiffies; /* more hack to come :o) */
};

struct RxFD {
	u32 state1;
	u32 next;
	u32 data;
	u32 state2;
	u32 end;
};

#define DEBUG
#define DEBUG_PARANOID
#define TX_RING_SIZE	32
#define RX_RING_SIZE	32
#define IRQ_RING_SIZE	64	/* Keep it a multiple of 32 */
#define TX_TIMEOUT	(HZ/10)
#define BRR_DIVIDER_MAX	(64*0x00008000)
#define dev_per_card	4

#define SOURCE_ID(flags)	(((flags) >> 28) & 0x03)
#define TO_SIZE(state)		(((state) >> 16) & 0x1fff)
#define TO_STATE(len)		cpu_to_le32(((len) & TxSizeMax) << 16)
#define RX_MAX(len)		((((len) >> 5) + 1) << 5)
#define SCC_REG_START(id)	(SCC_START+(id)*SCC_OFFSET)

#undef DEBUG

struct dscc4_pci_priv {
	u32 *iqcfg;
	int cfg_cur;
	spinlock_t lock;
	struct pci_dev *pdev;

	struct net_device *root;
	dma_addr_t iqcfg_dma;
	u32 xtal_hz;
};

struct dscc4_dev_priv {
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	struct sk_buff *tx_skbuff[TX_RING_SIZE];

	struct RxFD *rx_fd;
	struct TxFD *tx_fd;
	u32 *iqrx;
	u32 *iqtx;

	u32 rx_current;
	u32 tx_current;
	u32 iqrx_current;
	u32 iqtx_current;

	u32 tx_dirty;
	int bad_tx_frame;
	int bad_rx_frame;
	int rx_needs_refill;

	dma_addr_t tx_fd_dma;
	dma_addr_t rx_fd_dma;
	dma_addr_t iqtx_dma;
	dma_addr_t iqrx_dma;

	struct net_device_stats stats;
	struct timer_list timer;

	struct dscc4_pci_priv *pci_priv;
	spinlock_t lock;

	int dev_id;
	u32 flags;
	u32 timer_help;
	u32 hi_expected;

	struct hdlc_device_struct hdlc;
	int usecount;
};

/* GLOBAL registers definitions */
#define GCMDR	0x00
#define GSTAR	0x04
#define GMODE	0x08
#define IQLENR0	0x0C
#define IQLENR1	0x10
#define IQRX0	0x14
#define IQTX0	0x24
#define IQCFG	0x3c
#define FIFOCR1	0x44
#define FIFOCR2	0x48
#define FIFOCR3	0x4c
#define FIFOCR4	0x34
#define CH0CFG	0x50
#define CH0BRDA	0x54
#define CH0BTDA	0x58

/* SCC registers definitions */
#define SCC_START	0x0100
#define SCC_OFFSET	0x80
#define CMDR	0x00
#define STAR	0x04
#define CCR0	0x08
#define CCR1	0x0c
#define CCR2	0x10
#define BRR	0x2C
#define RLCR	0x40
#define IMR	0x54
#define ISR	0x58
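
/*
 * Illustrative sketch (not used by the driver, which open-codes the
 * arithmetic): each SCC channel owns a register window SCC_OFFSET bytes
 * wide, starting at SCC_START. A per-channel register address is thus
 * base + SCC_START + id*SCC_OFFSET + reg, e.g. channel 2's STAR lives
 * at base + 0x100 + 2*0x80 + 0x04.
 */
static inline unsigned long scc_reg(struct net_device *dev, int id, u32 reg)
{
	return dev->base_addr + SCC_REG_START(id) + reg;
}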

/* Bit masks */
#define IntRxScc0	0x10000000
#define IntTxScc0	0x01000000

#define TxPollCmd	0x00000400
#define RxActivate	0x08000000
#define MTFi		0x04000000
#define Rdr		0x00400000
#define Rdt		0x00200000
#define Idr		0x00100000
#define Idt		0x00080000
#define TxSccRes	0x01000000
#define RxSccRes	0x00010000
#define TxSizeMax	0x1ffc
#define RxSizeMax	0x1ffc

#define Ccr0ClockMask	0x0000003f
#define Ccr1LoopMask	0x00000200
#define BrrExpMask	0x00000f00
#define BrrMultMask	0x0000003f
#define EncodingMask	0x00700000
#define Hold		0x40000000
#define SccBusy		0x10000000
#define FrameOk		(FrameVfr | FrameCrc)
#define FrameVfr	0x80
#define FrameRdo	0x40
#define FrameCrc	0x20
#define FrameAborted	0x00000200
#define FrameEnd	0x80000000
#define DataComplete	0x40000000
#define LengthCheck	0x00008000
#define SccEvt		0x02000000
#define NoAck		0x00000200
#define Action		0x00000001
#define HiDesc		0x20000000

/* SCC events */
#define RxEvt		0xf0000000
#define TxEvt		0x0f000000
#define Alls		0x00040000
#define Xdu		0x00010000
#define Xmr		0x00002000
#define Xpr		0x00001000
#define Rdo		0x00000080
#define Rfs		0x00000040
#define Rfo		0x00000002
#define Flex		0x00000001

/* DMA core events */
#define Cfg		0x00200000
#define Hi		0x00040000
#define Fi		0x00020000
#define Err		0x00010000
#define Arf		0x00000002
#define ArAck		0x00000001

/* Misc */
#define NeedIDR		0x00000001
#define NeedIDT		0x00000002
#define RdoSet		0x00000004

/* Function prototypes */
static __inline__ void dscc4_rx_irq(struct dscc4_pci_priv *, struct net_device *);
static __inline__ void dscc4_tx_irq(struct dscc4_pci_priv *, struct net_device *);
static int dscc4_found1(struct pci_dev *, unsigned long ioaddr);
static int dscc4_init_one(struct pci_dev *, const struct pci_device_id *ent);
static int dscc4_open(struct net_device *);
static int dscc4_start_xmit(struct sk_buff *, struct net_device *);
static int dscc4_close(struct net_device *);
static int dscc4_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int dscc4_change_mtu(struct net_device *dev, int mtu);
static int dscc4_init_ring(struct net_device *);
static void dscc4_release_ring(struct dscc4_dev_priv *);
static void dscc4_timer(unsigned long);
static void dscc4_tx_timeout(struct net_device *);
static void dscc4_irq(int irq, void *dev_id, struct pt_regs *ptregs);
static struct net_device_stats *dscc4_get_stats(struct net_device *);
static int dscc4_attach_hdlc_device(struct net_device *);
static void dscc4_unattach_hdlc_device(struct net_device *);
static int dscc4_hdlc_open(struct hdlc_device_struct *);
static void dscc4_hdlc_close(struct hdlc_device_struct *);
static int dscc4_hdlc_ioctl(struct hdlc_device_struct *, struct ifreq *, int);
static int dscc4_hdlc_xmit(hdlc_device *, struct sk_buff *);
#ifdef EXPERIMENTAL_POLLING
static int dscc4_tx_poll(struct dscc4_dev_priv *, struct net_device *);
#endif

/* Park a Tx descriptor in its quiescent state: frame end + Hold,
 * completion word cleared */
static inline void reset_TxFD(struct TxFD *tx_fd)
{
	tx_fd->state = FrameEnd | Hold | 0x00100000;
	tx_fd->complete = 0x00000000;
}

static inline void dscc4_release_ring_skbuff(struct sk_buff **p, int n)
{
	for (; n > 0; n--) {
		if (*p)
			dev_kfree_skb(*p);
		p++;
	}
}

static void dscc4_release_ring(struct dscc4_dev_priv *dpriv)
{
	struct pci_dev *pdev = dpriv->pci_priv->pdev;

	pci_free_consistent(pdev, TX_RING_SIZE*sizeof(struct TxFD),
			    dpriv->tx_fd, dpriv->tx_fd_dma);
	pci_free_consistent(pdev, RX_RING_SIZE*sizeof(struct RxFD),
			    dpriv->rx_fd, dpriv->rx_fd_dma);
	dscc4_release_ring_skbuff(dpriv->tx_skbuff, TX_RING_SIZE);
	dscc4_release_ring_skbuff(dpriv->rx_skbuff, RX_RING_SIZE);
}
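
/*
 * Rx buffers are replenished in place: when the allocation below fails,
 * the previous descriptor keeps its Hold bit set so the DMA core stalls
 * on it instead of scribbling into a missing buffer, and rx_needs_refill
 * asks the rx irq handler to retry the allocation later.
 */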
static inline void try_get_rx_skb(struct dscc4_dev_priv *priv, int cur, struct net_device *dev)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(RX_MAX(HDLC_MAX_MRU+2));
	priv->rx_skbuff[cur] = skb;
	if (!skb) {
		priv->rx_fd[cur].data = (u32) NULL;
		/* Hold the previous descriptor; beware of the ring wrap */
		priv->rx_fd[(cur + RX_RING_SIZE - 1)%RX_RING_SIZE].state1 |= Hold;
		priv->rx_needs_refill++;
		return;
	}
	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);
	skb->mac.raw = skb->data;
	priv->rx_fd[cur].data = pci_map_single(priv->pci_priv->pdev, skb->data,
					       skb->len, PCI_DMA_FROMDEVICE);
}

/*
 * IRQ/thread/whatever safe
 */
static int dscc4_wait_ack_cec(u32 ioaddr, struct net_device *dev, char *msg)
{
	s16 i = 0;

	/* The s16 counter wraps negative after 32767 polls: cheap timeout */
	while (readl(ioaddr + STAR) & SccBusy) {
		if (i++ < 0) {
			printk(KERN_ERR "%s: %s timeout\n", dev->name, msg);
			return -1;
		}
	}
	printk(KERN_DEBUG "%s: %s ack (%d try)\n", dev->name, msg, i);
	return 0;
}

static int dscc4_do_action(struct net_device *dev, char *msg)
{
	unsigned long ioaddr = dev->base_addr;
	u32 state;
	s16 i;

	writel(Action, ioaddr + GCMDR);
	ioaddr += GSTAR;
	/* Again, loop until the s16 counter wraps negative (timeout) */
	for (i = 0; i >= 0; i++) {
		state = readl(ioaddr);
		if (state & Arf) {
			printk(KERN_ERR "%s: %s failed\n", dev->name, msg);
			writel(Arf, ioaddr);
			return -1;
		} else if (state & ArAck) {
			printk(KERN_DEBUG "%s: %s ack (%d try)\n",
			       dev->name, msg, i);
			writel(ArAck, ioaddr);
			return 0;
		}
	}
	printk(KERN_ERR "%s: %s timeout\n", dev->name, msg);
	return -1;
}

static __inline__ int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv)
{
	int cur;
	s16 i;

	cur = dpriv->iqtx_current%IRQ_RING_SIZE;
	for (i = 0; i >= 0; i++) {
		if (!(dpriv->flags & (NeedIDR | NeedIDT)) ||
		    (dpriv->iqtx[cur] & Xpr))
			return 0;
	}
	printk(KERN_ERR "%s: %s timeout\n", "dscc4", "XPR");
	return -1;
}

static __inline__ void dscc4_rx_skb(struct dscc4_dev_priv *dpriv, int cur,
				    struct RxFD *rx_fd, struct net_device *dev)
{
	struct pci_dev *pdev = dpriv->pci_priv->pdev;
	struct sk_buff *skb;
	int pkt_len;

	skb = dpriv->rx_skbuff[cur];
	/* The status byte trails the frame data */
	pkt_len = TO_SIZE(rx_fd->state2) - 1;
	pci_dma_sync_single(pdev, rx_fd->data, pkt_len + 1, PCI_DMA_FROMDEVICE);
	if ((skb->data[pkt_len] & FrameOk) == FrameOk) {
		pci_unmap_single(pdev, rx_fd->data, skb->len, PCI_DMA_FROMDEVICE);
		dpriv->stats.rx_packets++;
		dpriv->stats.rx_bytes += pkt_len;
		skb->tail += pkt_len;
		skb->len = pkt_len;
		if (netif_running(hdlc_to_dev(&dpriv->hdlc)))
			hdlc_netif_rx(&dpriv->hdlc, skb);
		else
			netif_rx(skb);
		try_get_rx_skb(dpriv, cur, dev);
	} else {
		if (skb->data[pkt_len] & FrameRdo)
			dpriv->stats.rx_fifo_errors++;
		else if (!(skb->data[pkt_len] & FrameCrc))
			dpriv->stats.rx_crc_errors++;
		else if (!(skb->data[pkt_len] & FrameVfr))
			dpriv->stats.rx_length_errors++;
		else
			dpriv->stats.rx_errors++;
	}
	rx_fd->state1 |= Hold;
	rx_fd->state2 = 0x00000000;
	rx_fd->end = 0xbabeface;
	if (!rx_fd->data)
		return;
	/* Release the previous descriptor so the DMA core can move on */
	rx_fd--;
	if (!cur)
		rx_fd += RX_RING_SIZE;
	rx_fd->state1 &= ~Hold;
}
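
/*
 * The interrupt queue length registers hold a 4-bit "(entries/32) - 1"
 * code per queue. dscc4_init_one below computes the code once for
 * IRQ_RING_SIZE and replicates it into every nibble of IQLENR0, i.e.
 * 0x11111111 for the 64-entry queues used here.
 */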
static int __init dscc4_init_one(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct dscc4_pci_priv *priv;
	struct dscc4_dev_priv *dpriv;
	int i;
	static int cards_found = 0;
	unsigned long ioaddr;

	printk(KERN_DEBUG "%s", version);

	if (pci_enable_device(pdev))
		goto err_out;
	if (!request_mem_region(pci_resource_start(pdev, 0),
				pci_resource_len(pdev, 0), "registers")) {
		printk(KERN_ERR "dscc4: can't reserve MMIO region (regs)\n");
		goto err_out;
	}
	if (!request_mem_region(pci_resource_start(pdev, 1),
				pci_resource_len(pdev, 1), "LBI interface")) {
		printk(KERN_ERR "dscc4: can't reserve MMIO region (lbi)\n");
		goto err_out_free_mmio_region0;
	}
	ioaddr = (unsigned long)ioremap(pci_resource_start(pdev, 0),
					pci_resource_len(pdev, 0));
	if (!ioaddr) {
		printk(KERN_ERR "dscc4: cannot remap MMIO region %lx @ %lx\n",
		       pci_resource_len(pdev, 0), pci_resource_start(pdev, 0));
		goto err_out_free_mmio_region;
	}
	printk(KERN_DEBUG "Siemens DSCC4, MMIO at %#lx (regs), %#lx (lbi), IRQ %d.\n",
	       pci_resource_start(pdev, 0),
	       pci_resource_start(pdev, 1), pdev->irq);

	/* High PCI latency useless. Cf app. note. */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x10);
	pci_set_master(pdev);

	if (dscc4_found1(pdev, ioaddr))
		goto err_out_iounmap;

	priv = (struct dscc4_pci_priv *)pci_get_drvdata(pdev);

	if (request_irq(pdev->irq, &dscc4_irq, SA_SHIRQ, "dscc4", priv->root)) {
		printk(KERN_WARNING "dscc4: IRQ %d is busy\n", pdev->irq);
		goto err_out_iounmap;
	}
	priv->pdev = pdev;

	/* power up/little endian/dma core controlled via hold bit */
	writel(0x00000000, ioaddr + GMODE);
	/* Shared interrupt queue */
	{
		u32 bits;

		bits = (IRQ_RING_SIZE >> 5) - 1;
		bits |= bits << 4;
		bits |= bits << 8;
		bits |= bits << 16;
		writel(bits, ioaddr + IQLENR0);
	}
	/* Global interrupt queue */
	writel((u32)(((IRQ_RING_SIZE >> 5) - 1) << 20), ioaddr + IQLENR1);
	priv->iqcfg = (u32 *) pci_alloc_consistent(pdev,
		IRQ_RING_SIZE*sizeof(u32), &priv->iqcfg_dma);
	if (!priv->iqcfg)
		goto err_out_free_irq;
	writel(priv->iqcfg_dma, ioaddr + IQCFG);

	/*
	 * SCC 0-3 private rx/tx irq structures
	 * IQRX/TXi needs to be set soon. Learned it the hard way...
	 */
	for (i = 0; i < dev_per_card; i++) {
		dpriv = (struct dscc4_dev_priv *)(priv->root + i)->priv;
		dpriv->iqtx = (u32 *) pci_alloc_consistent(pdev,
			IRQ_RING_SIZE*sizeof(u32), &dpriv->iqtx_dma);
		if (!dpriv->iqtx)
			goto err_out_free_iqtx;
		writel(dpriv->iqtx_dma, ioaddr + IQTX0 + i*4);
	}
	for (i = 0; i < dev_per_card; i++) {
		dpriv = (struct dscc4_dev_priv *)(priv->root + i)->priv;
		dpriv->iqrx = (u32 *) pci_alloc_consistent(pdev,
			IRQ_RING_SIZE*sizeof(u32), &dpriv->iqrx_dma);
		if (!dpriv->iqrx)
			goto err_out_free_iqrx;
		writel(dpriv->iqrx_dma, ioaddr + IQRX0 + i*4);
	}

	/*
	 * Cf application hint. Beware of hard-lock condition on
	 * threshold.
	 */
	writel(0x42104000, ioaddr + FIFOCR1);
	//writel(0x9ce69800, ioaddr + FIFOCR2);
	writel(0xdef6d800, ioaddr + FIFOCR2);
	//writel(0x11111111, ioaddr + FIFOCR4);
	writel(0x18181818, ioaddr + FIFOCR4);
	// FIXME: should depend on the chipset revision
	writel(0x0000000e, ioaddr + FIFOCR3);

	writel(0xff200001, ioaddr + GCMDR);

	cards_found++;
	return 0;

err_out_free_iqrx:
	while (--i >= 0) {
		dpriv = (struct dscc4_dev_priv *)(priv->root + i)->priv;
		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
				    dpriv->iqrx, dpriv->iqrx_dma);
	}
	i = dev_per_card;
err_out_free_iqtx:
	while (--i >= 0) {
		dpriv = (struct dscc4_dev_priv *)(priv->root + i)->priv;
		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
				    dpriv->iqtx, dpriv->iqtx_dma);
	}
	pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), priv->iqcfg,
			    priv->iqcfg_dma);
err_out_free_irq:
	free_irq(pdev->irq, priv->root);
err_out_iounmap:
	iounmap((void *)ioaddr);
err_out_free_mmio_region:
	release_mem_region(pci_resource_start(pdev, 1),
			   pci_resource_len(pdev, 1));
err_out_free_mmio_region0:
	release_mem_region(pci_resource_start(pdev, 0),
			   pci_resource_len(pdev, 0));
err_out:
	return -ENODEV;
}

static int dscc4_found1(struct pci_dev *pdev, unsigned long ioaddr)
{
	struct dscc4_pci_priv *ppriv;
	struct dscc4_dev_priv *dpriv;
	struct net_device *dev;
	int i = 0;

	dpriv = (struct dscc4_dev_priv *)
		kmalloc(dev_per_card*sizeof(struct dscc4_dev_priv), GFP_KERNEL);
	if (!dpriv) {
		printk(KERN_ERR "dscc4: can't allocate data\n");
		goto err_out;
	}
	memset(dpriv, 0, dev_per_card*sizeof(struct dscc4_dev_priv));

	dev = (struct net_device *)
	      kmalloc(dev_per_card*sizeof(struct net_device), GFP_KERNEL);
	if (!dev) {
		printk(KERN_ERR "dscc4: can't allocate net_device\n");
		goto err_dealloc_priv;
	}
	memset(dev, 0, dev_per_card*sizeof(struct net_device));

	ppriv = (struct dscc4_pci_priv *)
		kmalloc(sizeof(struct dscc4_pci_priv), GFP_KERNEL);
	if (!ppriv) {
		printk(KERN_ERR "dscc4: can't allocate pci private data.\n");
		goto err_dealloc_dev;
	}
	memset(ppriv, 0, sizeof(struct dscc4_pci_priv));

	for (i = 0; i < dev_per_card; i++) {
		struct dscc4_dev_priv *p;
		struct net_device *d;

		d = dev + i;
		d->base_addr = ioaddr;
		d->init = NULL;
		d->irq = pdev->irq;
		/* The card adds the crc */
		d->type = ARPHRD_RAWHDLC;
		d->open = dscc4_open;
		d->stop = dscc4_close;
		d->hard_start_xmit = dscc4_start_xmit;
		d->set_multicast_list = NULL;
		d->do_ioctl = dscc4_ioctl;
		d->get_stats = dscc4_get_stats;
		d->change_mtu = dscc4_change_mtu;
		d->mtu = HDLC_MAX_MTU;
		d->flags = IFF_MULTICAST|IFF_POINTOPOINT|IFF_NOARP;
		d->tx_timeout = dscc4_tx_timeout;
		d->watchdog_timeo = TX_TIMEOUT;

		p = dpriv + i;
		p->dev_id = i;
		p->pci_priv = ppriv;
		spin_lock_init(&p->lock);
		d->priv = p;

		if (dev_alloc_name(d, "scc%d") < 0) {
			printk(KERN_ERR "dev_alloc_name failed for scc.\n");
			goto err_dealloc_dev;
		}
		if (register_netdev(d)) {
			printk(KERN_ERR "%s: register_netdev != 0.\n", d->name);
			goto err_dealloc_dev;
		}
		dscc4_attach_hdlc_device(d);
		SET_MODULE_OWNER(d);
	}
	ppriv->root = dev;
	ppriv->pdev = pdev;
	spin_lock_init(&ppriv->lock);
	pci_set_drvdata(pdev, ppriv);
	return 0;

err_dealloc_dev:
	while (--i >= 0)
		unregister_netdev(dev + i);
	kfree(ppriv);	/* kfree(NULL) is safe when its allocation failed */
	kfree(dev);
err_dealloc_priv:
	kfree(dpriv);
err_out:
	return -1;
}
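
/*
 * Periodic timer: this is the "fake an irq" workaround announced in the
 * Theory of Operation notes. If the queue is stopped while the chipset
 * apparently lost a completion event, it either replays the pending tx
 * irq queue or drops the stuck frames and wakes the queue up again.
 */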
static void dscc4_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct dscc4_dev_priv *dpriv;
	struct dscc4_pci_priv *ppriv;

	dpriv = dev->priv;
	if (netif_queue_stopped(dev) &&
	    ((jiffies - dev->trans_start) > TX_TIMEOUT)) {
		ppriv = dpriv->pci_priv;
		if (dpriv->iqtx[dpriv->iqtx_current%IRQ_RING_SIZE]) {
			unsigned long flags;

			printk(KERN_DEBUG "%s: pending events\n", dev->name);
			dev->trans_start = jiffies;
			spin_lock_irqsave(&ppriv->lock, flags);
			dscc4_tx_irq(ppriv, dev);
			spin_unlock_irqrestore(&ppriv->lock, flags);
		} else {
			struct TxFD *tx_fd;
			struct sk_buff *skb;
			int i, j;

			printk(KERN_DEBUG "%s: missing events\n", dev->name);
			i = dpriv->tx_dirty%TX_RING_SIZE;
			j = dpriv->tx_current - dpriv->tx_dirty;
			dpriv->stats.tx_dropped += j;
			while (j--) {
				skb = dpriv->tx_skbuff[i];
				tx_fd = dpriv->tx_fd + i;
				if (skb) {
					dpriv->tx_skbuff[i] = NULL;
					pci_unmap_single(ppriv->pdev, tx_fd->data, skb->len,
							 PCI_DMA_TODEVICE);
					dev_kfree_skb_irq(skb);
				} else
					printk(KERN_INFO "%s: hardware on drugs!\n", dev->name);
				tx_fd->data = 0; /* DEBUG */
				tx_fd->complete &= ~DataComplete;
				i++;
				i %= TX_RING_SIZE;
			}
			dpriv->tx_dirty = dpriv->tx_current;
			dev->trans_start = jiffies;
			netif_wake_queue(dev);
			printk(KERN_DEBUG "%s: re-enabled\n", dev->name);
		}
	}
	dpriv->timer.expires = jiffies + TX_TIMEOUT;
	add_timer(&dpriv->timer);
}

static void dscc4_tx_timeout(struct net_device *dev)
{
	/* Empty on purpose: dscc4_timer does the actual tx recovery */
}

static int dscc4_open(struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = (struct dscc4_dev_priv *)dev->priv;
	struct dscc4_pci_priv *ppriv;
	u32 ioaddr = 0;

	MOD_INC_USE_COUNT;

	ppriv = dpriv->pci_priv;

	if (dscc4_init_ring(dev))
		goto err_out;

	ioaddr = dev->base_addr + SCC_REG_START(dpriv->dev_id);

	writel(readl(ioaddr + CCR0) | 0x80001000, ioaddr + CCR0);

	writel(LengthCheck | (HDLC_MAX_MRU >> 5), ioaddr + RLCR);

	/* no address recognition/crc-CCITT/cts enabled */
	writel(readl(ioaddr + CCR1) | 0x021c8000, ioaddr + CCR1);

	/* Ccr2.Rac = 0 */
	writel(0x00050008 & ~RxActivate, ioaddr + CCR2);

#ifdef EXPERIMENTAL_POLLING
	writel(0xfffeef7f, ioaddr + IMR); /* Interrupt mask */
#else
	/* Don't mask RDO. Ever. */
	//writel(0xfffaef7f, ioaddr + IMR); /* Interrupt mask */
	writel(0xfffaef7e, ioaddr + IMR); /* Interrupt mask */
#endif
	/* IDT+IDR during XPR */
	dpriv->flags = NeedIDR | NeedIDT;

	/*
	 * The following is a bit paranoid...
	 *
	 * NB: the datasheet "...CEC will stay active if the SCC is in
	 * power-down mode or..." and CCR2.RAC = 1 are two different
	 * situations.
	 */
	if (readl(ioaddr + STAR) & SccBusy) {
		printk(KERN_ERR "%s busy. Try later\n", dev->name);
		goto err_free_ring;
	}
	writel(TxSccRes | RxSccRes, ioaddr + CMDR);

	/* ... the following isn't */
	if (dscc4_wait_ack_cec(ioaddr, dev, "Cec"))
		goto err_free_ring;

	/*
	 * I would expect XPR near CE completion (before ? after ?).
	 * At worst, this code won't see a late XPR and people
	 * will have to re-issue an ifconfig (this is harmless).
	 * WARNING, a really missing XPR usually means a hardware
	 * reset is needed. Suggestions anyone ?
	 */
	if (dscc4_xpr_ack(dpriv))
		goto err_free_ring;

	netif_start_queue(dev);

	init_timer(&dpriv->timer);
	dpriv->timer.expires = jiffies + 10*HZ;
	dpriv->timer.data = (unsigned long)dev;
	dpriv->timer.function = &dscc4_timer;
	add_timer(&dpriv->timer);
	netif_carrier_on(dev);

	return 0;

err_free_ring:
	dscc4_release_ring(dpriv);
err_out:
	MOD_DEC_USE_COUNT;
	return -EAGAIN;
}

#ifdef EXPERIMENTAL_POLLING
static int dscc4_tx_poll(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
	/* TODO: not implemented yet */
	return 0;
}
#endif /* EXPERIMENTAL_POLLING */

static int dscc4_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = dev->priv;
	struct dscc4_pci_priv *ppriv;
	struct TxFD *tx_fd;
	int cur, next;

	ppriv = dpriv->pci_priv;
	cur = dpriv->tx_current++%TX_RING_SIZE;
	next = dpriv->tx_current%TX_RING_SIZE;
	dpriv->tx_skbuff[next] = skb;
	tx_fd = dpriv->tx_fd + next;
	tx_fd->state = FrameEnd | Hold | TO_STATE(skb->len & TxSizeMax);
	tx_fd->data = pci_map_single(ppriv->pdev, skb->data, skb->len,
				     PCI_DMA_TODEVICE);
	tx_fd->complete = 0x00000000;
	mb(); // FIXME: suppress ?

#ifdef EXPERIMENTAL_POLLING
	spin_lock(&dpriv->lock);
	while (dscc4_tx_poll(dpriv, dev));
	spin_unlock(&dpriv->lock);
#endif
	/*
	 * I know there's a window for a race in the following lines but
	 * dscc4_timer will take good care of it. The chipset eats events
	 * (especially the net_dev re-enabling ones) thus there is no
	 * reason to try and be smart.
	 */
	if ((dpriv->tx_dirty + 16) < dpriv->tx_current) {
		netif_stop_queue(dev);
		dpriv->hi_expected = 2;
	}
	/* The new descriptor holds; release the previous one to the chip */
	tx_fd = dpriv->tx_fd + cur;
	tx_fd->state &= ~Hold;
	mb(); // FIXME: suppress ?

	/*
	 * One may avoid some pci transactions during intense TX periods.
	 * Not sure it's worth the pain...
	 */
	writel((TxPollCmd << dpriv->dev_id) | NoAck, dev->base_addr + GCMDR);
	dev->trans_start = jiffies;
	return 0;
}
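
/*
 * Bookkeeping note: in dscc4_start_xmit above, tx_current is the
 * producer index while tx_dirty is the consumer index advanced by the
 * ALLS irq; both grow monotonically and are reduced modulo TX_RING_SIZE
 * on use, so tx_current - tx_dirty is the number of frames the chipset
 * still owns.
 */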
static int dscc4_close(struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = (struct dscc4_dev_priv *)dev->priv;
	u32 ioaddr = dev->base_addr;
	int dev_id;

	del_timer_sync(&dpriv->timer);
	netif_stop_queue(dev);

	dev_id = dpriv->dev_id;

	writel(0x00050000, ioaddr + SCC_REG_START(dev_id) + CCR2);
	writel(MTFi|Rdr|Rdt, ioaddr + CH0CFG + dev_id*0x0c); /* Reset Rx/Tx */
	writel(0x00000001, ioaddr + GCMDR);

	dscc4_release_ring(dpriv);

	MOD_DEC_USE_COUNT;
	return 0;
}

static int dscc4_set_clock(struct net_device *dev, u32 *bps, u32 *state)
{
	struct dscc4_dev_priv *dpriv = (struct dscc4_dev_priv *)dev->priv;
	u32 brr;

	*state &= ~Ccr0ClockMask;
	if (*bps) { /* DCE */
		u32 n = 0, m = 0, divider;
		int xtal;

		xtal = dpriv->pci_priv->xtal_hz;
		if (!xtal)
			return -1;
		divider = xtal / *bps;
		if (divider > BRR_DIVIDER_MAX) {
			divider >>= 4;
			*state |= 0x00000036; /* Clock mode 6b (BRG/16) */
		} else
			*state |= 0x00000037; /* Clock mode 7b (BRG) */
		if (divider >> 22) {
			n = 63;
			m = 15;
		} else if (divider) {
			/* Extraction of the 6 highest weighted bits */
			m = 0;
			while (0xffffffc0 & divider) {
				m++;
				divider >>= 1;
			}
			n = divider;
		}
		brr = (m << 8) | n;
		divider = n << m;
		if (!(*state & 0x00000001)) /* Clock mode 6b */
			divider <<= 4;
		*bps = xtal / divider;
	} else { /* DTE */
		/*
		 * "state" already reflects Clock mode 0a.
		 * Nothing more to be done.
		 */
		brr = 0;
	}
	writel(brr, dev->base_addr + BRR + SCC_REG_START(dpriv->dev_id));

	return 0;
}
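
/*
 * Worked example of the arithmetic above (illustrative figures): with a
 * 14.7456 MHz quartz and *bps = 9600, divider = 1536. The six highest
 * weighted bits give n = 48, m = 5, hence brr = (5 << 8) | 48 and the
 * resulting rate is xtal / (48 << 5) = 9600 bps exactly.
 */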

#ifdef LATER_PLEASE
/*
 * -*- [RFC] Configuring Synchronous Interfaces in Linux -*-
 */

// FIXME: MEDIA already defined in linux/hdlc.h
#define HDLC_MEDIA_V35		0
#define HDLC_MEDIA_RS232	1
#define HDLC_MEDIA_X21		2
#define HDLC_MEDIA_E1		3
#define HDLC_MEDIA_HSSI		4

#define HDLC_CODING_NRZ		0
#define HDLC_CODING_NRZI	1
#define HDLC_CODING_FM0		2
#define HDLC_CODING_FM1		3
#define HDLC_CODING_MANCHESTER	4

#define HDLC_CRC_NONE		0
#define HDLC_CRC_16		1
#define HDLC_CRC_32		2
#define HDLC_CRC_CCITT		3

/* RFC: add the crc reset value ? */
struct hdlc_physical {
	u8 media;
	u8 coding;
	u32 rate;
	u8 crc;
	u8 crc_siz;		/* 2 or 4 bytes */
	u8 shared_flags;	/* Discouraged on the DSCC4 */
};

// FIXME: PROTO already defined in linux/hdlc.h
#define HDLC_PROTO_RAW		0
#define HDLC_PROTO_FR		1
#define HDLC_PROTO_X25		2
#define HDLC_PROTO_PPP		3
#define HDLC_PROTO_CHDLC	4

struct hdlc_protocol {
	u8 proto;

	union {
	} u;
};

struct screq {
	u16 media_group;

	union {
		struct hdlc_physical hdlc_phy;
		struct hdlc_protocol hdlc_proto;
	} u;
};

// FIXME: go sub-module
static struct {
	u16 coding;
	u16 bits;
} map[] = {
	{HDLC_CODING_NRZ,	0x00},
	{HDLC_CODING_NRZI,	0x20},
	{HDLC_CODING_FM0,	0x40},
	{HDLC_CODING_FM1,	0x50},
	{HDLC_CODING_MANCHESTER, 0x60},
	{65535, 0x00}
};
#endif /* LATER_PLEASE */

static int dscc4_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dscc4_dev_priv *dpriv = dev->priv;
	u32 state, ioaddr;

	if (dev->flags & IFF_UP)
		return -EBUSY;

	switch (cmd) {
	/* Set built-in quartz frequency */
	case SIOCDEVPRIVATE: {
		u32 hz;

		hz = ifr->ifr_ifru.ifru_ivalue;
		if (hz >= 33000000) /* 33 MHz */
			return -EOPNOTSUPP;
		dpriv->pci_priv->xtal_hz = hz;
		return 0;
	}
	/* Set/unset loopback */
	case SIOCDEVPRIVATE+1: {
		u32 flags;

		ioaddr = dev->base_addr + CCR1 +
			 SCC_REG_START(dpriv->dev_id);
		state = readl(ioaddr);
		flags = ifr->ifr_ifru.ifru_ivalue;
		if (flags & 0x00000001) {
			printk(KERN_DEBUG "%s: loopback\n", dev->name);
			state |= 0x00000100;
		} else {
			printk(KERN_DEBUG "%s: normal\n", dev->name);
			state &= ~0x00000100;
		}
		writel(state, ioaddr);
		return 0;
	}

#ifdef LATER_PLEASE
	case SIOCDEVPRIVATE+2: {
		struct screq scr;
		int i = 0;

		if (copy_from_user(&scr, ifr->ifr_ifru.ifru_data, sizeof(struct screq)))
			return -EFAULT;
		do {
			if (scr.u.hdlc_phy.coding == map[i].coding)
				break;
		} while (map[++i].coding != 65535);
		if (map[i].coding == 65535)
			return -EOPNOTSUPP;

		ioaddr = dev->base_addr + CCR0 +
			 SCC_REG_START(dpriv->dev_id);
		state = readl(ioaddr) & ~EncodingMask;
		state |= (u32)map[i].bits << 16;
		writel(state, ioaddr);
		printk("state: %08x\n", state); /* DEBUG */
		return 0;
	}
	case SIOCDEVPRIVATE+3: {
		struct screq *scr = (struct screq *)ifr->ifr_ifru.ifru_data;
		int i = 0;

		ioaddr = dev->base_addr + CCR0 +
			 SCC_REG_START(dpriv->dev_id);
		state = (readl(ioaddr) & EncodingMask) >> 16;
		do {
			if (state == map[i].bits)
				break;
		} while (map[++i].coding != 65535);
		return put_user(map[i].coding, &scr->u.hdlc_phy.coding);
	}
#endif /* LATER_PLEASE */

	case HDLCSCLOCKRATE:
	{
		u32 state, bps;

		bps = ifr->ifr_ifru.ifru_ivalue;
		ioaddr = dev->base_addr + CCR0 +
			 SCC_REG_START(dpriv->dev_id);
		state = readl(ioaddr);
		if (dscc4_set_clock(dev, &bps, &state) < 0)
			return -EOPNOTSUPP;
		if (bps) { /* DCE */
			printk(KERN_DEBUG "%s: generated RxClk (DCE)\n",
			       dev->name);
			ifr->ifr_ifru.ifru_ivalue = bps;
		} else { /* DTE */
			state = 0x80001000;
			printk(KERN_DEBUG "%s: external RxClk (DTE)\n",
			       dev->name);
		}
		writel(state, ioaddr);
		return 0;
	}
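
	/*
	 * Readback below decodes BRR as rate = (xtal >> M) / (N + 1),
	 * which is off by one in N from the N * 2^M divider that
	 * dscc4_set_clock programs: expect slightly different figures
	 * when reading back a rate that was just set.
	 */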
	case HDLCGCLOCKRATE: {
		u32 brr;
		int bps;

		brr = readl(dev->base_addr + BRR +
			    SCC_REG_START(dpriv->dev_id));
		bps = dpriv->pci_priv->xtal_hz >> (brr >> 8);
		bps /= (brr & 0x3f) + 1;
		ifr->ifr_ifru.ifru_ivalue = bps;
		return 0;
	}

	default:
		return -EOPNOTSUPP;
	}
}

static int dscc4_change_mtu(struct net_device *dev, int mtu)
{
	if ((mtu <= 3) || (mtu > 65531))
		return -EINVAL;
	if (dev->flags & IFF_UP)
		return -EBUSY;
	dev->mtu = mtu;
	return 0;
}

static void dscc4_irq(int irq, void *dev_instance, struct pt_regs *ptregs)
{
	struct net_device *dev = dev_instance;
	struct dscc4_pci_priv *priv;
	u32 ioaddr, state;
	unsigned long flags;
	int i;

	priv = ((struct dscc4_dev_priv *)dev->priv)->pci_priv;
	spin_lock_irqsave(&priv->lock, flags);

	ioaddr = dev->base_addr;

	state = readl(ioaddr + GSTAR);
	if (!state)
		goto out;
	/* Writing the set bits back acknowledges them */
	writel(state, ioaddr + GSTAR);

	if (state & Arf) {
		printk(KERN_ERR "%s: failure (Arf). Harass the maintainer\n",
		       dev->name);
		goto out;
	}
	state &= ~ArAck;
	if (state & Cfg) {
		if (debug)
			printk(KERN_DEBUG "CfgIV\n");
		if (priv->iqcfg[priv->cfg_cur++%IRQ_RING_SIZE] & Arf)
			printk(KERN_ERR "%s: %s failed\n", dev->name, "CFG");
		if (!(state &= ~Cfg))
			goto out;
	}
	if (state & RxEvt) {
		i = dev_per_card - 1;
		do {
			dscc4_rx_irq(priv, dev + i);
		} while (--i >= 0);
		state &= ~RxEvt;
	}
	if (state & TxEvt) {
		i = dev_per_card - 1;
		do {
			dscc4_tx_irq(priv, dev + i);
		} while (--i >= 0);
		state &= ~TxEvt;
	}
out:
	spin_unlock_irqrestore(&priv->lock, flags);
}

static __inline__ void dscc4_tx_irq(struct dscc4_pci_priv *ppriv,
				    struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = dev->priv;
	u32 state;
	int cur, loop = 0;

try:
	cur = dpriv->iqtx_current%IRQ_RING_SIZE;
	state = dpriv->iqtx[cur];
	if (!state) {
#ifdef DEBUG
		if (loop > 1)
			printk(KERN_DEBUG "%s: Tx irq loop=%d\n", dev->name, loop);
#endif
		if (loop && netif_queue_stopped(dev))
			if ((dpriv->tx_dirty + 8) >= dpriv->tx_current)
				netif_wake_queue(dev);
		return;
	}
	loop++;
	dpriv->iqtx[cur] = 0;
	dpriv->iqtx_current++;

#ifdef DEBUG_PARANOID
	if (SOURCE_ID(state) != dpriv->dev_id) {
		printk(KERN_DEBUG "%s (Tx): Source Id=%d, state=%08x\n",
		       dev->name, SOURCE_ID(state), state);
		return;
	}
	if (state & 0x0df80c00) {
		printk(KERN_DEBUG "%s (Tx): state=%08x (UFO alert)\n",
		       dev->name, state);
		return;
	}
#endif
	// state &= 0x0fffffff; /* Tracking the analyzed bits */
	if (state & SccEvt) {
		if (state & Alls) {
			struct TxFD *tx_fd;
			struct sk_buff *skb;

			cur = dpriv->tx_dirty%TX_RING_SIZE;
			tx_fd = dpriv->tx_fd + cur;

			skb = dpriv->tx_skbuff[cur];

			if (!skb) {
				printk(KERN_ERR "%s: NULL skb in tx_irq at index %d\n", dev->name, cur);
				goto try;
			}
			dpriv->tx_dirty++; // MUST be after skb test

			/* Happens sometimes. Don't know what triggers it */
			if (!(tx_fd->complete & DataComplete)) {
				u32 ioaddr, isr;

				ioaddr = dev->base_addr +
					 SCC_REG_START(dpriv->dev_id) + ISR;
				isr = readl(ioaddr);
				printk(KERN_DEBUG
				       "%s: DataComplete=0 cur=%d isr=%08x state=%08x\n",
				       dev->name, cur, isr, state);
				writel(isr, ioaddr);
				dpriv->stats.tx_dropped++;
			} else {
				tx_fd->complete &= ~DataComplete;
				if (tx_fd->state & FrameEnd) {
					dpriv->stats.tx_packets++;
					dpriv->stats.tx_bytes += skb->len;
				}
			}

			dpriv->tx_skbuff[cur] = NULL;
			pci_unmap_single(ppriv->pdev, tx_fd->data, skb->len,
					 PCI_DMA_TODEVICE);
			tx_fd->data = 0; /* DEBUG */
			dev_kfree_skb_irq(skb);
			{ // DEBUG: keep the last dirty descriptor held
				cur = (dpriv->tx_dirty-1)%TX_RING_SIZE;
				tx_fd = dpriv->tx_fd + cur;
				tx_fd->state |= Hold;
			}
			if (!(state &= ~Alls))
				goto try;
		}
		/*
		 * Transmit Data Underrun
		 */
		if (state & Xdu) {
			printk(KERN_ERR "dscc4: XDU. Contact maintainer\n");
			dpriv->flags = NeedIDT;
			/* Tx reset */
			writel(MTFi | Rdt,
			       dev->base_addr + 0x0c*dpriv->dev_id + CH0CFG);
			writel(0x00000001, dev->base_addr + GCMDR);
			return;
		}
		if (state & Xmr) {
			//dscc4_start_xmit(dpriv->tx_skbuff[dpriv->tx_dirty], dev);
			if (!(state &= ~0x00002000)) /* DEBUG */
				goto try;
		}
		if (state & Xpr) {
			unsigned long ioaddr = dev->base_addr;
			unsigned long scc_offset;
			u32 scc_addr;

			scc_offset = ioaddr + SCC_REG_START(dpriv->dev_id);
			scc_addr = ioaddr + 0x0c*dpriv->dev_id;
			if (readl(scc_offset + STAR) & SccBusy)
				printk(KERN_DEBUG "%s busy. Fatal\n",
				       dev->name);
			/*
			 * Keep this order: IDT before IDR
			 */
			if (dpriv->flags & NeedIDT) {
				writel(MTFi | Idt, scc_addr + CH0CFG);
				writel(dpriv->tx_fd_dma +
				       (dpriv->tx_dirty%TX_RING_SIZE)*
				       sizeof(struct TxFD), scc_addr + CH0BTDA);
				if (dscc4_do_action(dev, "IDT"))
					goto err_xpr;
				dpriv->flags &= ~NeedIDT;
				mb();
			}
			if (dpriv->flags & NeedIDR) {
				writel(MTFi | Idr, scc_addr + CH0CFG);
				writel(dpriv->rx_fd_dma +
				       (dpriv->rx_current%RX_RING_SIZE)*
				       sizeof(struct RxFD), scc_addr + CH0BRDA);
				if (dscc4_do_action(dev, "IDR"))
					goto err_xpr;
				dpriv->flags &= ~NeedIDR;
				mb();
				/* Activate receiver and misc */
				writel(0x08050008, scc_offset + CCR2);
			}
		err_xpr:
			if (!(state &= ~Xpr))
				goto try;
		}
	} else { /* !SccEvt */
		if (state & Hi) {
#ifdef EXPERIMENTAL_POLLING
			while (!dscc4_tx_poll(dpriv, dev));
#endif
			state &= ~Hi;
		}
		if (state & Err) {
			printk(KERN_ERR "%s: Tx ERR\n", dev->name);
			dpriv->stats.tx_errors++;
			state &= ~Err;
		}
	}
	goto try;
}

static __inline__ void dscc4_rx_irq(struct dscc4_pci_priv *priv, struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = dev->priv;
	u32 state;
	int cur;

try:
	cur = dpriv->iqrx_current%IRQ_RING_SIZE;
	state = dpriv->iqrx[cur];
	if (!state)
		return;
	dpriv->iqrx[cur] = 0;
	dpriv->iqrx_current++;

#ifdef DEBUG_PARANOID
	if (SOURCE_ID(state) != dpriv->dev_id) {
		printk(KERN_DEBUG "%s (Rx): Source Id=%d, state=%08x\n",
		       dev->name, SOURCE_ID(state), state);
		goto try;
	}
	if (state & 0x0df80c00) {
		printk(KERN_DEBUG "%s (Rx): state=%08x (UFO alert)\n",
		       dev->name, state);
		goto try;
	}
#endif
	if (!(state & SccEvt)) {
		struct RxFD *rx_fd;

		state &= 0x00ffffff;
		if (state & Err) { /* Hold or reset */
			printk(KERN_DEBUG "%s (Rx): ERR\n", dev->name);
			cur = dpriv->rx_current;
			rx_fd = dpriv->rx_fd + cur;
			/*
			 * Presume we're not facing a DMAC receiver reset.
			 * As we use the rx size-filtering feature of the
			 * DSCC4, the beginning of a new frame is waiting in
			 * the rx fifo. I bet a Receive Data Overflow will
			 * happen most of the time but let's try and avoid it.
			 * Btw (as for RDO) if one experiences ERR whereas
			 * the system looks rather idle, there may be a
			 * problem with latency. In this case, increasing
			 * RX_RING_SIZE may help.
			 */
			while (dpriv->rx_needs_refill) {
				while (!(rx_fd->state1 & Hold)) {
					rx_fd++;
					cur++;
					if (!(cur = cur%RX_RING_SIZE))
						rx_fd = dpriv->rx_fd;
				}
				dpriv->rx_needs_refill--;
				try_get_rx_skb(dpriv, cur, dev);
				if (!rx_fd->data)
					goto try;
				rx_fd->state1 &= ~Hold;
				rx_fd->state2 = 0x00000000;
				rx_fd->end = 0xbabeface;
			}
			goto try;
		}
		if (state & Fi) {
			cur = dpriv->rx_current%RX_RING_SIZE;
			rx_fd = dpriv->rx_fd + cur;
			dscc4_rx_skb(dpriv, cur, rx_fd, dev);
			dpriv->rx_current++;
			goto try;
		}
		if (state & Hi) { /* HI bit */
			state &= ~Hi;
			goto try;
		}
	} else { /* SccEvt */
#ifdef DEBUG_PARANOID
		int i;
		static struct {
			u32 mask;
			const char *irq_name;
		} evts[] = {
			{ 0x00008000, "TIN"},
			{ 0x00004000, "CSC"},
			{ 0x00000020, "RSC"},
			{ 0x00000010, "PCE"},
			{ 0x00000008, "PLLA"},
			{ 0x00000004, "CDSC"},
			{ 0, NULL}
		};
#endif /* DEBUG_PARANOID */
		state &= 0x00ffffff;
#ifdef DEBUG_PARANOID
		for (i = 0; evts[i].irq_name; i++) {
			if (state & evts[i].mask) {
				printk(KERN_DEBUG "dscc4(%s): %s\n",
				       dev->name, evts[i].irq_name);
				if (!(state &= ~evts[i].mask))
					goto try;
			}
		}
#endif /* DEBUG_PARANOID */
		if (state & Rdo) {
			u32 ioaddr, scc_offset, scc_addr;
			struct RxFD *rx_fd;
			int cur;

			//if (debug)
			//	dscc4_rx_dump(dpriv);
			ioaddr = dev->base_addr;
			scc_addr = ioaddr + 0x0c*dpriv->dev_id;
			scc_offset = ioaddr + SCC_REG_START(dpriv->dev_id);

			writel(readl(scc_offset + CCR2) & ~RxActivate,
			       scc_offset + CCR2);
			/*
			 * This has no effect. Why ?
			 * ORed with TxSccRes, one sees the CFG ack (for
			 * the TX part only).
			 */
			writel(RxSccRes, scc_offset + CMDR);
			dpriv->flags |= RdoSet;

			/*
			 * Let's try and save something in the received data.
			 * rx_current must be incremented at least once to
			 * avoid HOLD in the BRDA-to-be-pointed desc.
			 */
			do {
				cur = dpriv->rx_current++%RX_RING_SIZE;
				rx_fd = dpriv->rx_fd + cur;
				if (!(rx_fd->state2 & DataComplete))
					break;
				if (rx_fd->state2 & FrameAborted) {
					dpriv->stats.rx_over_errors++;
					rx_fd->state1 |= Hold;
					rx_fd->state2 = 0x00000000;
					rx_fd->end = 0xbabeface;
				} else
					dscc4_rx_skb(dpriv, cur, rx_fd, dev);
			} while (1);

			if (debug) {
				if (dpriv->flags & RdoSet)
					printk(KERN_DEBUG
					       "dscc4: no RDO in Rx data\n");
			}
#ifdef DSCC4_RDO_EXPERIMENTAL_RECOVERY
			writel(dpriv->rx_fd_dma +
			       (dpriv->rx_current%RX_RING_SIZE)*
			       sizeof(struct RxFD), scc_addr + CH0BRDA);
			writel(MTFi|Rdr|Idr, scc_addr + CH0CFG);
			if (dscc4_do_action(dev, "RDR")) {
				printk(KERN_ERR "%s: RDO recovery failed(%s)\n",
				       dev->name, "RDR");
				goto rdo_end;
			}
			writel(MTFi|Idr, scc_addr + CH0CFG);
			if (dscc4_do_action(dev, "IDR")) {
				printk(KERN_ERR "%s: RDO recovery failed(%s)\n",
				       dev->name, "IDR");
				goto rdo_end;
			}
		rdo_end:
#endif
			writel(readl(scc_offset + CCR2) | RxActivate,
			       scc_offset + CCR2);
			goto try;
		}
		/* These will be used later */
		if (state & Rfs) {
			if (!(state &= ~Rfs))
				goto try;
		}
		if (state & Rfo) {
			if (!(state &= ~Rfo))
				goto try;
		}
		if (state & Flex) {
			if (!(state &= ~Flex))
				goto try;
		}
	}
}
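
/*
 * Both rings are circular singly linked lists of bus addresses. Every
 * fresh Tx descriptor keeps Hold set and a dummy 32-byte frame parks
 * the DMA core on a known descriptor; the last Rx descriptor holds too,
 * so the receiver can never lap the host.
 */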
static int dscc4_init_ring(struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = (struct dscc4_dev_priv *)dev->priv;
	struct TxFD *tx_fd;
	struct RxFD *rx_fd;
	int i;

	tx_fd = (struct TxFD *) pci_alloc_consistent(dpriv->pci_priv->pdev,
		TX_RING_SIZE*sizeof(struct TxFD), &dpriv->tx_fd_dma);
	if (!tx_fd)
		goto err_out;
	dpriv->tx_fd = tx_fd;

	rx_fd = (struct RxFD *) pci_alloc_consistent(dpriv->pci_priv->pdev,
		RX_RING_SIZE*sizeof(struct RxFD), &dpriv->rx_fd_dma);
	if (!rx_fd)
		goto err_free_dma_tx;
	dpriv->rx_fd = rx_fd;

	dpriv->rx_current = 0;
	dpriv->tx_current = 0;
	dpriv->tx_dirty = 0;

	/* the dma core of the dscc4 will be locked on the first desc */
	for (i = 0; i < TX_RING_SIZE; ) {
		reset_TxFD(tx_fd);
		tx_fd->data = dpriv->tx_fd_dma;
		dpriv->tx_skbuff[i] = NULL;
		i++;
		tx_fd->next = (u32)(dpriv->tx_fd_dma + i*sizeof(struct TxFD));
		tx_fd++;
	}
	(--tx_fd)->next = (u32)dpriv->tx_fd_dma;
	{
		struct sk_buff *skb;

		skb = dev_alloc_skb(32);
		if (!skb)
			goto err_free_dma_rx;
		skb->len = 32;
		memset(skb->data, 0xaa, 16);
		tx_fd -= (TX_RING_SIZE - 1);
		tx_fd->state = 0xc0000000;
		tx_fd->state |= ((u32)(skb->len & TxSizeMax)) << 16;
		tx_fd->data = pci_map_single(dpriv->pci_priv->pdev, skb->data,
					     skb->len, PCI_DMA_TODEVICE);
		dpriv->tx_skbuff[0] = skb;
	}
	for (i = 0; i < RX_RING_SIZE; ) {
		/* size set by the host. Multiple of 4 bytes please */
		rx_fd->state1 = HiDesc; /* Hi, no Hold */
		rx_fd->state2 = 0x00000000;
		rx_fd->end = 0xbabeface;
		rx_fd->state1 |= ((u32)(HDLC_MAX_MRU & RxSizeMax)) << 16;
		try_get_rx_skb(dpriv, i, dev);
		i++;
		rx_fd->next = (u32)(dpriv->rx_fd_dma + i*sizeof(struct RxFD));
		rx_fd++;
	}
	(--rx_fd)->next = (u32)dpriv->rx_fd_dma;
	rx_fd->state1 |= 0x40000000; /* Hold */

	return 0;

	/* Free with the ring base addresses: the cursors have moved */
err_free_dma_rx:
	pci_free_consistent(dpriv->pci_priv->pdev, RX_RING_SIZE*sizeof(struct RxFD),
			    dpriv->rx_fd, dpriv->rx_fd_dma);
err_free_dma_tx:
	pci_free_consistent(dpriv->pci_priv->pdev, TX_RING_SIZE*sizeof(struct TxFD),
			    dpriv->tx_fd, dpriv->tx_fd_dma);
err_out:
	return -1;
}

static struct net_device_stats *dscc4_get_stats(struct net_device *dev)
{
	struct dscc4_dev_priv *priv = (struct dscc4_dev_priv *)dev->priv;

	return &priv->stats;
}

static void __exit dscc4_remove_one(struct pci_dev *pdev)
{
	struct dscc4_pci_priv *ppriv;
	struct net_device *root;
	int i;

	ppriv = pci_get_drvdata(pdev);
	root = ppriv->root;

	free_irq(pdev->irq, root);
	pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), ppriv->iqcfg,
			    ppriv->iqcfg_dma);
	for (i = 0; i < dev_per_card; i++) {
		struct dscc4_dev_priv *dpriv;
		struct net_device *dev;

		dev = ppriv->root + i;
		dscc4_unattach_hdlc_device(dev);

		dpriv = (struct dscc4_dev_priv *)dev->priv;
		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
				    dpriv->iqrx, dpriv->iqrx_dma);
		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
				    dpriv->iqtx, dpriv->iqtx_dma);
		unregister_netdev(dev);
	}
	kfree(root->priv);

	iounmap((void *)root->base_addr);
	kfree(root);

	kfree(ppriv);

	release_mem_region(pci_resource_start(pdev, 1),
			   pci_resource_len(pdev, 1));
	release_mem_region(pci_resource_start(pdev, 0),
			   pci_resource_len(pdev, 0));
}

static int dscc4_hdlc_ioctl(struct hdlc_device_struct *hdlc, struct ifreq *ifr, int cmd)
{
	struct net_device *dev = (struct net_device *)hdlc->netdev.base_addr;
	int result;

	result = dscc4_ioctl(dev, ifr, cmd);
	return result;
}

static int dscc4_hdlc_open(struct hdlc_device_struct *hdlc)
{
	struct net_device *dev = (struct net_device *)(hdlc->netdev.base_addr);

	if (netif_running(dev)) {
		printk(KERN_DEBUG "%s: already running\n", dev->name); // DEBUG
		return 0;
	}
	return dscc4_open(dev);
}

static int dscc4_hdlc_xmit(hdlc_device *hdlc, struct sk_buff *skb)
{
	struct net_device *dev = (struct net_device *)hdlc->netdev.base_addr;

	return dscc4_start_xmit(skb, dev);
}

static void dscc4_hdlc_close(struct hdlc_device_struct *hdlc)
{
	struct net_device *dev = (struct net_device *)hdlc->netdev.base_addr;
	struct dscc4_dev_priv *dpriv;

	dpriv = dev->priv;
	--dpriv->usecount;
}

/* Operated under dev lock */
static int dscc4_attach_hdlc_device(struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = dev->priv;
	struct hdlc_device_struct *hdlc;
	int result;

	hdlc = &dpriv->hdlc;
	hdlc->netdev.base_addr = (unsigned long)dev;
	hdlc->set_mode = NULL;
	hdlc->open = dscc4_hdlc_open;
	hdlc->close = dscc4_hdlc_close;
	hdlc->ioctl = dscc4_hdlc_ioctl;
	hdlc->xmit = dscc4_hdlc_xmit;

	result = register_hdlc_device(hdlc);
	if (!result)
		dpriv->usecount++;
	return result;
}
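
/*
 * Glue note: the generic hdlc layer knows nothing about this driver's
 * private structures, so the callbacks above smuggle the struct
 * net_device pointer through hdlc->netdev.base_addr and cast it back
 * on entry.
 */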
/* Operated under dev lock */
static void dscc4_unattach_hdlc_device(struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = dev->priv;

	unregister_hdlc_device(&dpriv->hdlc);
	dpriv->usecount--;
}

static struct pci_device_id dscc4_pci_tbl[] __devinitdata = {
	{ PCI_VENDOR_ID_SIEMENS, PCI_DEVICE_ID_SIEMENS_DSCC4,
	  PCI_ANY_ID, PCI_ANY_ID, },
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, dscc4_pci_tbl);

static struct pci_driver dscc4_driver = {
	name:		"dscc4",
	id_table:	dscc4_pci_tbl,
	probe:		dscc4_init_one,
	remove:		dscc4_remove_one,
};

static int __init dscc4_init_module(void)
{
	return pci_module_init(&dscc4_driver);
}

static void __exit dscc4_cleanup_module(void)
{
	pci_unregister_driver(&dscc4_driver);
}

module_init(dscc4_init_module);
module_exit(dscc4_cleanup_module);