/*
 * drivers/net/wan/dscc4/dscc4.c: a DSCC4 HDLC driver for Linux
 *
 * This software may be used and distributed according to the terms of the
 * GNU General Public License.
 *
 * The author may be reached as romieu@cogenit.fr.
 * Specific bug reports/Asian food will be welcome.
 *
 * Special thanks to the nice people at CS-Telecom for the hardware and the
 * access to the test/measure tools.
 *
 *
 *                             Theory of Operation
 *
 * I. Board Compatibility
 *
 * This device driver is designed for the Siemens PEB20534 4-port serial
 * controller as found on Etinc PCISYNC cards. The documentation for the
 * chipset is available at http://www.infineon.com:
 * - Data Sheet "DSCC4, DMA Supported Serial Communication Controller with
 *   4 Channels, PEB 20534 Version 2.1, PEF 20534 Version 2.1";
 * - Application Hint "Management of DSCC4 on-chip FIFO resources".
 * - Errata sheet DS5 (courtesy of Michael Skerritt).
 * Jens David has built an adapter based on the same chipset. Take a look
 * at http://www.afthd.tu-darmstadt.de/~dg1kjd/pciscc4 for a specific
 * driver.
 * Sample code (2 revisions) is available at Infineon.
 *
 * II. Board-specific settings
 *
 * The PCISYNC card can transmit a clock signal to the outside world on the
 * *first two* ports, provided you fit a quartz crystal and a line driver
 * and remove the jumpers. The operation is described on the Etinc web site.
 * If you go DCE on these ports, don't forget to use an adequate cable.
 *
 * Sharing of the PCI interrupt line for this board is possible.
 *
 * III. Driver operation
 *
 * The rx/tx operations are based on a linked list of descriptors. The driver
 * doesn't use HOLD mode any more. HOLD mode is definitely buggy and the more
 * I tried to fix it, the more it started to look like a (convoluted) software
 * mutation of the LxDA method. Errata sheet DS5 suggests using LxDA: consider
 * this an RFC 2119 MUST.
 *
 * Tx direction
 * When the tx ring is full, the xmit routine issues a call to
 * netif_stop_queue. The device is supposed to be enabled again during an
 * ALLS irq (we could use HI but as it's easy to lose events, it's fscked).
 *
 * Rx direction
 * The received frames aren't supposed to span multiple receive areas.
 * I may implement it some day but it isn't the highest ranked item.
 *
 * IV. Notes
 * The current error (XDU, RFO) recovery code is untested.
 * So far, RDO takes its RX channel down and the right sequence to enable it
 * again is still a mystery. If RDO happens, plan a reboot. More details
 * in the code (NB: as this happens, TX still works).
 * Don't mess with the cables during operation, especially on DTE ports.
 * I don't suggest it for DCE either but at least one can get some messages
 * instead of a complete instant freeze.
 * Tests are done on Rev. 20 of the silicon. The RDO handling changes with
 * the documentation/chipset releases.
 *
 * TODO:
 * - test X25.
 * - use polling at high irq/s,
 * - performance analysis,
 * - endianness.
 *
 * 2001/12/10	Daniela Squassoni  <daniela@cyclades.com>
 * - Contribution to support the new generic HDLC layer.
 *
 * 2002/01	Ueimor
 * - old style interface removal
 * - dscc4_release_ring fix (related to DMA mapping)
 * - hard_start_xmit fix (hint: TxSizeMax)
 * - misc crapectomy.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/system.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>

#include <linux/init.h>
#include <linux/string.h>

#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/hdlc.h>
#include <linux/mutex.h>

/* Version */
static const char version[] = "$Id: dscc4.c,v 1.173 2003/09/20 23:55:34 Exp $ for Linux\n";
static int debug;
static int quartz;

#ifdef CONFIG_DSCC4_PCI_RST
static DEFINE_MUTEX(dscc4_mutex);
static u32 dscc4_pci_config_store[16];
#endif

#define DRV_NAME	"dscc4"

#undef DSCC4_POLLING

/* Module parameters */

MODULE_AUTHOR("Maintainer: Francois Romieu <romieu@cogenit.fr>");
MODULE_DESCRIPTION("Siemens PEB20534 PCI Controller");
MODULE_LICENSE("GPL");
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Enable/disable extra messages");
module_param(quartz, int, 0);
MODULE_PARM_DESC(quartz, "If present, on-board quartz frequency (Hz)");

/* Structures */

struct thingie {
	int define;
	u32 bits;
};

struct TxFD {
	__le32 state;
	__le32 next;
	__le32 data;
	__le32 complete;
	u32 jiffies; /* Allows sizeof(TxFD) == sizeof(RxFD) + extra hack */
		     /* FWIW, datasheet calls that "dummy" and says that card
		      * never looks at it; neither does the driver */
};

struct RxFD {
	__le32 state1;
	__le32 next;
	__le32 data;
	__le32 state2;
	__le32 end;
};

#define DUMMY_SKB_SIZE		64
#define TX_LOW			8
#define TX_RING_SIZE		32
#define RX_RING_SIZE		32
#define TX_TOTAL_SIZE		(TX_RING_SIZE*sizeof(struct TxFD))
#define RX_TOTAL_SIZE		(RX_RING_SIZE*sizeof(struct RxFD))
#define IRQ_RING_SIZE		64	/* Keep it a multiple of 32 */
#define TX_TIMEOUT		(HZ/10)
#define DSCC4_HZ_MAX		33000000
#define BRR_DIVIDER_MAX		(64*0x00004000)	/* Cf errata DS5 p.10 */
#define dev_per_card		4
#define SCC_REGISTERS_MAX	23	/* Cf errata DS5 p.4 */

#define SOURCE_ID(flags)	(((flags) >> 28) & 0x03)
#define TO_SIZE(state)		(((state) >> 16) & 0x1fff)

/*
 * Given the operating range of Linux HDLC, the 2 defines below could be
 * made simpler. However they are a fine reminder of the limitations of
 * the driver: it's better to stay < TxSizeMax and < RxSizeMax.
 */
#define TO_STATE_TX(len)	cpu_to_le32(((len) & TxSizeMax) << 16)
#define TO_STATE_RX(len)	cpu_to_le32((RX_MAX(len) % RxSizeMax) << 16)
#define RX_MAX(len)		((((len) >> 5) + 1) << 5)	/* Cf RLCR */
#define SCC_REG_START(dpriv)	(SCC_START+(dpriv->dev_id)*SCC_OFFSET)
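
/*
 * A minimal worked example of the macros above (not used by the driver):
 * RX_MAX() rounds a length up to the next multiple of 32, matching the
 * RLCR granularity. With HDLC_MAX_MRU == 1600:
 *
 *	RX_MAX(1600) = ((1600 >> 5) + 1) << 5 = (50 + 1) * 32 = 1632
 *
 * and TO_STATE_RX(1600) packs (1632 % RxSizeMax) into bits 16..28 of the
 * descriptor state word, i.e. cpu_to_le32(1632 << 16).
 */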

struct dscc4_pci_priv {
	__le32 *iqcfg;
	int cfg_cur;
	spinlock_t lock;
	struct pci_dev *pdev;

	struct dscc4_dev_priv *root;
	dma_addr_t iqcfg_dma;
	u32 xtal_hz;
};

struct dscc4_dev_priv {
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	struct sk_buff *tx_skbuff[TX_RING_SIZE];

	struct RxFD *rx_fd;
	struct TxFD *tx_fd;
	__le32 *iqrx;
	__le32 *iqtx;

	volatile u32 tx_current;
	u32 rx_current;
	u32 iqtx_current;
	u32 iqrx_current;

	volatile u32 tx_dirty;
	volatile u32 ltda;
	u32 rx_dirty;
	u32 lrda;

	dma_addr_t tx_fd_dma;
	dma_addr_t rx_fd_dma;
	dma_addr_t iqtx_dma;
	dma_addr_t iqrx_dma;

	u32 scc_regs[SCC_REGISTERS_MAX]; /* Cf errata DS5 p.4 */

	struct timer_list timer;

	struct dscc4_pci_priv *pci_priv;
	spinlock_t lock;

	int dev_id;
	volatile u32 flags;
	u32 timer_help;

	unsigned short encoding;
	unsigned short parity;
	struct net_device *dev;
	sync_serial_settings settings;
	void __iomem *base_addr;
	u32 __pad __attribute__ ((aligned (4)));
};

/* GLOBAL registers definitions */
#define GCMDR	0x00
#define GSTAR	0x04
#define GMODE	0x08
#define IQLENR0	0x0C
#define IQLENR1	0x10
#define IQRX0	0x14
#define IQTX0	0x24
#define IQCFG	0x3c
#define FIFOCR1	0x44
#define FIFOCR2	0x48
#define FIFOCR3	0x4c
#define FIFOCR4	0x34
#define CH0CFG	0x50
#define CH0BRDA	0x54
#define CH0BTDA	0x58
#define CH0FRDA	0x98
#define CH0FTDA	0xb0
#define CH0LRDA	0xc8
#define CH0LTDA	0xe0

/* SCC registers definitions */
#define SCC_START	0x0100
#define SCC_OFFSET	0x80
#define CMDR	0x00
#define STAR	0x04
#define CCR0	0x08
#define CCR1	0x0c
#define CCR2	0x10
#define BRR	0x2C
#define RLCR	0x40
#define IMR	0x54
#define ISR	0x58

#define GPDIR	0x0400
#define GPDATA	0x0404
#define GPIM	0x0408

/* Bit masks */
#define EncodingMask	0x00700000
#define CrcMask		0x00000003

#define IntRxScc0	0x10000000
#define IntTxScc0	0x01000000

#define TxPollCmd	0x00000400
#define RxActivate	0x08000000
#define MTFi		0x04000000
#define Rdr		0x00400000
#define Rdt		0x00200000
#define Idr		0x00100000
#define Idt		0x00080000
#define TxSccRes	0x01000000
#define RxSccRes	0x00010000
#define TxSizeMax	0x1fff	/* Datasheet DS1 - 11.1.1.1 */
#define RxSizeMax	0x1ffc	/* Datasheet DS1 - 11.1.2.1 */

#define Ccr0ClockMask	0x0000003f
#define Ccr1LoopMask	0x00000200
#define IsrMask		0x000fffff
#define BrrExpMask	0x00000f00
#define BrrMultMask	0x0000003f
#define Hold		cpu_to_le32(0x40000000)
#define SccBusy		0x10000000
#define PowerUp		0x80000000
#define Vis		0x00001000
#define FrameOk		(FrameVfr | FrameCrc)
#define FrameVfr	0x80
#define FrameRdo	0x40
#define FrameCrc	0x20
#define FrameRab	0x10
#define FrameAborted	cpu_to_le32(0x00000200)
#define FrameEnd	cpu_to_le32(0x80000000)
#define DataComplete	cpu_to_le32(0x40000000)
#define LengthCheck	0x00008000
#define SccEvt		0x02000000
#define NoAck		0x00000200
#define Action		0x00000001
#define HiDesc		cpu_to_le32(0x20000000)

/* SCC events */
#define RxEvt	0xf0000000
#define TxEvt	0x0f000000
#define Alls	0x00040000
#define Xdu	0x00010000
#define Cts	0x00004000
#define Xmr	0x00002000
#define Xpr	0x00001000
#define Rdo	0x00000080
#define Rfs	0x00000040
#define Cd	0x00000004
#define Rfo	0x00000002
#define Flex	0x00000001

/* DMA core events */
#define Cfg	0x00200000
#define Hi	0x00040000
#define Fi	0x00020000
#define Err	0x00010000
#define Arf	0x00000002
#define ArAck	0x00000001

/* State flags */
#define Ready		0x00000000
#define NeedIDR		0x00000001
#define NeedIDT		0x00000002
#define RdoSet		0x00000004
#define FakeReset	0x00000008

/* Don't mask RDO. Ever. */
#ifdef DSCC4_POLLING
#define EventsMask	0xfffeef7f
#else
#define EventsMask	0xfffa8f7a
#endif

/* Functions prototypes */
static void dscc4_rx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *);
static void dscc4_tx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *);
static int dscc4_found1(struct pci_dev *, void __iomem *ioaddr);
static int dscc4_init_one(struct pci_dev *, const struct pci_device_id *ent);
static int dscc4_open(struct net_device *);
static netdev_tx_t dscc4_start_xmit(struct sk_buff *, struct net_device *);
static int dscc4_close(struct net_device *);
static int dscc4_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int dscc4_init_ring(struct net_device *);
static void dscc4_release_ring(struct dscc4_dev_priv *);
static void dscc4_timer(unsigned long);
static void dscc4_tx_timeout(struct net_device *);
static irqreturn_t dscc4_irq(int irq, void *dev_id);
static int dscc4_hdlc_attach(struct net_device *, unsigned short, unsigned short);
static int dscc4_set_iface(struct dscc4_dev_priv *, struct net_device *);
#ifdef DSCC4_POLLING
static int dscc4_tx_poll(struct dscc4_dev_priv *, struct net_device *);
#endif

static inline struct dscc4_dev_priv *dscc4_priv(struct net_device *dev)
{
	return dev_to_hdlc(dev)->priv;
}

static inline struct net_device *dscc4_to_dev(struct dscc4_dev_priv *p)
{
	return p->dev;
}

static void scc_patchl(u32 mask, u32 value, struct dscc4_dev_priv *dpriv,
		       struct net_device *dev, int offset)
{
	u32 state;

	/* Cf scc_writel for concern regarding thread-safety */
	state = dpriv->scc_regs[offset >> 2];
	state &= ~mask;
	state |= value;
	dpriv->scc_regs[offset >> 2] = state;
	writel(state, dpriv->base_addr + SCC_REG_START(dpriv) + offset);
}

static void scc_writel(u32 bits, struct dscc4_dev_priv *dpriv,
		       struct net_device *dev, int offset)
{
	/*
	 * Thread-UNsafe.
	 * As of 2002/02/16, no threads race for access.
	 */
	dpriv->scc_regs[offset >> 2] = bits;
	writel(bits, dpriv->base_addr + SCC_REG_START(dpriv) + offset);
}

static inline u32 scc_readl(struct dscc4_dev_priv *dpriv, int offset)
{
	return dpriv->scc_regs[offset >> 2];
}

static u32 scc_readl_star(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
	/* Cf errata DS5 p.4 */
	readl(dpriv->base_addr + SCC_REG_START(dpriv) + STAR);
	return readl(dpriv->base_addr + SCC_REG_START(dpriv) + STAR);
}
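
/*
 * NB: scc_readl() above returns the soft copy kept in dpriv->scc_regs[]
 * rather than reading the chip back (cf the errata DS5 p.4 references).
 * STAR is the only SCC register read from hardware, and even then the
 * result of the first read is discarded.
 */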

static inline void dscc4_do_tx(struct dscc4_dev_priv *dpriv,
			       struct net_device *dev)
{
	dpriv->ltda = dpriv->tx_fd_dma +
		      ((dpriv->tx_current-1)%TX_RING_SIZE)*sizeof(struct TxFD);
	writel(dpriv->ltda, dpriv->base_addr + CH0LTDA + dpriv->dev_id*4);
	/* Flush posted writes *NOW* */
	readl(dpriv->base_addr + CH0LTDA + dpriv->dev_id*4);
}

static inline void dscc4_rx_update(struct dscc4_dev_priv *dpriv,
				   struct net_device *dev)
{
	dpriv->lrda = dpriv->rx_fd_dma +
		      ((dpriv->rx_dirty - 1)%RX_RING_SIZE)*sizeof(struct RxFD);
	writel(dpriv->lrda, dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
}

static inline unsigned int dscc4_tx_done(struct dscc4_dev_priv *dpriv)
{
	return dpriv->tx_current == dpriv->tx_dirty;
}

static inline unsigned int dscc4_tx_quiescent(struct dscc4_dev_priv *dpriv,
					      struct net_device *dev)
{
	return readl(dpriv->base_addr + CH0FTDA + dpriv->dev_id*4) == dpriv->ltda;
}

static int state_check(u32 state, struct dscc4_dev_priv *dpriv,
		       struct net_device *dev, const char *msg)
{
	int ret = 0;

	if (debug > 1) {
		if (SOURCE_ID(state) != dpriv->dev_id) {
			printk(KERN_DEBUG "%s (%s): Source Id=%d, state=%08x\n",
			       dev->name, msg, SOURCE_ID(state), state);
			ret = -1;
		}
		if (state & 0x0df80c00) {
			printk(KERN_DEBUG "%s (%s): state=%08x (UFO alert)\n",
			       dev->name, msg, state);
			ret = -1;
		}
	}
	return ret;
}

static void dscc4_tx_print(struct net_device *dev,
			   struct dscc4_dev_priv *dpriv,
			   char *msg)
{
	printk(KERN_DEBUG "%s: tx_current=%02d tx_dirty=%02d (%s)\n",
	       dev->name, dpriv->tx_current, dpriv->tx_dirty, msg);
}

static void dscc4_release_ring(struct dscc4_dev_priv *dpriv)
{
	struct pci_dev *pdev = dpriv->pci_priv->pdev;
	struct TxFD *tx_fd = dpriv->tx_fd;
	struct RxFD *rx_fd = dpriv->rx_fd;
	struct sk_buff **skbuff;
	int i;

	pci_free_consistent(pdev, TX_TOTAL_SIZE, tx_fd, dpriv->tx_fd_dma);
	pci_free_consistent(pdev, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma);

	skbuff = dpriv->tx_skbuff;
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (*skbuff) {
			pci_unmap_single(pdev, le32_to_cpu(tx_fd->data),
					 (*skbuff)->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(*skbuff);
		}
		skbuff++;
		tx_fd++;
	}

	skbuff = dpriv->rx_skbuff;
	for (i = 0; i < RX_RING_SIZE; i++) {
		if (*skbuff) {
			pci_unmap_single(pdev, le32_to_cpu(rx_fd->data),
					 RX_MAX(HDLC_MAX_MRU), PCI_DMA_FROMDEVICE);
			dev_kfree_skb(*skbuff);
		}
		skbuff++;
		rx_fd++;
	}
}

static inline int try_get_rx_skb(struct dscc4_dev_priv *dpriv,
				 struct net_device *dev)
{
	unsigned int dirty = dpriv->rx_dirty%RX_RING_SIZE;
	struct RxFD *rx_fd = dpriv->rx_fd + dirty;
	const int len = RX_MAX(HDLC_MAX_MRU);
	struct sk_buff *skb;
	int ret = 0;

	skb = dev_alloc_skb(len);
	dpriv->rx_skbuff[dirty] = skb;
	if (skb) {
		skb->protocol = hdlc_type_trans(skb, dev);
		rx_fd->data = cpu_to_le32(pci_map_single(dpriv->pci_priv->pdev,
					  skb->data, len, PCI_DMA_FROMDEVICE));
	} else {
		rx_fd->data = 0;
		ret = -1;
	}
	return ret;
}

/*
 * IRQ/thread/whatever safe
 */
static int dscc4_wait_ack_cec(struct dscc4_dev_priv *dpriv,
			      struct net_device *dev, char *msg)
{
	s8 i = 0;

	do {
		if (!(scc_readl_star(dpriv, dev) & SccBusy)) {
			printk(KERN_DEBUG "%s: %s ack (%d try)\n", dev->name,
			       msg, i);
			goto done;
		}
		schedule_timeout_uninterruptible(10);
		rmb();
	} while (++i > 0);
	printk(KERN_ERR "%s: %s timeout\n", dev->name, msg);
done:
	return (i >= 0) ? i : -EAGAIN;
}

static int dscc4_do_action(struct net_device *dev, char *msg)
{
	void __iomem *ioaddr = dscc4_priv(dev)->base_addr;
	s16 i = 0;

	writel(Action, ioaddr + GCMDR);
	ioaddr += GSTAR;
	do {
		u32 state = readl(ioaddr);

		if (state & ArAck) {
			printk(KERN_DEBUG "%s: %s ack\n", dev->name, msg);
			writel(ArAck, ioaddr);
			goto done;
		} else if (state & Arf) {
			printk(KERN_ERR "%s: %s failed\n", dev->name, msg);
			writel(Arf, ioaddr);
			i = -1;
			goto done;
		}
		rmb();
	} while (++i > 0);
	printk(KERN_ERR "%s: %s timeout\n", dev->name, msg);
done:
	return i;
}

static inline int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv)
{
	int cur = dpriv->iqtx_current%IRQ_RING_SIZE;
	s8 i = 0;

	do {
		if (!(dpriv->flags & (NeedIDR | NeedIDT)) ||
		    (dpriv->iqtx[cur] & cpu_to_le32(Xpr)))
			break;
		smp_rmb();
		schedule_timeout_uninterruptible(10);
	} while (++i > 0);

	return (i >= 0) ? i : -EAGAIN;
}
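
/*
 * NB: the "while (++i > 0)" construct used above bounds each polling loop
 * by letting the signed counter wrap: an s8 gives up after its 128th pass,
 * an s16 after its 32768th, at which point the negative counter (or
 * -EAGAIN) is returned to signal the timeout.
 */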

/* TODO: (ab)use this function to refill a completely depleted RX ring. */
static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv,
				struct net_device *dev)
{
	struct RxFD *rx_fd = dpriv->rx_fd + dpriv->rx_current%RX_RING_SIZE;
	struct pci_dev *pdev = dpriv->pci_priv->pdev;
	struct sk_buff *skb;
	int pkt_len;

	skb = dpriv->rx_skbuff[dpriv->rx_current++%RX_RING_SIZE];
	if (!skb) {
		printk(KERN_DEBUG "%s: skb=0 (%s)\n", dev->name, __func__);
		goto refill;
	}
	pkt_len = TO_SIZE(le32_to_cpu(rx_fd->state2));
	pci_unmap_single(pdev, le32_to_cpu(rx_fd->data),
			 RX_MAX(HDLC_MAX_MRU), PCI_DMA_FROMDEVICE);
	if ((skb->data[--pkt_len] & FrameOk) == FrameOk) {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pkt_len;
		skb_put(skb, pkt_len);
		if (netif_running(dev))
			skb->protocol = hdlc_type_trans(skb, dev);
		netif_rx(skb);
	} else {
		if (skb->data[pkt_len] & FrameRdo)
			dev->stats.rx_fifo_errors++;
		else if (!(skb->data[pkt_len] & FrameCrc))
			dev->stats.rx_crc_errors++;
		else if ((skb->data[pkt_len] & (FrameVfr | FrameRab)) !=
			 (FrameVfr | FrameRab))
			dev->stats.rx_length_errors++;
		dev->stats.rx_errors++;
		dev_kfree_skb_irq(skb);
	}
refill:
	while ((dpriv->rx_dirty - dpriv->rx_current) % RX_RING_SIZE) {
		if (try_get_rx_skb(dpriv, dev) < 0)
			break;
		dpriv->rx_dirty++;
	}
	dscc4_rx_update(dpriv, dev);
	rx_fd->state2 = 0x00000000;
	rx_fd->end = cpu_to_le32(0xbabeface);
}

static void dscc4_free1(struct pci_dev *pdev)
{
	struct dscc4_pci_priv *ppriv;
	struct dscc4_dev_priv *root;
	int i;

	ppriv = pci_get_drvdata(pdev);
	root = ppriv->root;

	for (i = 0; i < dev_per_card; i++)
		unregister_hdlc_device(dscc4_to_dev(root + i));

	pci_set_drvdata(pdev, NULL);

	for (i = 0; i < dev_per_card; i++)
		free_netdev(root[i].dev);
	kfree(root);
	kfree(ppriv);
}

static int __devinit dscc4_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct dscc4_pci_priv *priv;
	struct dscc4_dev_priv *dpriv;
	void __iomem *ioaddr;
	int i, rc;

	printk(KERN_DEBUG "%s", version);

	rc = pci_enable_device(pdev);
	if (rc < 0)
		goto out;

	rc = pci_request_region(pdev, 0, "registers");
	if (rc < 0) {
		printk(KERN_ERR "%s: can't reserve MMIO region (regs)\n",
		       DRV_NAME);
		goto err_disable_0;
	}
	rc = pci_request_region(pdev, 1, "LBI interface");
	if (rc < 0) {
		printk(KERN_ERR "%s: can't reserve MMIO region (lbi)\n",
		       DRV_NAME);
		goto err_free_mmio_region_1;
	}

	ioaddr = pci_ioremap_bar(pdev, 0);
	if (!ioaddr) {
		printk(KERN_ERR "%s: cannot remap MMIO region %llx @ %llx\n",
		       DRV_NAME, (unsigned long long)pci_resource_len(pdev, 0),
		       (unsigned long long)pci_resource_start(pdev, 0));
		rc = -EIO;
		goto err_free_mmio_regions_2;
	}
	printk(KERN_DEBUG "Siemens DSCC4, MMIO at %#llx (regs), %#llx (lbi), IRQ %d\n",
	       (unsigned long long)pci_resource_start(pdev, 0),
	       (unsigned long long)pci_resource_start(pdev, 1), pdev->irq);

	/* Cf errata DS5 p.2 */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xf8);
	pci_set_master(pdev);

	rc = dscc4_found1(pdev, ioaddr);
	if (rc < 0)
		goto err_iounmap_3;

	priv = pci_get_drvdata(pdev);

	rc = request_irq(pdev->irq, dscc4_irq, IRQF_SHARED, DRV_NAME, priv->root);
	if (rc < 0) {
		printk(KERN_WARNING "%s: IRQ %d busy\n", DRV_NAME, pdev->irq);
		goto err_release_4;
	}

	/* power up/little endian/dma core controlled via lrda/ltda */
	writel(0x00000001, ioaddr + GMODE);
	/* Shared interrupt queue */
	{
		u32 bits;

		bits = (IRQ_RING_SIZE >> 5) - 1;
		bits |= bits << 4;
		bits |= bits << 8;
		bits |= bits << 16;
		writel(bits, ioaddr + IQLENR0);
	}
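
	/*
	 * Worked example (IRQ_RING_SIZE == 64): the length code is
	 * (64 >> 5) - 1 = 1 and the shifts replicate it into all eight
	 * 4-bit fields of IQLENR0, i.e. bits == 0x11111111. Presumably
	 * one rx and one tx interrupt queue length field per SCC.
	 */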
	/* Global interrupt queue */
	writel((u32)(((IRQ_RING_SIZE >> 5) - 1) << 20), ioaddr + IQLENR1);

	rc = -ENOMEM;

	priv->iqcfg = (__le32 *) pci_alloc_consistent(pdev,
		IRQ_RING_SIZE*sizeof(__le32), &priv->iqcfg_dma);
	if (!priv->iqcfg)
		goto err_free_irq_5;
	writel(priv->iqcfg_dma, ioaddr + IQCFG);

	/*
	 * SCC 0-3 private rx/tx irq structures
	 * IQRX/TXi needs to be set soon. Learned it the hard way...
	 */
	for (i = 0; i < dev_per_card; i++) {
		dpriv = priv->root + i;
		dpriv->iqtx = (__le32 *) pci_alloc_consistent(pdev,
			IRQ_RING_SIZE*sizeof(u32), &dpriv->iqtx_dma);
		if (!dpriv->iqtx)
			goto err_free_iqtx_6;
		writel(dpriv->iqtx_dma, ioaddr + IQTX0 + i*4);
	}
	for (i = 0; i < dev_per_card; i++) {
		dpriv = priv->root + i;
		dpriv->iqrx = (__le32 *) pci_alloc_consistent(pdev,
			IRQ_RING_SIZE*sizeof(u32), &dpriv->iqrx_dma);
		if (!dpriv->iqrx)
			goto err_free_iqrx_7;
		writel(dpriv->iqrx_dma, ioaddr + IQRX0 + i*4);
	}

	/* Cf application hint. Beware of hard-lock condition on threshold. */
	writel(0x42104000, ioaddr + FIFOCR1);
	//writel(0x9ce69800, ioaddr + FIFOCR2);
	writel(0xdef6d800, ioaddr + FIFOCR2);
	//writel(0x11111111, ioaddr + FIFOCR4);
	writel(0x18181818, ioaddr + FIFOCR4);
	writel(0x0000000e, ioaddr + FIFOCR3);

	writel(0xff200001, ioaddr + GCMDR);

	rc = 0;
out:
	return rc;

err_free_iqrx_7:
	while (--i >= 0) {
		dpriv = priv->root + i;
		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
				    dpriv->iqrx, dpriv->iqrx_dma);
	}
	i = dev_per_card;
err_free_iqtx_6:
	while (--i >= 0) {
		dpriv = priv->root + i;
		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
				    dpriv->iqtx, dpriv->iqtx_dma);
	}
	pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), priv->iqcfg,
			    priv->iqcfg_dma);
err_free_irq_5:
	free_irq(pdev->irq, priv->root);
err_release_4:
	dscc4_free1(pdev);
err_iounmap_3:
	iounmap(ioaddr);
err_free_mmio_regions_2:
	pci_release_region(pdev, 1);
err_free_mmio_region_1:
	pci_release_region(pdev, 0);
err_disable_0:
	pci_disable_device(pdev);
	goto out;
}

/*
 * Let's hope the default values are decent enough to protect my
 * feet from the user's gun - Ueimor
 */
static void dscc4_init_registers(struct dscc4_dev_priv *dpriv,
				 struct net_device *dev)
{
	/* No interrupts, SCC core disabled. Let's relax */
	scc_writel(0x00000000, dpriv, dev, CCR0);

	scc_writel(LengthCheck | (HDLC_MAX_MRU >> 5), dpriv, dev, RLCR);

	scc_writel(0x02408000, dpriv, dev, CCR1);

	/* crc not forwarded - Cf errata DS5 p.11 */
	scc_writel(0x00050008 & ~RxActivate, dpriv, dev, CCR2);
	// crc forwarded
	//scc_writel(0x00250008 & ~RxActivate, dpriv, dev, CCR2);
}

static inline int dscc4_set_quartz(struct dscc4_dev_priv *dpriv, int hz)
{
	int ret = 0;

	if ((hz < 0) || (hz > DSCC4_HZ_MAX))
		ret = -EOPNOTSUPP;
	else
		dpriv->pci_priv->xtal_hz = hz;

	return ret;
}

static const struct net_device_ops dscc4_ops = {
	.ndo_open	= dscc4_open,
	.ndo_stop	= dscc4_close,
	.ndo_change_mtu	= hdlc_change_mtu,
	.ndo_start_xmit	= hdlc_start_xmit,
	.ndo_do_ioctl	= dscc4_ioctl,
	.ndo_tx_timeout	= dscc4_tx_timeout,
};

static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr)
{
	struct dscc4_pci_priv *ppriv;
	struct dscc4_dev_priv *root;
	int i, ret = -ENOMEM;

	root = kcalloc(dev_per_card, sizeof(*root), GFP_KERNEL);
	if (!root) {
		printk(KERN_ERR "%s: can't allocate data\n", DRV_NAME);
		goto err_out;
	}

	for (i = 0; i < dev_per_card; i++) {
		root[i].dev = alloc_hdlcdev(root + i);
		if (!root[i].dev)
			goto err_free_dev;
	}

	ppriv = kzalloc(sizeof(*ppriv), GFP_KERNEL);
	if (!ppriv) {
		printk(KERN_ERR "%s: can't allocate private data\n", DRV_NAME);
		goto err_free_dev;
	}

	ppriv->root = root;
	spin_lock_init(&ppriv->lock);

	for (i = 0; i < dev_per_card; i++) {
		struct dscc4_dev_priv *dpriv = root + i;
		struct net_device *d = dscc4_to_dev(dpriv);
		hdlc_device *hdlc = dev_to_hdlc(d);

		d->base_addr = (unsigned long)ioaddr;
		d->irq = pdev->irq;
		d->netdev_ops = &dscc4_ops;
		d->watchdog_timeo = TX_TIMEOUT;
		SET_NETDEV_DEV(d, &pdev->dev);

		dpriv->dev_id = i;
		dpriv->pci_priv = ppriv;
		dpriv->base_addr = ioaddr;
		spin_lock_init(&dpriv->lock);

		hdlc->xmit = dscc4_start_xmit;
		hdlc->attach = dscc4_hdlc_attach;

		dscc4_init_registers(dpriv, d);
		dpriv->parity = PARITY_CRC16_PR0_CCITT;
		dpriv->encoding = ENCODING_NRZ;

		ret = dscc4_init_ring(d);
		if (ret < 0)
			goto err_unregister;

		ret = register_hdlc_device(d);
		if (ret < 0) {
			printk(KERN_ERR "%s: unable to register\n", DRV_NAME);
			dscc4_release_ring(dpriv);
			goto err_unregister;
		}
	}

	ret = dscc4_set_quartz(root, quartz);
	if (ret < 0)
		goto err_unregister;

	pci_set_drvdata(pdev, ppriv);
	return ret;

err_unregister:
	while (i-- > 0) {
		dscc4_release_ring(root + i);
		unregister_hdlc_device(dscc4_to_dev(root + i));
	}
	kfree(ppriv);
	i = dev_per_card;
err_free_dev:
	while (i-- > 0)
		free_netdev(root[i].dev);
	kfree(root);
err_out:
	return ret;
}

static void dscc4_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
//	struct dscc4_pci_priv *ppriv;

	goto done;
done:
	dpriv->timer.expires = jiffies + TX_TIMEOUT;
	add_timer(&dpriv->timer);
}

static void dscc4_tx_timeout(struct net_device *dev)
{
	/* FIXME: something is missing there */
}

static int dscc4_loopback_check(struct dscc4_dev_priv *dpriv)
{
	sync_serial_settings *settings = &dpriv->settings;

	if (settings->loopback && (settings->clock_type != CLOCK_INT)) {
		struct net_device *dev = dscc4_to_dev(dpriv);

		printk(KERN_INFO "%s: loopback requires clock\n", dev->name);
		return -1;
	}
	return 0;
}

#ifdef CONFIG_DSCC4_PCI_RST
/*
 * Some DSCC4-based cards wire the GPIO port and the PCI #RST pin together,
 * so as to provide a way to reset the ASIC without rebooting the whole
 * machine.
 *
 * This code doesn't need to be efficient. Keep It Simple
 */
static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr)
{
	int i;

	mutex_lock(&dscc4_mutex);
	for (i = 0; i < 16; i++)
		pci_read_config_dword(pdev, i << 2, dscc4_pci_config_store + i);

	/* Maximal LBI clock divider (who cares ?) and whole GPIO range. */
	writel(0x001c0000, ioaddr + GMODE);
	/* Configure GPIO port as output */
	writel(0x0000ffff, ioaddr + GPDIR);
	/* Disable interrupts */
	writel(0x0000ffff, ioaddr + GPIM);

	writel(0x0000ffff, ioaddr + GPDATA);
	writel(0x00000000, ioaddr + GPDATA);

	/* Flush posted writes */
	readl(ioaddr + GSTAR);

	schedule_timeout_uninterruptible(10);

	for (i = 0; i < 16; i++)
		pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]);
	mutex_unlock(&dscc4_mutex);
}
#else
#define dscc4_pci_reset(pdev, ioaddr)	do {} while (0)
#endif /* CONFIG_DSCC4_PCI_RST */

static int dscc4_open(struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	struct dscc4_pci_priv *ppriv;
	int ret = -EAGAIN;

	if ((dscc4_loopback_check(dpriv) < 0))
		goto err;

	if ((ret = hdlc_open(dev)))
		goto err;

	ppriv = dpriv->pci_priv;

	/*
	 * Due to various bugs, there is no way to reliably reset a
	 * specific port (manufacturer-dependent special PCI #RST wiring
	 * apart: it affects all ports). Thus the device goes into the best
	 * silent mode possible at dscc4_close() time and simply claims to
	 * be up if it's opened again. It still isn't possible to change
	 * the HDLC configuration without rebooting but at least the ports
	 * can be up/down ifconfig'ed without killing the host.
	 */
	if (dpriv->flags & FakeReset) {
		dpriv->flags &= ~FakeReset;
		scc_patchl(0, PowerUp, dpriv, dev, CCR0);
		scc_patchl(0, 0x00050000, dpriv, dev, CCR2);
		scc_writel(EventsMask, dpriv, dev, IMR);
		printk(KERN_INFO "%s: up again.\n", dev->name);
		goto done;
	}

	/* IDT+IDR during XPR */
	dpriv->flags = NeedIDR | NeedIDT;

	scc_patchl(0, PowerUp | Vis, dpriv, dev, CCR0);

	/*
	 * The following is a bit paranoid...
	 *
	 * NB: the datasheet "...CEC will stay active if the SCC is in
	 * power-down mode or..." and CCR2.RAC = 1 are two different
	 * situations.
	 */
	if (scc_readl_star(dpriv, dev) & SccBusy) {
		printk(KERN_ERR "%s busy. Try later\n", dev->name);
		ret = -EAGAIN;
		goto err_out;
	} else
		printk(KERN_INFO "%s: available. Good\n", dev->name);

	scc_writel(EventsMask, dpriv, dev, IMR);

	/* Posted write is flushed in the wait_ack loop */
	scc_writel(TxSccRes | RxSccRes, dpriv, dev, CMDR);

	if ((ret = dscc4_wait_ack_cec(dpriv, dev, "Cec")) < 0)
		goto err_disable_scc_events;

	/*
	 * I would expect XPR near CE completion (before ? after ?).
	 * At worst, this code won't see a late XPR and people
	 * will have to re-issue an ifconfig (this is harmless).
	 * WARNING, a really missing XPR usually means a hardware
	 * reset is needed. Suggestions anyone ?
	 */
	if ((ret = dscc4_xpr_ack(dpriv)) < 0) {
		printk(KERN_ERR "%s: %s timeout\n", DRV_NAME, "XPR");
		goto err_disable_scc_events;
	}

	if (debug > 2)
		dscc4_tx_print(dev, dpriv, "Open");

done:
	netif_start_queue(dev);

	init_timer(&dpriv->timer);
	dpriv->timer.expires = jiffies + 10*HZ;
	dpriv->timer.data = (unsigned long)dev;
	dpriv->timer.function = dscc4_timer;
	add_timer(&dpriv->timer);
	netif_carrier_on(dev);

	return 0;

err_disable_scc_events:
	scc_writel(0xffffffff, dpriv, dev, IMR);
	scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0);
err_out:
	hdlc_close(dev);
err:
	return ret;
}

#ifdef DSCC4_POLLING
static int dscc4_tx_poll(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
	/*
	 * FIXME: stub. DSCC4_POLLING is #undef'ed above, so this body is
	 * never compiled in; it returns 0 only to keep the signature honest.
	 */
	return 0;
}
#endif /* DSCC4_POLLING */

static netdev_tx_t dscc4_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	struct dscc4_pci_priv *ppriv = dpriv->pci_priv;
	struct TxFD *tx_fd;
	int next;

	next = dpriv->tx_current%TX_RING_SIZE;
	dpriv->tx_skbuff[next] = skb;
	tx_fd = dpriv->tx_fd + next;
	tx_fd->state = FrameEnd | TO_STATE_TX(skb->len);
	tx_fd->data = cpu_to_le32(pci_map_single(ppriv->pdev, skb->data, skb->len,
						 PCI_DMA_TODEVICE));
	tx_fd->complete = 0x00000000;
	tx_fd->jiffies = jiffies;
	mb();

#ifdef DSCC4_POLLING
	spin_lock(&dpriv->lock);
	while (dscc4_tx_poll(dpriv, dev));
	spin_unlock(&dpriv->lock);
#endif

	if (debug > 2)
		dscc4_tx_print(dev, dpriv, "Xmit");
	/* To be cleaned/optimized. Later, ok ? */
	if (!((++dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE))
		netif_stop_queue(dev);

	if (dscc4_tx_quiescent(dpriv, dev))
		dscc4_do_tx(dpriv, dev);

	return NETDEV_TX_OK;
}
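
/*
 * Ring accounting sketch (not driver code): tx_current and tx_dirty are
 * free-running u32 counters, so (tx_current - tx_dirty) is the number of
 * descriptors the chip still owns (the dummy frame accounts for the
 * initial offset of one). After ++dpriv->tx_current, a difference that is
 * 0 modulo TX_RING_SIZE means all 32 slots are in flight: the queue is
 * stopped here and woken again from dscc4_tx_irq() once an ALLS event
 * retires descriptors.
 */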

static int dscc4_close(struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);

	del_timer_sync(&dpriv->timer);
	netif_stop_queue(dev);

	scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0);
	scc_patchl(0x00050000, 0, dpriv, dev, CCR2);
	scc_writel(0xffffffff, dpriv, dev, IMR);

	dpriv->flags |= FakeReset;

	hdlc_close(dev);

	return 0;
}

static inline int dscc4_check_clock_ability(int port)
{
	int ret = 0;

#ifdef CONFIG_DSCC4_PCISYNC
	if (port >= 2)
		ret = -1;
#endif
	return ret;
}

static int dscc4_set_clock(struct net_device *dev, u32 *bps, u32 *state)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	int ret = -1;
	u32 brr;

	*state &= ~Ccr0ClockMask;
	if (*bps) {	/* Clock generated - required for DCE */
		u32 n = 0, m = 0, divider;
		int xtal;

		xtal = dpriv->pci_priv->xtal_hz;
		if (!xtal)
			goto done;
		if (dscc4_check_clock_ability(dpriv->dev_id) < 0)
			goto done;
		divider = xtal / *bps;
		if (divider > BRR_DIVIDER_MAX) {
			divider >>= 4;
			*state |= 0x00000036; /* Clock mode 6b (BRG/16) */
		} else
			*state |= 0x00000037; /* Clock mode 7b (BRG) */
		if (divider >> 22) {
			n = 63;
			m = 15;
		} else if (divider) {
			/* Extraction of the 6 highest weighted bits */
			m = 0;
			while (0xffffffc0 & divider) {
				m++;
				divider >>= 1;
			}
			n = divider;
		}
		brr = (m << 8) | n;
		divider = n << m;
		if (!(*state & 0x00000001)) /* ?b mode mask => clock mode 6b */
			divider <<= 4;
		*bps = xtal / divider;
	} else {
		/*
		 * External clock - DTE
		 * "state" already reflects Clock mode 0a (CCR0 = 0xzzzzzz00).
		 * Nothing more to be done
		 */
		brr = 0;
	}
	scc_writel(brr, dpriv, dev, BRR);
	ret = 0;
done:
	return ret;
}
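
/*
 * Worked example for dscc4_set_clock() (illustrative figures only; any
 * xtal value passed via the "quartz" parameter works the same way): with
 * xtal = 33000000 Hz and a requested *bps of 64000, divider =
 * 33000000 / 64000 = 515, which fits BRR_DIVIDER_MAX, so clock mode 7b is
 * selected. Reducing 515 to its 6 highest weighted bits gives m = 4,
 * n = 32, hence brr = 0x0420 and an effective divider of n << m = 512.
 * The rate actually programmed is reported back as
 * *bps = 33000000 / 512 = 64453.
 */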

static int dscc4_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	const size_t size = sizeof(dpriv->settings);
	int ret = 0;

	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (cmd != SIOCWANDEV)
		return -EOPNOTSUPP;

	switch (ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		if (copy_to_user(line, &dpriv->settings, size))
			return -EFAULT;
		break;

	case IF_IFACE_SYNC_SERIAL:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (dpriv->flags & FakeReset) {
			printk(KERN_INFO "%s: please reset the device"
			       " before this command\n", dev->name);
			return -EPERM;
		}
		if (copy_from_user(&dpriv->settings, line, size))
			return -EFAULT;
		ret = dscc4_set_iface(dpriv, dev);
		break;

	default:
		ret = hdlc_ioctl(dev, ifr, cmd);
		break;
	}

	return ret;
}

static int dscc4_match(struct thingie *p, int value)
{
	int i;

	for (i = 0; p[i].define != -1; i++) {
		if (value == p[i].define)
			break;
	}
	if (p[i].define == -1)
		return -1;
	else
		return i;
}
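
/*
 * Usage sketch: every table handed to dscc4_match() must end with a
 * { -1, 0 } sentinel. With the encoding[] table defined below,
 * dscc4_match(encoding, ENCODING_NRZI) returns 1 and the caller patches
 * CCR0 with encoding[1].bits, i.e. 0x00200000.
 */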

static int dscc4_clock_setting(struct dscc4_dev_priv *dpriv,
			       struct net_device *dev)
{
	sync_serial_settings *settings = &dpriv->settings;
	int ret = -EOPNOTSUPP;
	u32 bps, state;

	bps = settings->clock_rate;
	state = scc_readl(dpriv, CCR0);
	if (dscc4_set_clock(dev, &bps, &state) < 0)
		goto done;
	if (bps) { /* DCE */
		printk(KERN_DEBUG "%s: generated RxClk (DCE)\n", dev->name);
		if (settings->clock_rate != bps) {
			printk(KERN_DEBUG "%s: clock adjusted (%08d -> %08d)\n",
			       dev->name, settings->clock_rate, bps);
			settings->clock_rate = bps;
		}
	} else { /* DTE */
		state |= PowerUp | Vis;
		printk(KERN_DEBUG "%s: external RxClk (DTE)\n", dev->name);
	}
	scc_writel(state, dpriv, dev, CCR0);
	ret = 0;
done:
	return ret;
}

static int dscc4_encoding_setting(struct dscc4_dev_priv *dpriv,
				  struct net_device *dev)
{
	struct thingie encoding[] = {
		{ ENCODING_NRZ,		0x00000000 },
		{ ENCODING_NRZI,	0x00200000 },
		{ ENCODING_FM_MARK,	0x00400000 },
		{ ENCODING_FM_SPACE,	0x00500000 },
		{ ENCODING_MANCHESTER,	0x00600000 },
		{ -1, 0}
	};
	int i, ret = 0;

	i = dscc4_match(encoding, dpriv->encoding);
	if (i >= 0)
		scc_patchl(EncodingMask, encoding[i].bits, dpriv, dev, CCR0);
	else
		ret = -EOPNOTSUPP;
	return ret;
}

static int dscc4_loopback_setting(struct dscc4_dev_priv *dpriv,
				  struct net_device *dev)
{
	sync_serial_settings *settings = &dpriv->settings;
	u32 state;

	state = scc_readl(dpriv, CCR1);
	if (settings->loopback) {
		printk(KERN_DEBUG "%s: loopback\n", dev->name);
		state |= 0x00000100;
	} else {
		printk(KERN_DEBUG "%s: normal\n", dev->name);
		state &= ~0x00000100;
	}
	scc_writel(state, dpriv, dev, CCR1);
	return 0;
}

static int dscc4_crc_setting(struct dscc4_dev_priv *dpriv,
			     struct net_device *dev)
{
	struct thingie crc[] = {
		{ PARITY_CRC16_PR0_CCITT,	0x00000010 },
		{ PARITY_CRC16_PR1_CCITT,	0x00000000 },
		{ PARITY_CRC32_PR0_CCITT,	0x00000011 },
		{ PARITY_CRC32_PR1_CCITT,	0x00000001 },
		{ -1, 0}	/* terminator required by dscc4_match() */
	};
	int i, ret = 0;

	i = dscc4_match(crc, dpriv->parity);
	if (i >= 0)
		scc_patchl(CrcMask, crc[i].bits, dpriv, dev, CCR1);
	else
		ret = -EOPNOTSUPP;
	return ret;
}

static int dscc4_set_iface(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
	struct {
		int (*action)(struct dscc4_dev_priv *, struct net_device *);
	} *p, do_setting[] = {
		{ dscc4_encoding_setting },
		{ dscc4_clock_setting },
		{ dscc4_loopback_setting },
		{ dscc4_crc_setting },
		{ NULL }
	};
	int ret = 0;

	for (p = do_setting; p->action; p++) {
		if ((ret = p->action(dpriv, dev)) < 0)
			break;
	}
	return ret;
}

static irqreturn_t dscc4_irq(int irq, void *token)
{
	struct dscc4_dev_priv *root = token;
	struct dscc4_pci_priv *priv;
	struct net_device *dev;
	void __iomem *ioaddr;
	u32 state;
	unsigned long flags;
	int i, handled = 1;

	priv = root->pci_priv;
	dev = dscc4_to_dev(root);

	spin_lock_irqsave(&priv->lock, flags);

	ioaddr = root->base_addr;

	state = readl(ioaddr + GSTAR);
	if (!state) {
		handled = 0;
		goto out;
	}
	if (debug > 3)
		printk(KERN_DEBUG "%s: GSTAR = 0x%08x\n", DRV_NAME, state);
	writel(state, ioaddr + GSTAR);

	if (state & Arf) {
		printk(KERN_ERR "%s: failure (Arf). Harass the maintainer\n",
		       dev->name);
		goto out;
	}
	state &= ~ArAck;
	if (state & Cfg) {
		if (debug > 0)
			printk(KERN_DEBUG "%s: CfgIV\n", DRV_NAME);
		if (priv->iqcfg[priv->cfg_cur++%IRQ_RING_SIZE] & cpu_to_le32(Arf))
			printk(KERN_ERR "%s: %s failed\n", dev->name, "CFG");
		if (!(state &= ~Cfg))
			goto out;
	}
	if (state & RxEvt) {
		i = dev_per_card - 1;
		do {
			dscc4_rx_irq(priv, root + i);
		} while (--i >= 0);
		state &= ~RxEvt;
	}
	if (state & TxEvt) {
		i = dev_per_card - 1;
		do {
			dscc4_tx_irq(priv, root + i);
		} while (--i >= 0);
		state &= ~TxEvt;
	}
out:
	spin_unlock_irqrestore(&priv->lock, flags);
	return IRQ_RETVAL(handled);
}

static void dscc4_tx_irq(struct dscc4_pci_priv *ppriv,
			 struct dscc4_dev_priv *dpriv)
{
	struct net_device *dev = dscc4_to_dev(dpriv);
	u32 state;
	int cur, loop = 0;

try:
	cur = dpriv->iqtx_current%IRQ_RING_SIZE;
	state = le32_to_cpu(dpriv->iqtx[cur]);
	if (!state) {
		if (debug > 4)
			printk(KERN_DEBUG "%s: Tx ISR = 0x%08x\n", dev->name,
			       state);
		if ((debug > 1) && (loop > 1))
			printk(KERN_DEBUG "%s: Tx irq loop=%d\n", dev->name, loop);
		if (loop && netif_queue_stopped(dev))
			if ((dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE)
				netif_wake_queue(dev);

		if (netif_running(dev) && dscc4_tx_quiescent(dpriv, dev) &&
		    !dscc4_tx_done(dpriv))
			dscc4_do_tx(dpriv, dev);
		return;
	}
	loop++;
	dpriv->iqtx[cur] = 0;
	dpriv->iqtx_current++;

	if (state_check(state, dpriv, dev, "Tx") < 0)
		return;

	if (state & SccEvt) {
		if (state & Alls) {
			struct sk_buff *skb;
			struct TxFD *tx_fd;

			if (debug > 2)
				dscc4_tx_print(dev, dpriv, "Alls");
			/*
			 * DataComplete can't be trusted for Tx completion.
			 * Cf errata DS5 p.8
			 */
			cur = dpriv->tx_dirty%TX_RING_SIZE;
			tx_fd = dpriv->tx_fd + cur;
			skb = dpriv->tx_skbuff[cur];
			if (skb) {
				pci_unmap_single(ppriv->pdev, le32_to_cpu(tx_fd->data),
						 skb->len, PCI_DMA_TODEVICE);
				if (tx_fd->state & FrameEnd) {
					dev->stats.tx_packets++;
					dev->stats.tx_bytes += skb->len;
				}
				dev_kfree_skb_irq(skb);
				dpriv->tx_skbuff[cur] = NULL;
				++dpriv->tx_dirty;
			} else {
				if (debug > 1)
					printk(KERN_ERR "%s Tx: NULL skb %d\n",
					       dev->name, cur);
			}
			/*
			 * If the driver ends up sending crap on the wire, it
			 * will be way easier to diagnose than the (not so)
			 * random freeze induced by null sized tx frames.
			 */
			tx_fd->data = tx_fd->next;
			tx_fd->state = FrameEnd | TO_STATE_TX(2*DUMMY_SKB_SIZE);
			tx_fd->complete = 0x00000000;
			tx_fd->jiffies = 0;

			if (!(state &= ~Alls))
				goto try;
		}
		/*
		 * Transmit Data Underrun
		 */
		if (state & Xdu) {
			printk(KERN_ERR "%s: XDU. Ask maintainer\n", DRV_NAME);
			dpriv->flags = NeedIDT;
			/* Tx reset */
			writel(MTFi | Rdt,
			       dpriv->base_addr + 0x0c*dpriv->dev_id + CH0CFG);
			writel(Action, dpriv->base_addr + GCMDR);
			return;
		}
		if (state & Cts) {
			printk(KERN_INFO "%s: CTS transition\n", dev->name);
			if (!(state &= ~Cts)) /* DEBUG */
				goto try;
		}
		if (state & Xmr) {
			printk(KERN_ERR "%s: Xmr. Ask maintainer\n", DRV_NAME);
			if (!(state &= ~Xmr)) /* DEBUG */
				goto try;
		}
		if (state & Xpr) {
			void __iomem *scc_addr;
			unsigned long ring;
			int i;

			/*
			 * - the busy condition happens (sometimes);
			 * - it doesn't seem to make the handler unreliable.
			 */
			for (i = 1; i; i <<= 1) {
				if (!(scc_readl_star(dpriv, dev) & SccBusy))
					break;
			}
			if (!i)
				printk(KERN_INFO "%s busy in irq\n", dev->name);

			scc_addr = dpriv->base_addr + 0x0c*dpriv->dev_id;
			/* Keep this order: IDT before IDR */
			if (dpriv->flags & NeedIDT) {
				if (debug > 2)
					dscc4_tx_print(dev, dpriv, "Xpr");
				ring = dpriv->tx_fd_dma +
				       (dpriv->tx_dirty%TX_RING_SIZE)*
				       sizeof(struct TxFD);
				writel(ring, scc_addr + CH0BTDA);
				dscc4_do_tx(dpriv, dev);
				writel(MTFi | Idt, scc_addr + CH0CFG);
				if (dscc4_do_action(dev, "IDT") < 0)
					goto err_xpr;
				dpriv->flags &= ~NeedIDT;
			}
			if (dpriv->flags & NeedIDR) {
				ring = dpriv->rx_fd_dma +
				       (dpriv->rx_current%RX_RING_SIZE)*
				       sizeof(struct RxFD);
				writel(ring, scc_addr + CH0BRDA);
				dscc4_rx_update(dpriv, dev);
				writel(MTFi | Idr, scc_addr + CH0CFG);
				if (dscc4_do_action(dev, "IDR") < 0)
					goto err_xpr;
				dpriv->flags &= ~NeedIDR;
				smp_wmb();
				/* Activate receiver and misc */
				scc_writel(0x08050008, dpriv, dev, CCR2);
			}
		err_xpr:
			if (!(state &= ~Xpr))
				goto try;
		}
		if (state & Cd) {
			if (debug > 0)
				printk(KERN_INFO "%s: CD transition\n", dev->name);
			if (!(state &= ~Cd)) /* DEBUG */
				goto try;
		}
	} else { /* ! SccEvt */
		if (state & Hi) {
#ifdef DSCC4_POLLING
			while (!dscc4_tx_poll(dpriv, dev));
#endif
			printk(KERN_INFO "%s: Tx Hi\n", dev->name);
			state &= ~Hi;
		}
		if (state & Err) {
			printk(KERN_INFO "%s: Tx ERR\n", dev->name);
			dev->stats.tx_errors++;
			state &= ~Err;
		}
	}
	goto try;
}

static void dscc4_rx_irq(struct dscc4_pci_priv *priv,
			 struct dscc4_dev_priv *dpriv)
{
	struct net_device *dev = dscc4_to_dev(dpriv);
	u32 state;
	int cur;

try:
	cur = dpriv->iqrx_current%IRQ_RING_SIZE;
	state = le32_to_cpu(dpriv->iqrx[cur]);
	if (!state)
		return;
	dpriv->iqrx[cur] = 0;
	dpriv->iqrx_current++;

	if (state_check(state, dpriv, dev, "Rx") < 0)
		return;

	if (!(state & SccEvt)) {
		struct RxFD *rx_fd;

		if (debug > 4)
			printk(KERN_DEBUG "%s: Rx ISR = 0x%08x\n", dev->name,
			       state);
		state &= 0x00ffffff;
		if (state & Err) { /* Hold or reset */
			printk(KERN_DEBUG "%s: Rx ERR\n", dev->name);
			cur = dpriv->rx_current%RX_RING_SIZE;
			rx_fd = dpriv->rx_fd + cur;
			/*
			 * Presume we're not facing a DMAC receiver reset.
			 * As we use the rx size-filtering feature of the
			 * DSCC4, the beginning of a new frame is waiting in
			 * the rx fifo. I bet a Receive Data Overflow will
			 * happen most of the time but let's try and avoid it.
			 * Btw (as for RDO) if one experiences ERR whereas
			 * the system looks rather idle, there may be a
			 * problem with latency. In this case, increasing
			 * RX_RING_SIZE may help.
			 */
			//while (dpriv->rx_needs_refill) {
			while (!(rx_fd->state1 & Hold)) {
				rx_fd++;
				cur++;
				if (!(cur = cur%RX_RING_SIZE))
					rx_fd = dpriv->rx_fd;
			}
			//dpriv->rx_needs_refill--;
			try_get_rx_skb(dpriv, dev);
			if (!rx_fd->data)
				goto try;
			rx_fd->state1 &= ~Hold;
			rx_fd->state2 = 0x00000000;
			rx_fd->end = cpu_to_le32(0xbabeface);
			//}
			goto try;
		}
		if (state & Fi) {
			dscc4_rx_skb(dpriv, dev);
			goto try;
		}
		if (state & Hi) { /* HI bit */
			printk(KERN_INFO "%s: Rx Hi\n", dev->name);
			state &= ~Hi;
			goto try;
		}
	} else { /* SccEvt */
		if (debug > 1) {
			static struct {
				u32 mask;
				const char *irq_name;
			} evts[] = {
				{ 0x00008000, "TIN"},
				{ 0x00000020, "RSC"},
				{ 0x00000010, "PCE"},
				{ 0x00000008, "PLLA"},
				{ 0, NULL}
			}, *evt;

			for (evt = evts; evt->irq_name; evt++) {
				if (state & evt->mask) {
					printk(KERN_DEBUG "%s: %s\n",
					       dev->name, evt->irq_name);
					if (!(state &= ~evt->mask))
						goto try;
				}
			}
		} else {
			if (!(state &= ~0x0000c03c))
				goto try;
		}
		if (state & Cts) {
			printk(KERN_INFO "%s: CTS transition\n", dev->name);
			if (!(state &= ~Cts)) /* DEBUG */
				goto try;
		}
		if (state & Rdo) {
			struct RxFD *rx_fd;
			void __iomem *scc_addr;
			int cur;

			//if (debug)
			//	dscc4_rx_dump(dpriv);
			scc_addr = dpriv->base_addr + 0x0c*dpriv->dev_id;

			scc_patchl(RxActivate, 0, dpriv, dev, CCR2);
			/*
			 * This has no effect. Why ?
			 * ORed with TxSccRes, one sees the CFG ack (for
			 * the TX part only).
			 */
			scc_writel(RxSccRes, dpriv, dev, CMDR);
			dpriv->flags |= RdoSet;

			/*
			 * Let's try and save something in the received data.
			 * rx_current must be incremented at least once to
			 * avoid HOLD in the BRDA-to-be-pointed desc.
			 */
			do {
				cur = dpriv->rx_current++%RX_RING_SIZE;
				rx_fd = dpriv->rx_fd + cur;
				if (!(rx_fd->state2 & DataComplete))
					break;
				if (rx_fd->state2 & FrameAborted) {
					dev->stats.rx_over_errors++;
					rx_fd->state1 |= Hold;
					rx_fd->state2 = 0x00000000;
					rx_fd->end = cpu_to_le32(0xbabeface);
				} else
					dscc4_rx_skb(dpriv, dev);
			} while (1);

			if (debug > 0) {
				if (dpriv->flags & RdoSet)
					printk(KERN_DEBUG
					       "%s: no RDO in Rx data\n", DRV_NAME);
			}
#ifdef DSCC4_RDO_EXPERIMENTAL_RECOVERY
#warning "FIXME: CH0BRDA"
			writel(dpriv->rx_fd_dma +
			       (dpriv->rx_current%RX_RING_SIZE)*
			       sizeof(struct RxFD), scc_addr + CH0BRDA);
			writel(MTFi|Rdr|Idr, scc_addr + CH0CFG);
			if (dscc4_do_action(dev, "RDR") < 0) {
				printk(KERN_ERR "%s: RDO recovery failed(%s)\n",
				       dev->name, "RDR");
				goto rdo_end;
			}
			writel(MTFi|Idr, scc_addr + CH0CFG);
			if (dscc4_do_action(dev, "IDR") < 0) {
				printk(KERN_ERR "%s: RDO recovery failed(%s)\n",
				       dev->name, "IDR");
				goto rdo_end;
			}
		rdo_end:
#endif
			scc_patchl(0, RxActivate, dpriv, dev, CCR2);
			goto try;
		}
		if (state & Cd) {
			printk(KERN_INFO "%s: CD transition\n", dev->name);
			if (!(state &= ~Cd)) /* DEBUG */
				goto try;
		}
		if (state & Flex) {
			printk(KERN_DEBUG "%s: Flex. Ttttt...\n", DRV_NAME);
			if (!(state &= ~Flex))
				goto try;
		}
	}
}

/*
 * I had expected the following to work for the first descriptor
 * (tx_fd->state = 0xc0000000)
 * - Hold=1 (don't try and branch to the next descriptor);
 * - No=0 (I want an empty data section, i.e. size=0);
 * - Fe=1 (required by No=0 or we got an Err irq and must reset).
 * It failed and locked solid. Thus the introduction of a dummy skb.
 * Problem is acknowledged in errata sheet DS5. Joy :o/
 */
static struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(DUMMY_SKB_SIZE);
	if (skb) {
		int last = dpriv->tx_dirty%TX_RING_SIZE;
		struct TxFD *tx_fd = dpriv->tx_fd + last;

		skb->len = DUMMY_SKB_SIZE;
		skb_copy_to_linear_data(skb, version,
					strlen(version) % DUMMY_SKB_SIZE);
		tx_fd->state = FrameEnd | TO_STATE_TX(DUMMY_SKB_SIZE);
		tx_fd->data = cpu_to_le32(pci_map_single(dpriv->pci_priv->pdev,
					  skb->data, DUMMY_SKB_SIZE,
					  PCI_DMA_TODEVICE));
		dpriv->tx_skbuff[last] = skb;
	}
	return skb;
}
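
/*
 * NB: the dummy frame is (partly) filled with the driver's version string,
 * so if it ever escapes on the wire it is easy to recognize in a capture
 * (cf the "crap on the wire" comment in dscc4_tx_irq()).
 */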

static int dscc4_init_ring(struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	struct pci_dev *pdev = dpriv->pci_priv->pdev;
	struct TxFD *tx_fd;
	struct RxFD *rx_fd;
	void *ring;
	int i;

	ring = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &dpriv->rx_fd_dma);
	if (!ring)
		goto err_out;
	dpriv->rx_fd = rx_fd = (struct RxFD *) ring;

	ring = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &dpriv->tx_fd_dma);
	if (!ring)
		goto err_free_dma_rx;
	dpriv->tx_fd = tx_fd = (struct TxFD *) ring;

	memset(dpriv->tx_skbuff, 0, sizeof(struct sk_buff *)*TX_RING_SIZE);
	dpriv->tx_dirty = 0xffffffff;
	i = dpriv->tx_current = 0;
	do {
		tx_fd->state = FrameEnd | TO_STATE_TX(2*DUMMY_SKB_SIZE);
		tx_fd->complete = 0x00000000;
		tx_fd->data = cpu_to_le32(dpriv->tx_fd_dma);
		(tx_fd++)->next = cpu_to_le32(dpriv->tx_fd_dma +
					      (++i%TX_RING_SIZE)*sizeof(*tx_fd));
	} while (i < TX_RING_SIZE);

	if (!dscc4_init_dummy_skb(dpriv))
		goto err_free_dma_tx;

	memset(dpriv->rx_skbuff, 0, sizeof(struct sk_buff *)*RX_RING_SIZE);
	i = dpriv->rx_dirty = dpriv->rx_current = 0;
	do {
		/* size set by the host. Multiple of 4 bytes please */
		rx_fd->state1 = HiDesc;
		rx_fd->state2 = 0x00000000;
		rx_fd->end = cpu_to_le32(0xbabeface);
		rx_fd->state1 |= TO_STATE_RX(HDLC_MAX_MRU);
		if (try_get_rx_skb(dpriv, dev) >= 0)
			dpriv->rx_dirty++;
		(rx_fd++)->next = cpu_to_le32(dpriv->rx_fd_dma +
					      (++i%RX_RING_SIZE)*sizeof(*rx_fd));
	} while (i < RX_RING_SIZE);

	return 0;

err_free_dma_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, ring, dpriv->tx_fd_dma);
err_free_dma_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma);
err_out:
	return -ENOMEM;
}

static void __devexit dscc4_remove_one(struct pci_dev *pdev)
{
	struct dscc4_pci_priv *ppriv;
	struct dscc4_dev_priv *root;
	void __iomem *ioaddr;
	int i;

	ppriv = pci_get_drvdata(pdev);
	root = ppriv->root;

	ioaddr = root->base_addr;

	dscc4_pci_reset(pdev, ioaddr);

	free_irq(pdev->irq, root);
	pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), ppriv->iqcfg,
			    ppriv->iqcfg_dma);
	for (i = 0; i < dev_per_card; i++) {
		struct dscc4_dev_priv *dpriv = root + i;

		dscc4_release_ring(dpriv);
		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
				    dpriv->iqrx, dpriv->iqrx_dma);
		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
				    dpriv->iqtx, dpriv->iqtx_dma);
	}

	dscc4_free1(pdev);

	iounmap(ioaddr);

	pci_release_region(pdev, 1);
	pci_release_region(pdev, 0);

	pci_disable_device(pdev);
}

static int dscc4_hdlc_attach(struct net_device *dev, unsigned short encoding,
			     unsigned short parity)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);

	if (encoding != ENCODING_NRZ &&
	    encoding != ENCODING_NRZI &&
	    encoding != ENCODING_FM_MARK &&
	    encoding != ENCODING_FM_SPACE &&
	    encoding != ENCODING_MANCHESTER)
		return -EINVAL;

	if (parity != PARITY_NONE &&
	    parity != PARITY_CRC16_PR0_CCITT &&
	    parity != PARITY_CRC16_PR1_CCITT &&
	    parity != PARITY_CRC32_PR0_CCITT &&
	    parity != PARITY_CRC32_PR1_CCITT)
		return -EINVAL;

	dpriv->encoding = encoding;
	dpriv->parity = parity;
	return 0;
}

#ifndef MODULE
static int __init dscc4_setup(char *str)
{
	int *args[] = { &debug, &quartz, NULL }, **p = args;

	while (*p && (get_option(&str, *p) == 2))
		p++;
	return 1;
}

__setup("dscc4.setup=", dscc4_setup);
#endif
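
/*
 * Usage sketch for the built-in case: booting with the parameter
 * "dscc4.setup=1,33000000" parses the comma-separated integers in order,
 * setting debug = 1 and quartz = 33000000 (the 33 MHz figure is only an
 * example value, cf DSCC4_HZ_MAX).
 */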

static DEFINE_PCI_DEVICE_TABLE(dscc4_pci_tbl) = {
	{ PCI_VENDOR_ID_SIEMENS, PCI_DEVICE_ID_SIEMENS_DSCC4,
	  PCI_ANY_ID, PCI_ANY_ID, },
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, dscc4_pci_tbl);

static struct pci_driver dscc4_driver = {
	.name		= DRV_NAME,
	.id_table	= dscc4_pci_tbl,
	.probe		= dscc4_init_one,
	.remove		= __devexit_p(dscc4_remove_one),
};

static int __init dscc4_init_module(void)
{
	return pci_register_driver(&dscc4_driver);
}

static void __exit dscc4_cleanup_module(void)
{
	pci_unregister_driver(&dscc4_driver);
}

module_init(dscc4_init_module);
module_exit(dscc4_cleanup_module);
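
/*
 * Module usage sketch: when built as a module, the equivalent of the
 * "dscc4.setup=" boot option above is passing the parameters directly:
 *
 *	modprobe dscc4 debug=1 quartz=33000000
 */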