1/* 2 * QLogic QLA3xxx NIC HBA Driver 3 * Copyright (c) 2003-2006 QLogic Corporation 4 * 5 * See LICENSE.qla3xxx for copyright and licensing details. 6 */ 7 8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 9 10#include <linux/kernel.h> 11#include <linux/init.h> 12#include <linux/types.h> 13#include <linux/module.h> 14#include <linux/list.h> 15#include <linux/pci.h> 16#include <linux/dma-mapping.h> 17#include <linux/sched.h> 18#include <linux/slab.h> 19#include <linux/dmapool.h> 20#include <linux/mempool.h> 21#include <linux/spinlock.h> 22#include <linux/kthread.h> 23#include <linux/interrupt.h> 24#include <linux/errno.h> 25#include <linux/ioport.h> 26#include <linux/ip.h> 27#include <linux/in.h> 28#include <linux/if_arp.h> 29#include <linux/if_ether.h> 30#include <linux/netdevice.h> 31#include <linux/etherdevice.h> 32#include <linux/ethtool.h> 33#include <linux/skbuff.h> 34#include <linux/rtnetlink.h> 35#include <linux/if_vlan.h> 36#include <linux/delay.h> 37#include <linux/mm.h> 38 39#include "qla3xxx.h" 40 41#define DRV_NAME "qla3xxx" 42#define DRV_STRING "QLogic ISP3XXX Network Driver" 43#define DRV_VERSION "v2.03.00-k5" 44 45static const char ql3xxx_driver_name[] = DRV_NAME; 46static const char ql3xxx_driver_version[] = DRV_VERSION; 47 48#define TIMED_OUT_MSG \ 49"Timed out waiting for management port to get free before issuing command\n" 50 51MODULE_AUTHOR("QLogic Corporation"); 52MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " "); 53MODULE_LICENSE("GPL"); 54MODULE_VERSION(DRV_VERSION); 55 56static const u32 default_msg 57 = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK 58 | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN; 59 60static int debug = -1; /* defaults above */ 61module_param(debug, int, 0); 62MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 63 64static int msi; 65module_param(msi, int, 0); 66MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts."); 67 68static DEFINE_PCI_DEVICE_TABLE(ql3xxx_pci_tbl) = { 69 
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)}, 70 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)}, 71 /* required last entry */ 72 {0,} 73}; 74 75MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl); 76 77/* 78 * These are the known PHY's which are used 79 */ 80enum PHY_DEVICE_TYPE { 81 PHY_TYPE_UNKNOWN = 0, 82 PHY_VITESSE_VSC8211, 83 PHY_AGERE_ET1011C, 84 MAX_PHY_DEV_TYPES 85}; 86 87struct PHY_DEVICE_INFO { 88 const enum PHY_DEVICE_TYPE phyDevice; 89 const u32 phyIdOUI; 90 const u16 phyIdModel; 91 const char *name; 92}; 93 94static const struct PHY_DEVICE_INFO PHY_DEVICES[] = { 95 {PHY_TYPE_UNKNOWN, 0x000000, 0x0, "PHY_TYPE_UNKNOWN"}, 96 {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"}, 97 {PHY_AGERE_ET1011C, 0x00a0bc, 0x1, "PHY_AGERE_ET1011C"}, 98}; 99 100 101/* 102 * Caller must take hw_lock. 103 */ 104static int ql_sem_spinlock(struct ql3_adapter *qdev, 105 u32 sem_mask, u32 sem_bits) 106{ 107 struct ql3xxx_port_registers __iomem *port_regs = 108 qdev->mem_map_registers; 109 u32 value; 110 unsigned int seconds = 3; 111 112 do { 113 writel((sem_mask | sem_bits), 114 &port_regs->CommonRegs.semaphoreReg); 115 value = readl(&port_regs->CommonRegs.semaphoreReg); 116 if ((value & (sem_mask >> 16)) == sem_bits) 117 return 0; 118 ssleep(1); 119 } while (--seconds); 120 return -1; 121} 122 123static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask) 124{ 125 struct ql3xxx_port_registers __iomem *port_regs = 126 qdev->mem_map_registers; 127 writel(sem_mask, &port_regs->CommonRegs.semaphoreReg); 128 readl(&port_regs->CommonRegs.semaphoreReg); 129} 130 131static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits) 132{ 133 struct ql3xxx_port_registers __iomem *port_regs = 134 qdev->mem_map_registers; 135 u32 value; 136 137 writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg); 138 value = readl(&port_regs->CommonRegs.semaphoreReg); 139 return ((value & (sem_mask >> 16)) == sem_bits); 140} 141 142/* 143 * Caller holds 
hw_lock. 144 */ 145static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) 146{ 147 int i = 0; 148 149 while (i < 10) { 150 if (i) 151 ssleep(1); 152 153 if (ql_sem_lock(qdev, 154 QL_DRVR_SEM_MASK, 155 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) 156 * 2) << 1)) { 157 netdev_printk(KERN_DEBUG, qdev->ndev, 158 "driver lock acquired\n"); 159 return 1; 160 } 161 } 162 163 netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n"); 164 return 0; 165} 166 167static void ql_set_register_page(struct ql3_adapter *qdev, u32 page) 168{ 169 struct ql3xxx_port_registers __iomem *port_regs = 170 qdev->mem_map_registers; 171 172 writel(((ISP_CONTROL_NP_MASK << 16) | page), 173 &port_regs->CommonRegs.ispControlStatus); 174 readl(&port_regs->CommonRegs.ispControlStatus); 175 qdev->current_page = page; 176} 177 178static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) 179{ 180 u32 value; 181 unsigned long hw_flags; 182 183 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 184 value = readl(reg); 185 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 186 187 return value; 188} 189 190static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg) 191{ 192 return readl(reg); 193} 194 195static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) 196{ 197 u32 value; 198 unsigned long hw_flags; 199 200 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 201 202 if (qdev->current_page != 0) 203 ql_set_register_page(qdev, 0); 204 value = readl(reg); 205 206 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 207 return value; 208} 209 210static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg) 211{ 212 if (qdev->current_page != 0) 213 ql_set_register_page(qdev, 0); 214 return readl(reg); 215} 216 217static void ql_write_common_reg_l(struct ql3_adapter *qdev, 218 u32 __iomem *reg, u32 value) 219{ 220 unsigned long hw_flags; 221 222 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 223 writel(value, reg); 224 readl(reg); 225 
spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 226} 227 228static void ql_write_common_reg(struct ql3_adapter *qdev, 229 u32 __iomem *reg, u32 value) 230{ 231 writel(value, reg); 232 readl(reg); 233} 234 235static void ql_write_nvram_reg(struct ql3_adapter *qdev, 236 u32 __iomem *reg, u32 value) 237{ 238 writel(value, reg); 239 readl(reg); 240 udelay(1); 241} 242 243static void ql_write_page0_reg(struct ql3_adapter *qdev, 244 u32 __iomem *reg, u32 value) 245{ 246 if (qdev->current_page != 0) 247 ql_set_register_page(qdev, 0); 248 writel(value, reg); 249 readl(reg); 250} 251 252/* 253 * Caller holds hw_lock. Only called during init. 254 */ 255static void ql_write_page1_reg(struct ql3_adapter *qdev, 256 u32 __iomem *reg, u32 value) 257{ 258 if (qdev->current_page != 1) 259 ql_set_register_page(qdev, 1); 260 writel(value, reg); 261 readl(reg); 262} 263 264/* 265 * Caller holds hw_lock. Only called during init. 266 */ 267static void ql_write_page2_reg(struct ql3_adapter *qdev, 268 u32 __iomem *reg, u32 value) 269{ 270 if (qdev->current_page != 2) 271 ql_set_register_page(qdev, 2); 272 writel(value, reg); 273 readl(reg); 274} 275 276static void ql_disable_interrupts(struct ql3_adapter *qdev) 277{ 278 struct ql3xxx_port_registers __iomem *port_regs = 279 qdev->mem_map_registers; 280 281 ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, 282 (ISP_IMR_ENABLE_INT << 16)); 283 284} 285 286static void ql_enable_interrupts(struct ql3_adapter *qdev) 287{ 288 struct ql3xxx_port_registers __iomem *port_regs = 289 qdev->mem_map_registers; 290 291 ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, 292 ((0xff << 16) | ISP_IMR_ENABLE_INT)); 293 294} 295 296static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, 297 struct ql_rcv_buf_cb *lrg_buf_cb) 298{ 299 dma_addr_t map; 300 int err; 301 lrg_buf_cb->next = NULL; 302 303 if (qdev->lrg_buf_free_tail == NULL) { /* The list is empty */ 304 qdev->lrg_buf_free_head = 
qdev->lrg_buf_free_tail = lrg_buf_cb; 305 } else { 306 qdev->lrg_buf_free_tail->next = lrg_buf_cb; 307 qdev->lrg_buf_free_tail = lrg_buf_cb; 308 } 309 310 if (!lrg_buf_cb->skb) { 311 lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, 312 qdev->lrg_buffer_len); 313 if (unlikely(!lrg_buf_cb->skb)) { 314 netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n"); 315 qdev->lrg_buf_skb_check++; 316 } else { 317 /* 318 * We save some space to copy the ethhdr from first 319 * buffer 320 */ 321 skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE); 322 map = pci_map_single(qdev->pdev, 323 lrg_buf_cb->skb->data, 324 qdev->lrg_buffer_len - 325 QL_HEADER_SPACE, 326 PCI_DMA_FROMDEVICE); 327 err = pci_dma_mapping_error(qdev->pdev, map); 328 if (err) { 329 netdev_err(qdev->ndev, 330 "PCI mapping failed with error: %d\n", 331 err); 332 dev_kfree_skb(lrg_buf_cb->skb); 333 lrg_buf_cb->skb = NULL; 334 335 qdev->lrg_buf_skb_check++; 336 return; 337 } 338 339 lrg_buf_cb->buf_phy_addr_low = 340 cpu_to_le32(LS_64BITS(map)); 341 lrg_buf_cb->buf_phy_addr_high = 342 cpu_to_le32(MS_64BITS(map)); 343 dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); 344 dma_unmap_len_set(lrg_buf_cb, maplen, 345 qdev->lrg_buffer_len - 346 QL_HEADER_SPACE); 347 } 348 } 349 350 qdev->lrg_buf_free_count++; 351} 352 353static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter 354 *qdev) 355{ 356 struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; 357 358 if (lrg_buf_cb != NULL) { 359 qdev->lrg_buf_free_head = lrg_buf_cb->next; 360 if (qdev->lrg_buf_free_head == NULL) 361 qdev->lrg_buf_free_tail = NULL; 362 qdev->lrg_buf_free_count--; 363 } 364 365 return lrg_buf_cb; 366} 367 368static u32 addrBits = EEPROM_NO_ADDR_BITS; 369static u32 dataBits = EEPROM_NO_DATA_BITS; 370 371static void fm93c56a_deselect(struct ql3_adapter *qdev); 372static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr, 373 unsigned short *value); 374 375/* 376 * Caller holds hw_lock. 
377 */ 378static void fm93c56a_select(struct ql3_adapter *qdev) 379{ 380 struct ql3xxx_port_registers __iomem *port_regs = 381 qdev->mem_map_registers; 382 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 383 384 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; 385 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); 386 ql_write_nvram_reg(qdev, spir, 387 ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data)); 388} 389 390/* 391 * Caller holds hw_lock. 392 */ 393static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr) 394{ 395 int i; 396 u32 mask; 397 u32 dataBit; 398 u32 previousBit; 399 struct ql3xxx_port_registers __iomem *port_regs = 400 qdev->mem_map_registers; 401 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 402 403 /* Clock in a zero, then do the start bit */ 404 ql_write_nvram_reg(qdev, spir, 405 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 406 AUBURN_EEPROM_DO_1)); 407 ql_write_nvram_reg(qdev, spir, 408 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 409 AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE)); 410 ql_write_nvram_reg(qdev, spir, 411 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 412 AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL)); 413 414 mask = 1 << (FM93C56A_CMD_BITS - 1); 415 /* Force the previous data bit to be different */ 416 previousBit = 0xffff; 417 for (i = 0; i < FM93C56A_CMD_BITS; i++) { 418 dataBit = (cmd & mask) 419 ? 
AUBURN_EEPROM_DO_1 420 : AUBURN_EEPROM_DO_0; 421 if (previousBit != dataBit) { 422 /* If the bit changed, change the DO state to match */ 423 ql_write_nvram_reg(qdev, spir, 424 (ISP_NVRAM_MASK | 425 qdev->eeprom_cmd_data | dataBit)); 426 previousBit = dataBit; 427 } 428 ql_write_nvram_reg(qdev, spir, 429 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 430 dataBit | AUBURN_EEPROM_CLK_RISE)); 431 ql_write_nvram_reg(qdev, spir, 432 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 433 dataBit | AUBURN_EEPROM_CLK_FALL)); 434 cmd = cmd << 1; 435 } 436 437 mask = 1 << (addrBits - 1); 438 /* Force the previous data bit to be different */ 439 previousBit = 0xffff; 440 for (i = 0; i < addrBits; i++) { 441 dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 442 : AUBURN_EEPROM_DO_0; 443 if (previousBit != dataBit) { 444 /* 445 * If the bit changed, then change the DO state to 446 * match 447 */ 448 ql_write_nvram_reg(qdev, spir, 449 (ISP_NVRAM_MASK | 450 qdev->eeprom_cmd_data | dataBit)); 451 previousBit = dataBit; 452 } 453 ql_write_nvram_reg(qdev, spir, 454 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 455 dataBit | AUBURN_EEPROM_CLK_RISE)); 456 ql_write_nvram_reg(qdev, spir, 457 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 458 dataBit | AUBURN_EEPROM_CLK_FALL)); 459 eepromAddr = eepromAddr << 1; 460 } 461} 462 463/* 464 * Caller holds hw_lock. 465 */ 466static void fm93c56a_deselect(struct ql3_adapter *qdev) 467{ 468 struct ql3xxx_port_registers __iomem *port_regs = 469 qdev->mem_map_registers; 470 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 471 472 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0; 473 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); 474} 475 476/* 477 * Caller holds hw_lock. 
478 */ 479static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value) 480{ 481 int i; 482 u32 data = 0; 483 u32 dataBit; 484 struct ql3xxx_port_registers __iomem *port_regs = 485 qdev->mem_map_registers; 486 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 487 488 /* Read the data bits */ 489 /* The first bit is a dummy. Clock right over it. */ 490 for (i = 0; i < dataBits; i++) { 491 ql_write_nvram_reg(qdev, spir, 492 ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 493 AUBURN_EEPROM_CLK_RISE); 494 ql_write_nvram_reg(qdev, spir, 495 ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 496 AUBURN_EEPROM_CLK_FALL); 497 dataBit = (ql_read_common_reg(qdev, spir) & 498 AUBURN_EEPROM_DI_1) ? 1 : 0; 499 data = (data << 1) | dataBit; 500 } 501 *value = (u16)data; 502} 503 504/* 505 * Caller holds hw_lock. 506 */ 507static void eeprom_readword(struct ql3_adapter *qdev, 508 u32 eepromAddr, unsigned short *value) 509{ 510 fm93c56a_select(qdev); 511 fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr); 512 fm93c56a_datain(qdev, value); 513 fm93c56a_deselect(qdev); 514} 515 516static void ql_set_mac_addr(struct net_device *ndev, u16 *addr) 517{ 518 __le16 *p = (__le16 *)ndev->dev_addr; 519 p[0] = cpu_to_le16(addr[0]); 520 p[1] = cpu_to_le16(addr[1]); 521 p[2] = cpu_to_le16(addr[2]); 522} 523 524static int ql_get_nvram_params(struct ql3_adapter *qdev) 525{ 526 u16 *pEEPROMData; 527 u16 checksum = 0; 528 u32 index; 529 unsigned long hw_flags; 530 531 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 532 533 pEEPROMData = (u16 *)&qdev->nvram_data; 534 qdev->eeprom_cmd_data = 0; 535 if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK, 536 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 537 2) << 10)) { 538 pr_err("%s: Failed ql_sem_spinlock()\n", __func__); 539 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 540 return -1; 541 } 542 543 for (index = 0; index < EEPROM_SIZE; index++) { 544 eeprom_readword(qdev, index, pEEPROMData); 545 checksum += *pEEPROMData; 546 pEEPROMData++; 547 } 
548 ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK); 549 550 if (checksum != 0) { 551 netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n", 552 checksum); 553 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 554 return -1; 555 } 556 557 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 558 return checksum; 559} 560 561static const u32 PHYAddr[2] = { 562 PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS 563}; 564 565static int ql_wait_for_mii_ready(struct ql3_adapter *qdev) 566{ 567 struct ql3xxx_port_registers __iomem *port_regs = 568 qdev->mem_map_registers; 569 u32 temp; 570 int count = 1000; 571 572 while (count) { 573 temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg); 574 if (!(temp & MAC_MII_STATUS_BSY)) 575 return 0; 576 udelay(10); 577 count--; 578 } 579 return -1; 580} 581 582static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev) 583{ 584 struct ql3xxx_port_registers __iomem *port_regs = 585 qdev->mem_map_registers; 586 u32 scanControl; 587 588 if (qdev->numPorts > 1) { 589 /* Auto scan will cycle through multiple ports */ 590 scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC; 591 } else { 592 scanControl = MAC_MII_CONTROL_SC; 593 } 594 595 /* 596 * Scan register 1 of PHY/PETBI, 597 * Set up to scan both devices 598 * The autoscan starts from the first register, completes 599 * the last one before rolling over to the first 600 */ 601 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, 602 PHYAddr[0] | MII_SCAN_REGISTER); 603 604 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, 605 (scanControl) | 606 ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16)); 607} 608 609static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev) 610{ 611 u8 ret; 612 struct ql3xxx_port_registers __iomem *port_regs = 613 qdev->mem_map_registers; 614 615 /* See if scan mode is enabled before we turn it off */ 616 if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) & 617 (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) { 618 /* Scan is enabled */ 619 ret = 1; 
620 } else { 621 /* Scan is disabled */ 622 ret = 0; 623 } 624 625 /* 626 * When disabling scan mode you must first change the MII register 627 * address 628 */ 629 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, 630 PHYAddr[0] | MII_SCAN_REGISTER); 631 632 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, 633 ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS | 634 MAC_MII_CONTROL_RC) << 16)); 635 636 return ret; 637} 638 639static int ql_mii_write_reg_ex(struct ql3_adapter *qdev, 640 u16 regAddr, u16 value, u32 phyAddr) 641{ 642 struct ql3xxx_port_registers __iomem *port_regs = 643 qdev->mem_map_registers; 644 u8 scanWasEnabled; 645 646 scanWasEnabled = ql_mii_disable_scan_mode(qdev); 647 648 if (ql_wait_for_mii_ready(qdev)) { 649 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); 650 return -1; 651 } 652 653 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, 654 phyAddr | regAddr); 655 656 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value); 657 658 /* Wait for write to complete 9/10/04 SJP */ 659 if (ql_wait_for_mii_ready(qdev)) { 660 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); 661 return -1; 662 } 663 664 if (scanWasEnabled) 665 ql_mii_enable_scan_mode(qdev); 666 667 return 0; 668} 669 670static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr, 671 u16 *value, u32 phyAddr) 672{ 673 struct ql3xxx_port_registers __iomem *port_regs = 674 qdev->mem_map_registers; 675 u8 scanWasEnabled; 676 u32 temp; 677 678 scanWasEnabled = ql_mii_disable_scan_mode(qdev); 679 680 if (ql_wait_for_mii_ready(qdev)) { 681 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); 682 return -1; 683 } 684 685 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, 686 phyAddr | regAddr); 687 688 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, 689 (MAC_MII_CONTROL_RC << 16)); 690 691 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, 692 (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC); 693 694 /* Wait for the read to complete */ 695 
if (ql_wait_for_mii_ready(qdev)) { 696 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); 697 return -1; 698 } 699 700 temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg); 701 *value = (u16) temp; 702 703 if (scanWasEnabled) 704 ql_mii_enable_scan_mode(qdev); 705 706 return 0; 707} 708 709static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value) 710{ 711 struct ql3xxx_port_registers __iomem *port_regs = 712 qdev->mem_map_registers; 713 714 ql_mii_disable_scan_mode(qdev); 715 716 if (ql_wait_for_mii_ready(qdev)) { 717 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); 718 return -1; 719 } 720 721 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, 722 qdev->PHYAddr | regAddr); 723 724 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value); 725 726 /* Wait for write to complete. */ 727 if (ql_wait_for_mii_ready(qdev)) { 728 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); 729 return -1; 730 } 731 732 ql_mii_enable_scan_mode(qdev); 733 734 return 0; 735} 736 737static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value) 738{ 739 u32 temp; 740 struct ql3xxx_port_registers __iomem *port_regs = 741 qdev->mem_map_registers; 742 743 ql_mii_disable_scan_mode(qdev); 744 745 if (ql_wait_for_mii_ready(qdev)) { 746 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); 747 return -1; 748 } 749 750 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, 751 qdev->PHYAddr | regAddr); 752 753 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, 754 (MAC_MII_CONTROL_RC << 16)); 755 756 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, 757 (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC); 758 759 /* Wait for the read to complete */ 760 if (ql_wait_for_mii_ready(qdev)) { 761 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); 762 return -1; 763 } 764 765 temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg); 766 *value = (u16) temp; 767 768 ql_mii_enable_scan_mode(qdev); 769 770 return 0; 771} 772 
773static void ql_petbi_reset(struct ql3_adapter *qdev) 774{ 775 ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET); 776} 777 778static void ql_petbi_start_neg(struct ql3_adapter *qdev) 779{ 780 u16 reg; 781 782 /* Enable Auto-negotiation sense */ 783 ql_mii_read_reg(qdev, PETBI_TBI_CTRL, ®); 784 reg |= PETBI_TBI_AUTO_SENSE; 785 ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg); 786 787 ql_mii_write_reg(qdev, PETBI_NEG_ADVER, 788 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX); 789 790 ql_mii_write_reg(qdev, PETBI_CONTROL_REG, 791 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG | 792 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000); 793 794} 795 796static void ql_petbi_reset_ex(struct ql3_adapter *qdev) 797{ 798 ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET, 799 PHYAddr[qdev->mac_index]); 800} 801 802static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev) 803{ 804 u16 reg; 805 806 /* Enable Auto-negotiation sense */ 807 ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, ®, 808 PHYAddr[qdev->mac_index]); 809 reg |= PETBI_TBI_AUTO_SENSE; 810 ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg, 811 PHYAddr[qdev->mac_index]); 812 813 ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER, 814 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX, 815 PHYAddr[qdev->mac_index]); 816 817 ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, 818 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG | 819 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000, 820 PHYAddr[qdev->mac_index]); 821} 822 823static void ql_petbi_init(struct ql3_adapter *qdev) 824{ 825 ql_petbi_reset(qdev); 826 ql_petbi_start_neg(qdev); 827} 828 829static void ql_petbi_init_ex(struct ql3_adapter *qdev) 830{ 831 ql_petbi_reset_ex(qdev); 832 ql_petbi_start_neg_ex(qdev); 833} 834 835static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev) 836{ 837 u16 reg; 838 839 if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, ®) < 0) 840 return 0; 841 842 return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE; 843} 844 845static void phyAgereSpecificInit(struct 
ql3_adapter *qdev, u32 miiAddr) 846{ 847 netdev_info(qdev->ndev, "enabling Agere specific PHY\n"); 848 /* power down device bit 11 = 1 */ 849 ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr); 850 /* enable diagnostic mode bit 2 = 1 */ 851 ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr); 852 /* 1000MB amplitude adjust (see Agere errata) */ 853 ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr); 854 /* 1000MB amplitude adjust (see Agere errata) */ 855 ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr); 856 /* 100MB amplitude adjust (see Agere errata) */ 857 ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr); 858 /* 100MB amplitude adjust (see Agere errata) */ 859 ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr); 860 /* 10MB amplitude adjust (see Agere errata) */ 861 ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr); 862 /* 10MB amplitude adjust (see Agere errata) */ 863 ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr); 864 /* point to hidden reg 0x2806 */ 865 ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr); 866 /* Write new PHYAD w/bit 5 set */ 867 ql_mii_write_reg_ex(qdev, 0x11, 868 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr); 869 /* 870 * Disable diagnostic mode bit 2 = 0 871 * Power up device bit 11 = 0 872 * Link up (on) and activity (blink) 873 */ 874 ql_mii_write_reg(qdev, 0x12, 0x840a); 875 ql_mii_write_reg(qdev, 0x00, 0x1140); 876 ql_mii_write_reg(qdev, 0x1c, 0xfaf0); 877} 878 879static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev, 880 u16 phyIdReg0, u16 phyIdReg1) 881{ 882 enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN; 883 u32 oui; 884 u16 model; 885 int i; 886 887 if (phyIdReg0 == 0xffff) 888 return result; 889 890 if (phyIdReg1 == 0xffff) 891 return result; 892 893 /* oui is split between two registers */ 894 oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10); 895 896 model = (phyIdReg1 & PHY_MODEL_MASK) >> 4; 897 898 /* Scan table for this PHY */ 899 for (i = 0; i < MAX_PHY_DEV_TYPES; i++) { 900 if ((oui == 
PHY_DEVICES[i].phyIdOUI) && 901 (model == PHY_DEVICES[i].phyIdModel)) { 902 netdev_info(qdev->ndev, "Phy: %s\n", 903 PHY_DEVICES[i].name); 904 result = PHY_DEVICES[i].phyDevice; 905 break; 906 } 907 } 908 909 return result; 910} 911 912static int ql_phy_get_speed(struct ql3_adapter *qdev) 913{ 914 u16 reg; 915 916 switch (qdev->phyType) { 917 case PHY_AGERE_ET1011C: { 918 if (ql_mii_read_reg(qdev, 0x1A, ®) < 0) 919 return 0; 920 921 reg = (reg >> 8) & 3; 922 break; 923 } 924 default: 925 if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, ®) < 0) 926 return 0; 927 928 reg = (((reg & 0x18) >> 3) & 3); 929 } 930 931 switch (reg) { 932 case 2: 933 return SPEED_1000; 934 case 1: 935 return SPEED_100; 936 case 0: 937 return SPEED_10; 938 default: 939 return -1; 940 } 941} 942 943static int ql_is_full_dup(struct ql3_adapter *qdev) 944{ 945 u16 reg; 946 947 switch (qdev->phyType) { 948 case PHY_AGERE_ET1011C: { 949 if (ql_mii_read_reg(qdev, 0x1A, ®)) 950 return 0; 951 952 return ((reg & 0x0080) && (reg & 0x1000)) != 0; 953 } 954 case PHY_VITESSE_VSC8211: 955 default: { 956 if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, ®) < 0) 957 return 0; 958 return (reg & PHY_AUX_DUPLEX_STAT) != 0; 959 } 960 } 961} 962 963static int ql_is_phy_neg_pause(struct ql3_adapter *qdev) 964{ 965 u16 reg; 966 967 if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, ®) < 0) 968 return 0; 969 970 return (reg & PHY_NEG_PAUSE) != 0; 971} 972 973static int PHY_Setup(struct ql3_adapter *qdev) 974{ 975 u16 reg1; 976 u16 reg2; 977 bool agereAddrChangeNeeded = false; 978 u32 miiAddr = 0; 979 int err; 980 981 /* Determine the PHY we are using by reading the ID's */ 982 err = ql_mii_read_reg(qdev, PHY_ID_0_REG, ®1); 983 if (err != 0) { 984 netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n"); 985 return err; 986 } 987 988 err = ql_mii_read_reg(qdev, PHY_ID_1_REG, ®2); 989 if (err != 0) { 990 netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n"); 991 return err; 992 } 993 994 /* Check if we have a 
Agere PHY */ 995 if ((reg1 == 0xffff) || (reg2 == 0xffff)) { 996 997 /* Determine which MII address we should be using 998 determined by the index of the card */ 999 if (qdev->mac_index == 0) 1000 miiAddr = MII_AGERE_ADDR_1; 1001 else 1002 miiAddr = MII_AGERE_ADDR_2; 1003 1004 err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, ®1, miiAddr); 1005 if (err != 0) { 1006 netdev_err(qdev->ndev, 1007 "Could not read from reg PHY_ID_0_REG after Agere detected\n"); 1008 return err; 1009 } 1010 1011 err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, ®2, miiAddr); 1012 if (err != 0) { 1013 netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n"); 1014 return err; 1015 } 1016 1017 /* We need to remember to initialize the Agere PHY */ 1018 agereAddrChangeNeeded = true; 1019 } 1020 1021 /* Determine the particular PHY we have on board to apply 1022 PHY specific initializations */ 1023 qdev->phyType = getPhyType(qdev, reg1, reg2); 1024 1025 if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) { 1026 /* need this here so address gets changed */ 1027 phyAgereSpecificInit(qdev, miiAddr); 1028 } else if (qdev->phyType == PHY_TYPE_UNKNOWN) { 1029 netdev_err(qdev->ndev, "PHY is unknown\n"); 1030 return -EIO; 1031 } 1032 1033 return 0; 1034} 1035 1036/* 1037 * Caller holds hw_lock. 1038 */ 1039static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable) 1040{ 1041 struct ql3xxx_port_registers __iomem *port_regs = 1042 qdev->mem_map_registers; 1043 u32 value; 1044 1045 if (enable) 1046 value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16)); 1047 else 1048 value = (MAC_CONFIG_REG_PE << 16); 1049 1050 if (qdev->mac_index) 1051 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); 1052 else 1053 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); 1054} 1055 1056/* 1057 * Caller holds hw_lock. 
1058 */ 1059static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable) 1060{ 1061 struct ql3xxx_port_registers __iomem *port_regs = 1062 qdev->mem_map_registers; 1063 u32 value; 1064 1065 if (enable) 1066 value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16)); 1067 else 1068 value = (MAC_CONFIG_REG_SR << 16); 1069 1070 if (qdev->mac_index) 1071 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); 1072 else 1073 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); 1074} 1075 1076/* 1077 * Caller holds hw_lock. 1078 */ 1079static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable) 1080{ 1081 struct ql3xxx_port_registers __iomem *port_regs = 1082 qdev->mem_map_registers; 1083 u32 value; 1084 1085 if (enable) 1086 value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16)); 1087 else 1088 value = (MAC_CONFIG_REG_GM << 16); 1089 1090 if (qdev->mac_index) 1091 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); 1092 else 1093 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); 1094} 1095 1096/* 1097 * Caller holds hw_lock. 1098 */ 1099static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable) 1100{ 1101 struct ql3xxx_port_registers __iomem *port_regs = 1102 qdev->mem_map_registers; 1103 u32 value; 1104 1105 if (enable) 1106 value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16)); 1107 else 1108 value = (MAC_CONFIG_REG_FD << 16); 1109 1110 if (qdev->mac_index) 1111 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); 1112 else 1113 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); 1114} 1115 1116/* 1117 * Caller holds hw_lock. 
1118 */ 1119static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable) 1120{ 1121 struct ql3xxx_port_registers __iomem *port_regs = 1122 qdev->mem_map_registers; 1123 u32 value; 1124 1125 if (enable) 1126 value = 1127 ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) | 1128 ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16)); 1129 else 1130 value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16); 1131 1132 if (qdev->mac_index) 1133 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); 1134 else 1135 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); 1136} 1137 1138/* 1139 * Caller holds hw_lock. 1140 */ 1141static int ql_is_fiber(struct ql3_adapter *qdev) 1142{ 1143 struct ql3xxx_port_registers __iomem *port_regs = 1144 qdev->mem_map_registers; 1145 u32 bitToCheck = 0; 1146 u32 temp; 1147 1148 switch (qdev->mac_index) { 1149 case 0: 1150 bitToCheck = PORT_STATUS_SM0; 1151 break; 1152 case 1: 1153 bitToCheck = PORT_STATUS_SM1; 1154 break; 1155 } 1156 1157 temp = ql_read_page0_reg(qdev, &port_regs->portStatus); 1158 return (temp & bitToCheck) != 0; 1159} 1160 1161static int ql_is_auto_cfg(struct ql3_adapter *qdev) 1162{ 1163 u16 reg; 1164 ql_mii_read_reg(qdev, 0x00, ®); 1165 return (reg & 0x1000) != 0; 1166} 1167 1168/* 1169 * Caller holds hw_lock. 
1170 */ 1171static int ql_is_auto_neg_complete(struct ql3_adapter *qdev) 1172{ 1173 struct ql3xxx_port_registers __iomem *port_regs = 1174 qdev->mem_map_registers; 1175 u32 bitToCheck = 0; 1176 u32 temp; 1177 1178 switch (qdev->mac_index) { 1179 case 0: 1180 bitToCheck = PORT_STATUS_AC0; 1181 break; 1182 case 1: 1183 bitToCheck = PORT_STATUS_AC1; 1184 break; 1185 } 1186 1187 temp = ql_read_page0_reg(qdev, &port_regs->portStatus); 1188 if (temp & bitToCheck) { 1189 netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n"); 1190 return 1; 1191 } 1192 netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n"); 1193 return 0; 1194} 1195 1196/* 1197 * ql_is_neg_pause() returns 1 if pause was negotiated to be on 1198 */ 1199static int ql_is_neg_pause(struct ql3_adapter *qdev) 1200{ 1201 if (ql_is_fiber(qdev)) 1202 return ql_is_petbi_neg_pause(qdev); 1203 else 1204 return ql_is_phy_neg_pause(qdev); 1205} 1206 1207static int ql_auto_neg_error(struct ql3_adapter *qdev) 1208{ 1209 struct ql3xxx_port_registers __iomem *port_regs = 1210 qdev->mem_map_registers; 1211 u32 bitToCheck = 0; 1212 u32 temp; 1213 1214 switch (qdev->mac_index) { 1215 case 0: 1216 bitToCheck = PORT_STATUS_AE0; 1217 break; 1218 case 1: 1219 bitToCheck = PORT_STATUS_AE1; 1220 break; 1221 } 1222 temp = ql_read_page0_reg(qdev, &port_regs->portStatus); 1223 return (temp & bitToCheck) != 0; 1224} 1225 1226static u32 ql_get_link_speed(struct ql3_adapter *qdev) 1227{ 1228 if (ql_is_fiber(qdev)) 1229 return SPEED_1000; 1230 else 1231 return ql_phy_get_speed(qdev); 1232} 1233 1234static int ql_is_link_full_dup(struct ql3_adapter *qdev) 1235{ 1236 if (ql_is_fiber(qdev)) 1237 return 1; 1238 else 1239 return ql_is_full_dup(qdev); 1240} 1241 1242/* 1243 * Caller holds hw_lock. 
 */
/*
 * Return nonzero when the ISP control/status register has latched a
 * link-down event for this function (ISP_CONTROL_LINK_DN_0/_1).
 */
static int ql_link_down_detect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = ISP_CONTROL_LINK_DN_0;
		break;
	case 1:
		bitToCheck = ISP_CONTROL_LINK_DN_1;
		break;
	}

	temp =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	return (temp & bitToCheck) != 0;
}

/*
 * Caller holds hw_lock.
 */
/*
 * Clear the latched link-down indication for this function.  The bit is
 * written together with its shifted-by-16 write-enable mask.  Returns 0
 * on success, 1 for an unexpected mac_index.
 */
static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	switch (qdev->mac_index) {
	case 0:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_0) |
				    (ISP_CONTROL_LINK_DN_0 << 16));
		break;

	case 1:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_1) |
				    (ISP_CONTROL_LINK_DN_1 << 16));
		break;

	default:
		return 1;
	}

	return 0;
}

/*
 * Caller holds hw_lock.
 */
/*
 * Return 1 when this adapter instance is the "link master" for the shared
 * port, i.e. when the other function's enable bit (F1 for mac 0, F3 for
 * mac 1) is NOT set in the port status register.
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_F1_ENABLED;
		break;
	case 1:
		bitToCheck = PORT_STATUS_F3_ENABLED;
		break;
	default:
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
			     "not link master\n");
		return 0;
	}

	netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
	return 1;
}

/* Issue a soft reset to the external PHY via the MII control register. */
static void ql_phy_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

/*
 * Program the PHY's advertisement registers from the NVRAM port
 * configuration (speed/duplex/pause capabilities), then restart
 * auto-negotiation.
 */
static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;
	u16 portConfiguration;

	if (qdev->phyType == PHY_AGERE_ET1011C)
		ql_mii_write_reg(qdev, 0x13, 0x0000);
					/* turn off external loopback */

	if (qdev->mac_index == 0)
		portConfiguration =
			qdev->nvram_data.macCfg_port0.portConfiguration;
	else
		portConfiguration =
			qdev->nvram_data.macCfg_port1.portConfiguration;

	/* Some HBA's in the field are set to 0 and they need to
	   be reinterpreted with a default value */
	if (portConfiguration == 0)
		portConfiguration = PORT_CONFIG_DEFAULT;

	/* Set the 1000 advertisements */
	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_GIG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
		if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
			reg |= PHY_GIG_ADV_1000F;
		else
			reg |= PHY_GIG_ADV_1000H;
	}

	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
			    PHYAddr[qdev->mac_index]);

	/* Set the 10/100 & pause negotiation advertisements */
	ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_NEG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
		reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;

	if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100F;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10F;
	}

	if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100H;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10H;
	}

	/* NOTE(review): bit 0 of the advertisement register is set here for
	 * gigabit ports; presumably a chip/PHY-specific requirement - the
	 * magic constant is not named in this file. */
	if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
		reg |= 1;

	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
			    PHYAddr[qdev->mac_index]);

	/* Kick off (re)negotiation with auto-neg enabled. */
	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, CONTROL_REG,
			    reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
			    PHYAddr[qdev->mac_index]);
}

/* Full PHY bring-up: reset, model-specific setup, then start auto-neg. */
static void ql_phy_init_ex(struct ql3_adapter *qdev)
{
	ql_phy_reset_ex(qdev);
	PHY_Setup(qdev);
	ql_phy_start_neg_ex(qdev);
}

/*
 * Caller holds hw_lock.
 */
/*
 * Read the per-function link-up bit from the port status register and
 * map it to LS_UP / LS_DOWN.
 */
static u32 ql_get_link_state(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp, linkState;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_UP0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_UP1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck)
		linkState = LS_UP;
	else
		linkState = LS_DOWN;

	return linkState;
}

/*
 * Bring up the port's media layer (PETBI for fiber, external PHY for
 * copper) under the PHY GIO hardware semaphore.  Returns 0 on success,
 * -1 if the semaphore could not be acquired.
 */
static int ql_port_start(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
		return -1;
	}

	if (ql_is_fiber(qdev)) {
		ql_petbi_init(qdev);
	} else {
		/* Copper port */
		ql_phy_init_ex(qdev);
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

/*
 * Complete a successful auto-negotiation: program the MAC with the
 * negotiated speed/duplex/pause and enable it (link master only), then
 * mark the link up.  On remote fault the port is restarted instead.
 * Returns 0 on success, -1 on semaphore or restart failure.
 */
static int ql_finish_auto_neg(struct ql3_adapter *qdev)
{

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (!ql_auto_neg_error(qdev)) {
		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			/* configure the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Configuring link\n");
			/* MAC must be held in soft reset while its
			 * speed/duplex/pause config is changed. */
			ql_mac_cfg_soft_reset(qdev, 1);
			ql_mac_cfg_gig(qdev,
				       (ql_get_link_speed
					(qdev) ==
					SPEED_1000));
			ql_mac_cfg_full_dup(qdev,
					    ql_is_link_full_dup
					    (qdev));
			ql_mac_cfg_pause(qdev,
					 ql_is_neg_pause
					 (qdev));
			ql_mac_cfg_soft_reset(qdev, 0);

			/* enable the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Enabling mac\n");
			ql_mac_enable(qdev, 1);
		}

		qdev->port_link_state = LS_UP;
		netif_start_queue(qdev->ndev);
		netif_carrier_on(qdev->ndev);
		netif_info(qdev, link, qdev->ndev,
			   "Link is up at %d Mbps, %s duplex\n",
			   ql_get_link_speed(qdev),
			   ql_is_link_full_dup(qdev) ? "full" : "half");

	} else {	/* Remote error detected */

		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Remote error detected. Calling ql_port_start()\n");
			/*
			 * ql_port_start() is shared code and needs
			 * to lock the PHY on it's own.
			 */
			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
			if (ql_port_start(qdev))	/* Restart port */
				return -1;
			return 0;
		}
	}
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

/*
 * Periodic link-state machine, run from the delayed work queue.  Tracks
 * port_link_state (LS_DOWN/LS_UP) against the current hardware state and
 * re-arms the adapter timer on every pass.
 */
static void ql_link_state_machine_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, link_state_work.work);

	u32 curr_link_state;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	curr_link_state = ql_get_link_state(qdev);

	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
		netif_info(qdev, link, qdev->ndev,
			   "Reset in progress, skip processing link state\n");

		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		/* Restart timer on 1 second interval (HZ * 1). */
		mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);

		return;
	}

	switch (qdev->port_link_state) {
	default:
		/* Unknown initial state: claim the port if we are the
		 * link master, then treat it as down. */
		if (test_bit(QL_LINK_MASTER, &qdev->flags))
			ql_port_start(qdev);
		qdev->port_link_state = LS_DOWN;
		/* Fall Through */

	case LS_DOWN:
		if (curr_link_state == LS_UP) {
			netif_info(qdev, link, qdev->ndev, "Link is up\n");
			if (ql_is_auto_neg_complete(qdev))
				ql_finish_auto_neg(qdev);

			if (qdev->port_link_state == LS_UP)
				ql_link_down_detect_clear(qdev);

			qdev->port_link_state = LS_UP;
		}
		break;

	case LS_UP:
		/*
		 * See if the link is currently down or went down and came
		 * back up
		 */
		if (curr_link_state == LS_DOWN) {
			netif_info(qdev, link, qdev->ndev, "Link is down\n");
			qdev->port_link_state = LS_DOWN;
		}
		if (ql_link_down_detect(qdev))
			qdev->port_link_state = LS_DOWN;
		break;
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	/* Restart timer on 1 second interval (HZ * 1). */
	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
/* Record whether this instance owns the PHY in the QL_LINK_MASTER flag. */
static void ql_get_phy_owner(struct ql3_adapter *qdev)
{
	if (ql_this_adapter_controls_port(qdev))
		set_bit(QL_LINK_MASTER, &qdev->flags);
	else
		clear_bit(QL_LINK_MASTER, &qdev->flags);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
/* Enable MII scan mode and (re)initialize the media layer we control. */
static void ql_init_scan_mode(struct ql3_adapter *qdev)
{
	ql_mii_enable_scan_mode(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		if (ql_this_adapter_controls_port(qdev))
			ql_petbi_init_ex(qdev);
	} else {
		if (ql_this_adapter_controls_port(qdev))
			ql_phy_init_ex(qdev);
	}
}

/*
 * MII_Setup needs to be called before taking the PHY out of reset
 * so that the management interface clock speed can be set properly.
1614 * It would be better if we had a way to disable MDC until after the 1615 * PHY is out of reset, but we don't have that capability. 1616 */ 1617static int ql_mii_setup(struct ql3_adapter *qdev) 1618{ 1619 u32 reg; 1620 struct ql3xxx_port_registers __iomem *port_regs = 1621 qdev->mem_map_registers; 1622 1623 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1624 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 1625 2) << 7)) 1626 return -1; 1627 1628 if (qdev->device_id == QL3032_DEVICE_ID) 1629 ql_write_page0_reg(qdev, 1630 &port_regs->macMIIMgmtControlReg, 0x0f00000); 1631 1632 /* Divide 125MHz clock by 28 to meet PHY timing requirements */ 1633 reg = MAC_MII_CONTROL_CLK_SEL_DIV28; 1634 1635 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, 1636 reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16)); 1637 1638 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 1639 return 0; 1640} 1641 1642#define SUPPORTED_OPTICAL_MODES (SUPPORTED_1000baseT_Full | \ 1643 SUPPORTED_FIBRE | \ 1644 SUPPORTED_Autoneg) 1645#define SUPPORTED_TP_MODES (SUPPORTED_10baseT_Half | \ 1646 SUPPORTED_10baseT_Full | \ 1647 SUPPORTED_100baseT_Half | \ 1648 SUPPORTED_100baseT_Full | \ 1649 SUPPORTED_1000baseT_Half | \ 1650 SUPPORTED_1000baseT_Full | \ 1651 SUPPORTED_Autoneg | \ 1652 SUPPORTED_TP); \ 1653 1654static u32 ql_supported_modes(struct ql3_adapter *qdev) 1655{ 1656 if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) 1657 return SUPPORTED_OPTICAL_MODES; 1658 1659 return SUPPORTED_TP_MODES; 1660} 1661 1662static int ql_get_auto_cfg_status(struct ql3_adapter *qdev) 1663{ 1664 int status; 1665 unsigned long hw_flags; 1666 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 1667 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1668 (QL_RESOURCE_BITS_BASE_CODE | 1669 (qdev->mac_index) * 2) << 7)) { 1670 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1671 return 0; 1672 } 1673 status = ql_is_auto_cfg(qdev); 1674 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 1675 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1676 
return status; 1677} 1678 1679static u32 ql_get_speed(struct ql3_adapter *qdev) 1680{ 1681 u32 status; 1682 unsigned long hw_flags; 1683 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 1684 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1685 (QL_RESOURCE_BITS_BASE_CODE | 1686 (qdev->mac_index) * 2) << 7)) { 1687 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1688 return 0; 1689 } 1690 status = ql_get_link_speed(qdev); 1691 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 1692 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1693 return status; 1694} 1695 1696static int ql_get_full_dup(struct ql3_adapter *qdev) 1697{ 1698 int status; 1699 unsigned long hw_flags; 1700 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 1701 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1702 (QL_RESOURCE_BITS_BASE_CODE | 1703 (qdev->mac_index) * 2) << 7)) { 1704 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1705 return 0; 1706 } 1707 status = ql_is_link_full_dup(qdev); 1708 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 1709 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1710 return status; 1711} 1712 1713static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) 1714{ 1715 struct ql3_adapter *qdev = netdev_priv(ndev); 1716 1717 ecmd->transceiver = XCVR_INTERNAL; 1718 ecmd->supported = ql_supported_modes(qdev); 1719 1720 if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { 1721 ecmd->port = PORT_FIBRE; 1722 } else { 1723 ecmd->port = PORT_TP; 1724 ecmd->phy_address = qdev->PHYAddr; 1725 } 1726 ecmd->advertising = ql_supported_modes(qdev); 1727 ecmd->autoneg = ql_get_auto_cfg_status(qdev); 1728 ecmd->speed = ql_get_speed(qdev); 1729 ecmd->duplex = ql_get_full_dup(qdev); 1730 return 0; 1731} 1732 1733static void ql_get_drvinfo(struct net_device *ndev, 1734 struct ethtool_drvinfo *drvinfo) 1735{ 1736 struct ql3_adapter *qdev = netdev_priv(ndev); 1737 strncpy(drvinfo->driver, ql3xxx_driver_name, 32); 1738 strncpy(drvinfo->version, ql3xxx_driver_version, 32); 1739 
strncpy(drvinfo->fw_version, "N/A", 32); 1740 strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32); 1741 drvinfo->regdump_len = 0; 1742 drvinfo->eedump_len = 0; 1743} 1744 1745static u32 ql_get_msglevel(struct net_device *ndev) 1746{ 1747 struct ql3_adapter *qdev = netdev_priv(ndev); 1748 return qdev->msg_enable; 1749} 1750 1751static void ql_set_msglevel(struct net_device *ndev, u32 value) 1752{ 1753 struct ql3_adapter *qdev = netdev_priv(ndev); 1754 qdev->msg_enable = value; 1755} 1756 1757static void ql_get_pauseparam(struct net_device *ndev, 1758 struct ethtool_pauseparam *pause) 1759{ 1760 struct ql3_adapter *qdev = netdev_priv(ndev); 1761 struct ql3xxx_port_registers __iomem *port_regs = 1762 qdev->mem_map_registers; 1763 1764 u32 reg; 1765 if (qdev->mac_index == 0) 1766 reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg); 1767 else 1768 reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg); 1769 1770 pause->autoneg = ql_get_auto_cfg_status(qdev); 1771 pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2; 1772 pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1; 1773} 1774 1775static const struct ethtool_ops ql3xxx_ethtool_ops = { 1776 .get_settings = ql_get_settings, 1777 .get_drvinfo = ql_get_drvinfo, 1778 .get_link = ethtool_op_get_link, 1779 .get_msglevel = ql_get_msglevel, 1780 .set_msglevel = ql_set_msglevel, 1781 .get_pauseparam = ql_get_pauseparam, 1782}; 1783 1784static int ql_populate_free_queue(struct ql3_adapter *qdev) 1785{ 1786 struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; 1787 dma_addr_t map; 1788 int err; 1789 1790 while (lrg_buf_cb) { 1791 if (!lrg_buf_cb->skb) { 1792 lrg_buf_cb->skb = 1793 netdev_alloc_skb(qdev->ndev, 1794 qdev->lrg_buffer_len); 1795 if (unlikely(!lrg_buf_cb->skb)) { 1796 netdev_printk(KERN_DEBUG, qdev->ndev, 1797 "Failed netdev_alloc_skb()\n"); 1798 break; 1799 } else { 1800 /* 1801 * We save some space to copy the ethhdr from 1802 * first buffer 1803 */ 1804 skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE); 
1805 map = pci_map_single(qdev->pdev, 1806 lrg_buf_cb->skb->data, 1807 qdev->lrg_buffer_len - 1808 QL_HEADER_SPACE, 1809 PCI_DMA_FROMDEVICE); 1810 1811 err = pci_dma_mapping_error(qdev->pdev, map); 1812 if (err) { 1813 netdev_err(qdev->ndev, 1814 "PCI mapping failed with error: %d\n", 1815 err); 1816 dev_kfree_skb(lrg_buf_cb->skb); 1817 lrg_buf_cb->skb = NULL; 1818 break; 1819 } 1820 1821 1822 lrg_buf_cb->buf_phy_addr_low = 1823 cpu_to_le32(LS_64BITS(map)); 1824 lrg_buf_cb->buf_phy_addr_high = 1825 cpu_to_le32(MS_64BITS(map)); 1826 dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); 1827 dma_unmap_len_set(lrg_buf_cb, maplen, 1828 qdev->lrg_buffer_len - 1829 QL_HEADER_SPACE); 1830 --qdev->lrg_buf_skb_check; 1831 if (!qdev->lrg_buf_skb_check) 1832 return 1; 1833 } 1834 } 1835 lrg_buf_cb = lrg_buf_cb->next; 1836 } 1837 return 0; 1838} 1839 1840/* 1841 * Caller holds hw_lock. 1842 */ 1843static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev) 1844{ 1845 struct ql3xxx_port_registers __iomem *port_regs = 1846 qdev->mem_map_registers; 1847 1848 if (qdev->small_buf_release_cnt >= 16) { 1849 while (qdev->small_buf_release_cnt >= 16) { 1850 qdev->small_buf_q_producer_index++; 1851 1852 if (qdev->small_buf_q_producer_index == 1853 NUM_SBUFQ_ENTRIES) 1854 qdev->small_buf_q_producer_index = 0; 1855 qdev->small_buf_release_cnt -= 8; 1856 } 1857 wmb(); 1858 writel(qdev->small_buf_q_producer_index, 1859 &port_regs->CommonRegs.rxSmallQProducerIndex); 1860 } 1861} 1862 1863/* 1864 * Caller holds hw_lock. 
 */
/*
 * Refill the large-buffer queue: move free receive buffers (8 per queue
 * entry) onto the hardware bufq and bump the producer index.  Bails out
 * if skbs still need replenishing and ql_populate_free_queue() cannot
 * finish the job.
 */
static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct bufq_addr_element *lrg_buf_q_ele;
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if ((qdev->lrg_buf_free_count >= 8) &&
	    (qdev->lrg_buf_release_cnt >= 16)) {

		if (qdev->lrg_buf_skb_check)
			if (!ql_populate_free_queue(qdev))
				return;

		lrg_buf_q_ele = qdev->lrg_buf_next_free;

		while ((qdev->lrg_buf_release_cnt >= 16) &&
		       (qdev->lrg_buf_free_count >= 8)) {

			/* Each bufq entry carries 8 buffer addresses. */
			for (i = 0; i < 8; i++) {
				lrg_buf_cb =
				    ql_get_from_lrg_buf_free_list(qdev);
				lrg_buf_q_ele->addr_high =
				    lrg_buf_cb->buf_phy_addr_high;
				lrg_buf_q_ele->addr_low =
				    lrg_buf_cb->buf_phy_addr_low;
				lrg_buf_q_ele++;

				qdev->lrg_buf_release_cnt--;
			}

			qdev->lrg_buf_q_producer_index++;

			if (qdev->lrg_buf_q_producer_index ==
			    qdev->num_lbufq_entries)
				qdev->lrg_buf_q_producer_index = 0;

			/* Wrap the fill pointer one entry early so the
			 * last slot is never written here. */
			if (qdev->lrg_buf_q_producer_index ==
			    (qdev->num_lbufq_entries - 1)) {
				lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
			}
		}
		wmb();
		qdev->lrg_buf_next_free = lrg_buf_q_ele;
		writel(qdev->lrg_buf_q_producer_index,
		       &port_regs->CommonRegs.rxLargeQProducerIndex);
	}
}

/*
 * Handle an outbound (TX) completion: unmap all DMA segments, update
 * stats, free the skb and return the tx slot to the free pool.
 * NOTE(review): OB_MAC_IOCB_RSP_S is tested twice - first to warn
 * "padded and sent", then to treat the frame as NOT sent.  The first
 * test looks like it was meant to check a different flag bit; confirm
 * against the IOCB response layout before relying on either message.
 */
static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;
	int retval = 0;

	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		netdev_warn(qdev->ndev,
			    "Frame too short but it was padded and sent\n");
	}

	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];

	/* Check the transmit response flags for any errors */
	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		netdev_err(qdev->ndev,
			   "Frame too short to be legal, frame not sent\n");

		qdev->ndev->stats.tx_errors++;
		retval = -EIO;
		goto frame_not_sent;
	}

	if (tx_cb->seg_count == 0) {
		netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
			   mac_rsp->transaction_id);

		qdev->ndev->stats.tx_errors++;
		retval = -EIO;
		goto invalid_seg_count;
	}

	/* First segment was mapped with pci_map_single(), the rest
	 * (fragments) with pci_map_page(). */
	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
			 dma_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);
	tx_cb->seg_count--;
	if (tx_cb->seg_count) {
		for (i = 1; i < tx_cb->seg_count; i++) {
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_cb->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_cb->map[i], maplen),
				       PCI_DMA_TODEVICE);
		}
	}
	qdev->ndev->stats.tx_packets++;
	qdev->ndev->stats.tx_bytes += tx_cb->skb->len;

frame_not_sent:
	dev_kfree_skb_irq(tx_cb->skb);
	tx_cb->skb = NULL;

invalid_seg_count:
	atomic_inc(&qdev->tx_count);
}

/* Consume one small receive buffer (advance index, count the release). */
static void ql_get_sbuf(struct ql3_adapter *qdev)
{
	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
		qdev->small_buf_index = 0;
	qdev->small_buf_release_cnt++;
}

/* Consume and return the next large receive buffer control block. */
static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = NULL;
	lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
	qdev->lrg_buf_release_cnt++;
	if (++qdev->lrg_buf_index == qdev->num_large_buffers)
		qdev->lrg_buf_index = 0;
	return lrg_buf_cb;
}

/*
 * The difference between 3022 and 3032 for inbound completions:
 * 3022 uses two buffers per completion.  The first buffer contains
 * (some) header info, the second the remainder of the headers plus
 * the data.  For this chip we reserve some space at the top of the
 * receive buffer so that the header info in buffer one can be
 * prepended to the buffer two.  Buffer two is the sent up while
 * buffer one is returned to the hardware to be reused.
 * 3032 receives all of it's data and headers in one buffer for a
 * simpler process.  3032 also supports checksum verification as
 * can be seen in ql_process_macip_rx_intr().
 */
/*
 * Handle an inbound MAC-frame completion: consume the small buffer and
 * large buffer(s), unmap and hand the skb to the stack, then recycle
 * the buffer control blocks.  3022 consumes two large buffers per frame.
 */
static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
				   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb;
	u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);

	/*
	 * Get the inbound address list (small buffer).
	 */
	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID)
		lrg_buf_cb1 = ql_get_lbuf(qdev);

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb = lrg_buf_cb2->skb;

	qdev->ndev->stats.rx_packets++;
	qdev->ndev->stats.rx_bytes += length;

	skb_put(skb, length);
	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
			 dma_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb->data);
	skb->ip_summed = CHECKSUM_NONE;
	skb->protocol = eth_type_trans(skb, qdev->ndev);

	netif_receive_skb(skb);
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}

/*
 * Handle an inbound IP-frame completion.  On 3022 the ethernet header
 * from the first large buffer is prepended onto the payload in the
 * second buffer; on 3032 the hardware checksum result is honored and
 * may set CHECKSUM_UNNECESSARY.
 */
static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
				     struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb1 = NULL, *skb2;
	struct net_device *ndev = qdev->ndev;
	u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
	u16 size = 0;

	/*
	 * Get the inbound address list (small buffer).
	 */

	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID) {
		/* start of first buffer on 3022 */
		lrg_buf_cb1 = ql_get_lbuf(qdev);
		skb1 = lrg_buf_cb1->skb;
		size = ETH_HLEN;
		/* NOTE(review): 0xFFFF in the first word appears to mark
		 * an untagged frame; otherwise room for a VLAN tag is
		 * added to the copied header - confirm vs. 3022 docs. */
		if (*((u16 *) skb1->data) != 0xFFFF)
			size += VLAN_ETH_HLEN - ETH_HLEN;
	}

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb2 = lrg_buf_cb2->skb;

	skb_put(skb2, length);	/* Just the second buffer length here. */
	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
			 dma_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb2->data);

	skb2->ip_summed = CHECKSUM_NONE;
	if (qdev->device_id == QL3022_DEVICE_ID) {
		/*
		 * Copy the ethhdr from first buffer to second. This
		 * is necessary for 3022 IP completions.
		 */
		skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
						 skb_push(skb2, size), size);
	} else {
		u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
		if (checksum &
		    (IB_IP_IOCB_RSP_3032_ICE |
		     IB_IP_IOCB_RSP_3032_CE)) {
			netdev_err(ndev,
				   "%s: Bad checksum for this %s packet, checksum = %x\n",
				   __func__,
				   ((checksum & IB_IP_IOCB_RSP_3032_TCP) ?
				    "TCP" : "UDP"), checksum);
		} else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
			   (checksum & IB_IP_IOCB_RSP_3032_UDP &&
			    !(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
			skb2->ip_summed = CHECKSUM_UNNECESSARY;
		}
	}
	skb2->protocol = eth_type_trans(skb2, qdev->ndev);

	netif_receive_skb(skb2);
	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += length;
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}

/*
 * Drain the response (completion) queue, dispatching each IOCB to the
 * matching TX/RX handler, until the producer index is reached or
 * work_to_do completions have been processed.  Returns the number of
 * completions handled; also reported via *tx_cleaned / *rx_cleaned.
 */
static int ql_tx_rx_clean(struct ql3_adapter *qdev,
			  int *tx_cleaned, int *rx_cleaned, int work_to_do)
{
	struct net_rsp_iocb *net_rsp;
	struct net_device *ndev = qdev->ndev;
	int work_done = 0;

	/* While there are entries in the completion queue. */
	while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
		qdev->rsp_consumer_index) && (work_done < work_to_do)) {

		net_rsp = qdev->rsp_current;
		rmb();
		/*
		 * Fix 4032 chip's undocumented "feature" where bit-8 is set
		 * if the inbound completion is for a VLAN.
		 */
		if (qdev->device_id == QL3032_DEVICE_ID)
			net_rsp->opcode &= 0x7f;
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_IOCB_FN0:
		case OPCODE_OB_MAC_IOCB_FN2:
			ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
					       net_rsp);
			(*tx_cleaned)++;
			break;

		case OPCODE_IB_MAC_IOCB:
		case OPCODE_IB_3032_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
					       net_rsp);
			(*rx_cleaned)++;
			break;

		case OPCODE_IB_IP_IOCB:
		case OPCODE_IB_3032_IP_IOCB:
			ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
						 net_rsp);
			(*rx_cleaned)++;
			break;
		default: {
			u32 *tmp = (u32 *)net_rsp;
			netdev_err(ndev,
				   "Hit default case, not handled!\n"
				   "	dropping the packet, opcode = %x\n"
				   "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
				   net_rsp->opcode,
				   (unsigned long int)tmp[0],
				   (unsigned long int)tmp[1],
				   (unsigned long int)tmp[2],
				   (unsigned long int)tmp[3]);
		}
		}

		qdev->rsp_consumer_index++;

		if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
			qdev->rsp_consumer_index = 0;
			qdev->rsp_current = qdev->rsp_q_virt_addr;
		} else {
			qdev->rsp_current++;
		}

		work_done = *tx_cleaned + *rx_cleaned;
	}

	return work_done;
}

/*
 * NAPI poll: clean completions up to the budget; when the budget is not
 * exhausted, complete NAPI, push the refreshed producer/consumer indexes
 * to the chip under hw_lock, and re-enable interrupts.
 */
static int ql_poll(struct napi_struct *napi, int budget)
{
	struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
	int rx_cleaned = 0, tx_cleaned = 0;
	unsigned long hw_flags;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);

	if (tx_cleaned + rx_cleaned != budget) {
		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		__napi_complete(napi);
		ql_update_small_bufq_prod_index(qdev);
		ql_update_lrg_bufq_prod_index(qdev);
		writel(qdev->rsp_consumer_index,
		       &port_regs->CommonRegs.rspQConsumerIndex);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		ql_enable_interrupts(qdev);
	}
	return tx_cleaned + rx_cleaned;
}

/*
 * Interrupt handler.  Fatal error / reset-request interrupts stop the
 * queue and schedule reset_work; completion interrupts disable further
 * interrupts and hand off to NAPI.  Anything else is not ours.
 */
static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
{

	struct net_device *ndev = dev_id;
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;
	int handled = 1;
	u32 var;

	value = ql_read_common_reg_l(qdev,
				     &port_regs->CommonRegs.ispControlStatus);

	if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
		spin_lock(&qdev->adapter_lock);
		netif_stop_queue(qdev->ndev);
		netif_carrier_off(qdev->ndev);
		ql_disable_interrupts(qdev);
		qdev->port_link_state = LS_DOWN;
		set_bit(QL_RESET_ACTIVE, &qdev->flags) ;

		if (value & ISP_CONTROL_FE) {
			/*
			 * Chip Fatal Error.
			 */
			var =
			    ql_read_page0_reg_l(qdev,
						&port_regs->PortFatalErrStatus);
			netdev_warn(ndev,
				    "Resetting chip. PortFatalErrStatus register = 0x%x\n",
				    var);
			set_bit(QL_RESET_START, &qdev->flags) ;
		} else {
			/*
			 * Soft Reset Requested.
			 */
			set_bit(QL_RESET_PER_SCSI, &qdev->flags) ;
			netdev_err(ndev,
				   "Another function issued a reset to the chip. ISR value = %x\n",
				   value);
		}
		queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
		spin_unlock(&qdev->adapter_lock);
	} else if (value & ISP_IMR_DISABLE_CMPL_INT) {
		ql_disable_interrupts(qdev);
		if (likely(napi_schedule_prep(&qdev->napi)))
			__napi_schedule(&qdev->napi);
	} else
		return IRQ_NONE;

	return IRQ_RETVAL(handled);
}

/*
 * Get the total number of segments needed for the given number of fragments.
 * This is necessary because outbound address lists (OAL) will be used when
 * more than two frags are given.  Each address list has 5 addr/len pairs.
2267 * The 5th pair in each OAL is used to point to the next OAL if more frags 2268 * are coming. That is why the frags:segment count ratio is not linear. 2269 */ 2270static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags) 2271{ 2272 if (qdev->device_id == QL3022_DEVICE_ID) 2273 return 1; 2274 2275 if (frags <= 2) 2276 return frags + 1; 2277 else if (frags <= 6) 2278 return frags + 2; 2279 else if (frags <= 10) 2280 return frags + 3; 2281 else if (frags <= 14) 2282 return frags + 4; 2283 else if (frags <= 18) 2284 return frags + 5; 2285 return -1; 2286} 2287 2288static void ql_hw_csum_setup(const struct sk_buff *skb, 2289 struct ob_mac_iocb_req *mac_iocb_ptr) 2290{ 2291 const struct iphdr *ip = ip_hdr(skb); 2292 2293 mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb); 2294 mac_iocb_ptr->ip_hdr_len = ip->ihl; 2295 2296 if (ip->protocol == IPPROTO_TCP) { 2297 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC | 2298 OB_3032MAC_IOCB_REQ_IC; 2299 } else { 2300 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC | 2301 OB_3032MAC_IOCB_REQ_IC; 2302 } 2303 2304} 2305 2306/* 2307 * Map the buffers for this transmit. 2308 * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success. 2309 */ 2310static int ql_send_map(struct ql3_adapter *qdev, 2311 struct ob_mac_iocb_req *mac_iocb_ptr, 2312 struct ql_tx_buf_cb *tx_cb, 2313 struct sk_buff *skb) 2314{ 2315 struct oal *oal; 2316 struct oal_entry *oal_entry; 2317 int len = skb_headlen(skb); 2318 dma_addr_t map; 2319 int err; 2320 int completed_segs, i; 2321 int seg_cnt, seg = 0; 2322 int frag_cnt = (int)skb_shinfo(skb)->nr_frags; 2323 2324 seg_cnt = tx_cb->seg_count; 2325 /* 2326 * Map the skb buffer first. 
2327 */ 2328 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); 2329 2330 err = pci_dma_mapping_error(qdev->pdev, map); 2331 if (err) { 2332 netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", 2333 err); 2334 2335 return NETDEV_TX_BUSY; 2336 } 2337 2338 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; 2339 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2340 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2341 oal_entry->len = cpu_to_le32(len); 2342 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2343 dma_unmap_len_set(&tx_cb->map[seg], maplen, len); 2344 seg++; 2345 2346 if (seg_cnt == 1) { 2347 /* Terminate the last segment. */ 2348 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); 2349 return NETDEV_TX_OK; 2350 } 2351 oal = tx_cb->oal; 2352 for (completed_segs = 0; 2353 completed_segs < frag_cnt; 2354 completed_segs++, seg++) { 2355 skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs]; 2356 oal_entry++; 2357 /* 2358 * Check for continuation requirements. 2359 * It's strange but necessary. 2360 * Continuation entry points to outbound address list. 
2361 */ 2362 if ((seg == 2 && seg_cnt > 3) || 2363 (seg == 7 && seg_cnt > 8) || 2364 (seg == 12 && seg_cnt > 13) || 2365 (seg == 17 && seg_cnt > 18)) { 2366 map = pci_map_single(qdev->pdev, oal, 2367 sizeof(struct oal), 2368 PCI_DMA_TODEVICE); 2369 2370 err = pci_dma_mapping_error(qdev->pdev, map); 2371 if (err) { 2372 netdev_err(qdev->ndev, 2373 "PCI mapping outbound address list with error: %d\n", 2374 err); 2375 goto map_error; 2376 } 2377 2378 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2379 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2380 oal_entry->len = cpu_to_le32(sizeof(struct oal) | 2381 OAL_CONT_ENTRY); 2382 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2383 dma_unmap_len_set(&tx_cb->map[seg], maplen, 2384 sizeof(struct oal)); 2385 oal_entry = (struct oal_entry *)oal; 2386 oal++; 2387 seg++; 2388 } 2389 2390 map = pci_map_page(qdev->pdev, frag->page, 2391 frag->page_offset, frag->size, 2392 PCI_DMA_TODEVICE); 2393 2394 err = pci_dma_mapping_error(qdev->pdev, map); 2395 if (err) { 2396 netdev_err(qdev->ndev, 2397 "PCI mapping frags failed with error: %d\n", 2398 err); 2399 goto map_error; 2400 } 2401 2402 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2403 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2404 oal_entry->len = cpu_to_le32(frag->size); 2405 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2406 dma_unmap_len_set(&tx_cb->map[seg], maplen, frag->size); 2407 } 2408 /* Terminate the last segment. 
*/ 2409 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); 2410 return NETDEV_TX_OK; 2411 2412map_error: 2413 /* A PCI mapping failed and now we will need to back out 2414 * We need to traverse through the oal's and associated pages which 2415 * have been mapped and now we must unmap them to clean up properly 2416 */ 2417 2418 seg = 1; 2419 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; 2420 oal = tx_cb->oal; 2421 for (i = 0; i < completed_segs; i++, seg++) { 2422 oal_entry++; 2423 2424 /* 2425 * Check for continuation requirements. 2426 * It's strange but necessary. 2427 */ 2428 2429 if ((seg == 2 && seg_cnt > 3) || 2430 (seg == 7 && seg_cnt > 8) || 2431 (seg == 12 && seg_cnt > 13) || 2432 (seg == 17 && seg_cnt > 18)) { 2433 pci_unmap_single(qdev->pdev, 2434 dma_unmap_addr(&tx_cb->map[seg], mapaddr), 2435 dma_unmap_len(&tx_cb->map[seg], maplen), 2436 PCI_DMA_TODEVICE); 2437 oal++; 2438 seg++; 2439 } 2440 2441 pci_unmap_page(qdev->pdev, 2442 dma_unmap_addr(&tx_cb->map[seg], mapaddr), 2443 dma_unmap_len(&tx_cb->map[seg], maplen), 2444 PCI_DMA_TODEVICE); 2445 } 2446 2447 pci_unmap_single(qdev->pdev, 2448 dma_unmap_addr(&tx_cb->map[0], mapaddr), 2449 dma_unmap_addr(&tx_cb->map[0], maplen), 2450 PCI_DMA_TODEVICE); 2451 2452 return NETDEV_TX_BUSY; 2453 2454} 2455 2456/* 2457 * The difference between 3022 and 3032 sends: 2458 * 3022 only supports a simple single segment transmission. 2459 * 3032 supports checksumming and scatter/gather lists (fragments). 2460 * The 3032 supports sglists by using the 3 addr/len pairs (ALP) 2461 * in the IOCB plus a chain of outbound address lists (OAL) that 2462 * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th) 2463 * will used to point to an OAL when more ALP entries are required. 2464 * The IOCB is always the top of the chain followed by one or more 2465 * OALs (when necessary). 
2466 */ 2467static netdev_tx_t ql3xxx_send(struct sk_buff *skb, 2468 struct net_device *ndev) 2469{ 2470 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 2471 struct ql3xxx_port_registers __iomem *port_regs = 2472 qdev->mem_map_registers; 2473 struct ql_tx_buf_cb *tx_cb; 2474 u32 tot_len = skb->len; 2475 struct ob_mac_iocb_req *mac_iocb_ptr; 2476 2477 if (unlikely(atomic_read(&qdev->tx_count) < 2)) 2478 return NETDEV_TX_BUSY; 2479 2480 tx_cb = &qdev->tx_buf[qdev->req_producer_index]; 2481 tx_cb->seg_count = ql_get_seg_count(qdev, 2482 skb_shinfo(skb)->nr_frags); 2483 if (tx_cb->seg_count == -1) { 2484 netdev_err(ndev, "%s: invalid segment count!\n", __func__); 2485 return NETDEV_TX_OK; 2486 } 2487 2488 mac_iocb_ptr = tx_cb->queue_entry; 2489 memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req)); 2490 mac_iocb_ptr->opcode = qdev->mac_ob_opcode; 2491 mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X; 2492 mac_iocb_ptr->flags |= qdev->mb_bit_mask; 2493 mac_iocb_ptr->transaction_id = qdev->req_producer_index; 2494 mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len); 2495 tx_cb->skb = skb; 2496 if (qdev->device_id == QL3032_DEVICE_ID && 2497 skb->ip_summed == CHECKSUM_PARTIAL) 2498 ql_hw_csum_setup(skb, mac_iocb_ptr); 2499 2500 if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) { 2501 netdev_err(ndev, "%s: Could not map the segments!\n", __func__); 2502 return NETDEV_TX_BUSY; 2503 } 2504 2505 wmb(); 2506 qdev->req_producer_index++; 2507 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) 2508 qdev->req_producer_index = 0; 2509 wmb(); 2510 ql_write_common_reg_l(qdev, 2511 &port_regs->CommonRegs.reqQProducerIndex, 2512 qdev->req_producer_index); 2513 2514 netif_printk(qdev, tx_queued, KERN_DEBUG, ndev, 2515 "tx queued, slot %d, len %d\n", 2516 qdev->req_producer_index, skb->len); 2517 2518 atomic_dec(&qdev->tx_count); 2519 return NETDEV_TX_OK; 2520} 2521 2522static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) 2523{ 2524 
qdev->req_q_size = 2525 (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req)); 2526 2527 qdev->req_q_virt_addr = 2528 pci_alloc_consistent(qdev->pdev, 2529 (size_t) qdev->req_q_size, 2530 &qdev->req_q_phy_addr); 2531 2532 if ((qdev->req_q_virt_addr == NULL) || 2533 LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) { 2534 netdev_err(qdev->ndev, "reqQ failed\n"); 2535 return -ENOMEM; 2536 } 2537 2538 qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb); 2539 2540 qdev->rsp_q_virt_addr = 2541 pci_alloc_consistent(qdev->pdev, 2542 (size_t) qdev->rsp_q_size, 2543 &qdev->rsp_q_phy_addr); 2544 2545 if ((qdev->rsp_q_virt_addr == NULL) || 2546 LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) { 2547 netdev_err(qdev->ndev, "rspQ allocation failed\n"); 2548 pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size, 2549 qdev->req_q_virt_addr, 2550 qdev->req_q_phy_addr); 2551 return -ENOMEM; 2552 } 2553 2554 set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); 2555 2556 return 0; 2557} 2558 2559static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) 2560{ 2561 if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) { 2562 netdev_info(qdev->ndev, "Already done\n"); 2563 return; 2564 } 2565 2566 pci_free_consistent(qdev->pdev, 2567 qdev->req_q_size, 2568 qdev->req_q_virt_addr, qdev->req_q_phy_addr); 2569 2570 qdev->req_q_virt_addr = NULL; 2571 2572 pci_free_consistent(qdev->pdev, 2573 qdev->rsp_q_size, 2574 qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr); 2575 2576 qdev->rsp_q_virt_addr = NULL; 2577 2578 clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); 2579} 2580 2581static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) 2582{ 2583 /* Create Large Buffer Queue */ 2584 qdev->lrg_buf_q_size = 2585 qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry); 2586 if (qdev->lrg_buf_q_size < PAGE_SIZE) 2587 qdev->lrg_buf_q_alloc_size = PAGE_SIZE; 2588 else 2589 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2; 2590 2591 qdev->lrg_buf = 2592 
kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb), 2593 GFP_KERNEL); 2594 if (qdev->lrg_buf == NULL) { 2595 netdev_err(qdev->ndev, "qdev->lrg_buf alloc failed\n"); 2596 return -ENOMEM; 2597 } 2598 2599 qdev->lrg_buf_q_alloc_virt_addr = 2600 pci_alloc_consistent(qdev->pdev, 2601 qdev->lrg_buf_q_alloc_size, 2602 &qdev->lrg_buf_q_alloc_phy_addr); 2603 2604 if (qdev->lrg_buf_q_alloc_virt_addr == NULL) { 2605 netdev_err(qdev->ndev, "lBufQ failed\n"); 2606 return -ENOMEM; 2607 } 2608 qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr; 2609 qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr; 2610 2611 /* Create Small Buffer Queue */ 2612 qdev->small_buf_q_size = 2613 NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry); 2614 if (qdev->small_buf_q_size < PAGE_SIZE) 2615 qdev->small_buf_q_alloc_size = PAGE_SIZE; 2616 else 2617 qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2; 2618 2619 qdev->small_buf_q_alloc_virt_addr = 2620 pci_alloc_consistent(qdev->pdev, 2621 qdev->small_buf_q_alloc_size, 2622 &qdev->small_buf_q_alloc_phy_addr); 2623 2624 if (qdev->small_buf_q_alloc_virt_addr == NULL) { 2625 netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n"); 2626 pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size, 2627 qdev->lrg_buf_q_alloc_virt_addr, 2628 qdev->lrg_buf_q_alloc_phy_addr); 2629 return -ENOMEM; 2630 } 2631 2632 qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr; 2633 qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr; 2634 set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); 2635 return 0; 2636} 2637 2638static void ql_free_buffer_queues(struct ql3_adapter *qdev) 2639{ 2640 if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) { 2641 netdev_info(qdev->ndev, "Already done\n"); 2642 return; 2643 } 2644 kfree(qdev->lrg_buf); 2645 pci_free_consistent(qdev->pdev, 2646 qdev->lrg_buf_q_alloc_size, 2647 qdev->lrg_buf_q_alloc_virt_addr, 2648 qdev->lrg_buf_q_alloc_phy_addr); 2649 2650 qdev->lrg_buf_q_virt_addr = 
NULL; 2651 2652 pci_free_consistent(qdev->pdev, 2653 qdev->small_buf_q_alloc_size, 2654 qdev->small_buf_q_alloc_virt_addr, 2655 qdev->small_buf_q_alloc_phy_addr); 2656 2657 qdev->small_buf_q_virt_addr = NULL; 2658 2659 clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); 2660} 2661 2662static int ql_alloc_small_buffers(struct ql3_adapter *qdev) 2663{ 2664 int i; 2665 struct bufq_addr_element *small_buf_q_entry; 2666 2667 /* Currently we allocate on one of memory and use it for smallbuffers */ 2668 qdev->small_buf_total_size = 2669 (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES * 2670 QL_SMALL_BUFFER_SIZE); 2671 2672 qdev->small_buf_virt_addr = 2673 pci_alloc_consistent(qdev->pdev, 2674 qdev->small_buf_total_size, 2675 &qdev->small_buf_phy_addr); 2676 2677 if (qdev->small_buf_virt_addr == NULL) { 2678 netdev_err(qdev->ndev, "Failed to get small buffer memory\n"); 2679 return -ENOMEM; 2680 } 2681 2682 qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr); 2683 qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr); 2684 2685 small_buf_q_entry = qdev->small_buf_q_virt_addr; 2686 2687 /* Initialize the small buffer queue. 
*/ 2688 for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) { 2689 small_buf_q_entry->addr_high = 2690 cpu_to_le32(qdev->small_buf_phy_addr_high); 2691 small_buf_q_entry->addr_low = 2692 cpu_to_le32(qdev->small_buf_phy_addr_low + 2693 (i * QL_SMALL_BUFFER_SIZE)); 2694 small_buf_q_entry++; 2695 } 2696 qdev->small_buf_index = 0; 2697 set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags); 2698 return 0; 2699} 2700 2701static void ql_free_small_buffers(struct ql3_adapter *qdev) 2702{ 2703 if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) { 2704 netdev_info(qdev->ndev, "Already done\n"); 2705 return; 2706 } 2707 if (qdev->small_buf_virt_addr != NULL) { 2708 pci_free_consistent(qdev->pdev, 2709 qdev->small_buf_total_size, 2710 qdev->small_buf_virt_addr, 2711 qdev->small_buf_phy_addr); 2712 2713 qdev->small_buf_virt_addr = NULL; 2714 } 2715} 2716 2717static void ql_free_large_buffers(struct ql3_adapter *qdev) 2718{ 2719 int i = 0; 2720 struct ql_rcv_buf_cb *lrg_buf_cb; 2721 2722 for (i = 0; i < qdev->num_large_buffers; i++) { 2723 lrg_buf_cb = &qdev->lrg_buf[i]; 2724 if (lrg_buf_cb->skb) { 2725 dev_kfree_skb(lrg_buf_cb->skb); 2726 pci_unmap_single(qdev->pdev, 2727 dma_unmap_addr(lrg_buf_cb, mapaddr), 2728 dma_unmap_len(lrg_buf_cb, maplen), 2729 PCI_DMA_FROMDEVICE); 2730 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); 2731 } else { 2732 break; 2733 } 2734 } 2735} 2736 2737static void ql_init_large_buffers(struct ql3_adapter *qdev) 2738{ 2739 int i; 2740 struct ql_rcv_buf_cb *lrg_buf_cb; 2741 struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr; 2742 2743 for (i = 0; i < qdev->num_large_buffers; i++) { 2744 lrg_buf_cb = &qdev->lrg_buf[i]; 2745 buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high; 2746 buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low; 2747 buf_addr_ele++; 2748 } 2749 qdev->lrg_buf_index = 0; 2750 qdev->lrg_buf_skb_check = 0; 2751} 2752 2753static int ql_alloc_large_buffers(struct ql3_adapter *qdev) 2754{ 2755 int i; 
2756 struct ql_rcv_buf_cb *lrg_buf_cb; 2757 struct sk_buff *skb; 2758 dma_addr_t map; 2759 int err; 2760 2761 for (i = 0; i < qdev->num_large_buffers; i++) { 2762 skb = netdev_alloc_skb(qdev->ndev, 2763 qdev->lrg_buffer_len); 2764 if (unlikely(!skb)) { 2765 /* Better luck next round */ 2766 netdev_err(qdev->ndev, 2767 "large buff alloc failed for %d bytes at index %d\n", 2768 qdev->lrg_buffer_len * 2, i); 2769 ql_free_large_buffers(qdev); 2770 return -ENOMEM; 2771 } else { 2772 2773 lrg_buf_cb = &qdev->lrg_buf[i]; 2774 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); 2775 lrg_buf_cb->index = i; 2776 lrg_buf_cb->skb = skb; 2777 /* 2778 * We save some space to copy the ethhdr from first 2779 * buffer 2780 */ 2781 skb_reserve(skb, QL_HEADER_SPACE); 2782 map = pci_map_single(qdev->pdev, 2783 skb->data, 2784 qdev->lrg_buffer_len - 2785 QL_HEADER_SPACE, 2786 PCI_DMA_FROMDEVICE); 2787 2788 err = pci_dma_mapping_error(qdev->pdev, map); 2789 if (err) { 2790 netdev_err(qdev->ndev, 2791 "PCI mapping failed with error: %d\n", 2792 err); 2793 ql_free_large_buffers(qdev); 2794 return -ENOMEM; 2795 } 2796 2797 dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); 2798 dma_unmap_len_set(lrg_buf_cb, maplen, 2799 qdev->lrg_buffer_len - 2800 QL_HEADER_SPACE); 2801 lrg_buf_cb->buf_phy_addr_low = 2802 cpu_to_le32(LS_64BITS(map)); 2803 lrg_buf_cb->buf_phy_addr_high = 2804 cpu_to_le32(MS_64BITS(map)); 2805 } 2806 } 2807 return 0; 2808} 2809 2810static void ql_free_send_free_list(struct ql3_adapter *qdev) 2811{ 2812 struct ql_tx_buf_cb *tx_cb; 2813 int i; 2814 2815 tx_cb = &qdev->tx_buf[0]; 2816 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 2817 kfree(tx_cb->oal); 2818 tx_cb->oal = NULL; 2819 tx_cb++; 2820 } 2821} 2822 2823static int ql_create_send_free_list(struct ql3_adapter *qdev) 2824{ 2825 struct ql_tx_buf_cb *tx_cb; 2826 int i; 2827 struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr; 2828 2829 /* Create free list of transmit buffers */ 2830 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 
2831 2832 tx_cb = &qdev->tx_buf[i]; 2833 tx_cb->skb = NULL; 2834 tx_cb->queue_entry = req_q_curr; 2835 req_q_curr++; 2836 tx_cb->oal = kmalloc(512, GFP_KERNEL); 2837 if (tx_cb->oal == NULL) 2838 return -1; 2839 } 2840 return 0; 2841} 2842 2843static int ql_alloc_mem_resources(struct ql3_adapter *qdev) 2844{ 2845 if (qdev->ndev->mtu == NORMAL_MTU_SIZE) { 2846 qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES; 2847 qdev->lrg_buffer_len = NORMAL_MTU_SIZE; 2848 } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) { 2849 /* 2850 * Bigger buffers, so less of them. 2851 */ 2852 qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES; 2853 qdev->lrg_buffer_len = JUMBO_MTU_SIZE; 2854 } else { 2855 netdev_err(qdev->ndev, "Invalid mtu size: %d. Only %d and %d are accepted.\n", 2856 qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE); 2857 return -ENOMEM; 2858 } 2859 qdev->num_large_buffers = 2860 qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY; 2861 qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE; 2862 qdev->max_frame_size = 2863 (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; 2864 2865 /* 2866 * First allocate a page of shared memory and use it for shadow 2867 * locations of Network Request Queue Consumer Address Register and 2868 * Network Completion Queue Producer Index Register 2869 */ 2870 qdev->shadow_reg_virt_addr = 2871 pci_alloc_consistent(qdev->pdev, 2872 PAGE_SIZE, &qdev->shadow_reg_phy_addr); 2873 2874 if (qdev->shadow_reg_virt_addr != NULL) { 2875 qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr; 2876 qdev->req_consumer_index_phy_addr_high = 2877 MS_64BITS(qdev->shadow_reg_phy_addr); 2878 qdev->req_consumer_index_phy_addr_low = 2879 LS_64BITS(qdev->shadow_reg_phy_addr); 2880 2881 qdev->prsp_producer_index = 2882 (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8); 2883 qdev->rsp_producer_index_phy_addr_high = 2884 qdev->req_consumer_index_phy_addr_high; 2885 qdev->rsp_producer_index_phy_addr_low = 2886 
qdev->req_consumer_index_phy_addr_low + 8; 2887 } else { 2888 netdev_err(qdev->ndev, "shadowReg Alloc failed\n"); 2889 return -ENOMEM; 2890 } 2891 2892 if (ql_alloc_net_req_rsp_queues(qdev) != 0) { 2893 netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n"); 2894 goto err_req_rsp; 2895 } 2896 2897 if (ql_alloc_buffer_queues(qdev) != 0) { 2898 netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n"); 2899 goto err_buffer_queues; 2900 } 2901 2902 if (ql_alloc_small_buffers(qdev) != 0) { 2903 netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n"); 2904 goto err_small_buffers; 2905 } 2906 2907 if (ql_alloc_large_buffers(qdev) != 0) { 2908 netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n"); 2909 goto err_small_buffers; 2910 } 2911 2912 /* Initialize the large buffer queue. */ 2913 ql_init_large_buffers(qdev); 2914 if (ql_create_send_free_list(qdev)) 2915 goto err_free_list; 2916 2917 qdev->rsp_current = qdev->rsp_q_virt_addr; 2918 2919 return 0; 2920err_free_list: 2921 ql_free_send_free_list(qdev); 2922err_small_buffers: 2923 ql_free_buffer_queues(qdev); 2924err_buffer_queues: 2925 ql_free_net_req_rsp_queues(qdev); 2926err_req_rsp: 2927 pci_free_consistent(qdev->pdev, 2928 PAGE_SIZE, 2929 qdev->shadow_reg_virt_addr, 2930 qdev->shadow_reg_phy_addr); 2931 2932 return -ENOMEM; 2933} 2934 2935static void ql_free_mem_resources(struct ql3_adapter *qdev) 2936{ 2937 ql_free_send_free_list(qdev); 2938 ql_free_large_buffers(qdev); 2939 ql_free_small_buffers(qdev); 2940 ql_free_buffer_queues(qdev); 2941 ql_free_net_req_rsp_queues(qdev); 2942 if (qdev->shadow_reg_virt_addr != NULL) { 2943 pci_free_consistent(qdev->pdev, 2944 PAGE_SIZE, 2945 qdev->shadow_reg_virt_addr, 2946 qdev->shadow_reg_phy_addr); 2947 qdev->shadow_reg_virt_addr = NULL; 2948 } 2949} 2950 2951static int ql_init_misc_registers(struct ql3_adapter *qdev) 2952{ 2953 struct ql3xxx_local_ram_registers __iomem *local_ram = 2954 (void __iomem *)qdev->mem_map_registers; 2955 2956 if 
(ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			 2) << 4))
		return -1;

	ql_write_page2_reg(qdev,
			   &local_ram->bufletSize, qdev->nvram_data.bufletSize);

	ql_write_page2_reg(qdev,
			   &local_ram->maxBufletCount,
			   qdev->nvram_data.bufletCount);

	ql_write_page2_reg(qdev,
			   &local_ram->freeBufletThresholdLow,
			   (qdev->nvram_data.tcpWindowThreshold25 << 16) |
			   (qdev->nvram_data.tcpWindowThreshold0));

	ql_write_page2_reg(qdev,
			   &local_ram->freeBufletThresholdHigh,
			   qdev->nvram_data.tcpWindowThreshold50);

	ql_write_page2_reg(qdev,
			   &local_ram->ipHashTableBase,
			   (qdev->nvram_data.ipHashTableBaseHi << 16) |
			   qdev->nvram_data.ipHashTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->ipHashTableCount,
			   qdev->nvram_data.ipHashTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->tcpHashTableBase,
			   (qdev->nvram_data.tcpHashTableBaseHi << 16) |
			   qdev->nvram_data.tcpHashTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->tcpHashTableCount,
			   qdev->nvram_data.tcpHashTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->ncbBase,
			   (qdev->nvram_data.ncbTableBaseHi << 16) |
			   qdev->nvram_data.ncbTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->maxNcbCount,
			   qdev->nvram_data.ncbTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->drbBase,
			   (qdev->nvram_data.drbTableBaseHi << 16) |
			   qdev->nvram_data.drbTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->maxDrbCount,
			   qdev->nvram_data.drbTableSize);
	ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
	return 0;
}

/*
 * Bring the adapter to an operational state: release the PHY from reset,
 * program the request/response/buffer-queue registers, configure the MAC
 * address, and poll until the chip reports Initialization Complete.
 *
 * NOTE(review): the portStatus poll loop below drops and re-takes hw_lock
 * using a *local* hw_flags initialized to 0 rather than the caller's saved
 * flags — caller appears expected to hold hw_lock on entry; confirm intent.
 */
static int ql_adapter_initialize(struct ql3_adapter *qdev)
{
	u32 value;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
	struct ql3xxx_host_memory_registers __iomem
		*hmem_regs = (void __iomem *)port_regs;
	u32 delay = 10;
	int status = 0;
	unsigned long hw_flags = 0;

	if (ql_mii_setup(qdev))
		return -1;

	/* Bring out PHY out of reset */
	ql_write_common_reg(qdev, spir,
			    (ISP_SERIAL_PORT_IF_WE |
			     (ISP_SERIAL_PORT_IF_WE << 16)));
	/* Give the PHY time to come out of reset. */
	mdelay(100);
	qdev->port_link_state = LS_DOWN;
	netif_carrier_off(qdev->ndev);

	/* V2 chip fix for ARS-39168. */
	ql_write_common_reg(qdev, spir,
			    (ISP_SERIAL_PORT_IF_SDE |
			     (ISP_SERIAL_PORT_IF_SDE << 16)));

	/* Request Queue Registers */
	*((u32 *)(qdev->preq_consumer_index)) = 0;
	atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
	qdev->req_producer_index = 0;

	ql_write_page1_reg(qdev,
			   &hmem_regs->reqConsumerIndexAddrHigh,
			   qdev->req_consumer_index_phy_addr_high);
	ql_write_page1_reg(qdev,
			   &hmem_regs->reqConsumerIndexAddrLow,
			   qdev->req_consumer_index_phy_addr_low);

	ql_write_page1_reg(qdev,
			   &hmem_regs->reqBaseAddrHigh,
			   MS_64BITS(qdev->req_q_phy_addr));
	ql_write_page1_reg(qdev,
			   &hmem_regs->reqBaseAddrLow,
			   LS_64BITS(qdev->req_q_phy_addr));
	ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);

	/* Response Queue Registers */
	*((__le16 *) (qdev->prsp_producer_index)) = 0;
	qdev->rsp_consumer_index = 0;
	qdev->rsp_current = qdev->rsp_q_virt_addr;

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspProducerIndexAddrHigh,
			   qdev->rsp_producer_index_phy_addr_high);

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspProducerIndexAddrLow,
			   qdev->rsp_producer_index_phy_addr_low);

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspBaseAddrHigh,
			   MS_64BITS(qdev->rsp_q_phy_addr));

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspBaseAddrLow,
			   LS_64BITS(qdev->rsp_q_phy_addr));

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);

	/* Large Buffer Queue */
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeQBaseAddrHigh,
			   MS_64BITS(qdev->lrg_buf_q_phy_addr));

	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeQBaseAddrLow,
			   LS_64BITS(qdev->lrg_buf_q_phy_addr));

	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeQLength,
			   qdev->num_lbufq_entries);

	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeBufferLength,
			   qdev->lrg_buffer_len);

	/* Small Buffer Queue */
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallQBaseAddrHigh,
			   MS_64BITS(qdev->small_buf_q_phy_addr));

	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallQBaseAddrLow,
			   LS_64BITS(qdev->small_buf_q_phy_addr));

	ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallBufferLength,
			   QL_SMALL_BUFFER_SIZE);

	/* Reset the software-side ring/buffer bookkeeping. */
	qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
	qdev->small_buf_release_cnt = 8;
	qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
	qdev->lrg_buf_release_cnt = 8;
	qdev->lrg_buf_next_free =
		(struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
	qdev->small_buf_index = 0;
	qdev->lrg_buf_index = 0;
	qdev->lrg_buf_free_count = 0;
	qdev->lrg_buf_free_head = NULL;
	qdev->lrg_buf_free_tail = NULL;

	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.
			    rxSmallQProducerIndex,
			    qdev->small_buf_q_producer_index);
	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.
			    rxLargeQProducerIndex,
			    qdev->lrg_buf_q_producer_index);

	/*
	 * Find out if the chip has already been initialized.  If it has, then
	 * we skip some of the initialization.
	 */
	clear_bit(QL_LINK_MASTER, &qdev->flags);
	value = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if ((value & PORT_STATUS_IC) == 0) {

		/* Chip has not been configured yet, so let it rip. */
		if (ql_init_misc_registers(qdev)) {
			status = -1;
			goto out;
		}

		value = qdev->nvram_data.tcpMaxWindowSize;
		ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);

		value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;

		if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
				    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
				     * 2) << 13)) {
			status = -1;
			goto out;
		}
		ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
		ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
				   (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
				     16) | (INTERNAL_CHIP_SD |
					    INTERNAL_CHIP_WE)));
		ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
	}

	if (qdev->mac_index)
		ql_write_page0_reg(qdev,
				   &port_regs->mac1MaxFrameLengthReg,
				   qdev->max_frame_size);
	else
		ql_write_page0_reg(qdev,
				   &port_regs->mac0MaxFrameLengthReg,
				   qdev->max_frame_size);

	/* PHY configuration is guarded by the PHY GIO hardware semaphore. */
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		status = -1;
		goto out;
	}

	PHY_Setup(qdev);
	ql_init_scan_mode(qdev);
	ql_get_phy_owner(qdev);

	/* Load the MAC Configuration */

	/* Program lower 32 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((qdev->ndev->dev_addr[2] << 24)
			    | (qdev->ndev->dev_addr[3] << 16)
			    | (qdev->ndev->dev_addr[4] << 8)
			    | qdev->ndev->dev_addr[5]));

	/* Program top 16 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((qdev->ndev->dev_addr[0] << 8)
			    | qdev->ndev->dev_addr[1]));

	/* Enable Primary MAC */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
			    MAC_ADDR_INDIRECT_PTR_REG_PE));

	/* Clear Primary and Secondary IP addresses */
	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
			   ((IP_ADDR_INDEX_REG_MASK << 16) |
			    (qdev->mac_index << 2)));
	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);

	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
			   ((IP_ADDR_INDEX_REG_MASK << 16) |
			    ((qdev->mac_index << 2) + 1)));
	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);

	/* Indicate Configuration Complete */
	ql_write_page0_reg(qdev,
			   &port_regs->portControl,
			   ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));

	/* Poll (up to ~5s) for the chip to report Initialization Complete. */
	do {
		value = ql_read_page0_reg(qdev, &port_regs->portStatus);
		if (value & PORT_STATUS_IC)
			break;
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		msleep(500);
		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	} while (--delay);

	if (delay == 0) {
		netdev_err(qdev->ndev, "Hw Initialization timeout\n");
		status = -1;
		goto out;
	}

	/* Enable Ethernet Function */
	if (qdev->device_id == QL3032_DEVICE_ID) {
		value =
		    (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
		     QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 |
		     QL3032_PORT_CONTROL_ET);
		ql_write_page0_reg(qdev, &port_regs->functionControl,
				   ((value << 16) | value));
	} else {
		value =
		    (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
		     PORT_CONTROL_HH);
		ql_write_page0_reg(qdev, &port_regs->portControl,
				   ((value << 16) | value));
	}


out:
	return status;
}

/*
 * Caller holds hw_lock.
 */
static int ql_adapter_reset(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	int status = 0;
	u16 value;
	int max_wait_time;

	set_bit(QL_RESET_ACTIVE, &qdev->flags);
	clear_bit(QL_RESET_DONE, &qdev->flags);

	/*
	 * Issue soft reset to chip.
	 */
	netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n");
	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.ispControlStatus,
			    ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));

	/* Wait 3 seconds for reset to complete. */
	netdev_printk(KERN_DEBUG, qdev->ndev,
		      "Wait 10 milliseconds for reset to complete\n");

	/* Wait until the firmware tells us the Soft Reset is done */
	max_wait_time = 5;
	do {
		value =
		    ql_read_common_reg(qdev,
				       &port_regs->CommonRegs.ispControlStatus);
		if ((value & ISP_CONTROL_SR) == 0)
			break;

		ssleep(1);
	} while ((--max_wait_time));

	/*
	 * Also, make sure that the Network Reset Interrupt bit has been
	 * cleared after the soft reset has taken place.
	 */
	value =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	if (value & ISP_CONTROL_RI) {
		netdev_printk(KERN_DEBUG, qdev->ndev,
			      "clearing RI after reset\n");
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.
				    ispControlStatus,
				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
	}

	if (max_wait_time == 0) {
		/* Issue Force Soft Reset */
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.
				    ispControlStatus,
				    ((ISP_CONTROL_FSR << 16) |
				     ISP_CONTROL_FSR));
		/*
		 * Wait until the firmware tells us the Force Soft Reset is
		 * done
		 */
		max_wait_time = 5;
		do {
			value = ql_read_common_reg(qdev,
						   &port_regs->CommonRegs.
						   ispControlStatus);
			if ((value & ISP_CONTROL_FSR) == 0)
				break;
			ssleep(1);
		} while ((--max_wait_time));
	}
	/* status 1 == reset (soft and forced) never completed */
	if (max_wait_time == 0)
		status = 1;

	clear_bit(QL_RESET_ACTIVE, &qdev->flags);
	set_bit(QL_RESET_DONE, &qdev->flags);
	return status;
}

/*
 * Derive per-port identity from the chip's function number: MAC index,
 * outbound-IOCB opcode, mailbox bit mask, PHY address, and whether the
 * link is optical (from the port status media bits).
 */
static void ql_set_mac_info(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value, port_status;
	u8 func_number;

	/* Get the function number */
	value =
	    ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
	func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
	port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
	switch (value & ISP_CONTROL_FN_MASK) {
	case ISP_CONTROL_FN0_NET:
		qdev->mac_index = 0;
		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
		qdev->mb_bit_mask = FN0_MA_BITS_MASK;
		qdev->PHYAddr = PORT0_PHY_ADDRESS;
		if (port_status & PORT_STATUS_SM0)
			set_bit(QL_LINK_OPTICAL, &qdev->flags);
		else
			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
		break;

	case ISP_CONTROL_FN1_NET:
		qdev->mac_index = 1;
		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
		qdev->mb_bit_mask = FN1_MA_BITS_MASK;
		qdev->PHYAddr = PORT1_PHY_ADDRESS;
		if (port_status & PORT_STATUS_SM1)
			set_bit(QL_LINK_OPTICAL, &qdev->flags);
		else
			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
		break;

	case ISP_CONTROL_FN0_SCSI:
	case ISP_CONTROL_FN1_SCSI:
	default:
		/* SCSI functions are handled by the qla SCSI driver, not us. */
		netdev_printk(KERN_DEBUG, qdev->ndev,
			      "Invalid function number, ispControlStatus = 0x%x\n",
			      value);
		break;
	}
	qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
}

/* Log the adapter model, interface type, bus width/type and MAC address. */
static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
	struct pci_dev *pdev = qdev->pdev;

	netdev_info(ndev,
		    "%s Adapter %d RevisionID %d found %s on PCI slot %d\n",
		    DRV_NAME, qdev->index, qdev->chip_rev_id,
		    qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022",
		    qdev->pci_slot);
	netdev_info(ndev, "%s Interface\n",
		    test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");

	/*
	 * Print PCI bus width/type.
	 */
	netdev_info(ndev, "Bus interface is %s %s\n",
		    ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
		    ((qdev->pci_x) ? "PCI-X" : "PCI"));

	netdev_info(ndev, "mem IO base address adjusted = 0x%p\n",
		    qdev->mem_map_registers);
	netdev_info(ndev, "Interrupt number = %d\n", pdev->irq);

	netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr);
}

/*
 * Quiesce and tear down the running adapter: stop the queue, release the
 * IRQ (and MSI), kill the watchdog timer, stop NAPI, and optionally soft
 * reset the chip under hw_lock before freeing memory resources.
 */
static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
{
	struct net_device *ndev = qdev->ndev;
	int retval = 0;

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	clear_bit(QL_LINK_MASTER, &qdev->flags);

	ql_disable_interrupts(qdev);

	free_irq(qdev->pdev->irq, ndev);

	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		netdev_info(qdev->ndev, "calling pci_disable_msi()\n");
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
		pci_disable_msi(qdev->pdev);
	}

	del_timer_sync(&qdev->adapter_timer);

	napi_disable(&qdev->napi);

	if (do_reset) {
		int soft_reset;
		unsigned long hw_flags;

		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		if (ql_wait_for_drvr_lock(qdev)) {
			soft_reset = ql_adapter_reset(qdev);
			if (soft_reset) {
				netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n",
					   qdev->index);
			}
			netdev_err(ndev,
				   "Releasing driver lock via chip reset\n");
		} else {
			netdev_err(ndev,
				   "Could not acquire driver lock to do reset!\n");
			retval = -1;
		}
spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3462 } 3463 ql_free_mem_resources(qdev); 3464 return retval; 3465} 3466 3467static int ql_adapter_up(struct ql3_adapter *qdev) 3468{ 3469 struct net_device *ndev = qdev->ndev; 3470 int err; 3471 unsigned long irq_flags = IRQF_SAMPLE_RANDOM | IRQF_SHARED; 3472 unsigned long hw_flags; 3473 3474 if (ql_alloc_mem_resources(qdev)) { 3475 netdev_err(ndev, "Unable to allocate buffers\n"); 3476 return -ENOMEM; 3477 } 3478 3479 if (qdev->msi) { 3480 if (pci_enable_msi(qdev->pdev)) { 3481 netdev_err(ndev, 3482 "User requested MSI, but MSI failed to initialize. Continuing without MSI.\n"); 3483 qdev->msi = 0; 3484 } else { 3485 netdev_info(ndev, "MSI Enabled...\n"); 3486 set_bit(QL_MSI_ENABLED, &qdev->flags); 3487 irq_flags &= ~IRQF_SHARED; 3488 } 3489 } 3490 3491 err = request_irq(qdev->pdev->irq, ql3xxx_isr, 3492 irq_flags, ndev->name, ndev); 3493 if (err) { 3494 netdev_err(ndev, 3495 "Failed to reserve interrupt %d - already in use\n", 3496 qdev->pdev->irq); 3497 goto err_irq; 3498 } 3499 3500 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3501 3502 err = ql_wait_for_drvr_lock(qdev); 3503 if (err) { 3504 err = ql_adapter_initialize(qdev); 3505 if (err) { 3506 netdev_err(ndev, "Unable to initialize adapter\n"); 3507 goto err_init; 3508 } 3509 netdev_err(ndev, "Releasing driver lock\n"); 3510 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); 3511 } else { 3512 netdev_err(ndev, "Could not acquire driver lock\n"); 3513 goto err_lock; 3514 } 3515 3516 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3517 3518 set_bit(QL_ADAPTER_UP, &qdev->flags); 3519 3520 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); 3521 3522 napi_enable(&qdev->napi); 3523 ql_enable_interrupts(qdev); 3524 return 0; 3525 3526err_init: 3527 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); 3528err_lock: 3529 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3530 free_irq(qdev->pdev->irq, ndev); 3531err_irq: 3532 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { 3533 
netdev_info(ndev, "calling pci_disable_msi()\n"); 3534 clear_bit(QL_MSI_ENABLED, &qdev->flags); 3535 pci_disable_msi(qdev->pdev); 3536 } 3537 return err; 3538} 3539 3540static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset) 3541{ 3542 if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) { 3543 netdev_err(qdev->ndev, 3544 "Driver up/down cycle failed, closing device\n"); 3545 rtnl_lock(); 3546 dev_close(qdev->ndev); 3547 rtnl_unlock(); 3548 return -1; 3549 } 3550 return 0; 3551} 3552 3553static int ql3xxx_close(struct net_device *ndev) 3554{ 3555 struct ql3_adapter *qdev = netdev_priv(ndev); 3556 3557 /* 3558 * Wait for device to recover from a reset. 3559 * (Rarely happens, but possible.) 3560 */ 3561 while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) 3562 msleep(50); 3563 3564 ql_adapter_down(qdev, QL_DO_RESET); 3565 return 0; 3566} 3567 3568static int ql3xxx_open(struct net_device *ndev) 3569{ 3570 struct ql3_adapter *qdev = netdev_priv(ndev); 3571 return ql_adapter_up(qdev); 3572} 3573 3574static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) 3575{ 3576 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 3577 struct ql3xxx_port_registers __iomem *port_regs = 3578 qdev->mem_map_registers; 3579 struct sockaddr *addr = p; 3580 unsigned long hw_flags; 3581 3582 if (netif_running(ndev)) 3583 return -EBUSY; 3584 3585 if (!is_valid_ether_addr(addr->sa_data)) 3586 return -EADDRNOTAVAIL; 3587 3588 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); 3589 3590 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3591 /* Program lower 32 bits of the MAC address */ 3592 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3593 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); 3594 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3595 ((ndev->dev_addr[2] << 24) | (ndev-> 3596 dev_addr[3] << 16) | 3597 (ndev->dev_addr[4] << 8) | ndev->dev_addr[5])); 3598 3599 /* Program top 16 bits of the MAC address */ 3600 ql_write_page0_reg(qdev, 
&port_regs->macAddrIndirectPtrReg, 3601 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); 3602 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3603 ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1])); 3604 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3605 3606 return 0; 3607} 3608 3609static void ql3xxx_tx_timeout(struct net_device *ndev) 3610{ 3611 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 3612 3613 netdev_err(ndev, "Resetting...\n"); 3614 /* 3615 * Stop the queues, we've got a problem. 3616 */ 3617 netif_stop_queue(ndev); 3618 3619 /* 3620 * Wake up the worker to process this event. 3621 */ 3622 queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0); 3623} 3624 3625static void ql_reset_work(struct work_struct *work) 3626{ 3627 struct ql3_adapter *qdev = 3628 container_of(work, struct ql3_adapter, reset_work.work); 3629 struct net_device *ndev = qdev->ndev; 3630 u32 value; 3631 struct ql_tx_buf_cb *tx_cb; 3632 int max_wait_time, i; 3633 struct ql3xxx_port_registers __iomem *port_regs = 3634 qdev->mem_map_registers; 3635 unsigned long hw_flags; 3636 3637 if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) { 3638 clear_bit(QL_LINK_MASTER, &qdev->flags); 3639 3640 /* 3641 * Loop through the active list and return the skb. 
3642 */ 3643 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 3644 int j; 3645 tx_cb = &qdev->tx_buf[i]; 3646 if (tx_cb->skb) { 3647 netdev_printk(KERN_DEBUG, ndev, 3648 "Freeing lost SKB\n"); 3649 pci_unmap_single(qdev->pdev, 3650 dma_unmap_addr(&tx_cb->map[0], 3651 mapaddr), 3652 dma_unmap_len(&tx_cb->map[0], maplen), 3653 PCI_DMA_TODEVICE); 3654 for (j = 1; j < tx_cb->seg_count; j++) { 3655 pci_unmap_page(qdev->pdev, 3656 dma_unmap_addr(&tx_cb->map[j], 3657 mapaddr), 3658 dma_unmap_len(&tx_cb->map[j], 3659 maplen), 3660 PCI_DMA_TODEVICE); 3661 } 3662 dev_kfree_skb(tx_cb->skb); 3663 tx_cb->skb = NULL; 3664 } 3665 } 3666 3667 netdev_err(ndev, "Clearing NRI after reset\n"); 3668 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3669 ql_write_common_reg(qdev, 3670 &port_regs->CommonRegs. 3671 ispControlStatus, 3672 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); 3673 /* 3674 * Wait the for Soft Reset to Complete. 3675 */ 3676 max_wait_time = 10; 3677 do { 3678 value = ql_read_common_reg(qdev, 3679 &port_regs->CommonRegs. 3680 3681 ispControlStatus); 3682 if ((value & ISP_CONTROL_SR) == 0) { 3683 netdev_printk(KERN_DEBUG, ndev, 3684 "reset completed\n"); 3685 break; 3686 } 3687 3688 if (value & ISP_CONTROL_RI) { 3689 netdev_printk(KERN_DEBUG, ndev, 3690 "clearing NRI after reset\n"); 3691 ql_write_common_reg(qdev, 3692 &port_regs-> 3693 CommonRegs. 3694 ispControlStatus, 3695 ((ISP_CONTROL_RI << 3696 16) | ISP_CONTROL_RI)); 3697 } 3698 3699 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3700 ssleep(1); 3701 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3702 } while (--max_wait_time); 3703 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3704 3705 if (value & ISP_CONTROL_SR) { 3706 3707 /* 3708 * Set the reset flags and clear the board again. 3709 * Nothing else to do... 
3710 */ 3711 netdev_err(ndev, 3712 "Timed out waiting for reset to complete\n"); 3713 netdev_err(ndev, "Do a reset\n"); 3714 clear_bit(QL_RESET_PER_SCSI, &qdev->flags); 3715 clear_bit(QL_RESET_START, &qdev->flags); 3716 ql_cycle_adapter(qdev, QL_DO_RESET); 3717 return; 3718 } 3719 3720 clear_bit(QL_RESET_ACTIVE, &qdev->flags); 3721 clear_bit(QL_RESET_PER_SCSI, &qdev->flags); 3722 clear_bit(QL_RESET_START, &qdev->flags); 3723 ql_cycle_adapter(qdev, QL_NO_RESET); 3724 } 3725} 3726 3727static void ql_tx_timeout_work(struct work_struct *work) 3728{ 3729 struct ql3_adapter *qdev = 3730 container_of(work, struct ql3_adapter, tx_timeout_work.work); 3731 3732 ql_cycle_adapter(qdev, QL_DO_RESET); 3733} 3734 3735static void ql_get_board_info(struct ql3_adapter *qdev) 3736{ 3737 struct ql3xxx_port_registers __iomem *port_regs = 3738 qdev->mem_map_registers; 3739 u32 value; 3740 3741 value = ql_read_page0_reg_l(qdev, &port_regs->portStatus); 3742 3743 qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12); 3744 if (value & PORT_STATUS_64) 3745 qdev->pci_width = 64; 3746 else 3747 qdev->pci_width = 32; 3748 if (value & PORT_STATUS_X) 3749 qdev->pci_x = 1; 3750 else 3751 qdev->pci_x = 0; 3752 qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn); 3753} 3754 3755static void ql3xxx_timer(unsigned long ptr) 3756{ 3757 struct ql3_adapter *qdev = (struct ql3_adapter *)ptr; 3758 queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0); 3759} 3760 3761static const struct net_device_ops ql3xxx_netdev_ops = { 3762 .ndo_open = ql3xxx_open, 3763 .ndo_start_xmit = ql3xxx_send, 3764 .ndo_stop = ql3xxx_close, 3765 .ndo_set_multicast_list = NULL, /* not allowed on NIC side */ 3766 .ndo_change_mtu = eth_change_mtu, 3767 .ndo_validate_addr = eth_validate_addr, 3768 .ndo_set_mac_address = ql3xxx_set_mac_address, 3769 .ndo_tx_timeout = ql3xxx_tx_timeout, 3770}; 3771 3772static int __devinit ql3xxx_probe(struct pci_dev *pdev, 3773 const struct pci_device_id *pci_entry) 3774{ 3775 struct 
net_device *ndev = NULL; 3776 struct ql3_adapter *qdev = NULL; 3777 static int cards_found; 3778 int uninitialized_var(pci_using_dac), err; 3779 3780 err = pci_enable_device(pdev); 3781 if (err) { 3782 pr_err("%s cannot enable PCI device\n", pci_name(pdev)); 3783 goto err_out; 3784 } 3785 3786 err = pci_request_regions(pdev, DRV_NAME); 3787 if (err) { 3788 pr_err("%s cannot obtain PCI resources\n", pci_name(pdev)); 3789 goto err_out_disable_pdev; 3790 } 3791 3792 pci_set_master(pdev); 3793 3794 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 3795 pci_using_dac = 1; 3796 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3797 } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { 3798 pci_using_dac = 0; 3799 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 3800 } 3801 3802 if (err) { 3803 pr_err("%s no usable DMA configuration\n", pci_name(pdev)); 3804 goto err_out_free_regions; 3805 } 3806 3807 ndev = alloc_etherdev(sizeof(struct ql3_adapter)); 3808 if (!ndev) { 3809 pr_err("%s could not alloc etherdev\n", pci_name(pdev)); 3810 err = -ENOMEM; 3811 goto err_out_free_regions; 3812 } 3813 3814 SET_NETDEV_DEV(ndev, &pdev->dev); 3815 3816 pci_set_drvdata(pdev, ndev); 3817 3818 qdev = netdev_priv(ndev); 3819 qdev->index = cards_found; 3820 qdev->ndev = ndev; 3821 qdev->pdev = pdev; 3822 qdev->device_id = pci_entry->device; 3823 qdev->port_link_state = LS_DOWN; 3824 if (msi) 3825 qdev->msi = 1; 3826 3827 qdev->msg_enable = netif_msg_init(debug, default_msg); 3828 3829 if (pci_using_dac) 3830 ndev->features |= NETIF_F_HIGHDMA; 3831 if (qdev->device_id == QL3032_DEVICE_ID) 3832 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 3833 3834 qdev->mem_map_registers = pci_ioremap_bar(pdev, 1); 3835 if (!qdev->mem_map_registers) { 3836 pr_err("%s: cannot map device registers\n", pci_name(pdev)); 3837 err = -EIO; 3838 goto err_out_free_ndev; 3839 } 3840 3841 spin_lock_init(&qdev->adapter_lock); 3842 spin_lock_init(&qdev->hw_lock); 3843 3844 /* Set driver 
entry points */ 3845 ndev->netdev_ops = &ql3xxx_netdev_ops; 3846 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); 3847 ndev->watchdog_timeo = 5 * HZ; 3848 3849 netif_napi_add(ndev, &qdev->napi, ql_poll, 64); 3850 3851 ndev->irq = pdev->irq; 3852 3853 /* make sure the EEPROM is good */ 3854 if (ql_get_nvram_params(qdev)) { 3855 pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n", 3856 __func__, qdev->index); 3857 err = -EIO; 3858 goto err_out_iounmap; 3859 } 3860 3861 ql_set_mac_info(qdev); 3862 3863 /* Validate and set parameters */ 3864 if (qdev->mac_index) { 3865 ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ; 3866 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress); 3867 } else { 3868 ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ; 3869 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress); 3870 } 3871 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); 3872 3873 ndev->tx_queue_len = NUM_REQ_Q_ENTRIES; 3874 3875 /* Record PCI bus information. */ 3876 ql_get_board_info(qdev); 3877 3878 /* 3879 * Set the Maximum Memory Read Byte Count value. We do this to handle 3880 * jumbo frames. 
3881 */ 3882 if (qdev->pci_x) 3883 pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036); 3884 3885 err = register_netdev(ndev); 3886 if (err) { 3887 pr_err("%s: cannot register net device\n", pci_name(pdev)); 3888 goto err_out_iounmap; 3889 } 3890 3891 /* we're going to reset, so assume we have no link for now */ 3892 3893 netif_carrier_off(ndev); 3894 netif_stop_queue(ndev); 3895 3896 qdev->workqueue = create_singlethread_workqueue(ndev->name); 3897 INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work); 3898 INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work); 3899 INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work); 3900 3901 init_timer(&qdev->adapter_timer); 3902 qdev->adapter_timer.function = ql3xxx_timer; 3903 qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */ 3904 qdev->adapter_timer.data = (unsigned long)qdev; 3905 3906 if (!cards_found) { 3907 pr_alert("%s\n", DRV_STRING); 3908 pr_alert("Driver name: %s, Version: %s\n", 3909 DRV_NAME, DRV_VERSION); 3910 } 3911 ql_display_dev_info(ndev); 3912 3913 cards_found++; 3914 return 0; 3915 3916err_out_iounmap: 3917 iounmap(qdev->mem_map_registers); 3918err_out_free_ndev: 3919 free_netdev(ndev); 3920err_out_free_regions: 3921 pci_release_regions(pdev); 3922err_out_disable_pdev: 3923 pci_disable_device(pdev); 3924 pci_set_drvdata(pdev, NULL); 3925err_out: 3926 return err; 3927} 3928 3929static void __devexit ql3xxx_remove(struct pci_dev *pdev) 3930{ 3931 struct net_device *ndev = pci_get_drvdata(pdev); 3932 struct ql3_adapter *qdev = netdev_priv(ndev); 3933 3934 unregister_netdev(ndev); 3935 3936 ql_disable_interrupts(qdev); 3937 3938 if (qdev->workqueue) { 3939 cancel_delayed_work(&qdev->reset_work); 3940 cancel_delayed_work(&qdev->tx_timeout_work); 3941 destroy_workqueue(qdev->workqueue); 3942 qdev->workqueue = NULL; 3943 } 3944 3945 iounmap(qdev->mem_map_registers); 3946 pci_release_regions(pdev); 3947 pci_set_drvdata(pdev, NULL); 3948 free_netdev(ndev); 3949} 3950 
3951static struct pci_driver ql3xxx_driver = { 3952 3953 .name = DRV_NAME, 3954 .id_table = ql3xxx_pci_tbl, 3955 .probe = ql3xxx_probe, 3956 .remove = __devexit_p(ql3xxx_remove), 3957}; 3958 3959static int __init ql3xxx_init_module(void) 3960{ 3961 return pci_register_driver(&ql3xxx_driver); 3962} 3963 3964static void __exit ql3xxx_exit(void) 3965{ 3966 pci_unregister_driver(&ql3xxx_driver); 3967} 3968 3969module_init(ql3xxx_init_module); 3970module_exit(ql3xxx_exit); 3971