/netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/net/

Lines Matching defs:qdev

104 static int ql_sem_spinlock(struct ql3_adapter *qdev,
108 qdev->mem_map_registers;
123 static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
126 qdev->mem_map_registers;
131 static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
134 qdev->mem_map_registers;
145 static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
153 if (ql_sem_lock(qdev,
155 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
157 netdev_printk(KERN_DEBUG, qdev->ndev,
163 netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
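
The ql_sem_spinlock/ql_sem_lock/ql_sem_unlock matches above all follow one idiom: write an owner code (built from QL_RESOURCE_BITS_BASE_CODE and qdev->mac_index) into the ISP semaphore register, read the register back, and treat the lock as taken only if the read-back still shows those bits; ql_sem_spinlock retries this with a delay, and ql_wait_for_drvr_lock gives up with the "Timed out" message. A minimal user-space model of that write-then-read-back handshake; the register, the arbitration rule, and every name below are illustrative stand-ins, not the driver's:

    #include <stdio.h>

    /* Toy semaphore register.  Real hardware arbitrates concurrent
     * writers; this single-threaded model just keeps the first owner. */
    static unsigned sem_reg;

    static void sem_write(unsigned v)
    {
        if (!(sem_reg & 0xffff))        /* free: accept the new owner */
            sem_reg = v & 0xffff;
    }

    static int sem_try_lock(unsigned sem_bits)
    {
        sem_write(sem_bits);            /* request ownership */
        /* granted only if the read-back still shows our bits */
        return (sem_reg & 0xffff) == sem_bits;
    }

    static void sem_unlock(void)
    {
        sem_reg = 0;
    }

    int main(void)
    {
        unsigned owner_a = 0x1, owner_b = 0x2;

        printf("A locks: %d\n", sem_try_lock(owner_a)); /* 1: granted */
        printf("B locks: %d\n", sem_try_lock(owner_b)); /* 0: A owns it */
        sem_unlock();
        printf("B locks: %d\n", sem_try_lock(owner_b)); /* 1: free again */
        return 0;
    }
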
167 static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
170 qdev->mem_map_registers;
175 qdev->current_page = page;
178 static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
183 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
185 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
190 static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
195 static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
200 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
202 if (qdev->current_page != 0)
203 ql_set_register_page(qdev, 0);
206 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
210 static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
212 if (qdev->current_page != 0)
213 ql_set_register_page(qdev, 0);
217 static void ql_write_common_reg_l(struct ql3_adapter *qdev,
222 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
225 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
228 static void ql_write_common_reg(struct ql3_adapter *qdev,
235 static void ql_write_nvram_reg(struct ql3_adapter *qdev,
243 static void ql_write_page0_reg(struct ql3_adapter *qdev,
246 if (qdev->current_page != 0)
247 ql_set_register_page(qdev, 0);
255 static void ql_write_page1_reg(struct ql3_adapter *qdev,
258 if (qdev->current_page != 1)
259 ql_set_register_page(qdev, 1);
267 static void ql_write_page2_reg(struct ql3_adapter *qdev,
270 if (qdev->current_page != 2)
271 ql_set_register_page(qdev, 2);
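
Every ql_read_page0_reg/ql_write_pageN_reg match has the same shape: compare the cached qdev->current_page with the page the target register lives on, call ql_set_register_page() only on a mismatch, then do the access; the _l variants additionally take qdev->hw_lock because the page selection is shared state. A compilable sketch of the cached-page idea (device and names invented):

    #include <stdio.h>

    /* Cached-page register file, modelling ql_set_register_page() and
     * the ql_*_page*_reg() helpers.  The "device" is an array per page. */

    #define NUM_PAGES 3

    static unsigned regs[NUM_PAGES][8];
    static unsigned current_page;   /* mirrors qdev->current_page */
    static unsigned page_switches;  /* each one is an MMIO write in the driver */

    static void set_register_page(unsigned page)
    {
        current_page = page;
        page_switches++;
    }

    static unsigned read_page_reg(unsigned page, unsigned reg)
    {
        if (page != current_page)   /* switch only when the cache disagrees */
            set_register_page(page);
        return regs[page][reg];
    }

    int main(void)
    {
        read_page_reg(0, 1);        /* no switch: already on page 0 */
        read_page_reg(0, 2);        /* no switch */
        read_page_reg(2, 0);        /* one real page-select write */
        printf("page switches: %u\n", page_switches);  /* prints 1 */
        return 0;
    }
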
276 static void ql_disable_interrupts(struct ql3_adapter *qdev)
279 qdev->mem_map_registers;
281 ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
286 static void ql_enable_interrupts(struct ql3_adapter *qdev)
289 qdev->mem_map_registers;
291 ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
296 static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
303 if (qdev->lrg_buf_free_tail == NULL) { /* The list is empty */
304 qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
306 qdev->lrg_buf_free_tail->next = lrg_buf_cb;
307 qdev->lrg_buf_free_tail = lrg_buf_cb;
311 lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
312 qdev->lrg_buffer_len);
314 netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n");
315 qdev->lrg_buf_skb_check++;
322 map = pci_map_single(qdev->pdev,
324 qdev->lrg_buffer_len -
327 err = pci_dma_mapping_error(qdev->pdev, map);
329 netdev_err(qdev->ndev,
335 qdev->lrg_buf_skb_check++;
345 qdev->lrg_buffer_len -
350 qdev->lrg_buf_free_count++;
354 *qdev)
356 struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
359 qdev->lrg_buf_free_head = lrg_buf_cb->next;
360 if (qdev->lrg_buf_free_head == NULL)
361 qdev->lrg_buf_free_tail = NULL;
362 qdev->lrg_buf_free_count--;
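
ql_release_to_lrg_buf_free_list() appends at qdev->lrg_buf_free_tail and ql_get_from_lrg_buf_free_list() pops from qdev->lrg_buf_free_head: a plain singly linked FIFO tracked by head/tail/count fields (the release path also re-allocates the skb and redoes the PCI mapping when an earlier allocation failed, which is what lrg_buf_skb_check counts). The list discipline alone, as a runnable sketch:

    #include <stdio.h>
    #include <stddef.h>

    /* FIFO free list with head/tail pointers, as in the lrg_buf_free_* code. */
    struct buf_cb {
        struct buf_cb *next;
        int index;
    };

    static struct buf_cb *free_head, *free_tail;
    static int free_count;

    static void release_to_free_list(struct buf_cb *cb)
    {
        cb->next = NULL;
        if (free_tail == NULL) {        /* the list is empty */
            free_head = free_tail = cb;
        } else {
            free_tail->next = cb;       /* append at the tail */
            free_tail = cb;
        }
        free_count++;
    }

    static struct buf_cb *get_from_free_list(void)
    {
        struct buf_cb *cb = free_head;
        if (cb) {
            free_head = cb->next;
            if (free_head == NULL)      /* popped the last element */
                free_tail = NULL;
            free_count--;
        }
        return cb;
    }

    int main(void)
    {
        struct buf_cb bufs[3] = { { .index = 0 }, { .index = 1 }, { .index = 2 } };
        for (int i = 0; i < 3; i++)
            release_to_free_list(&bufs[i]);
        while (free_count)
            printf("got buffer %d\n", get_from_free_list()->index); /* 0, 1, 2 */
        return 0;
    }
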
371 static void fm93c56a_deselect(struct ql3_adapter *qdev);
372 static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
378 static void fm93c56a_select(struct ql3_adapter *qdev)
381 qdev->mem_map_registers;
384 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
385 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
386 ql_write_nvram_reg(qdev, spir,
387 ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
393 static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
400 qdev->mem_map_registers;
404 ql_write_nvram_reg(qdev, spir,
405 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
407 ql_write_nvram_reg(qdev, spir,
408 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
410 ql_write_nvram_reg(qdev, spir,
411 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
423 ql_write_nvram_reg(qdev, spir,
425 qdev->eeprom_cmd_data | dataBit));
428 ql_write_nvram_reg(qdev, spir,
429 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
431 ql_write_nvram_reg(qdev, spir,
432 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
448 ql_write_nvram_reg(qdev, spir,
450 qdev->eeprom_cmd_data | dataBit));
453 ql_write_nvram_reg(qdev, spir,
454 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
456 ql_write_nvram_reg(qdev, spir,
457 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
466 static void fm93c56a_deselect(struct ql3_adapter *qdev)
469 qdev->mem_map_registers;
472 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
473 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
479 static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
485 qdev->mem_map_registers;
491 ql_write_nvram_reg(qdev, spir,
492 ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
494 ql_write_nvram_reg(qdev, spir,
495 ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
497 dataBit = (ql_read_common_reg(qdev, spir) &
507 static void eeprom_readword(struct ql3_adapter *qdev,
510 fm93c56a_select(qdev);
511 fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
512 fm93c56a_datain(qdev, value);
513 fm93c56a_deselect(qdev);
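
fm93c56a_select/cmd/datain/deselect bit-bang a Microwire-style serial EEPROM through the nvram register: chip select and clock are driven by writing ISP_NVRAM_MASK plus the AUBURN_EEPROM_* bits, the opcode and address are clocked out bit by bit, then 16 data bits are clocked back in. A self-contained model of the data phase; the "EEPROM" is an array and the pin handling collapses into a shift register, so everything below is an illustrative stand-in:

    #include <stdio.h>

    /* Data phase of a Microwire-style EEPROM read, after fm93c56a_cmd()
     * and fm93c56a_datain(). */

    static const unsigned short eeprom[4] = { 0x1234, 0xabcd, 0x5a5a, 0x0001 };

    static unsigned short shift_out;   /* device-side shift register */

    static void eeprom_select(unsigned addr)
    {
        shift_out = eeprom[addr & 3];  /* latched once the command bits land */
    }

    static int eeprom_clock_bit(void)
    {
        int bit = (shift_out & 0x8000) != 0;   /* MSB comes out first */
        shift_out <<= 1;
        return bit;
    }

    static unsigned short eeprom_readword(unsigned addr)
    {
        unsigned short value = 0;

        eeprom_select(addr);           /* fm93c56a_select() + fm93c56a_cmd() */
        for (int i = 0; i < 16; i++)   /* 16 clock pulses, as in fm93c56a_datain() */
            value = (value << 1) | eeprom_clock_bit();
        return value;                  /* fm93c56a_deselect() would follow */
    }

    int main(void)
    {
        printf("word 1 = 0x%04x\n", eeprom_readword(1));  /* 0xabcd */
        return 0;
    }
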
524 static int ql_get_nvram_params(struct ql3_adapter *qdev)
531 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
533 pEEPROMData = (u16 *)&qdev->nvram_data;
534 qdev->eeprom_cmd_data = 0;
535 if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
536 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
539 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
544 eeprom_readword(qdev, index, pEEPROMData);
548 ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);
551 netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
553 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
557 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
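
The "checksum should be zero" error at line 551 implies the usual NVRAM convention: eeprom_readword() fills the image word by word while a 16-bit sum accumulates, and a valid image, stored checksum word included, must sum to zero. A sketch of that check on made-up data (the exact accumulation width is an assumption from the error text):

    #include <stdio.h>

    /* NVRAM validity test in the style of ql_get_nvram_params(): the sum
     * of all 16-bit words, checksum word included, must be zero. */
    static unsigned short nvram_sum(const unsigned short *data, int nwords)
    {
        unsigned short csum = 0;
        for (int i = 0; i < nwords; i++)
            csum += data[i];            /* wraps mod 65536 */
        return csum;                    /* nonzero => corrupt image */
    }

    int main(void)
    {
        unsigned short img[4] = { 0x1111, 0x2222, 0x3333 };
        img[3] = (unsigned short)-(0x1111 + 0x2222 + 0x3333); /* stored checksum */
        printf("sum = 0x%04x\n", nvram_sum(img, 4));          /* prints 0x0000 */
        return 0;
    }
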
565 static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
568 qdev->mem_map_registers;
573 temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
582 static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
585 qdev->mem_map_registers;
588 if (qdev->numPorts > 1) {
601 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
604 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
609 static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
613 qdev->mem_map_registers;
616 if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
629 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
632 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
639 static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
643 qdev->mem_map_registers;
646 scanWasEnabled = ql_mii_disable_scan_mode(qdev);
648 if (ql_wait_for_mii_ready(qdev)) {
649 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
653 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
656 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
659 if (ql_wait_for_mii_ready(qdev)) {
660 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
665 ql_mii_enable_scan_mode(qdev);
670 static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
674 qdev->mem_map_registers;
678 scanWasEnabled = ql_mii_disable_scan_mode(qdev);
680 if (ql_wait_for_mii_ready(qdev)) {
681 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
685 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
688 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
691 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
695 if (ql_wait_for_mii_ready(qdev)) {
696 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
700 temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
704 ql_mii_enable_scan_mode(qdev);
709 static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
712 qdev->mem_map_registers;
714 ql_mii_disable_scan_mode(qdev);
716 if (ql_wait_for_mii_ready(qdev)) {
717 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
721 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
722 qdev->PHYAddr | regAddr);
724 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
727 if (ql_wait_for_mii_ready(qdev)) {
728 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
732 ql_mii_enable_scan_mode(qdev);
737 static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
741 qdev->mem_map_registers;
743 ql_mii_disable_scan_mode(qdev);
745 if (ql_wait_for_mii_ready(qdev)) {
746 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
750 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
751 qdev->PHYAddr | regAddr);
753 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
756 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
760 if (ql_wait_for_mii_ready(qdev)) {
761 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
765 temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
768 ql_mii_enable_scan_mode(qdev);
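
The four MII helpers (ql_mii_read_reg/_ex, ql_mii_write_reg/_ex) share one sequence: pause the controller's background PHY scan, poll macMIIStatusReg until the management interface is idle, load macMIIMgmtAddrReg with the PHY address ORed with the register number, start the cycle via macMIIMgmtControlReg, poll again, then read macMIIMgmtDataReg and restore scan mode. A stubbed walk-through of that ordering; the registers and the PHY reply are simulated, and the address layout is an assumption (the driver ORs a pre-shifted qdev->PHYAddr with regAddr):

    #include <stdio.h>

    static int mii_busy;               /* busy bit of macMIIStatusReg */
    static unsigned mii_addr_reg, mii_data_reg;

    static int wait_for_mii_ready(void)
    {
        return mii_busy;               /* the driver polls with a retry budget */
    }

    static unsigned mii_read(unsigned phy_addr, unsigned reg_addr)
    {
        unsigned value;

        /* 1. ql_mii_disable_scan_mode(): pause the background PHY scan */
        if (wait_for_mii_ready())
            return ~0u;                /* the TIMED_OUT_MSG path */
        mii_addr_reg = phy_addr | reg_addr;  /* macMIIMgmtAddrReg */
        /* 2. kick macMIIMgmtControlReg; the PHY answers over MDIO */
        mii_data_reg = 0x796d;         /* simulated reply */
        if (wait_for_mii_ready())
            return ~0u;
        value = mii_data_reg;          /* macMIIMgmtDataReg */
        /* 3. ql_mii_enable_scan_mode(): resume scanning */
        return value;
    }

    int main(void)
    {
        printf("reg 1 = 0x%04x\n", mii_read(1 << 5, 1));
        return 0;
    }
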
773 static void ql_petbi_reset(struct ql3_adapter *qdev)
775 ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
778 static void ql_petbi_start_neg(struct ql3_adapter *qdev)
783 ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
785 ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);
787 ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
790 ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
796 static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
798 ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
799 PHYAddr[qdev->mac_index]);
802 static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
807 ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
808 PHYAddr[qdev->mac_index]);
810 ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
811 PHYAddr[qdev->mac_index]);
813 ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
815 PHYAddr[qdev->mac_index]);
817 ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
820 PHYAddr[qdev->mac_index]);
823 static void ql_petbi_init(struct ql3_adapter *qdev)
825 ql_petbi_reset(qdev);
826 ql_petbi_start_neg(qdev);
829 static void ql_petbi_init_ex(struct ql3_adapter *qdev)
831 ql_petbi_reset_ex(qdev);
832 ql_petbi_start_neg_ex(qdev);
835 static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
839 if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
845 static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
847 netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
849 ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
851 ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
853 ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
855 ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
857 ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
859 ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
861 ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
863 ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
865 ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
867 ql_mii_write_reg_ex(qdev, 0x11,
868 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
874 ql_mii_write_reg(qdev, 0x12, 0x840a);
875 ql_mii_write_reg(qdev, 0x00, 0x1140);
876 ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
879 static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
902 netdev_info(qdev->ndev, "Phy: %s\n",
912 static int ql_phy_get_speed(struct ql3_adapter *qdev)
916 switch (qdev->phyType) {
918 if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
925 if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
943 static int ql_is_full_dup(struct ql3_adapter *qdev)
947 switch (qdev->phyType) {
949 if (ql_mii_read_reg(qdev, 0x1A, &reg))
956 if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
963 static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
967 if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
973 static int PHY_Setup(struct ql3_adapter *qdev)
982 err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
984 netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
988 err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
990 netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
999 if (qdev->mac_index == 0)
1004 err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
1006 netdev_err(qdev->ndev,
1011 err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
1013 netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n");
1023 qdev->phyType = getPhyType(qdev, reg1, reg2);
1025 if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
1027 phyAgereSpecificInit(qdev, miiAddr);
1028 } else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
1029 netdev_err(qdev->ndev, "PHY is unknown\n");
1039 static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
1042 qdev->mem_map_registers;
1050 if (qdev->mac_index)
1051 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1053 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1059 static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
1062 qdev->mem_map_registers;
1070 if (qdev->mac_index)
1071 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1073 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1079 static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
1082 qdev->mem_map_registers;
1090 if (qdev->mac_index)
1091 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1093 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1099 static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
1102 qdev->mem_map_registers;
1110 if (qdev->mac_index)
1111 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1113 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1119 static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
1122 qdev->mem_map_registers;
1132 if (qdev->mac_index)
1133 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1135 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
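
ql_mac_enable() and the four ql_mac_cfg_* helpers each compute a value and steer it to mac0ConfigReg or mac1ConfigReg based on qdev->mac_index. On this controller family the config registers appear to take a masked write in a single 32-bit store: the bit being changed goes into the upper halfword as a write-enable mask, and into the lower halfword only when enabling; that encoding is inferred from the full source, so treat it as an assumption:

    #include <stdio.h>

    /* Assumed masked-write encoding for the macNConfigReg helpers: the
     * upper 16 bits say which config bits this write may touch, the
     * lower 16 bits give their new values. */
    static unsigned mac_cfg_value(unsigned bit, int enable)
    {
        return enable ? (bit | (bit << 16)) : (bit << 16);
    }

    int main(void)
    {
        unsigned PAUSE_EN = 0x0004;    /* invented bit position */

        printf("enable : 0x%08x\n", mac_cfg_value(PAUSE_EN, 1)); /* 0x00040004 */
        printf("disable: 0x%08x\n", mac_cfg_value(PAUSE_EN, 0)); /* 0x00040000 */
        return 0;
    }

mac_index then only selects whether the value lands in mac0ConfigReg or mac1ConfigReg.
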
1141 static int ql_is_fiber(struct ql3_adapter *qdev)
1144 qdev->mem_map_registers;
1148 switch (qdev->mac_index) {
1157 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1161 static int ql_is_auto_cfg(struct ql3_adapter *qdev)
1164 ql_mii_read_reg(qdev, 0x00, &reg);
1171 static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
1174 qdev->mem_map_registers;
1178 switch (qdev->mac_index) {
1187 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1189 netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
1192 netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
1199 static int ql_is_neg_pause(struct ql3_adapter *qdev)
1201 if (ql_is_fiber(qdev))
1202 return ql_is_petbi_neg_pause(qdev);
1204 return ql_is_phy_neg_pause(qdev);
1207 static int ql_auto_neg_error(struct ql3_adapter *qdev)
1210 qdev->mem_map_registers;
1214 switch (qdev->mac_index) {
1222 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1226 static u32 ql_get_link_speed(struct ql3_adapter *qdev)
1228 if (ql_is_fiber(qdev))
1231 return ql_phy_get_speed(qdev);
1234 static int ql_is_link_full_dup(struct ql3_adapter *qdev)
1236 if (ql_is_fiber(qdev))
1239 return ql_is_full_dup(qdev);
1245 static int ql_link_down_detect(struct ql3_adapter *qdev)
1248 qdev->mem_map_registers;
1252 switch (qdev->mac_index) {
1262 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
1269 static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
1272 qdev->mem_map_registers;
1274 switch (qdev->mac_index) {
1276 ql_write_common_reg(qdev,
1283 ql_write_common_reg(qdev,
1299 static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
1302 qdev->mem_map_registers;
1306 switch (qdev->mac_index) {
1317 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1319 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1324 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
1328 static void ql_phy_reset_ex(struct ql3_adapter *qdev)
1330 ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
1331 PHYAddr[qdev->mac_index]);
1334 static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
1339 if (qdev->phyType == PHY_AGERE_ET1011C)
1340 ql_mii_write_reg(qdev, 0x13, 0x0000);
1343 if (qdev->mac_index == 0)
1345 qdev->nvram_data.macCfg_port0.portConfiguration;
1348 qdev->nvram_data.macCfg_port1.portConfiguration;
1356 ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
1357 PHYAddr[qdev->mac_index]);
1367 ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
1368 PHYAddr[qdev->mac_index]);
1371 ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
1372 PHYAddr[qdev->mac_index]);
1397 ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
1398 PHYAddr[qdev->mac_index]);
1400 ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);
1402 ql_mii_write_reg_ex(qdev, CONTROL_REG,
1404 PHYAddr[qdev->mac_index]);
1407 static void ql_phy_init_ex(struct ql3_adapter *qdev)
1409 ql_phy_reset_ex(qdev);
1410 PHY_Setup(qdev);
1411 ql_phy_start_neg_ex(qdev);
1417 static u32 ql_get_link_state(struct ql3_adapter *qdev)
1420 qdev->mem_map_registers;
1424 switch (qdev->mac_index) {
1433 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1442 static int ql_port_start(struct ql3_adapter *qdev)
1444 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1445 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1447 netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
1451 if (ql_is_fiber(qdev)) {
1452 ql_petbi_init(qdev);
1455 ql_phy_init_ex(qdev);
1458 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1462 static int ql_finish_auto_neg(struct ql3_adapter *qdev)
1465 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1466 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1470 if (!ql_auto_neg_error(qdev)) {
1471 if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
1473 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1475 ql_mac_cfg_soft_reset(qdev, 1);
1476 ql_mac_cfg_gig(qdev,
1478 (qdev) ==
1480 ql_mac_cfg_full_dup(qdev,
1482 (qdev));
1483 ql_mac_cfg_pause(qdev,
1485 (qdev));
1486 ql_mac_cfg_soft_reset(qdev, 0);
1489 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1491 ql_mac_enable(qdev, 1);
1494 qdev->port_link_state = LS_UP;
1495 netif_start_queue(qdev->ndev);
1496 netif_carrier_on(qdev->ndev);
1497 netif_info(qdev, link, qdev->ndev,
1499 ql_get_link_speed(qdev),
1500 ql_is_link_full_dup(qdev) ? "full" : "half");
1504 if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
1505 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1511 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1512 if (ql_port_start(qdev)) /* Restart port */
1517 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1523 struct ql3_adapter *qdev =
1529 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1531 curr_link_state = ql_get_link_state(qdev);
1533 if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
1534 netif_info(qdev, link, qdev->ndev,
1537 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1540 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
1545 switch (qdev->port_link_state) {
1547 if (test_bit(QL_LINK_MASTER, &qdev->flags))
1548 ql_port_start(qdev);
1549 qdev->port_link_state = LS_DOWN;
1554 netif_info(qdev, link, qdev->ndev, "Link is up\n");
1555 if (ql_is_auto_neg_complete(qdev))
1556 ql_finish_auto_neg(qdev);
1558 if (qdev->port_link_state == LS_UP)
1559 ql_link_down_detect_clear(qdev);
1561 qdev->port_link_state = LS_UP;
1571 netif_info(qdev, link, qdev->ndev, "Link is down\n");
1572 qdev->port_link_state = LS_DOWN;
1574 if (ql_link_down_detect(qdev))
1575 qdev->port_link_state = LS_DOWN;
1578 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1581 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
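
The link state machine above runs from a one-second timer (mod_timer(..., jiffies + HZ * 1)) and moves qdev->port_link_state between LS_DOWN and LS_UP from what ql_get_link_state() and the autoneg helpers report, bailing out early while a reset is active. A skeleton of that polled two-state machine; the real code also handles the startup case and the link-down-detect latch:

    #include <stdio.h>

    enum link_state { LS_DOWN, LS_UP };

    static enum link_state port_link_state = LS_DOWN;

    static void link_state_machine(int phy_says_up)
    {
        switch (port_link_state) {
        case LS_DOWN:
            if (phy_says_up) {
                printf("Link is up\n");   /* finish autoneg, start the queue */
                port_link_state = LS_UP;
            }
            break;
        case LS_UP:
            if (!phy_says_up) {
                printf("Link is down\n");
                port_link_state = LS_DOWN;
            }
            break;
        }
    }

    int main(void)
    {
        int samples[] = { 0, 1, 1, 0, 1 };  /* one poll per "second" */
        for (int i = 0; i < 5; i++)
            link_state_machine(samples[i]);
        return 0;
    }
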
1587 static void ql_get_phy_owner(struct ql3_adapter *qdev)
1589 if (ql_this_adapter_controls_port(qdev))
1590 set_bit(QL_LINK_MASTER, &qdev->flags);
1592 clear_bit(QL_LINK_MASTER, &qdev->flags);
1598 static void ql_init_scan_mode(struct ql3_adapter *qdev)
1600 ql_mii_enable_scan_mode(qdev);
1602 if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
1603 if (ql_this_adapter_controls_port(qdev))
1604 ql_petbi_init_ex(qdev);
1606 if (ql_this_adapter_controls_port(qdev))
1607 ql_phy_init_ex(qdev);
1617 static int ql_mii_setup(struct ql3_adapter *qdev)
1621 qdev->mem_map_registers;
1623 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1624 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1628 if (qdev->device_id == QL3032_DEVICE_ID)
1629 ql_write_page0_reg(qdev,
1635 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
1638 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1654 static u32 ql_supported_modes(struct ql3_adapter *qdev)
1656 if (test_bit(QL_LINK_OPTICAL, &qdev->flags))
1662 static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
1666 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1667 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1669 (qdev->mac_index) * 2) << 7)) {
1670 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1673 status = ql_is_auto_cfg(qdev);
1674 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1675 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1679 static u32 ql_get_speed(struct ql3_adapter *qdev)
1683 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1684 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1686 (qdev->mac_index) * 2) << 7)) {
1687 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1690 status = ql_get_link_speed(qdev);
1691 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1692 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1696 static int ql_get_full_dup(struct ql3_adapter *qdev)
1700 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1701 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1703 (qdev->mac_index) * 2) << 7)) {
1704 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1707 status = ql_is_link_full_dup(qdev);
1708 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1709 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1715 struct ql3_adapter *qdev = netdev_priv(ndev);
1718 ecmd->supported = ql_supported_modes(qdev);
1720 if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
1724 ecmd->phy_address = qdev->PHYAddr;
1726 ecmd->advertising = ql_supported_modes(qdev);
1727 ecmd->autoneg = ql_get_auto_cfg_status(qdev);
1728 ecmd->speed = ql_get_speed(qdev);
1729 ecmd->duplex = ql_get_full_dup(qdev);
1736 struct ql3_adapter *qdev = netdev_priv(ndev);
1740 strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
1747 struct ql3_adapter *qdev = netdev_priv(ndev);
1748 return qdev->msg_enable;
1753 struct ql3_adapter *qdev = netdev_priv(ndev);
1754 qdev->msg_enable = value;
1760 struct ql3_adapter *qdev = netdev_priv(ndev);
1762 qdev->mem_map_registers;
1765 if (qdev->mac_index == 0)
1766 reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
1768 reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);
1770 pause->autoneg = ql_get_auto_cfg_status(qdev);
1784 static int ql_populate_free_queue(struct ql3_adapter *qdev)
1786 struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
1793 netdev_alloc_skb(qdev->ndev,
1794 qdev->lrg_buffer_len);
1796 netdev_printk(KERN_DEBUG, qdev->ndev,
1805 map = pci_map_single(qdev->pdev,
1807 qdev->lrg_buffer_len -
1811 err = pci_dma_mapping_error(qdev->pdev, map);
1813 netdev_err(qdev->ndev,
1828 qdev->lrg_buffer_len -
1830 --qdev->lrg_buf_skb_check;
1831 if (!qdev->lrg_buf_skb_check)
1843 static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
1846 qdev->mem_map_registers;
1848 if (qdev->small_buf_release_cnt >= 16) {
1849 while (qdev->small_buf_release_cnt >= 16) {
1850 qdev->small_buf_q_producer_index++;
1852 if (qdev->small_buf_q_producer_index ==
1854 qdev->small_buf_q_producer_index = 0;
1855 qdev->small_buf_release_cnt -= 8;
1858 writel(qdev->small_buf_q_producer_index,
1866 static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
1872 qdev->mem_map_registers;
1874 if ((qdev->lrg_buf_free_count >= 8) &&
1875 (qdev->lrg_buf_release_cnt >= 16)) {
1877 if (qdev->lrg_buf_skb_check)
1878 if (!ql_populate_free_queue(qdev))
1881 lrg_buf_q_ele = qdev->lrg_buf_next_free;
1883 while ((qdev->lrg_buf_release_cnt >= 16) &&
1884 (qdev->lrg_buf_free_count >= 8)) {
1888 ql_get_from_lrg_buf_free_list(qdev);
1895 qdev->lrg_buf_release_cnt--;
1898 qdev->lrg_buf_q_producer_index++;
1900 if (qdev->lrg_buf_q_producer_index ==
1901 qdev->num_lbufq_entries)
1902 qdev->lrg_buf_q_producer_index = 0;
1904 if (qdev->lrg_buf_q_producer_index ==
1905 (qdev->num_lbufq_entries - 1)) {
1906 lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
1910 qdev->lrg_buf_next_free = lrg_buf_q_ele;
1911 writel(qdev->lrg_buf_q_producer_index,
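
ql_update_small_bufq_prod_index() and ql_update_lrg_bufq_prod_index() both advance a producer index in batches, wrap it by comparison rather than modulo (the large-buffer variant wraps at qdev->num_lbufq_entries and refills from the free list first), and publish the final value once with writel(). The core ring arithmetic:

    #include <stdio.h>

    /* Producer-index advance with wraparound, as in the bufq updaters.
     * The doorbell write is replaced by a printf. */

    #define NUM_ENTRIES 8              /* qdev->num_lbufq_entries stand-in */
    static unsigned producer_index;

    static void publish(unsigned idx)
    {
        printf("doorbell <- %u\n", idx);   /* writel(idx, ...ProducerIndex) */
    }

    static void produce(unsigned count)
    {
        while (count--) {
            producer_index++;
            if (producer_index == NUM_ENTRIES)  /* compare-and-wrap, no modulo */
                producer_index = 0;
        }
        publish(producer_index);       /* one doorbell after the batch */
    }

    int main(void)
    {
        produce(5);                    /* doorbell <- 5 */
        produce(5);                    /* doorbell <- 2 (wrapped past 8) */
        return 0;
    }
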
1916 static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
1924 netdev_warn(qdev->ndev,
1928 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
1932 netdev_err(qdev->ndev,
1935 qdev->ndev->stats.tx_errors++;
1941 netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
1944 qdev->ndev->stats.tx_errors++;
1949 pci_unmap_single(qdev->pdev,
1956 pci_unmap_page(qdev->pdev,
1963 qdev->ndev->stats.tx_packets++;
1964 qdev->ndev->stats.tx_bytes += tx_cb->skb->len;
1971 atomic_inc(&qdev->tx_count);
1974 static void ql_get_sbuf(struct ql3_adapter *qdev)
1976 if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
1977 qdev->small_buf_index = 0;
1978 qdev->small_buf_release_cnt++;
1981 static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
1984 lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
1985 qdev->lrg_buf_release_cnt++;
1986 if (++qdev->lrg_buf_index == qdev->num_large_buffers)
1987 qdev->lrg_buf_index = 0;
2003 static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
2014 ql_get_sbuf(qdev);
2016 if (qdev->device_id == QL3022_DEVICE_ID)
2017 lrg_buf_cb1 = ql_get_lbuf(qdev);
2020 lrg_buf_cb2 = ql_get_lbuf(qdev);
2023 qdev->ndev->stats.rx_packets++;
2024 qdev->ndev->stats.rx_bytes += length;
2027 pci_unmap_single(qdev->pdev,
2033 skb->protocol = eth_type_trans(skb, qdev->ndev);
2038 if (qdev->device_id == QL3022_DEVICE_ID)
2039 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
2040 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
2043 static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
2049 struct net_device *ndev = qdev->ndev;
2057 ql_get_sbuf(qdev);
2059 if (qdev->device_id == QL3022_DEVICE_ID) {
2061 lrg_buf_cb1 = ql_get_lbuf(qdev);
2069 lrg_buf_cb2 = ql_get_lbuf(qdev);
2073 pci_unmap_single(qdev->pdev,
2080 if (qdev->device_id == QL3022_DEVICE_ID) {
2103 skb2->protocol = eth_type_trans(skb2, qdev->ndev);
2110 if (qdev->device_id == QL3022_DEVICE_ID)
2111 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
2112 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
2115 static int ql_tx_rx_clean(struct ql3_adapter *qdev,
2119 struct net_device *ndev = qdev->ndev;
2123 while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
2124 qdev->rsp_consumer_index) && (work_done < work_to_do)) {
2126 net_rsp = qdev->rsp_current;
2132 if (qdev->device_id == QL3032_DEVICE_ID)
2138 ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
2145 ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
2152 ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
2170 qdev->rsp_consumer_index++;
2172 if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
2173 qdev->rsp_consumer_index = 0;
2174 qdev->rsp_current = qdev->rsp_q_virt_addr;
2176 qdev->rsp_current++;
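
ql_tx_rx_clean() is a shadow-index consumer loop: the chip DMA-writes its producer index into host memory (*qdev->prsp_producer_index), and the driver consumes response entries until rsp_consumer_index catches up or the NAPI budget runs out, resetting rsp_current to rsp_q_virt_addr when the index wraps at NUM_RSP_Q_ENTRIES. The control flow in isolation:

    #include <stdio.h>

    /* Consumer side of a shadow-index response ring, after
     * ql_tx_rx_clean().  The producer index would be DMA-updated by the
     * device in the real driver. */

    #define NUM_RSP_ENTRIES 4
    static int ring[NUM_RSP_ENTRIES];
    static unsigned shadow_producer;   /* *qdev->prsp_producer_index stand-in */
    static unsigned consumer;          /* qdev->rsp_consumer_index */

    static void clean(int budget)
    {
        while (shadow_producer != consumer && budget-- > 0) {
            printf("handled entry %d\n", ring[consumer]);
            consumer++;
            if (consumer == NUM_RSP_ENTRIES)   /* wrap; reset rsp_current */
                consumer = 0;
        }
    }

    int main(void)
    {
        for (int i = 0; i < NUM_RSP_ENTRIES; i++)
            ring[i] = 100 + i;
        shadow_producer = 3;           /* device says: three entries ready */
        clean(16);                     /* handles 100, 101, 102 */
        return 0;
    }
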
2187 struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
2191 qdev->mem_map_registers;
2193 ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);
2196 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
2198 ql_update_small_bufq_prod_index(qdev);
2199 ql_update_lrg_bufq_prod_index(qdev);
2200 writel(qdev->rsp_consumer_index,
2202 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
2204 ql_enable_interrupts(qdev);
2213 struct ql3_adapter *qdev = netdev_priv(ndev);
2215 qdev->mem_map_registers;
2220 value = ql_read_common_reg_l(qdev,
2224 spin_lock(&qdev->adapter_lock);
2225 netif_stop_queue(qdev->ndev);
2226 netif_carrier_off(qdev->ndev);
2227 ql_disable_interrupts(qdev);
2228 qdev->port_link_state = LS_DOWN;
2229 set_bit(QL_RESET_ACTIVE, &qdev->flags) ;
2236 ql_read_page0_reg_l(qdev,
2241 set_bit(QL_RESET_START, &qdev->flags) ;
2246 set_bit(QL_RESET_PER_SCSI, &qdev->flags) ;
2251 queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
2252 spin_unlock(&qdev->adapter_lock);
2254 ql_disable_interrupts(qdev);
2255 if (likely(napi_schedule_prep(&qdev->napi)))
2256 __napi_schedule(&qdev->napi);
2270 static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags)
2272 if (qdev->device_id == QL3022_DEVICE_ID)
2310 static int ql_send_map(struct ql3_adapter *qdev,
2328 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
2330 err = pci_dma_mapping_error(qdev->pdev, map);
2332 netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
2366 map = pci_map_single(qdev->pdev, oal,
2370 err = pci_dma_mapping_error(qdev->pdev, map);
2372 netdev_err(qdev->ndev,
2390 map = pci_map_page(qdev->pdev, frag->page,
2394 err = pci_dma_mapping_error(qdev->pdev, map);
2396 netdev_err(qdev->ndev,
2433 pci_unmap_single(qdev->pdev,
2441 pci_unmap_page(qdev->pdev,
2447 pci_unmap_single(qdev->pdev,
2470 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
2472 qdev->mem_map_registers;
2477 if (unlikely(atomic_read(&qdev->tx_count) < 2))
2480 tx_cb = &qdev->tx_buf[qdev->req_producer_index];
2481 tx_cb->seg_count = ql_get_seg_count(qdev,
2490 mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
2492 mac_iocb_ptr->flags |= qdev->mb_bit_mask;
2493 mac_iocb_ptr->transaction_id = qdev->req_producer_index;
2496 if (qdev->device_id == QL3032_DEVICE_ID &&
2500 if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
2506 qdev->req_producer_index++;
2507 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
2508 qdev->req_producer_index = 0;
2510 ql_write_common_reg_l(qdev,
2512 qdev->req_producer_index);
2514 netif_printk(qdev, tx_queued, KERN_DEBUG, ndev,
2516 qdev->req_producer_index, skb->len);
2518 atomic_dec(&qdev->tx_count);
2522 static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2524 qdev->req_q_size =
2527 qdev->req_q_virt_addr =
2528 pci_alloc_consistent(qdev->pdev,
2529 (size_t) qdev->req_q_size,
2530 &qdev->req_q_phy_addr);
2532 if ((qdev->req_q_virt_addr == NULL) ||
2533 LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
2534 netdev_err(qdev->ndev, "reqQ failed\n");
2538 qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
2540 qdev->rsp_q_virt_addr =
2541 pci_alloc_consistent(qdev->pdev,
2542 (size_t) qdev->rsp_q_size,
2543 &qdev->rsp_q_phy_addr);
2545 if ((qdev->rsp_q_virt_addr == NULL) ||
2546 LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
2547 netdev_err(qdev->ndev, "rspQ allocation failed\n");
2548 pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
2549 qdev->req_q_virt_addr,
2550 qdev->req_q_phy_addr);
2554 set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
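
Both queue allocations above are followed by the same sanity check: LS_64BITS(phy_addr) & (size - 1) is nonzero when the DMA address pci_alloc_consistent() returned is not aligned to the queue size, an idiom that is only valid when the size is a power of two. Reduced to its essence:

    #include <stdio.h>

    /* The (addr & (size - 1)) alignment test used in
     * ql_alloc_net_req_rsp_queues().  Power-of-two sizes only. */
    static int is_aligned(unsigned long long addr, unsigned long size)
    {
        return (addr & (size - 1)) == 0;
    }

    int main(void)
    {
        printf("%d\n", is_aligned(0x10000, 0x1000));  /* 1: 4 KiB aligned */
        printf("%d\n", is_aligned(0x10200, 0x1000));  /* 0: misaligned */
        return 0;
    }
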
2559 static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
2561 if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
2562 netdev_info(qdev->ndev, "Already done\n");
2566 pci_free_consistent(qdev->pdev,
2567 qdev->req_q_size,
2568 qdev->req_q_virt_addr, qdev->req_q_phy_addr);
2570 qdev->req_q_virt_addr = NULL;
2572 pci_free_consistent(qdev->pdev,
2573 qdev->rsp_q_size,
2574 qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
2576 qdev->rsp_q_virt_addr = NULL;
2578 clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
2581 static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2584 qdev->lrg_buf_q_size =
2585 qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
2586 if (qdev->lrg_buf_q_size < PAGE_SIZE)
2587 qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
2589 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
2591 qdev->lrg_buf =
2592 kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),
2594 if (qdev->lrg_buf == NULL) {
2595 netdev_err(qdev->ndev, "qdev->lrg_buf alloc failed\n");
2599 qdev->lrg_buf_q_alloc_virt_addr =
2600 pci_alloc_consistent(qdev->pdev,
2601 qdev->lrg_buf_q_alloc_size,
2602 &qdev->lrg_buf_q_alloc_phy_addr);
2604 if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
2605 netdev_err(qdev->ndev, "lBufQ failed\n");
2608 qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
2609 qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
2612 qdev->small_buf_q_size =
2614 if (qdev->small_buf_q_size < PAGE_SIZE)
2615 qdev->small_buf_q_alloc_size = PAGE_SIZE;
2617 qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
2619 qdev->small_buf_q_alloc_virt_addr =
2620 pci_alloc_consistent(qdev->pdev,
2621 qdev->small_buf_q_alloc_size,
2622 &qdev->small_buf_q_alloc_phy_addr);
2624 if (qdev->small_buf_q_alloc_virt_addr == NULL) {
2625 netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
2626 pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
2627 qdev->lrg_buf_q_alloc_virt_addr,
2628 qdev->lrg_buf_q_alloc_phy_addr);
2632 qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
2633 qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
2634 set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
2638 static void ql_free_buffer_queues(struct ql3_adapter *qdev)
2640 if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
2641 netdev_info(qdev->ndev, "Already done\n");
2644 kfree(qdev->lrg_buf);
2645 pci_free_consistent(qdev->pdev,
2646 qdev->lrg_buf_q_alloc_size,
2647 qdev->lrg_buf_q_alloc_virt_addr,
2648 qdev->lrg_buf_q_alloc_phy_addr);
2650 qdev->lrg_buf_q_virt_addr = NULL;
2652 pci_free_consistent(qdev->pdev,
2653 qdev->small_buf_q_alloc_size,
2654 qdev->small_buf_q_alloc_virt_addr,
2655 qdev->small_buf_q_alloc_phy_addr);
2657 qdev->small_buf_q_virt_addr = NULL;
2659 clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
2662 static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
2668 qdev->small_buf_total_size =
2672 qdev->small_buf_virt_addr =
2673 pci_alloc_consistent(qdev->pdev,
2674 qdev->small_buf_total_size,
2675 &qdev->small_buf_phy_addr);
2677 if (qdev->small_buf_virt_addr == NULL) {
2678 netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
2682 qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
2683 qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
2685 small_buf_q_entry = qdev->small_buf_q_virt_addr;
2690 cpu_to_le32(qdev->small_buf_phy_addr_high);
2692 cpu_to_le32(qdev->small_buf_phy_addr_low +
2696 qdev->small_buf_index = 0;
2697 set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags);
2701 static void ql_free_small_buffers(struct ql3_adapter *qdev)
2703 if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) {
2704 netdev_info(qdev->ndev, "Already done\n");
2707 if (qdev->small_buf_virt_addr != NULL) {
2708 pci_free_consistent(qdev->pdev,
2709 qdev->small_buf_total_size,
2710 qdev->small_buf_virt_addr,
2711 qdev->small_buf_phy_addr);
2713 qdev->small_buf_virt_addr = NULL;
2717 static void ql_free_large_buffers(struct ql3_adapter *qdev)
2722 for (i = 0; i < qdev->num_large_buffers; i++) {
2723 lrg_buf_cb = &qdev->lrg_buf[i];
2726 pci_unmap_single(qdev->pdev,
2737 static void ql_init_large_buffers(struct ql3_adapter *qdev)
2741 struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
2743 for (i = 0; i < qdev->num_large_buffers; i++) {
2744 lrg_buf_cb = &qdev->lrg_buf[i];
2749 qdev->lrg_buf_index = 0;
2750 qdev->lrg_buf_skb_check = 0;
2753 static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2761 for (i = 0; i < qdev->num_large_buffers; i++) {
2762 skb = netdev_alloc_skb(qdev->ndev,
2763 qdev->lrg_buffer_len);
2766 netdev_err(qdev->ndev,
2768 qdev->lrg_buffer_len * 2, i);
2769 ql_free_large_buffers(qdev);
2773 lrg_buf_cb = &qdev->lrg_buf[i];
2782 map = pci_map_single(qdev->pdev,
2784 qdev->lrg_buffer_len -
2788 err = pci_dma_mapping_error(qdev->pdev, map);
2790 netdev_err(qdev->ndev,
2793 ql_free_large_buffers(qdev);
2799 qdev->lrg_buffer_len -
2810 static void ql_free_send_free_list(struct ql3_adapter *qdev)
2815 tx_cb = &qdev->tx_buf[0];
2823 static int ql_create_send_free_list(struct ql3_adapter *qdev)
2827 struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr;
2832 tx_cb = &qdev->tx_buf[i];
2843 static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
2845 if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
2846 qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
2847 qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
2848 } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
2852 qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
2853 qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
2855 netdev_err(qdev->ndev, "Invalid mtu size: %d. Only %d and %d are accepted.\n",
2856 qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE);
2859 qdev->num_large_buffers =
2860 qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
2861 qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
2862 qdev->max_frame_size =
2863 (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
2870 qdev->shadow_reg_virt_addr =
2871 pci_alloc_consistent(qdev->pdev,
2872 PAGE_SIZE, &qdev->shadow_reg_phy_addr);
2874 if (qdev->shadow_reg_virt_addr != NULL) {
2875 qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
2876 qdev->req_consumer_index_phy_addr_high =
2877 MS_64BITS(qdev->shadow_reg_phy_addr);
2878 qdev->req_consumer_index_phy_addr_low =
2879 LS_64BITS(qdev->shadow_reg_phy_addr);
2881 qdev->prsp_producer_index =
2882 (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
2883 qdev->rsp_producer_index_phy_addr_high =
2884 qdev->req_consumer_index_phy_addr_high;
2885 qdev->rsp_producer_index_phy_addr_low =
2886 qdev->req_consumer_index_phy_addr_low + 8;
2888 netdev_err(qdev->ndev, "shadowReg Alloc failed\n");
2892 if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
2893 netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n");
2897 if (ql_alloc_buffer_queues(qdev) != 0) {
2898 netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n");
2902 if (ql_alloc_small_buffers(qdev) != 0) {
2903 netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n");
2907 if (ql_alloc_large_buffers(qdev) != 0) {
2908 netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n");
2913 ql_init_large_buffers(qdev);
2914 if (ql_create_send_free_list(qdev))
2917 qdev->rsp_current = qdev->rsp_q_virt_addr;
2921 ql_free_send_free_list(qdev);
2923 ql_free_buffer_queues(qdev);
2925 ql_free_net_req_rsp_queues(qdev);
2927 pci_free_consistent(qdev->pdev,
2929 qdev->shadow_reg_virt_addr,
2930 qdev->shadow_reg_phy_addr);
2935 static void ql_free_mem_resources(struct ql3_adapter *qdev)
2937 ql_free_send_free_list(qdev);
2938 ql_free_large_buffers(qdev);
2939 ql_free_small_buffers(qdev);
2940 ql_free_buffer_queues(qdev);
2941 ql_free_net_req_rsp_queues(qdev);
2942 if (qdev->shadow_reg_virt_addr != NULL) {
2943 pci_free_consistent(qdev->pdev,
2945 qdev->shadow_reg_virt_addr,
2946 qdev->shadow_reg_phy_addr);
2947 qdev->shadow_reg_virt_addr = NULL;
2951 static int ql_init_misc_registers(struct ql3_adapter *qdev)
2954 (void __iomem *)qdev->mem_map_registers;
2956 if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
2957 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
2961 ql_write_page2_reg(qdev,
2962 &local_ram->bufletSize, qdev->nvram_data.bufletSize);
2964 ql_write_page2_reg(qdev,
2966 qdev->nvram_data.bufletCount);
2968 ql_write_page2_reg(qdev,
2970 (qdev->nvram_data.tcpWindowThreshold25 << 16) |
2971 (qdev->nvram_data.tcpWindowThreshold0));
2973 ql_write_page2_reg(qdev,
2975 qdev->nvram_data.tcpWindowThreshold50);
2977 ql_write_page2_reg(qdev,
2979 (qdev->nvram_data.ipHashTableBaseHi << 16) |
2980 qdev->nvram_data.ipHashTableBaseLo);
2981 ql_write_page2_reg(qdev,
2983 qdev->nvram_data.ipHashTableSize);
2984 ql_write_page2_reg(qdev,
2986 (qdev->nvram_data.tcpHashTableBaseHi << 16) |
2987 qdev->nvram_data.tcpHashTableBaseLo);
2988 ql_write_page2_reg(qdev,
2990 qdev->nvram_data.tcpHashTableSize);
2991 ql_write_page2_reg(qdev,
2993 (qdev->nvram_data.ncbTableBaseHi << 16) |
2994 qdev->nvram_data.ncbTableBaseLo);
2995 ql_write_page2_reg(qdev,
2997 qdev->nvram_data.ncbTableSize);
2998 ql_write_page2_reg(qdev,
3000 (qdev->nvram_data.drbTableBaseHi << 16) |
3001 qdev->nvram_data.drbTableBaseLo);
3002 ql_write_page2_reg(qdev,
3004 qdev->nvram_data.drbTableSize);
3005 ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
3009 static int ql_adapter_initialize(struct ql3_adapter *qdev)
3013 qdev->mem_map_registers;
3021 if (ql_mii_setup(qdev))
3025 ql_write_common_reg(qdev, spir,
3030 qdev->port_link_state = LS_DOWN;
3031 netif_carrier_off(qdev->ndev);
3034 ql_write_common_reg(qdev, spir,
3039 *((u32 *)(qdev->preq_consumer_index)) = 0;
3040 atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
3041 qdev->req_producer_index = 0;
3043 ql_write_page1_reg(qdev,
3045 qdev->req_consumer_index_phy_addr_high);
3046 ql_write_page1_reg(qdev,
3048 qdev->req_consumer_index_phy_addr_low);
3050 ql_write_page1_reg(qdev,
3052 MS_64BITS(qdev->req_q_phy_addr));
3053 ql_write_page1_reg(qdev,
3055 LS_64BITS(qdev->req_q_phy_addr));
3056 ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
3059 *((__le16 *) (qdev->prsp_producer_index)) = 0;
3060 qdev->rsp_consumer_index = 0;
3061 qdev->rsp_current = qdev->rsp_q_virt_addr;
3063 ql_write_page1_reg(qdev,
3065 qdev->rsp_producer_index_phy_addr_high);
3067 ql_write_page1_reg(qdev,
3069 qdev->rsp_producer_index_phy_addr_low);
3071 ql_write_page1_reg(qdev,
3073 MS_64BITS(qdev->rsp_q_phy_addr));
3075 ql_write_page1_reg(qdev,
3077 LS_64BITS(qdev->rsp_q_phy_addr));
3079 ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
3082 ql_write_page1_reg(qdev,
3084 MS_64BITS(qdev->lrg_buf_q_phy_addr));
3086 ql_write_page1_reg(qdev,
3088 LS_64BITS(qdev->lrg_buf_q_phy_addr));
3090 ql_write_page1_reg(qdev,
3092 qdev->num_lbufq_entries);
3094 ql_write_page1_reg(qdev,
3096 qdev->lrg_buffer_len);
3099 ql_write_page1_reg(qdev,
3101 MS_64BITS(qdev->small_buf_q_phy_addr));
3103 ql_write_page1_reg(qdev,
3105 LS_64BITS(qdev->small_buf_q_phy_addr));
3107 ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
3108 ql_write_page1_reg(qdev,
3112 qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
3113 qdev->small_buf_release_cnt = 8;
3114 qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
3115 qdev->lrg_buf_release_cnt = 8;
3116 qdev->lrg_buf_next_free =
3117 (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
3118 qdev->small_buf_index = 0;
3119 qdev->lrg_buf_index = 0;
3120 qdev->lrg_buf_free_count = 0;
3121 qdev->lrg_buf_free_head = NULL;
3122 qdev->lrg_buf_free_tail = NULL;
3124 ql_write_common_reg(qdev,
3127 qdev->small_buf_q_producer_index);
3128 ql_write_common_reg(qdev,
3131 qdev->lrg_buf_q_producer_index);
3137 clear_bit(QL_LINK_MASTER, &qdev->flags);
3138 value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3142 if (ql_init_misc_registers(qdev)) {
3147 value = qdev->nvram_data.tcpMaxWindowSize;
3148 ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);
3150 value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
3152 if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
3153 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
3158 ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
3159 ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
3163 ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
3166 if (qdev->mac_index)
3167 ql_write_page0_reg(qdev,
3169 qdev->max_frame_size);
3171 ql_write_page0_reg(qdev,
3173 qdev->max_frame_size);
3175 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
3176 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
3182 PHY_Setup(qdev);
3183 ql_init_scan_mode(qdev);
3184 ql_get_phy_owner(qdev);
3189 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3191 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3192 ((qdev->ndev->dev_addr[2] << 24)
3193 | (qdev->ndev->dev_addr[3] << 16)
3194 | (qdev->ndev->dev_addr[4] << 8)
3195 | qdev->ndev->dev_addr[5]));
3198 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3200 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3201 ((qdev->ndev->dev_addr[0] << 8)
3202 | qdev->ndev->dev_addr[1]));
3205 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3210 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3212 (qdev->mac_index << 2)));
3213 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3215 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3217 ((qdev->mac_index << 2) + 1)));
3218 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3220 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
3223 ql_write_page0_reg(qdev,
3228 value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3231 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3233 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3237 netdev_err(qdev->ndev, "Hw Initialization timeout\n");
3243 if (qdev->device_id == QL3032_DEVICE_ID) {
3248 ql_write_page0_reg(qdev, &port_regs->functionControl,
3254 ql_write_page0_reg(qdev, &port_regs->portControl,
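
The matches at lines 3189-3202 show how ql_adapter_initialize() programs the station address through the indirect pointer/data register pair: bytes 2-5 of the MAC are packed into one 32-bit data write, bytes 0-1 into a second, lower-half write. Just the packing, on a made-up address:

    #include <stdio.h>

    /* Packing a 6-byte MAC for the two macAddrDataReg writes made in
     * ql_adapter_initialize(). */
    int main(void)
    {
        unsigned char mac[6] = { 0x00, 0x1b, 0x2f, 0xaa, 0xbb, 0xcc };

        unsigned lower = ((unsigned)mac[2] << 24) | ((unsigned)mac[3] << 16)
                       | ((unsigned)mac[4] << 8) | mac[5];
        unsigned upper = ((unsigned)mac[0] << 8) | mac[1];

        printf("write 1 (bytes 2-5): 0x%08x\n", lower);  /* 0x2faabbcc */
        printf("write 2 (bytes 0-1): 0x%04x\n", upper);  /* 0x001b */
        return 0;
    }
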
3266 static int ql_adapter_reset(struct ql3_adapter *qdev)
3269 qdev->mem_map_registers;
3274 set_bit(QL_RESET_ACTIVE, &qdev->flags);
3275 clear_bit(QL_RESET_DONE, &qdev->flags);
3280 netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n");
3281 ql_write_common_reg(qdev,
3286 netdev_printk(KERN_DEBUG, qdev->ndev,
3293 ql_read_common_reg(qdev,
3306 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
3308 netdev_printk(KERN_DEBUG, qdev->ndev,
3310 ql_write_common_reg(qdev,
3318 ql_write_common_reg(qdev,
3329 value = ql_read_common_reg(qdev,
3340 clear_bit(QL_RESET_ACTIVE, &qdev->flags);
3341 set_bit(QL_RESET_DONE, &qdev->flags);
3345 static void ql_set_mac_info(struct ql3_adapter *qdev)
3348 qdev->mem_map_registers;
3354 ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
3356 port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
3359 qdev->mac_index = 0;
3360 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3361 qdev->mb_bit_mask = FN0_MA_BITS_MASK;
3362 qdev->PHYAddr = PORT0_PHY_ADDRESS;
3364 set_bit(QL_LINK_OPTICAL, &qdev->flags);
3366 clear_bit(QL_LINK_OPTICAL, &qdev->flags);
3370 qdev->mac_index = 1;
3371 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3372 qdev->mb_bit_mask = FN1_MA_BITS_MASK;
3373 qdev->PHYAddr = PORT1_PHY_ADDRESS;
3375 set_bit(QL_LINK_OPTICAL, &qdev->flags);
3377 clear_bit(QL_LINK_OPTICAL, &qdev->flags);
3383 netdev_printk(KERN_DEBUG, qdev->ndev,
3388 qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
3393 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3394 struct pci_dev *pdev = qdev->pdev;
3398 DRV_NAME, qdev->index, qdev->chip_rev_id,
3399 qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022",
3400 qdev->pci_slot);
3402 test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");
3408 ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
3409 ((qdev->pci_x) ? "PCI-X" : "PCI"));
3412 qdev->mem_map_registers);
3415 netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr);
3418 static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
3420 struct net_device *ndev = qdev->ndev;
3426 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3427 clear_bit(QL_LINK_MASTER, &qdev->flags);
3429 ql_disable_interrupts(qdev);
3431 free_irq(qdev->pdev->irq, ndev);
3433 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3434 netdev_info(qdev->ndev, "calling pci_disable_msi()\n");
3435 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3436 pci_disable_msi(qdev->pdev);
3439 del_timer_sync(&qdev->adapter_timer);
3441 napi_disable(&qdev->napi);
3447 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3448 if (ql_wait_for_drvr_lock(qdev)) {
3449 soft_reset = ql_adapter_reset(qdev);
3452 qdev->index);
3461 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3463 ql_free_mem_resources(qdev);
3467 static int ql_adapter_up(struct ql3_adapter *qdev)
3469 struct net_device *ndev = qdev->ndev;
3474 if (ql_alloc_mem_resources(qdev)) {
3479 if (qdev->msi) {
3480 if (pci_enable_msi(qdev->pdev)) {
3483 qdev->msi = 0;
3486 set_bit(QL_MSI_ENABLED, &qdev->flags);
3491 err = request_irq(qdev->pdev->irq, ql3xxx_isr,
3496 qdev->pdev->irq);
3500 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3502 err = ql_wait_for_drvr_lock(qdev);
3504 err = ql_adapter_initialize(qdev);
3510 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3516 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3518 set_bit(QL_ADAPTER_UP, &qdev->flags);
3520 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
3522 napi_enable(&qdev->napi);
3523 ql_enable_interrupts(qdev);
3527 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3529 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3530 free_irq(qdev->pdev->irq, ndev);
3532 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3534 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3535 pci_disable_msi(qdev->pdev);
3540 static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
3542 if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
3543 netdev_err(qdev->ndev,
3546 dev_close(qdev->ndev);
3555 struct ql3_adapter *qdev = netdev_priv(ndev);
3561 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3564 ql_adapter_down(qdev, QL_DO_RESET);
3570 struct ql3_adapter *qdev = netdev_priv(ndev);
3571 return ql_adapter_up(qdev);
3576 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3578 qdev->mem_map_registers;
3590 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3592 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3594 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3600 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3602 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3604 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3611 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3622 queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
3627 struct ql3_adapter *qdev =
3629 struct net_device *ndev = qdev->ndev;
3634 qdev->mem_map_registers;
3637 if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) {
3638 clear_bit(QL_LINK_MASTER, &qdev->flags);
3645 tx_cb = &qdev->tx_buf[i];
3649 pci_unmap_single(qdev->pdev,
3655 pci_unmap_page(qdev->pdev,
3668 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3669 ql_write_common_reg(qdev,
3678 value = ql_read_common_reg(qdev,
3691 ql_write_common_reg(qdev,
3699 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3701 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3703 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3714 clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
3715 clear_bit(QL_RESET_START, &qdev->flags);
3716 ql_cycle_adapter(qdev, QL_DO_RESET);
3720 clear_bit(QL_RESET_ACTIVE, &qdev->flags);
3721 clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
3722 clear_bit(QL_RESET_START, &qdev->flags);
3723 ql_cycle_adapter(qdev, QL_NO_RESET);
3729 struct ql3_adapter *qdev =
3732 ql_cycle_adapter(qdev, QL_DO_RESET);
3735 static void ql_get_board_info(struct ql3_adapter *qdev)
3738 qdev->mem_map_registers;
3741 value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
3743 qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
3745 qdev->pci_width = 64;
3747 qdev->pci_width = 32;
3749 qdev->pci_x = 1;
3751 qdev->pci_x = 0;
3752 qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
3757 struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
3758 queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
3776 struct ql3_adapter *qdev = NULL;
3818 qdev = netdev_priv(ndev);
3819 qdev->index = cards_found;
3820 qdev->ndev = ndev;
3821 qdev->pdev = pdev;
3822 qdev->device_id = pci_entry->device;
3823 qdev->port_link_state = LS_DOWN;
3825 qdev->msi = 1;
3827 qdev->msg_enable = netif_msg_init(debug, default_msg);
3831 if (qdev->device_id == QL3032_DEVICE_ID)
3834 qdev->mem_map_registers = pci_ioremap_bar(pdev, 1);
3835 if (!qdev->mem_map_registers) {
3841 spin_lock_init(&qdev->adapter_lock);
3842 spin_lock_init(&qdev->hw_lock);
3849 netif_napi_add(ndev, &qdev->napi, ql_poll, 64);
3854 if (ql_get_nvram_params(qdev)) {
3856 __func__, qdev->index);
3861 ql_set_mac_info(qdev);
3864 if (qdev->mac_index) {
3865 ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ;
3866 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress);
3868 ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ;
3869 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress);
3876 ql_get_board_info(qdev);
3882 if (qdev->pci_x)
3896 qdev->workqueue = create_singlethread_workqueue(ndev->name);
3897 INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
3898 INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
3899 INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);
3901 init_timer(&qdev->adapter_timer);
3902 qdev->adapter_timer.function = ql3xxx_timer;
3903 qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
3904 qdev->adapter_timer.data = (unsigned long)qdev;
3917 iounmap(qdev->mem_map_registers);
3932 struct ql3_adapter *qdev = netdev_priv(ndev);
3936 ql_disable_interrupts(qdev);
3938 if (qdev->workqueue) {
3939 cancel_delayed_work(&qdev->reset_work);
3940 cancel_delayed_work(&qdev->tx_timeout_work);
3941 destroy_workqueue(qdev->workqueue);
3942 qdev->workqueue = NULL;
3945 iounmap(qdev->mem_map_registers);