Results restricted to /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/net/qlge/

Lines Matching refs:qdev

101 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
131 netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
135 ql_write32(qdev, SEM, sem_bits | sem_mask);
136 return !(ql_read32(qdev, SEM) & sem_bits);
139 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
143 if (!ql_sem_trylock(qdev, sem_mask))
150 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
152 ql_write32(qdev, SEM, sem_mask);
153 ql_read32(qdev, SEM); /* flush */
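
The three semaphore helpers above (ql_sem_trylock, ql_sem_spinlock, ql_sem_unlock) are used throughout the driver as an acquire/release pair around register access, as the SEM_MAC_ADDR_MASK, SEM_FLASH_MASK and SEM_RT_IDX_MASK callers further down show. A minimal sketch of that pattern, with the mask choice and the protected work purely illustrative:

    /* Illustrative only: mirrors the ql_sem_spinlock()/ql_sem_unlock()
     * bracketing visible around ql_set_mac_addr_reg() in this listing. */
    static int example_under_semaphore(struct ql_adapter *qdev)
    {
            int status;

            status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
            if (status)
                    return status;          /* semaphore not acquired */

            /* ... touch the registers protected by this semaphore ... */

            ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);  /* read-back flushes the release */
            return 0;
    }
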
161 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
167 temp = ql_read32(qdev, reg);
171 netif_alert(qdev, probe, qdev->ndev,
180 netif_alert(qdev, probe, qdev->ndev,
188 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
194 temp = ql_read32(qdev, CFG);
209 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
222 map = pci_map_single(qdev->pdev, ptr, size, direction);
223 if (pci_dma_mapping_error(qdev->pdev, map)) {
224 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
228 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
232 status = ql_wait_cfg(qdev, bit);
234 netif_err(qdev, ifup, qdev->ndev,
239 ql_write32(qdev, ICB_L, (u32) map);
240 ql_write32(qdev, ICB_H, (u32) (map >> 32));
244 ql_write32(qdev, CFG, (mask | value));
249 status = ql_wait_cfg(qdev, bit);
251 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
252 pci_unmap_single(qdev->pdev, map, size, direction);
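
ql_write_cfg() above is the generic path for handing a DMA-mapped control block (CQICB, WQICB, RICB) to the chip: map the block, take the ICB semaphore, wait on CFG, write the address, wait again, then unlock and unmap. A hedged sketch of a caller, modelled on the ql_start_tx_ring() match further down; the trailing wq_id argument is an assumption, since the matched line truncates the parameter list:

    /* CFG_LRQ and the error string appear in the ql_start_tx_ring()
     * matches; the final wq_id argument is assumed, not shown. */
    err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
                       tx_ring->wq_id);
    if (err) {
            netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
            return err;
    }
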
257 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
268 ql_wait_reg_rdy(qdev,
272 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
276 ql_wait_reg_rdy(qdev,
280 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
282 ql_wait_reg_rdy(qdev,
286 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
290 ql_wait_reg_rdy(qdev,
294 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
297 ql_wait_reg_rdy(qdev,
301 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
305 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
309 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
316 netif_crit(qdev, ifup, qdev->ndev,
327 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
341 ql_wait_reg_rdy(qdev,
345 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
348 ql_write32(qdev, MAC_ADDR_DATA, lower);
350 ql_wait_reg_rdy(qdev,
354 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
358 ql_write32(qdev, MAC_ADDR_DATA, upper);
360 ql_wait_reg_rdy(qdev,
374 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
381 ql_wait_reg_rdy(qdev,
385 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
388 ql_write32(qdev, MAC_ADDR_DATA, lower);
390 ql_wait_reg_rdy(qdev,
394 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
397 ql_write32(qdev, MAC_ADDR_DATA, upper);
399 ql_wait_reg_rdy(qdev,
403 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
411 (qdev->
414 if (qdev->vlgrp)
417 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
428 netif_info(qdev, ifup, qdev->ndev,
435 ql_wait_reg_rdy(qdev,
439 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
447 netif_crit(qdev, ifup, qdev->ndev,
459 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
466 addr = &qdev->current_mac_addr[0];
467 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
472 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
475 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
478 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
479 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
480 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
482 netif_err(qdev, ifup, qdev->ndev,
487 void ql_link_on(struct ql_adapter *qdev)
489 netif_err(qdev, link, qdev->ndev, "Link is up.\n");
490 netif_carrier_on(qdev->ndev);
491 ql_set_mac_addr(qdev, 1);
494 void ql_link_off(struct ql_adapter *qdev)
496 netif_err(qdev, link, qdev->ndev, "Link is down.\n");
497 netif_carrier_off(qdev->ndev);
498 ql_set_mac_addr(qdev, 0);
504 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
508 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
512 ql_write32(qdev, RT_IDX,
514 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
517 *value = ql_read32(qdev, RT_DATA);
527 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
533 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
629 netif_err(qdev, ifup, qdev->ndev,
636 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
640 ql_write32(qdev, RT_IDX, value);
641 ql_write32(qdev, RT_DATA, enable ? mask : 0);
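
ql_get_routing_reg()/ql_set_routing_reg() above must be called with the routing-index semaphore held, as the ql_clear_routing_entries() and ql_route_initialize() matches below show. A compressed sketch of one routing-slot update:

    /* Pattern taken from ql_route_initialize() further down; the error
     * string is illustrative, the original text is not among the matches. */
    status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
    if (status)
            return status;
    status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
    if (status)
            netif_err(qdev, ifup, qdev->ndev,
                      "Failed to set broadcast routing slot.\n");
    ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
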
647 static void ql_enable_interrupts(struct ql_adapter *qdev)
649 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
652 static void ql_disable_interrupts(struct ql_adapter *qdev)
654 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
663 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
667 struct intr_context *ctx = qdev->intr_context + intr;
669 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
673 ql_write32(qdev, INTR_EN,
675 var = ql_read32(qdev, STS);
679 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
681 ql_write32(qdev, INTR_EN,
683 var = ql_read32(qdev, STS);
685 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
689 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
697 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
700 ctx = qdev->intr_context + intr;
701 spin_lock(&qdev->hw_lock);
703 ql_write32(qdev, INTR_EN,
705 var = ql_read32(qdev, STS);
708 spin_unlock(&qdev->hw_lock);
712 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
715 for (i = 0; i < qdev->intr_count; i++) {
720 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
722 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
723 ql_enable_completion_interrupt(qdev, i);
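
The per-vector helpers above are used as a disable/re-enable bracket around servicing a completion queue, as the interrupt-handler and ql_napi_poll_msix matches later in this listing show. Sketch (the servicing step itself is elided):

    /* Bracket pattern from the ISR/NAPI poll matches below. */
    var = ql_disable_completion_interrupt(qdev, intr_context->intr);
    /* ... drain the completion queue owned by this vector ... */
    ql_enable_completion_interrupt(qdev, intr_context->intr);
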
728 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
732 __le16 *flash = (__le16 *)&qdev->flash;
734 status = strncmp((char *)&qdev->flash, str, 4);
736 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
744 netif_err(qdev, ifup, qdev->ndev,
750 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
754 status = ql_wait_reg_rdy(qdev,
759 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
761 status = ql_wait_reg_rdy(qdev,
769 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
774 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
778 __le32 *p = (__le32 *)&qdev->flash;
785 if (!qdev->port)
790 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
795 status = ql_read_flash_word(qdev, i+offset, p);
797 netif_err(qdev, ifup, qdev->ndev,
803 status = ql_validate_flash(qdev,
807 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
815 if (qdev->flash.flash_params_8000.data_type1 == 2)
817 qdev->flash.flash_params_8000.mac_addr1,
818 qdev->ndev->addr_len);
821 qdev->flash.flash_params_8000.mac_addr,
822 qdev->ndev->addr_len);
825 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
830 memcpy(qdev->ndev->dev_addr,
832 qdev->ndev->addr_len);
835 ql_sem_unlock(qdev, SEM_FLASH_MASK);
839 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
843 __le32 *p = (__le32 *)&qdev->flash;
850 if (qdev->port)
853 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
857 status = ql_read_flash_word(qdev, i+offset, p);
859 netif_err(qdev, ifup, qdev->ndev,
866 status = ql_validate_flash(qdev,
870 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
875 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
880 memcpy(qdev->ndev->dev_addr,
881 qdev->flash.flash_params_8012.mac_addr,
882 qdev->ndev->addr_len);
885 ql_sem_unlock(qdev, SEM_FLASH_MASK);
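
Both flash readers above follow the same sequence: take SEM_FLASH_MASK, pull the flash image in one 32-bit word at a time with ql_read_flash_word(), validate it, copy the MAC address out, and release the semaphore. A hedged sketch of the read loop; the word count, offset and error code are placeholders, since the loop bounds are not among the matched lines:

    __le32 *p = (__le32 *)&qdev->flash;
    int i, status = 0;

    if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
            return -ETIMEDOUT;                    /* assumed error code */
    for (i = 0; i < nwords; i++, p++) {           /* nwords/offset: placeholders */
            status = ql_read_flash_word(qdev, i + offset, p);
            if (status)
                    break;
    }
    ql_sem_unlock(qdev, SEM_FLASH_MASK);
    return status;
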
893 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
897 status = ql_wait_reg_rdy(qdev,
902 ql_write32(qdev, XGMAC_DATA, data);
904 ql_write32(qdev, XGMAC_ADDR, reg);
912 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
916 status = ql_wait_reg_rdy(qdev,
921 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
923 status = ql_wait_reg_rdy(qdev,
928 *data = ql_read32(qdev, XGMAC_DATA);
934 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
940 status = ql_read_xgmac_reg(qdev, reg, &lo);
944 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
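
ql_read_xgmac_reg()/ql_write_xgmac_reg() above give indirect access to the MAC block, and the 8012 port-initialize matches below use them in read-modify-write sequences on GLOBAL_CFG, TX_CFG and RX_CFG. Sketch of one such update; the bit being set is a hypothetical name:

    static int example_xgmac_rmw(struct ql_adapter *qdev)
    {
            u32 data;
            int status;

            status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
            if (status)
                    return status;
            data |= EXAMPLE_GLOBAL_CFG_BIT;       /* hypothetical bit name */
            return ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
    }
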
954 static int ql_8000_port_initialize(struct ql_adapter *qdev)
961 status = ql_mb_about_fw(qdev);
964 status = ql_mb_get_fw_state(qdev);
968 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
979 static int ql_8012_port_initialize(struct ql_adapter *qdev)
984 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
988 netif_info(qdev, link, qdev->ndev,
990 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
992 netif_crit(qdev, link, qdev->ndev,
998 netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
1000 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
1004 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1013 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1018 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
1023 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
1028 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1033 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1039 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1043 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1048 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1050 ql_sem_unlock(qdev, qdev->xg_sem_mask);
1054 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1056 return PAGE_SIZE << qdev->lbq_buf_order;
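
(Worked example: with 4 KiB pages and qdev->lbq_buf_order == 1, ql_lbq_block_size() returns 8 KiB, i.e. the size of the page chunk that ql_get_next_chunk()/ql_update_lbq() below map and slice into large-buffer entries.)
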
1070 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1075 pci_dma_sync_single_for_cpu(qdev->pdev,
1084 == ql_lbq_block_size(qdev))
1085 pci_unmap_page(qdev->pdev,
1087 ql_lbq_block_size(qdev),
1119 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1126 qdev->lbq_buf_order);
1128 netif_err(qdev, drv, qdev->ndev,
1133 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1134 0, ql_lbq_block_size(qdev),
1136 if (pci_dma_mapping_error(qdev->pdev, map)) {
1138 qdev->lbq_buf_order);
1139 netif_err(qdev, drv, qdev->ndev,
1156 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1167 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1177 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1181 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1182 netif_err(qdev, ifup, qdev->ndev,
1194 pci_dma_sync_single_for_device(qdev->pdev, map,
1210 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1219 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1230 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1234 netif_printk(qdev, rx_status, KERN_DEBUG,
1235 qdev->ndev,
1239 netdev_alloc_skb(qdev->ndev,
1242 netif_err(qdev, probe, qdev->ndev,
1248 map = pci_map_single(qdev->pdev,
1252 if (pci_dma_mapping_error(qdev->pdev, map)) {
1253 netif_err(qdev, ifup, qdev->ndev,
1278 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1286 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1289 ql_update_sbq(qdev, rx_ring);
1290 ql_update_lbq(qdev, rx_ring);
1296 static void ql_unmap_send(struct ql_adapter *qdev,
1312 netif_printk(qdev, tx_done, KERN_DEBUG,
1313 qdev->ndev,
1316 pci_unmap_single(qdev->pdev,
1323 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1325 pci_unmap_page(qdev->pdev,
1338 static int ql_map_send(struct ql_adapter *qdev,
1349 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1355 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1357 err = pci_dma_mapping_error(qdev->pdev, map);
1359 netif_err(qdev, tx_queued, qdev->ndev,
1401 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1404 err = pci_dma_mapping_error(qdev->pdev, map);
1406 netif_err(qdev, tx_queued, qdev->ndev,
1430 pci_map_page(qdev->pdev, frag->page,
1434 err = pci_dma_mapping_error(qdev->pdev, map);
1436 netif_err(qdev, tx_queued, qdev->ndev,
1462 ql_unmap_send(qdev, tx_ring_desc, map_idx);
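
ql_map_send() and ql_unmap_send() above are a matched pair: every pci_map_single()/pci_map_page() done while building a TX descriptor must be undone on completion, and on a mapping failure everything mapped so far is rolled back. Minimal sketch of the map-and-check step assembled from the visible fragments; the message text and return value are assumptions:

    map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
    if (pci_dma_mapping_error(qdev->pdev, map)) {
            netif_err(qdev, tx_queued, qdev->ndev,
                      "PCI mapping failed.\n");          /* text assumed */
            ql_unmap_send(qdev, tx_ring_desc, map_idx);   /* roll back prior maps */
            return NETDEV_TX_BUSY;                        /* assumed return value */
    }
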
1467 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1474 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1479 napi->dev = qdev->ndev;
1483 netif_err(qdev, drv, qdev->ndev,
1506 if (qdev->vlgrp && (vlan_id != 0xffff))
1507 vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
1513 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1519 struct net_device *ndev = qdev->ndev;
1522 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1527 netif_err(qdev, drv, qdev->ndev,
1540 netif_info(qdev, drv, qdev->ndev,
1550 netif_err(qdev, drv, qdev->ndev,
1556 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1571 if (qdev->rx_csum &&
1575 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1585 netif_printk(qdev, rx_status, KERN_DEBUG,
1586 qdev->ndev,
1594 if (qdev->vlgrp && (vlan_id != 0xffff))
1595 vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
1599 if (qdev->vlgrp && (vlan_id != 0xffff))
1600 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1611 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1617 struct net_device *ndev = qdev->ndev;
1624 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1626 netif_err(qdev, probe, qdev->ndev,
1637 netif_info(qdev, drv, qdev->ndev,
1645 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1646 ql_check_lb_frame(qdev, skb);
1663 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1673 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1684 if (qdev->rx_csum &&
1688 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1698 netif_printk(qdev, rx_status, KERN_DEBUG,
1699 qdev->ndev,
1707 if (qdev->vlgrp && (vlan_id != 0xffff))
1708 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1713 if (qdev->vlgrp && (vlan_id != 0xffff))
1714 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1739 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1754 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1760 pci_unmap_single(qdev->pdev,
1774 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1781 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1792 pci_dma_sync_single_for_cpu(qdev->pdev,
1800 pci_dma_sync_single_for_device(qdev->pdev,
1809 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1816 pci_unmap_single(qdev->pdev,
1826 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1834 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1835 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1850 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1851 skb = netdev_alloc_skb(qdev->ndev, length);
1853 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1857 pci_unmap_page(qdev->pdev,
1863 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1892 pci_unmap_single(qdev->pdev,
1906 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1914 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1918 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1938 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1943 struct net_device *ndev = qdev->ndev;
1948 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1950 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1958 netif_info(qdev, drv, qdev->ndev,
1975 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1976 ql_check_lb_frame(qdev, skb);
1984 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1994 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2004 if (qdev->rx_csum &&
2008 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2018 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2028 if (qdev->vlgrp &&
2031 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
2036 if (qdev->vlgrp &&
2039 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
2046 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2061 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2068 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2076 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2082 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2088 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2096 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2103 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2105 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2116 netif_warn(qdev, tx_done, qdev->ndev,
2120 netif_warn(qdev, tx_done, qdev->ndev,
2124 netif_warn(qdev, tx_done, qdev->ndev,
2128 netif_warn(qdev, tx_done, qdev->ndev,
2136 void ql_queue_fw_error(struct ql_adapter *qdev)
2138 ql_link_off(qdev);
2139 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2142 void ql_queue_asic_error(struct ql_adapter *qdev)
2144 ql_link_off(qdev);
2145 ql_disable_interrupts(qdev);
2150 clear_bit(QL_ADAPTER_UP, &qdev->flags);
2151 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2154 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2159 netif_err(qdev, rx_err, qdev->ndev,
2161 ql_queue_fw_error(qdev);
2165 netif_err(qdev, link, qdev->ndev,
2167 netif_err(qdev, drv, qdev->ndev,
2169 ql_queue_asic_error(qdev);
2173 netif_err(qdev, rx_err, qdev->ndev,
2175 ql_queue_asic_error(qdev);
2179 netif_err(qdev, rx_err, qdev->ndev,
2182 ql_queue_asic_error(qdev);
2186 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2188 ql_queue_asic_error(qdev);
2195 struct ql_adapter *qdev = rx_ring->qdev;
2204 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2214 ql_process_mac_tx_intr(qdev, net_rsp);
2217 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2226 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2227 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) &&
2235 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2243 struct ql_adapter *qdev = rx_ring->qdev;
2251 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2259 ql_process_mac_rx_intr(qdev, rx_ring,
2265 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2269 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2280 ql_update_buffer_queues(qdev, rx_ring);
2288 struct ql_adapter *qdev = rx_ring->qdev;
2291 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2293 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2298 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2299 trx_ring = &qdev->rx_ring[i];
2306 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2318 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2326 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2333 struct ql_adapter *qdev = netdev_priv(ndev);
2335 qdev->vlgrp = grp;
2337 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2339 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2342 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2344 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2350 struct ql_adapter *qdev = netdev_priv(ndev);
2354 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2358 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2359 netif_err(qdev, ifup, qdev->ndev,
2362 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2367 struct ql_adapter *qdev = netdev_priv(ndev);
2371 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2376 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2377 netif_err(qdev, ifup, qdev->ndev,
2380 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2400 struct ql_adapter *qdev = rx_ring->qdev;
2401 struct intr_context *intr_context = &qdev->intr_context[0];
2405 spin_lock(&qdev->hw_lock);
2406 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2407 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2409 spin_unlock(&qdev->hw_lock);
2412 spin_unlock(&qdev->hw_lock);
2414 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2420 ql_queue_asic_error(qdev);
2421 netif_err(qdev, intr, qdev->ndev,
2423 var = ql_read32(qdev, ERR_STS);
2424 netif_err(qdev, intr, qdev->ndev,
2433 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2438 netif_err(qdev, intr, qdev->ndev,
2440 ql_disable_completion_interrupt(qdev, intr_context->intr);
2441 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2443 qdev->workqueue, &qdev->mpi_work, 0);
2452 var = ql_read32(qdev, ISR1);
2454 netif_info(qdev, intr, qdev->ndev,
2456 ql_disable_completion_interrupt(qdev, intr_context->intr);
2460 ql_enable_completion_interrupt(qdev, intr_context->intr);
2541 struct ql_adapter *qdev = netdev_priv(ndev);
2546 tx_ring = &qdev->tx_ring[tx_ring_idx];
2552 netif_info(qdev, tx_queued, qdev->ndev,
2574 if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
2575 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2588 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2590 netif_err(qdev, tx_queued, qdev->ndev,
2602 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2611 static void ql_free_shadow_space(struct ql_adapter *qdev)
2613 if (qdev->rx_ring_shadow_reg_area) {
2614 pci_free_consistent(qdev->pdev,
2616 qdev->rx_ring_shadow_reg_area,
2617 qdev->rx_ring_shadow_reg_dma);
2618 qdev->rx_ring_shadow_reg_area = NULL;
2620 if (qdev->tx_ring_shadow_reg_area) {
2621 pci_free_consistent(qdev->pdev,
2623 qdev->tx_ring_shadow_reg_area,
2624 qdev->tx_ring_shadow_reg_dma);
2625 qdev->tx_ring_shadow_reg_area = NULL;
2629 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2631 qdev->rx_ring_shadow_reg_area =
2632 pci_alloc_consistent(qdev->pdev,
2633 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2634 if (qdev->rx_ring_shadow_reg_area == NULL) {
2635 netif_err(qdev, ifup, qdev->ndev,
2639 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2640 qdev->tx_ring_shadow_reg_area =
2641 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2642 &qdev->tx_ring_shadow_reg_dma);
2643 if (qdev->tx_ring_shadow_reg_area == NULL) {
2644 netif_err(qdev, ifup, qdev->ndev,
2648 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2652 pci_free_consistent(qdev->pdev,
2654 qdev->rx_ring_shadow_reg_area,
2655 qdev->rx_ring_shadow_reg_dma);
2659 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2678 static void ql_free_tx_resources(struct ql_adapter *qdev,
2682 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2690 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2694 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2699 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2709 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2714 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2726 pci_unmap_page(qdev->pdev,
2728 ql_lbq_block_size(qdev),
2742 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2750 netif_err(qdev, ifup, qdev->ndev,
2755 pci_unmap_single(qdev->pdev,
2768 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2773 for (i = 0; i < qdev->rx_ring_count; i++) {
2774 rx_ring = &qdev->rx_ring[i];
2776 ql_free_lbq_buffers(qdev, rx_ring);
2778 ql_free_sbq_buffers(qdev, rx_ring);
2782 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2787 for (i = 0; i < qdev->rx_ring_count; i++) {
2788 rx_ring = &qdev->rx_ring[i];
2790 ql_update_buffer_queues(qdev, rx_ring);
2794 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2811 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2828 static void ql_free_rx_resources(struct ql_adapter *qdev,
2833 pci_free_consistent(qdev->pdev,
2845 pci_free_consistent(qdev->pdev,
2857 pci_free_consistent(qdev->pdev,
2866 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2874 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2878 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2887 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2891 netif_err(qdev, ifup, qdev->ndev,
2903 netif_err(qdev, ifup, qdev->ndev,
2908 ql_init_sbq_ring(qdev, rx_ring);
2916 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2920 netif_err(qdev, ifup, qdev->ndev,
2931 netif_err(qdev, ifup, qdev->ndev,
2936 ql_init_lbq_ring(qdev, rx_ring);
2942 ql_free_rx_resources(qdev, rx_ring);
2946 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2956 for (j = 0; j < qdev->tx_ring_count; j++) {
2957 tx_ring = &qdev->tx_ring[j];
2961 netif_err(qdev, ifdown, qdev->ndev,
2965 ql_unmap_send(qdev, tx_ring_desc,
2974 static void ql_free_mem_resources(struct ql_adapter *qdev)
2978 for (i = 0; i < qdev->tx_ring_count; i++)
2979 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2980 for (i = 0; i < qdev->rx_ring_count; i++)
2981 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2982 ql_free_shadow_space(qdev);
2985 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2990 if (ql_alloc_shadow_space(qdev))
2993 for (i = 0; i < qdev->rx_ring_count; i++) {
2994 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2995 netif_err(qdev, ifup, qdev->ndev,
3001 for (i = 0; i < qdev->tx_ring_count; i++) {
3002 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3003 netif_err(qdev, ifup, qdev->ndev,
3011 ql_free_mem_resources(qdev);
3019 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3022 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3024 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3027 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3126 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3127 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3133 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3135 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3136 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3139 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3142 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3144 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3147 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3153 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3157 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3158 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3160 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3188 ql_init_tx_ring(qdev, tx_ring);
3190 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3193 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3196 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3201 static void ql_disable_msix(struct ql_adapter *qdev)
3203 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3204 pci_disable_msix(qdev->pdev);
3205 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3206 kfree(qdev->msi_x_entry);
3207 qdev->msi_x_entry = NULL;
3208 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3209 pci_disable_msi(qdev->pdev);
3210 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3215 * stored in qdev->intr_count. If we don't get that
3218 static void ql_enable_msix(struct ql_adapter *qdev)
3227 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3230 if (!qdev->msi_x_entry) {
3235 for (i = 0; i < qdev->intr_count; i++)
3236 qdev->msi_x_entry[i].entry = i;
3242 err = pci_enable_msix(qdev->pdev,
3243 qdev->msi_x_entry, qdev->intr_count);
3245 qdev->intr_count = err;
3249 kfree(qdev->msi_x_entry);
3250 qdev->msi_x_entry = NULL;
3251 netif_warn(qdev, ifup, qdev->ndev,
3253 qdev->intr_count = 1;
3256 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3257 netif_info(qdev, ifup, qdev->ndev,
3259 qdev->intr_count);
3264 qdev->intr_count = 1;
3266 if (!pci_enable_msi(qdev->pdev)) {
3267 set_bit(QL_MSI_ENABLED, &qdev->flags);
3268 netif_info(qdev, ifup, qdev->ndev,
3274 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
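
ql_enable_msix() above tries MSI-X first and falls back to single-vector MSI, and finally to legacy interrupts. A condensed sketch of that fallback; note the real code also retries pci_enable_msix() with the reduced vector count it returns on partial success, which is omitted here:

    /* Condensed from the ql_enable_msix() matches above. */
    err = pci_enable_msix(qdev->pdev, qdev->msi_x_entry, qdev->intr_count);
    if (err) {
            kfree(qdev->msi_x_entry);
            qdev->msi_x_entry = NULL;
            qdev->intr_count = 1;
            if (!pci_enable_msi(qdev->pdev))
                    set_bit(QL_MSI_ENABLED, &qdev->flags);
    } else {
            set_bit(QL_MSIX_ENABLED, &qdev->flags);
    }
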
3287 static void ql_set_tx_vect(struct ql_adapter *qdev)
3290 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3292 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3294 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3295 i < qdev->rx_ring_count; i++) {
3300 qdev->rx_ring[i].irq = vect;
3307 for (i = 0; i < qdev->rx_ring_count; i++)
3308 qdev->rx_ring[i].irq = 0;
3317 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3320 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3322 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3326 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3331 (1 << qdev->rx_ring[qdev->rss_ring_count +
3338 for (j = 0; j < qdev->rx_ring_count; j++)
3339 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3349 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3352 struct intr_context *intr_context = &qdev->intr_context[0];
3354 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3359 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3360 qdev->rx_ring[i].irq = i;
3362 intr_context->qdev = qdev;
3366 ql_set_irq_mask(qdev, intr_context);
3391 qdev->ndev->name, i);
3398 qdev->ndev->name, i);
3407 intr_context->qdev = qdev;
3423 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3429 ql_set_irq_mask(qdev, intr_context);
3434 ql_set_tx_vect(qdev);
3437 static void ql_free_irq(struct ql_adapter *qdev)
3440 struct intr_context *intr_context = &qdev->intr_context[0];
3442 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3444 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3445 free_irq(qdev->msi_x_entry[i].vector,
3446 &qdev->rx_ring[i]);
3447 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3450 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3451 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3456 ql_disable_msix(qdev);
3459 static int ql_request_irq(struct ql_adapter *qdev)
3463 struct pci_dev *pdev = qdev->pdev;
3464 struct intr_context *intr_context = &qdev->intr_context[0];
3466 ql_resolve_queues_to_irqs(qdev);
3468 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3470 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3471 status = request_irq(qdev->msi_x_entry[i].vector,
3475 &qdev->rx_ring[i]);
3477 netif_err(qdev, ifup, qdev->ndev,
3482 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3485 qdev->rx_ring[i].type == DEFAULT_Q ?
3487 qdev->rx_ring[i].type == TX_Q ?
3489 qdev->rx_ring[i].type == RX_Q ?
3494 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3496 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3498 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3501 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3503 &qdev->rx_ring[0]);
3507 &qdev->
3509 intr_context->name, &qdev->rx_ring[0]);
3513 netif_err(qdev, ifup, qdev->ndev,
3516 qdev->rx_ring[0].type == DEFAULT_Q ?
3518 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3519 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3526 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!/n");
3527 ql_free_irq(qdev);
3531 static int ql_start_rss(struct ql_adapter *qdev)
3539 struct ricb *ricb = &qdev->ricb;
3555 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3560 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
3562 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3564 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3567 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3572 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3576 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3581 status = ql_set_routing_reg(qdev, i, 0, 0);
3583 netif_err(qdev, ifup, qdev->ndev,
3588 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3593 static int ql_route_initialize(struct ql_adapter *qdev)
3598 status = ql_clear_routing_entries(qdev);
3602 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3606 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3609 netif_err(qdev, ifup, qdev->ndev,
3614 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3617 netif_err(qdev, ifup, qdev->ndev,
3622 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3624 netif_err(qdev, ifup, qdev->ndev,
3631 if (qdev->rss_ring_count > 1) {
3632 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3635 netif_err(qdev, ifup, qdev->ndev,
3641 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3644 netif_err(qdev, ifup, qdev->ndev,
3647 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3651 int ql_cam_route_initialize(struct ql_adapter *qdev)
3659 set = ql_read32(qdev, STS);
3660 set &= qdev->port_link_up;
3661 status = ql_set_mac_addr(qdev, set);
3663 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3667 status = ql_route_initialize(qdev);
3669 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3674 static int ql_adapter_initialize(struct ql_adapter *qdev)
3685 ql_write32(qdev, SYS, mask | value);
3690 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3693 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3703 ql_write32(qdev, FSC, mask | value);
3705 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3712 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3717 value = ql_read32(qdev, MGMT_RCV_CFG);
3722 ql_write32(qdev, MGMT_RCV_CFG, mask);
3723 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3726 if (qdev->pdev->subsystem_device == 0x0068 ||
3727 qdev->pdev->subsystem_device == 0x0180)
3728 qdev->wol = WAKE_MAGIC;
3731 for (i = 0; i < qdev->rx_ring_count; i++) {
3732 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3734 netif_err(qdev, ifup, qdev->ndev,
3743 if (qdev->rss_ring_count > 1) {
3744 status = ql_start_rss(qdev);
3746 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3752 for (i = 0; i < qdev->tx_ring_count; i++) {
3753 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3755 netif_err(qdev, ifup, qdev->ndev,
3762 status = qdev->nic_ops->port_initialize(qdev);
3764 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3767 status = ql_cam_route_initialize(qdev);
3769 netif_err(qdev, ifup, qdev->ndev,
3775 for (i = 0; i < qdev->rss_ring_count; i++) {
3776 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3778 napi_enable(&qdev->rx_ring[i].napi);
3785 static int ql_adapter_reset(struct ql_adapter *qdev)
3792 status = ql_clear_routing_entries(qdev);
3794 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3802 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3805 ql_wait_fifo_empty(qdev);
3807 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3810 value = ql_read32(qdev, RST_FO);
3817 netif_err(qdev, ifdown, qdev->ndev,
3823 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3829 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3831 netif_info(qdev, probe, qdev->ndev,
3834 qdev->func,
3835 qdev->port,
3836 qdev->chip_rev_id & 0x0000000f,
3837 qdev->chip_rev_id >> 4 & 0x0000000f,
3838 qdev->chip_rev_id >> 8 & 0x0000000f,
3839 qdev->chip_rev_id >> 12 & 0x0000000f);
3840 netif_info(qdev, probe, qdev->ndev,
3844 int ql_wol(struct ql_adapter *qdev)
3856 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3858 netif_err(qdev, ifdown, qdev->ndev,
3859 "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3860 qdev->wol);
3864 if (qdev->wol & WAKE_MAGIC) {
3865 status = ql_mb_wol_set_magic(qdev, 1);
3867 netif_err(qdev, ifdown, qdev->ndev,
3869 qdev->ndev->name);
3872 netif_info(qdev, drv, qdev->ndev,
3874 qdev->ndev->name);
3879 if (qdev->wol) {
3881 status = ql_mb_wol_mode(qdev, wol);
3882 netif_err(qdev, drv, qdev->ndev,
3885 wol, qdev->ndev->name);
3891 static int ql_adapter_down(struct ql_adapter *qdev)
3895 ql_link_off(qdev);
3900 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3901 cancel_delayed_work_sync(&qdev->asic_reset_work);
3902 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3903 cancel_delayed_work_sync(&qdev->mpi_work);
3904 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3905 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3906 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3908 for (i = 0; i < qdev->rss_ring_count; i++)
3909 napi_disable(&qdev->rx_ring[i].napi);
3911 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3913 ql_disable_interrupts(qdev);
3915 ql_tx_ring_clean(qdev);
3919 for (i = 0; i < qdev->rss_ring_count; i++)
3920 netif_napi_del(&qdev->rx_ring[i].napi);
3922 status = ql_adapter_reset(qdev);
3924 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3925 qdev->func);
3926 ql_free_rx_buffers(qdev);
3931 static int ql_adapter_up(struct ql_adapter *qdev)
3935 err = ql_adapter_initialize(qdev);
3937 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3940 set_bit(QL_ADAPTER_UP, &qdev->flags);
3941 ql_alloc_rx_buffers(qdev);
3945 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3946 (ql_read32(qdev, STS) & qdev->port_link_up))
3947 ql_link_on(qdev);
3949 clear_bit(QL_ALLMULTI, &qdev->flags);
3950 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3951 qlge_set_multicast_list(qdev->ndev);
3953 ql_enable_interrupts(qdev);
3954 ql_enable_all_completion_interrupts(qdev);
3955 netif_tx_start_all_queues(qdev->ndev);
3959 ql_adapter_reset(qdev);
3963 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3965 ql_free_mem_resources(qdev);
3966 ql_free_irq(qdev);
3969 static int ql_get_adapter_resources(struct ql_adapter *qdev)
3973 if (ql_alloc_mem_resources(qdev)) {
3974 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
3977 status = ql_request_irq(qdev);
3983 struct ql_adapter *qdev = netdev_priv(ndev);
3989 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
3990 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
3991 clear_bit(QL_EEH_FATAL, &qdev->flags);
3999 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4001 ql_adapter_down(qdev);
4002 ql_release_adapter_resources(qdev);
4006 static int ql_configure_rings(struct ql_adapter *qdev)
4012 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4015 qdev->lbq_buf_order = get_order(lbq_buf_len);
4024 qdev->intr_count = cpu_cnt;
4025 ql_enable_msix(qdev);
4027 qdev->rss_ring_count = qdev->intr_count;
4028 qdev->tx_ring_count = cpu_cnt;
4029 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
4031 for (i = 0; i < qdev->tx_ring_count; i++) {
4032 tx_ring = &qdev->tx_ring[i];
4034 tx_ring->qdev = qdev;
4036 tx_ring->wq_len = qdev->tx_ring_size;
4044 tx_ring->cq_id = qdev->rss_ring_count + i;
4047 for (i = 0; i < qdev->rx_ring_count; i++) {
4048 rx_ring = &qdev->rx_ring[i];
4050 rx_ring->qdev = qdev;
4053 if (i < qdev->rss_ring_count) {
4057 rx_ring->cq_len = qdev->rx_ring_size;
4064 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4067 qdev->lbq_buf_order);
4078 rx_ring->cq_len = qdev->tx_ring_size;
4096 struct ql_adapter *qdev = netdev_priv(ndev);
4098 err = ql_adapter_reset(qdev);
4102 err = ql_configure_rings(qdev);
4106 err = ql_get_adapter_resources(qdev);
4110 err = ql_adapter_up(qdev);
4117 ql_release_adapter_resources(qdev);
4121 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4128 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4130 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4131 netif_err(qdev, ifup, qdev->ndev,
4137 netif_err(qdev, ifup, qdev->ndev,
4143 status = ql_adapter_down(qdev);
4148 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4150 qdev->lbq_buf_order = get_order(lbq_buf_len);
4152 for (i = 0; i < qdev->rss_ring_count; i++) {
4153 rx_ring = &qdev->rx_ring[i];
4158 status = ql_adapter_up(qdev);
4164 netif_alert(qdev, ifup, qdev->ndev,
4166 set_bit(QL_ADAPTER_UP, &qdev->flags);
4167 dev_close(qdev->ndev);
4173 struct ql_adapter *qdev = netdev_priv(ndev);
4177 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4179 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4183 queue_delayed_work(qdev->workqueue,
4184 &qdev->mpi_port_cfg_work, 3*HZ);
4188 if (!netif_running(qdev->ndev)) {
4192 status = ql_change_rx_buffers(qdev);
4194 netif_err(qdev, ifup, qdev->ndev,
4204 struct ql_adapter *qdev = netdev_priv(ndev);
4205 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4206 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4212 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4227 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4240 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4244 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4252 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4254 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4255 netif_err(qdev, hw, qdev->ndev,
4258 set_bit(QL_PROMISCUOUS, &qdev->flags);
4262 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4264 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4265 netif_err(qdev, hw, qdev->ndev,
4268 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4279 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4281 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4282 netif_err(qdev, hw, qdev->ndev,
4285 set_bit(QL_ALLMULTI, &qdev->flags);
4289 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4291 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4292 netif_err(qdev, hw, qdev->ndev,
4295 clear_bit(QL_ALLMULTI, &qdev->flags);
4301 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4306 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4308 netif_err(qdev, hw, qdev->ndev,
4310 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4315 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4317 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4318 netif_err(qdev, hw, qdev->ndev,
4321 set_bit(QL_ALLMULTI, &qdev->flags);
4325 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4330 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4338 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4340 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4343 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4344 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4346 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4347 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4353 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4354 ql_queue_asic_error(qdev);
4359 struct ql_adapter *qdev =
4363 status = ql_adapter_down(qdev);
4367 status = ql_adapter_up(qdev);
4372 clear_bit(QL_ALLMULTI, &qdev->flags);
4373 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4374 qlge_set_multicast_list(qdev->ndev);
4379 netif_alert(qdev, ifup, qdev->ndev,
4382 set_bit(QL_ADAPTER_UP, &qdev->flags);
4383 dev_close(qdev->ndev);
4404 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4410 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4420 if (qdev->func == nic_func1)
4421 qdev->alt_func = nic_func2;
4422 else if (qdev->func == nic_func2)
4423 qdev->alt_func = nic_func1;
4430 static int ql_get_board_info(struct ql_adapter *qdev)
4433 qdev->func =
4434 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4435 if (qdev->func > 3)
4438 status = ql_get_alt_pcie_func(qdev);
4442 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4443 if (qdev->port) {
4444 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4445 qdev->port_link_up = STS_PL1;
4446 qdev->port_init = STS_PI1;
4447 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4448 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4450 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4451 qdev->port_link_up = STS_PL0;
4452 qdev->port_init = STS_PI0;
4453 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4454 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4456 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4457 qdev->device_id = qdev->pdev->device;
4458 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4459 qdev->nic_ops = &qla8012_nic_ops;
4460 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4461 qdev->nic_ops = &qla8000_nic_ops;
4468 struct ql_adapter *qdev = netdev_priv(ndev);
4470 if (qdev->workqueue) {
4471 destroy_workqueue(qdev->workqueue);
4472 qdev->workqueue = NULL;
4475 if (qdev->reg_base)
4476 iounmap(qdev->reg_base);
4477 if (qdev->doorbell_area)
4478 iounmap(qdev->doorbell_area);
4479 vfree(qdev->mpi_coredump);
4487 struct ql_adapter *qdev = netdev_priv(ndev);
4490 memset((void *)qdev, 0, sizeof(*qdev));
4497 qdev->ndev = ndev;
4498 qdev->pdev = pdev;
4516 set_bit(QL_DMA64, &qdev->flags);
4532 qdev->reg_base =
4535 if (!qdev->reg_base) {
4541 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4542 qdev->doorbell_area =
4545 if (!qdev->doorbell_area) {
4551 err = ql_get_board_info(qdev);
4557 qdev->msg_enable = netif_msg_init(debug, default_msg);
4558 spin_lock_init(&qdev->hw_lock);
4559 spin_lock_init(&qdev->stats_lock);
4562 qdev->mpi_coredump =
4564 if (qdev->mpi_coredump == NULL) {
4570 set_bit(QL_FRC_COREDUMP, &qdev->flags);
4573 err = qdev->nic_ops->get_flash(qdev);
4581 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4584 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4585 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4588 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4589 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4590 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4591 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4596 qdev->rx_csum = 1;
4597 qdev->workqueue = create_singlethread_workqueue(ndev->name);
4598 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4599 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4600 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4601 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4602 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4603 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4604 init_completion(&qdev->ide_completion);
4636 struct ql_adapter *qdev = (struct ql_adapter *)data;
4639 var = ql_read32(qdev, STS);
4640 if (pci_channel_offline(qdev->pdev)) {
4641 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4645 mod_timer(&qdev->timer, jiffies + (5*HZ));
4652 struct ql_adapter *qdev = NULL;
4667 qdev = netdev_priv(ndev);
4679 if (test_bit(QL_DMA64, &qdev->flags))
4685 ndev->tx_queue_len = qdev->tx_ring_size;
4702 init_timer_deferrable(&qdev->timer);
4703 qdev->timer.data = (unsigned long)qdev;
4704 qdev->timer.function = ql_timer;
4705 qdev->timer.expires = jiffies + (5*HZ);
4706 add_timer(&qdev->timer);
4707 ql_link_off(qdev);
4709 atomic_set(&qdev->lb_count, 0);
4727 struct ql_adapter *qdev = netdev_priv(ndev);
4728 del_timer_sync(&qdev->timer);
4739 struct ql_adapter *qdev = netdev_priv(ndev);
4747 del_timer_sync(&qdev->timer);
4748 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
4749 cancel_delayed_work_sync(&qdev->asic_reset_work);
4750 cancel_delayed_work_sync(&qdev->mpi_reset_work);
4751 cancel_delayed_work_sync(&qdev->mpi_work);
4752 cancel_delayed_work_sync(&qdev->mpi_idc_work);
4753 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
4754 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
4756 for (i = 0; i < qdev->rss_ring_count; i++)
4757 netif_napi_del(&qdev->rx_ring[i].napi);
4759 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4760 ql_tx_ring_clean(qdev);
4761 ql_free_rx_buffers(qdev);
4762 ql_release_adapter_resources(qdev);
4773 struct ql_adapter *qdev = netdev_priv(ndev);
4788 set_bit(QL_EEH_FATAL, &qdev->flags);
4805 struct ql_adapter *qdev = netdev_priv(ndev);
4811 netif_err(qdev, ifup, qdev->ndev,
4817 if (ql_adapter_reset(qdev)) {
4818 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4819 set_bit(QL_EEH_FATAL, &qdev->flags);
4829 struct ql_adapter *qdev = netdev_priv(ndev);
4835 netif_err(qdev, ifup, qdev->ndev,
4840 netif_err(qdev, ifup, qdev->ndev,
4843 mod_timer(&qdev->timer, jiffies + (5*HZ));
4856 struct ql_adapter *qdev = netdev_priv(ndev);
4860 del_timer_sync(&qdev->timer);
4863 err = ql_adapter_down(qdev);
4868 ql_wol(qdev);
4884 struct ql_adapter *qdev = netdev_priv(ndev);
4891 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4900 err = ql_adapter_up(qdev);
4905 mod_timer(&qdev->timer, jiffies + (5*HZ));