Directory: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/net/enic/

Lines Matching defs:enic

Each entry below is a cross-reference hit for the symbol `enic` in the driver source; the leading number is the line's position in the file.

46 #include "enic.h"
120 static int enic_is_dynamic(struct enic *enic)
122 return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
128 struct enic *enic = netdev_priv(netdev);
136 ecmd->speed = vnic_dev_port_speed(enic->vdev);
148 static int enic_dev_fw_info(struct enic *enic,
153 spin_lock(&enic->devcmd_lock);
154 err = vnic_dev_fw_info(enic->vdev, fw_info);
155 spin_unlock(&enic->devcmd_lock);
163 struct enic *enic = netdev_priv(netdev);
166 enic_dev_fw_info(enic, &fw_info);
172 strncpy(drvinfo->bus_info, pci_name(enic->pdev),
204 static int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
208 spin_lock(&enic->devcmd_lock);
209 err = vnic_dev_stats_dump(enic->vdev, vstats);
210 spin_unlock(&enic->devcmd_lock);
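
The pattern at lines 148-155 and 204-210 recurs throughout this file: every firmware devcmd issued through the vnic_dev layer is serialized under enic->devcmd_lock. A minimal user-space sketch of that wrapper convention, with a pthread mutex standing in for the kernel spinlock (all names here are illustrative, not taken from the driver):

    #include <pthread.h>
    #include <stdio.h>

    /* Stand-ins for the driver's vnic device handle and a firmware devcmd. */
    struct vdev { int id; };
    static int vnic_fw_call(struct vdev *vdev) { return vdev->id; }

    struct enic_like {
        struct vdev *vdev;
        pthread_mutex_t devcmd_lock;    /* models spinlock_t devcmd_lock */
    };

    /* The wrapper pattern: take the lock, issue exactly one devcmd, release. */
    static int enic_dev_do_cmd(struct enic_like *enic)
    {
        pthread_mutex_lock(&enic->devcmd_lock);
        int err = vnic_fw_call(enic->vdev);   /* e.g. vnic_dev_fw_info() */
        pthread_mutex_unlock(&enic->devcmd_lock);
        return err;
    }

    int main(void)
    {
        struct vdev v = { 0 };
        struct enic_like e = { &v, PTHREAD_MUTEX_INITIALIZER };
        printf("devcmd returned %d\n", enic_dev_do_cmd(&e));
        return 0;
    }
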
218 struct enic *enic = netdev_priv(netdev);
222 enic_dev_stats_dump(enic, &vstats);
232 struct enic *enic = netdev_priv(netdev);
233 return enic->csum_rx_enabled;
238 struct enic *enic = netdev_priv(netdev);
240 if (data && !ENIC_SETTING(enic, RXCSUM))
243 enic->csum_rx_enabled = !!data;
250 struct enic *enic = netdev_priv(netdev);
252 if (data && !ENIC_SETTING(enic, TXCSUM))
265 struct enic *enic = netdev_priv(netdev);
267 if (data && !ENIC_SETTING(enic, TSO))
282 struct enic *enic = netdev_priv(netdev);
283 return enic->msg_enable;
288 struct enic *enic = netdev_priv(netdev);
289 enic->msg_enable = value;
295 struct enic *enic = netdev_priv(netdev);
297 ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
298 ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
306 struct enic *enic = netdev_priv(netdev);
317 switch (vnic_dev_get_intr_mode(enic->vdev)) {
322 vnic_intr_coalescing_timer_set(&enic->intr[ENIC_INTX_WQ_RQ],
329 vnic_intr_coalescing_timer_set(&enic->intr[0],
333 vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_WQ],
335 vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_RQ],
342 enic->tx_coalesce_usecs = tx_coalesce_usecs;
343 enic->rx_coalesce_usecs = rx_coalesce_usecs;
372 struct enic *enic = vnic_dev_priv(wq->vdev);
375 pci_unmap_single(enic->pdev, buf->dma_addr,
378 pci_unmap_page(enic->pdev, buf->dma_addr,
394 struct enic *enic = vnic_dev_priv(vdev);
396 spin_lock(&enic->wq_lock[q_number]);
398 vnic_wq_service(&enic->wq[q_number], cq_desc,
402 if (netif_queue_stopped(enic->netdev) &&
403 vnic_wq_desc_avail(&enic->wq[q_number]) >=
405 netif_wake_queue(enic->netdev);
407 spin_unlock(&enic->wq_lock[q_number]);
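
Lines 394-407 show the TX completion path: completions are serviced under wq_lock, and a stopped netdev queue is woken once enough descriptors are free again. A small sketch of that stop/wake hysteresis, with an assumed threshold rather than the driver's real one:

    #include <stdio.h>

    /* Stop/wake hysteresis around a TX descriptor ring (illustrative
     * names; the threshold here is an assumption, not the driver's value). */
    #define RING_SIZE   256
    #define WAKE_THRESH (RING_SIZE / 4)

    struct txq { int avail; int stopped; };

    static void tx_complete(struct txq *q, int completed)
    {
        q->avail += completed;              /* vnic_wq_service() frees slots */
        if (q->stopped && q->avail >= WAKE_THRESH) {
            q->stopped = 0;                 /* netif_wake_queue() */
            puts("queue woken");
        }
    }

    int main(void)
    {
        struct txq q = { .avail = 0, .stopped = 1 };  /* netif_queue_stopped() */
        tx_complete(&q, 16);    /* still below threshold: stays stopped */
        tx_complete(&q, 64);    /* crosses threshold: queue woken */
        return 0;
    }
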
412 static void enic_log_q_error(struct enic *enic)
417 for (i = 0; i < enic->wq_count; i++) {
418 error_status = vnic_wq_error_status(&enic->wq[i]);
420 netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
424 for (i = 0; i < enic->rq_count; i++) {
425 error_status = vnic_rq_error_status(&enic->rq[i]);
427 netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
432 static void enic_msglvl_check(struct enic *enic)
434 u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);
436 if (msg_enable != enic->msg_enable) {
437 netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
438 enic->msg_enable, msg_enable);
439 enic->msg_enable = msg_enable;
443 static void enic_mtu_check(struct enic *enic)
445 u32 mtu = vnic_dev_mtu(enic->vdev);
446 struct net_device *netdev = enic->netdev;
448 if (mtu && mtu != enic->port_mtu) {
449 enic->port_mtu = mtu;
458 static void enic_link_check(struct enic *enic)
460 int link_status = vnic_dev_link_status(enic->vdev);
461 int carrier_ok = netif_carrier_ok(enic->netdev);
464 netdev_info(enic->netdev, "Link UP\n");
465 netif_carrier_on(enic->netdev);
467 netdev_info(enic->netdev, "Link DOWN\n");
468 netif_carrier_off(enic->netdev);
472 static void enic_notify_check(struct enic *enic)
474 enic_msglvl_check(enic);
475 enic_mtu_check(enic);
476 enic_link_check(enic);
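
enic_notify_check() (lines 472-476) fans out to three change detectors that compare the firmware notify area against cached software state. A hedged sketch of that fan-out, using plain structs in place of the vnic accessors:

    #include <stdio.h>

    /* Fan-out of the notify checks (illustrative types and values). */
    struct hw_state { unsigned msglvl, mtu; int link; };
    struct sw_state { unsigned msglvl, mtu; int carrier; };

    static void notify_check(const struct hw_state *hw, struct sw_state *sw)
    {
        if (hw->msglvl != sw->msglvl)           /* enic_msglvl_check() */
            sw->msglvl = hw->msglvl;
        if (hw->mtu && hw->mtu != sw->mtu)      /* enic_mtu_check(): 0 = no change */
            sw->mtu = hw->mtu;
        if (hw->link != sw->carrier) {          /* enic_link_check() */
            sw->carrier = hw->link;             /* netif_carrier_on()/off() */
            printf("Link %s\n", sw->carrier ? "UP" : "DOWN");
        }
    }

    int main(void)
    {
        struct hw_state hw = { 0x7, 1500, 1 };
        struct sw_state sw = { 0x0, 9000, 0 };
        notify_check(&hw, &sw);
        return 0;
    }
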
484 struct enic *enic = netdev_priv(netdev);
487 vnic_intr_mask(&enic->intr[ENIC_INTX_WQ_RQ]);
489 pba = vnic_intr_legacy_pba(enic->legacy_pba);
491 vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
496 vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_NOTIFY]);
497 enic_notify_check(enic);
501 vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_ERR]);
502 enic_log_q_error(enic);
504 schedule_work(&enic->reset);
509 if (napi_schedule_prep(&enic->napi))
510 __napi_schedule(&enic->napi);
512 vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
520 struct enic *enic = data;
538 napi_schedule(&enic->napi);
545 struct enic *enic = data;
548 napi_schedule(&enic->napi);
555 struct enic *enic = data;
559 wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
562 vnic_intr_return_credits(&enic->intr[ENIC_MSIX_WQ],
572 struct enic *enic = data;
574 vnic_intr_return_all_credits(&enic->intr[ENIC_MSIX_ERR]);
576 enic_log_q_error(enic);
579 schedule_work(&enic->reset);
586 struct enic *enic = data;
588 vnic_intr_return_all_credits(&enic->intr[ENIC_MSIX_NOTIFY]);
589 enic_notify_check(enic);
594 static inline void enic_queue_wq_skb_cont(struct enic *enic,
604 pci_map_page(enic->pdev, frag->page,
613 static inline void enic_queue_wq_skb_vlan(struct enic *enic,
627 pci_map_single(enic->pdev, skb->data,
634 enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
637 static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
653 pci_map_single(enic->pdev, skb->data,
662 enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
665 static inline void enic_queue_wq_skb_tso(struct enic *enic,
697 dma_addr = pci_map_single(enic->pdev, skb->data + offset,
723 dma_addr = pci_map_page(enic->pdev, frag->page,
738 static inline void enic_queue_wq_skb(struct enic *enic,
746 if (enic->vlan_group && vlan_tx_tag_present(skb)) {
750 } else if (enic->loop_enable) {
751 vlan_tag = enic->loop_tag;
756 enic_queue_wq_skb_tso(enic, wq, skb, mss,
759 enic_queue_wq_skb_csum_l4(enic, wq, skb,
762 enic_queue_wq_skb_vlan(enic, wq, skb,
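
enic_queue_wq_skb() (lines 738-762) picks one of three descriptor-building paths per packet: TSO when an MSS is set, L4 checksum offload when the stack asks for partial checksumming, otherwise a plain (possibly VLAN-tagged) send. A sketch of that dispatch with stand-in fields for the skb state:

    #include <stdio.h>

    /* Per-packet TX dispatch (illustrative fields standing in for skb state). */
    enum tx_path { TX_TSO, TX_CSUM_L4, TX_PLAIN };

    struct pkt { unsigned mss; int needs_csum; };

    static enum tx_path pick_tx_path(const struct pkt *p)
    {
        if (p->mss)             /* skb_shinfo(skb)->gso_size != 0 */
            return TX_TSO;      /* -> enic_queue_wq_skb_tso()     */
        if (p->needs_csum)      /* skb->ip_summed == CHECKSUM_PARTIAL */
            return TX_CSUM_L4;  /* -> enic_queue_wq_skb_csum_l4()     */
        return TX_PLAIN;        /* -> enic_queue_wq_skb_vlan()        */
    }

    int main(void)
    {
        struct pkt tso = { 1448, 1 }, csum = { 0, 1 }, plain = { 0, 0 };
        printf("%d %d %d\n", pick_tx_path(&tso), pick_tx_path(&csum),
               pick_tx_path(&plain));
        return 0;
    }
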
770 struct enic *enic = netdev_priv(netdev);
771 struct vnic_wq *wq = &enic->wq[0];
791 spin_lock_irqsave(&enic->wq_lock[0], flags);
798 spin_unlock_irqrestore(&enic->wq_lock[0], flags);
802 enic_queue_wq_skb(enic, wq, skb);
807 spin_unlock_irqrestore(&enic->wq_lock[0], flags);
815 struct enic *enic = netdev_priv(netdev);
819 enic_dev_stats_dump(enic, &stats);
830 net_stats->rx_over_errors = enic->rq_truncated_pkts;
831 net_stats->rx_crc_errors = enic->rq_bad_fcs;
837 static void enic_reset_multicast_list(struct enic *enic)
839 enic->mc_count = 0;
840 enic->flags = 0;
845 struct enic *enic = netdev_priv(netdev);
847 if (enic_is_dynamic(enic)) {
860 static int enic_dev_add_station_addr(struct enic *enic)
864 if (is_valid_ether_addr(enic->netdev->dev_addr)) {
865 spin_lock(&enic->devcmd_lock);
866 err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
867 spin_unlock(&enic->devcmd_lock);
873 static int enic_dev_del_station_addr(struct enic *enic)
877 if (is_valid_ether_addr(enic->netdev->dev_addr)) {
878 spin_lock(&enic->devcmd_lock);
879 err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
880 spin_unlock(&enic->devcmd_lock);
888 struct enic *enic = netdev_priv(netdev);
893 if (netif_running(enic->netdev)) {
894 err = enic_dev_del_station_addr(enic);
903 if (netif_running(enic->netdev)) {
904 err = enic_dev_add_station_addr(enic);
917 static int enic_dev_packet_filter(struct enic *enic, int directed,
922 spin_lock(&enic->devcmd_lock);
923 err = vnic_dev_packet_filter(enic->vdev, directed,
925 spin_unlock(&enic->devcmd_lock);
930 static int enic_dev_add_multicast_addr(struct enic *enic, u8 *addr)
934 spin_lock(&enic->devcmd_lock);
935 err = vnic_dev_add_addr(enic->vdev, addr);
936 spin_unlock(&enic->devcmd_lock);
941 static int enic_dev_del_multicast_addr(struct enic *enic, u8 *addr)
945 spin_lock(&enic->devcmd_lock);
946 err = vnic_dev_del_addr(enic->vdev, addr);
947 spin_unlock(&enic->devcmd_lock);
955 struct enic *enic = netdev_priv(netdev);
971 if (enic->flags != flags) {
972 enic->flags = flags;
973 enic_dev_packet_filter(enic, directed,
979 * addrs from the last call in enic->mc_addr and
990 for (i = 0; i < enic->mc_count; i++) {
992 if (compare_ether_addr(enic->mc_addr[i],
996 enic_dev_del_multicast_addr(enic, enic->mc_addr[i]);
1000 for (j = 0; j < enic->mc_count; j++)
1002 enic->mc_addr[j]) == 0)
1004 if (j == enic->mc_count)
1005 enic_dev_add_multicast_addr(enic, mc_addr[i]);
1012 memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);
1014 enic->mc_count = mc_count;
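
The multicast path (lines 971-1014) resynchronizes the hardware filter by diffing the previously programmed list in enic->mc_addr against the new one: stale addresses are deleted first, then missing ones added, and the new list is saved for the next call. A compact sketch of that two-pass set difference (prints stand in for the devcmd calls):

    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6

    static int in_list(unsigned char (*l)[ETH_ALEN], int n, unsigned char *a)
    {
        for (int i = 0; i < n; i++)
            if (!memcmp(l[i], a, ETH_ALEN))   /* driver: compare_ether_addr() */
                return 1;
        return 0;
    }

    static void mc_sync(unsigned char (*old)[ETH_ALEN], int *old_n,
                        unsigned char (*cur)[ETH_ALEN], int cur_n)
    {
        for (int i = 0; i < *old_n; i++)
            if (!in_list(cur, cur_n, old[i]))
                puts("del stale addr");       /* enic_dev_del_multicast_addr() */
        for (int i = 0; i < cur_n; i++)
            if (!in_list(old, *old_n, cur[i]))
                puts("add new addr");         /* enic_dev_add_multicast_addr() */
        memcpy(old, cur, (size_t)cur_n * ETH_ALEN);  /* save for the next call */
        *old_n = cur_n;
    }

    int main(void)
    {
        unsigned char old[4][ETH_ALEN] = { {1}, {2} };
        unsigned char cur[4][ETH_ALEN] = { {2}, {3} };
        int old_n = 2;
        mc_sync(old, &old_n, cur, 2);   /* one del (addr {1}), one add ({3}) */
        return 0;
    }
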
1021 struct enic *enic = netdev_priv(netdev);
1022 enic->vlan_group = vlan_group;
1028 struct enic *enic = netdev_priv(netdev);
1030 spin_lock(&enic->devcmd_lock);
1031 enic_add_vlan(enic, vid);
1032 spin_unlock(&enic->devcmd_lock);
1038 struct enic *enic = netdev_priv(netdev);
1040 spin_lock(&enic->devcmd_lock);
1041 enic_del_vlan(enic, vid);
1042 spin_unlock(&enic->devcmd_lock);
1048 struct enic *enic = netdev_priv(netdev);
1049 schedule_work(&enic->reset);
1052 static int enic_vnic_dev_deinit(struct enic *enic)
1056 spin_lock(&enic->devcmd_lock);
1057 err = vnic_dev_deinit(enic->vdev);
1058 spin_unlock(&enic->devcmd_lock);
1063 static int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp)
1067 spin_lock(&enic->devcmd_lock);
1068 err = vnic_dev_init_prov(enic->vdev,
1070 spin_unlock(&enic->devcmd_lock);
1075 static int enic_dev_init_done(struct enic *enic, int *done, int *error)
1079 spin_lock(&enic->devcmd_lock);
1080 err = vnic_dev_init_done(enic->vdev, done, error);
1081 spin_unlock(&enic->devcmd_lock);
1086 static int enic_set_port_profile(struct enic *enic, u8 *mac)
1093 err = enic_vnic_dev_deinit(enic);
1097 switch (enic->pp.request) {
1101 if (!(enic->pp.set & ENIC_SET_NAME) || !strlen(enic->pp.name))
1114 strlen(enic->pp.name) + 1, enic->pp.name);
1120 if (enic->pp.set & ENIC_SET_INSTANCE) {
1121 sprintf(uuid_str, "%pUB", enic->pp.instance_uuid);
1127 if (enic->pp.set & ENIC_SET_HOST) {
1128 sprintf(uuid_str, "%pUB", enic->pp.host_uuid);
1134 err = enic_dev_init_prov(enic, vp);
1147 enic->pp.set |= ENIC_SET_APPLIED;
1154 struct enic *enic = netdev_priv(netdev);
1156 memset(&enic->pp, 0, sizeof(enic->pp));
1159 enic->pp.set |= ENIC_SET_REQUEST;
1160 enic->pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]);
1164 enic->pp.set |= ENIC_SET_NAME;
1165 memcpy(enic->pp.name, nla_data(port[IFLA_PORT_PROFILE]),
1170 enic->pp.set |= ENIC_SET_INSTANCE;
1171 memcpy(enic->pp.instance_uuid,
1176 enic->pp.set |= ENIC_SET_HOST;
1177 memcpy(enic->pp.host_uuid,
1185 if (!(enic->pp.set & ENIC_SET_REQUEST))
1188 if (enic->pp.request == PORT_REQUEST_ASSOCIATE) {
1199 return enic_set_port_profile(enic, netdev->dev_addr);
1205 struct enic *enic = netdev_priv(netdev);
1209 if (!(enic->pp.set & ENIC_SET_APPLIED))
1212 err = enic_dev_init_done(enic, &done, &error);
1235 NLA_PUT_U16(skb, IFLA_PORT_REQUEST, enic->pp.request);
1237 if (enic->pp.set & ENIC_SET_NAME)
1239 enic->pp.name);
1240 if (enic->pp.set & ENIC_SET_INSTANCE)
1242 enic->pp.instance_uuid);
1243 if (enic->pp.set & ENIC_SET_HOST)
1245 enic->pp.host_uuid);
1255 struct enic *enic = vnic_dev_priv(rq->vdev);
1260 pci_unmap_single(enic->pdev, buf->dma_addr,
1267 struct enic *enic = vnic_dev_priv(rq->vdev);
1268 struct net_device *netdev = enic->netdev;
1278 dma_addr = pci_map_single(enic->pdev, skb->data,
1303 static int enic_dev_hw_version(struct enic *enic,
1308 spin_lock(&enic->devcmd_lock);
1309 err = vnic_dev_hw_version(enic->vdev, hw_ver);
1310 spin_unlock(&enic->devcmd_lock);
1315 static int enic_set_rq_alloc_buf(struct enic *enic)
1320 err = enic_dev_hw_version(enic, &hw_ver);
1326 enic->rq_alloc_buf = enic_rq_alloc_buf_a1;
1330 enic->rq_alloc_buf = enic_rq_alloc_buf;
1343 struct enic *enic = vnic_dev_priv(rq->vdev);
1344 struct net_device *netdev = enic->netdev;
1360 pci_unmap_single(enic->pdev, buf->dma_addr,
1377 enic->rq_bad_fcs++;
1379 enic->rq_truncated_pkts++;
1395 if (enic->csum_rx_enabled && !csum_not_calc) {
1402 if (enic->vlan_group && vlan_stripped &&
1406 vlan_gro_receive(&enic->napi, enic->vlan_group,
1410 enic->vlan_group, vlan_tci);
1415 napi_gro_receive(&enic->napi, skb);
1433 struct enic *enic = vnic_dev_priv(vdev);
1435 vnic_rq_service(&enic->rq[q_number], cq_desc,
1444 struct enic *enic = container_of(napi, struct enic, napi);
1453 rq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
1456 wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
1467 vnic_intr_return_credits(&enic->intr[ENIC_INTX_WQ_RQ],
1472 err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
1488 vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
1496 struct enic *enic = container_of(napi, struct enic, napi);
1504 work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
1513 vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ],
1518 err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
1534 vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
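
enic_poll() and enic_poll_msix() (lines 1444-1534) follow the standard NAPI contract: service at most 'budget' completions, refill the receive ring, and only when fewer than budget packets were handled complete NAPI and re-arm the interrupt. A runnable sketch of that contract with stubbed helpers:

    #include <stdio.h>

    /* The NAPI poll contract (stubs stand in for the vnic helpers). */
    static int rx_backlog = 300;                  /* pretend packets pending */

    static int service_rx(int budget)             /* models vnic_cq_service() */
    {
        int done = rx_backlog < budget ? rx_backlog : budget;
        rx_backlog -= done;
        return done;
    }
    static void refill_rx_ring(void) { }          /* models vnic_rq_fill() */
    static void napi_done_and_unmask(void)        /* napi_complete() +     */
    {                                             /* vnic_intr_unmask()    */
        puts("polling stopped, interrupt re-armed");
    }

    static int poll(int budget)
    {
        int done = service_rx(budget);
        refill_rx_ring();
        if (done < budget)          /* ring drained before the budget ran out */
            napi_done_and_unmask();
        return done;                /* done == budget keeps us in poll mode */
    }

    int main(void)
    {
        while (poll(64) == 64)      /* softirq keeps calling while budget met */
            ;
        return 0;
    }
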
1542 struct enic *enic = (struct enic *)data;
1544 enic_notify_check(enic);
1546 mod_timer(&enic->notify_timer,
1550 static void enic_free_intr(struct enic *enic)
1552 struct net_device *netdev = enic->netdev;
1555 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1557 free_irq(enic->pdev->irq, netdev);
1560 free_irq(enic->pdev->irq, enic);
1563 for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
1564 if (enic->msix[i].requested)
1565 free_irq(enic->msix_entry[i].vector,
1566 enic->msix[i].devid);
1573 static int enic_request_intr(struct enic *enic)
1575 struct net_device *netdev = enic->netdev;
1579 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1583 err = request_irq(enic->pdev->irq, enic_isr_legacy,
1589 err = request_irq(enic->pdev->irq, enic_isr_msi,
1590 0, netdev->name, enic);
1595 sprintf(enic->msix[ENIC_MSIX_RQ].devname,
1597 enic->msix[ENIC_MSIX_RQ].isr = enic_isr_msix_rq;
1598 enic->msix[ENIC_MSIX_RQ].devid = enic;
1600 sprintf(enic->msix[ENIC_MSIX_WQ].devname,
1602 enic->msix[ENIC_MSIX_WQ].isr = enic_isr_msix_wq;
1603 enic->msix[ENIC_MSIX_WQ].devid = enic;
1605 sprintf(enic->msix[ENIC_MSIX_ERR].devname,
1607 enic->msix[ENIC_MSIX_ERR].isr = enic_isr_msix_err;
1608 enic->msix[ENIC_MSIX_ERR].devid = enic;
1610 sprintf(enic->msix[ENIC_MSIX_NOTIFY].devname,
1612 enic->msix[ENIC_MSIX_NOTIFY].isr = enic_isr_msix_notify;
1613 enic->msix[ENIC_MSIX_NOTIFY].devid = enic;
1615 for (i = 0; i < ARRAY_SIZE(enic->msix); i++) {
1616 err = request_irq(enic->msix_entry[i].vector,
1617 enic->msix[i].isr, 0,
1618 enic->msix[i].devname,
1619 enic->msix[i].devid);
1621 enic_free_intr(enic);
1624 enic->msix[i].requested = 1;
1636 static void enic_synchronize_irqs(struct enic *enic)
1640 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1643 synchronize_irq(enic->pdev->irq);
1646 for (i = 0; i < enic->intr_count; i++)
1647 synchronize_irq(enic->msix_entry[i].vector);
1654 static int enic_dev_notify_set(struct enic *enic)
1658 spin_lock(&enic->devcmd_lock);
1659 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1661 err = vnic_dev_notify_set(enic->vdev, ENIC_INTX_NOTIFY);
1664 err = vnic_dev_notify_set(enic->vdev, ENIC_MSIX_NOTIFY);
1667 err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
1670 spin_unlock(&enic->devcmd_lock);
1675 static int enic_dev_notify_unset(struct enic *enic)
1679 spin_lock(&enic->devcmd_lock);
1680 err = vnic_dev_notify_unset(enic->vdev);
1681 spin_unlock(&enic->devcmd_lock);
1686 static int enic_dev_enable(struct enic *enic)
1690 spin_lock(&enic->devcmd_lock);
1691 err = vnic_dev_enable(enic->vdev);
1692 spin_unlock(&enic->devcmd_lock);
1697 static int enic_dev_disable(struct enic *enic)
1701 spin_lock(&enic->devcmd_lock);
1702 err = vnic_dev_disable(enic->vdev);
1703 spin_unlock(&enic->devcmd_lock);
1708 static void enic_notify_timer_start(struct enic *enic)
1710 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1712 mod_timer(&enic->notify_timer, jiffies);
1723 struct enic *enic = netdev_priv(netdev);
1727 err = enic_request_intr(enic);
1733 err = enic_dev_notify_set(enic);
1740 for (i = 0; i < enic->rq_count; i++) {
1741 vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
1743 if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
1750 for (i = 0; i < enic->wq_count; i++)
1751 vnic_wq_enable(&enic->wq[i]);
1752 for (i = 0; i < enic->rq_count; i++)
1753 vnic_rq_enable(&enic->rq[i]);
1755 enic_dev_add_station_addr(enic);
1759 napi_enable(&enic->napi);
1760 enic_dev_enable(enic);
1762 for (i = 0; i < enic->intr_count; i++)
1763 vnic_intr_unmask(&enic->intr[i]);
1765 enic_notify_timer_start(enic);
1770 enic_dev_notify_unset(enic);
1772 enic_free_intr(enic);
1780 struct enic *enic = netdev_priv(netdev);
1784 for (i = 0; i < enic->intr_count; i++) {
1785 vnic_intr_mask(&enic->intr[i]);
1786 (void)vnic_intr_masked(&enic->intr[i]); /* flush write */
1789 enic_synchronize_irqs(enic);
1791 del_timer_sync(&enic->notify_timer);
1793 enic_dev_disable(enic);
1794 napi_disable(&enic->napi);
1797 enic_dev_del_station_addr(enic);
1799 for (i = 0; i < enic->wq_count; i++) {
1800 err = vnic_wq_disable(&enic->wq[i]);
1804 for (i = 0; i < enic->rq_count; i++) {
1805 err = vnic_rq_disable(&enic->rq[i]);
1810 enic_dev_notify_unset(enic);
1811 enic_free_intr(enic);
1813 for (i = 0; i < enic->wq_count; i++)
1814 vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
1815 for (i = 0; i < enic->rq_count; i++)
1816 vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
1817 for (i = 0; i < enic->cq_count; i++)
1818 vnic_cq_clean(&enic->cq[i]);
1819 for (i = 0; i < enic->intr_count; i++)
1820 vnic_intr_clean(&enic->intr[i]);
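
enic_open() (lines 1723-1765) requests IRQs, points the notify area at an interrupt, prefills and enables the rings, adds the station MAC, then enables NAPI, the device, and the interrupts; enic_stop() (lines 1780-1820) loosely mirrors it in reverse. A simplified sketch of that paired ordering (the real teardown order differs in detail):

    #include <stdio.h>

    /* Sketch: bring-up steps run forward; tear-down runs the paired undo
     * steps in reverse order (names illustrative, steps coarsened). */
    typedef void (*step_fn)(void);

    static void do_irqs(void)    { puts("request irqs, set notify"); }
    static void undo_irqs(void)  { puts("unset notify, free irqs");  }
    static void do_rings(void)   { puts("fill and enable rings");    }
    static void undo_rings(void) { puts("disable and clean rings");  }
    static void do_dev(void)     { puts("enable napi, device, unmask"); }
    static void undo_dev(void)   { puts("mask, disable device, napi");  }

    static const struct { step_fn up, down; } steps[] = {
        { do_irqs, undo_irqs }, { do_rings, undo_rings }, { do_dev, undo_dev },
    };

    int main(void)
    {
        int n = sizeof(steps) / sizeof(steps[0]);
        for (int i = 0; i < n; i++) steps[i].up();          /* enic_open() */
        for (int i = n - 1; i >= 0; i--) steps[i].down();   /* enic_stop() */
        return 0;
    }
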
1827 struct enic *enic = netdev_priv(netdev);
1838 if (netdev->mtu > enic->port_mtu)
1841 netdev->mtu, enic->port_mtu);
1852 struct enic *enic = netdev_priv(netdev);
1853 struct vnic_dev *vdev = enic->vdev;
1857 enic_isr_msix_rq(enic->pdev->irq, enic);
1858 enic_isr_msix_wq(enic->pdev->irq, enic);
1861 enic_isr_msi(enic->pdev->irq, enic);
1864 enic_isr_legacy(enic->pdev->irq, netdev);
1907 static int enic_dev_open(struct enic *enic)
1911 err = enic_dev_wait(enic->vdev, vnic_dev_open,
1914 dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
1920 static int enic_dev_hang_reset(struct enic *enic)
1924 err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
1927 netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
1933 static int enic_set_niccfg(struct enic *enic)
1947 spin_lock(&enic->devcmd_lock);
1948 err = enic_set_nic_cfg(enic,
1953 spin_unlock(&enic->devcmd_lock);
1958 static int enic_dev_hang_notify(struct enic *enic)
1962 spin_lock(&enic->devcmd_lock);
1963 err = vnic_dev_hang_notify(enic->vdev);
1964 spin_unlock(&enic->devcmd_lock);
1969 int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
1973 spin_lock(&enic->devcmd_lock);
1974 err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
1976 spin_unlock(&enic->devcmd_lock);
1983 struct enic *enic = container_of(work, struct enic, reset);
1985 if (!netif_running(enic->netdev))
1990 enic_dev_hang_notify(enic);
1991 enic_stop(enic->netdev);
1992 enic_dev_hang_reset(enic);
1993 enic_reset_multicast_list(enic);
1994 enic_init_vnic_resources(enic);
1995 enic_set_niccfg(enic);
1996 enic_dev_set_ig_vlan_rewrite_mode(enic);
1997 enic_open(enic->netdev);
2002 static int enic_set_intr_mode(struct enic *enic)
2018 BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
2020 enic->msix_entry[i].entry = i;
2022 if (enic->config.intr_mode < 1 &&
2023 enic->rq_count >= n &&
2024 enic->wq_count >= m &&
2025 enic->cq_count >= n + m &&
2026 enic->intr_count >= n + m + 2 &&
2027 !pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {
2029 enic->rq_count = n;
2030 enic->wq_count = m;
2031 enic->cq_count = n + m;
2032 enic->intr_count = n + m + 2;
2034 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX);
2044 if (enic->config.intr_mode < 2 &&
2045 enic->rq_count >= 1 &&
2046 enic->wq_count >= 1 &&
2047 enic->cq_count >= 2 &&
2048 enic->intr_count >= 1 &&
2049 !pci_enable_msi(enic->pdev)) {
2051 enic->rq_count = 1;
2052 enic->wq_count = 1;
2053 enic->cq_count = 2;
2054 enic->intr_count = 1;
2056 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);
2069 if (enic->config.intr_mode < 3 &&
2070 enic->rq_count >= 1 &&
2071 enic->wq_count >= 1 &&
2072 enic->cq_count >= 2 &&
2073 enic->intr_count >= 3) {
2075 enic->rq_count = 1;
2076 enic->wq_count = 1;
2077 enic->cq_count = 2;
2078 enic->intr_count = 3;
2080 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);
2085 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
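
enic_set_intr_mode() (lines 2002-2085) tries the richest interrupt scheme first and falls back: MSI-X wants n+m+2 vectors (per-RQ, per-WQ, error, notify), MSI collapses everything onto one vector, and INTx needs three dedicated indices; config.intr_mode caps how rich a mode may be chosen. A sketch of the cascade's resource checks (cfg_cap models enic->config.intr_mode; the PCI enable calls are noted as comments):

    #include <stdio.h>

    enum intr_mode { MODE_MSIX, MODE_MSI, MODE_INTX, MODE_UNKNOWN };

    struct res { int rq, wq, cq, intr, cfg_cap; };

    static enum intr_mode pick_intr_mode(const struct res *r, int n, int m)
    {
        if (r->cfg_cap < 1 && r->rq >= n && r->wq >= m &&
            r->cq >= n + m && r->intr >= n + m + 2)
            return MODE_MSIX;   /* and pci_enable_msix() must succeed */
        if (r->cfg_cap < 2 && r->rq >= 1 && r->wq >= 1 &&
            r->cq >= 2 && r->intr >= 1)
            return MODE_MSI;    /* and pci_enable_msi() must succeed */
        if (r->cfg_cap < 3 && r->rq >= 1 && r->wq >= 1 &&
            r->cq >= 2 && r->intr >= 3)
            return MODE_INTX;
        return MODE_UNKNOWN;
    }

    int main(void)
    {
        struct res r = { .rq = 1, .wq = 1, .cq = 2, .intr = 3, .cfg_cap = 0 };
        /* Only 3 vectors available: MSI-X (needs 4) fails, MSI is chosen. */
        printf("mode=%d\n", pick_intr_mode(&r, 1, 1));
        return 0;
    }
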
2090 static void enic_clear_intr_mode(struct enic *enic)
2092 switch (vnic_dev_get_intr_mode(enic->vdev)) {
2094 pci_disable_msix(enic->pdev);
2097 pci_disable_msi(enic->pdev);
2103 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
2144 void enic_dev_deinit(struct enic *enic)
2146 netif_napi_del(&enic->napi);
2147 enic_free_vnic_resources(enic);
2148 enic_clear_intr_mode(enic);
2151 static int enic_dev_stats_clear(struct enic *enic)
2155 spin_lock(&enic->devcmd_lock);
2156 err = vnic_dev_stats_clear(enic->vdev);
2157 spin_unlock(&enic->devcmd_lock);
2162 int enic_dev_init(struct enic *enic)
2164 struct device *dev = enic_get_dev(enic);
2165 struct net_device *netdev = enic->netdev;
2171 err = enic_get_vnic_config(enic);
2180 enic_get_res_counts(enic);
2186 err = enic_set_intr_mode(enic);
2196 err = enic_alloc_vnic_resources(enic);
2202 enic_init_vnic_resources(enic);
2206 enic_dev_stats_clear(enic);
2208 err = enic_set_rq_alloc_buf(enic);
2214 err = enic_set_niccfg(enic);
2220 err = enic_dev_set_ig_vlan_rewrite_mode(enic);
2227 switch (vnic_dev_get_intr_mode(enic->vdev)) {
2229 netif_napi_add(netdev, &enic->napi, enic_poll, 64);
2232 netif_napi_add(netdev, &enic->napi, enic_poll_msix, 64);
2239 enic_clear_intr_mode(enic);
2240 enic_free_vnic_resources(enic);
2245 static void enic_iounmap(struct enic *enic)
2249 for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
2250 if (enic->bar[i].vaddr)
2251 iounmap(enic->bar[i].vaddr);
2259 struct enic *enic;
2268 netdev = alloc_etherdev(sizeof(struct enic));
2278 enic = netdev_priv(netdev);
2279 enic->netdev = netdev;
2280 enic->pdev = pdev;
2330 for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
2333 enic->bar[i].len = pci_resource_len(pdev, i);
2334 enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
2335 if (!enic->bar[i].vaddr) {
2340 enic->bar[i].bus_addr = pci_resource_start(pdev, i);
2346 enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
2347 ARRAY_SIZE(enic->bar));
2348 if (!enic->vdev) {
2357 err = enic_dev_open(enic);
2378 if (!enic_is_dynamic(enic)) {
2379 err = vnic_dev_init(enic->vdev, 0);
2389 spin_lock_init(&enic->devcmd_lock);
2391 err = enic_dev_init(enic);
2400 init_timer(&enic->notify_timer);
2401 enic->notify_timer.function = enic_notify_timer;
2402 enic->notify_timer.data = (unsigned long)enic;
2404 INIT_WORK(&enic->reset, enic_reset);
2406 for (i = 0; i < enic->wq_count; i++)
2407 spin_lock_init(&enic->wq_lock[i]);
2412 enic->port_mtu = enic->config.mtu;
2413 (void)enic_change_mtu(netdev, enic->port_mtu);
2415 err = enic_set_mac_addr(netdev, enic->mac_addr);
2421 enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
2422 enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
2424 if (enic_is_dynamic(enic))
2433 if (ENIC_SETTING(enic, LOOP)) {
2435 enic->loop_enable = 1;
2436 enic->loop_tag = enic->config.loop_tag;
2437 dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
2439 if (ENIC_SETTING(enic, TXCSUM))
2441 if (ENIC_SETTING(enic, TSO))
2444 if (ENIC_SETTING(enic, LRO))
2449 enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM);
2460 enic_dev_deinit(enic);
2462 vnic_dev_close(enic->vdev);
2464 vnic_dev_unregister(enic->vdev);
2466 enic_iounmap(enic);
2483 struct enic *enic = netdev_priv(netdev);
2487 enic_dev_deinit(enic);
2488 vnic_dev_close(enic->vdev);
2489 vnic_dev_unregister(enic->vdev);
2490 enic_iounmap(enic);