Lines matching refs: lio
(Each match below is prefixed with its line number in the source file, which appears to be the Cavium LiquidIO VF driver, drivers/net/ethernet/cavium/liquidio/lio_vf_main.c.)

263 struct lio *lio = GET_LIO(netdev);
265 if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
266 ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
267 struct oct_link_info *linfo = &lio->linfo;
270 netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
274 netif_info(lio, link, lio->netdev, "Link Down\n");
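
For orientation: GET_LIO() and the ifstate accessors used throughout these matches are small helpers. A sketch, assuming the definitions in the driver's octeon_network.h / lio_vf_main.c:

#define GET_LIO(netdev)  ((struct lio *)netdev_priv(netdev))

/* The ifstate flags live in an atomic_t so they can be tested
 * without taking a lock.
 */
static int ifstate_check(struct lio *lio, int state_flag)
{
	return atomic_read(&lio->ifstate) & state_flag;
}

static void ifstate_set(struct lio *lio, int state_flag)
{
	atomic_set(&lio->ifstate,
		   atomic_read(&lio->ifstate) | state_flag);
}

static void ifstate_reset(struct lio *lio, int state_flag)
{
	atomic_set(&lio->ifstate,
		   atomic_read(&lio->ifstate) & ~state_flag);
}
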
286 struct lio *lio = (struct lio *)wk->ctxptr;
288 /* lio->linfo.link.s.mtu always contains max MTU of the lio interface.
293 dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
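
Line 293 runs from a delayed work item because dev_set_mtu() must be called with RTNL held, which the path that notices the MTU change cannot do directly. A sketch of the handler; the rtnl locking is an assumption based on dev_set_mtu()'s requirements:

static void octnet_link_status_change(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	/* lio->linfo.link.s.mtu always contains max MTU of the lio
	 * interface; apply it from process context under RTNL.
	 */
	rtnl_lock();
	dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
	rtnl_unlock();
}
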
303 struct lio *lio = GET_LIO(netdev);
304 struct octeon_device *oct = lio->oct_dev;
306 lio->link_status_wq.wq = alloc_workqueue("link-status",
308 if (!lio->link_status_wq.wq) {
312 INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
314 lio->link_status_wq.wk.ctxptr = lio;
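
lio->link_status_wq at lines 306-314 pairs a dedicated workqueue with a single delayed work item plus a context pointer. Assumed container layout (per the driver's octeon_main.h):

struct cavium_wk {
	struct delayed_work work;	/* must stay first: the handler
					 * casts the work_struct back to
					 * cavium_wk (see sketch above)
					 */
	void *ctxptr;
	u64 ctxul;
};

struct cavium_wq {
	struct workqueue_struct *wq;
	struct cavium_wk wk;
};
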
321 struct lio *lio = GET_LIO(netdev);
323 if (lio->link_status_wq.wq) {
324 cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
325 destroy_workqueue(lio->link_status_wq.wq);
340 struct lio *lio = GET_LIO(netdev);
341 int current_max_mtu = lio->linfo.link.s.mtu;
342 struct octeon_device *oct = lio->oct_dev;
344 if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) {
345 lio->linfo.link.u64 = ls->u64;
348 lio->link_changes++;
350 if (lio->linfo.link.s.link_up) {
358 if (lio->linfo.link.s.mtu != current_max_mtu) {
361 current_max_mtu, lio->linfo.link.s.mtu);
362 netdev->max_mtu = lio->linfo.link.s.mtu;
365 if (lio->linfo.link.s.mtu < netdev->mtu) {
368 netdev->mtu, lio->linfo.link.s.mtu);
369 queue_delayed_work(lio->link_status_wq.wq,
370 &lio->link_status_wq.wk.work, 0);
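
Putting lines 340-370 together: update_link_status() latches the new link word, bumps link_changes, and handles an MTU change in two steps. Raising netdev->max_mtu is safe inline, but shrinking the active MTU has to go through dev_set_mtu(), so that case is queued onto link_status_wq with zero delay. A condensed sketch of the tail; the exact log text is an assumption:

if (lio->linfo.link.s.mtu != current_max_mtu) {
	netif_info(lio, probe, lio->netdev,
		   "Max MTU changed from %d to %d\n",
		   current_max_mtu, lio->linfo.link.s.mtu);
	netdev->max_mtu = lio->linfo.link.s.mtu;
}

if (lio->linfo.link.s.mtu < netdev->mtu) {
	dev_warn(&oct->pci_dev->dev,
		 "Current MTU is higher than new max MTU; reducing it\n");
	/* delay of 0 jiffies: run the dev_set_mtu() work ASAP */
	queue_delayed_work(lio->link_status_wq.wq,
			   &lio->link_status_wq.wk.work, 0);
}
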
592 * @lio: per-network private data
595 static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
597 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
602 if (oct->props[lio->ifidx].rx_on == start_stop)
609 netif_info(lio, rx_err, lio->netdev,
622 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
632 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
642 oct->props[lio->ifidx].rx_on = start_stop;
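
Condensed sketch of send_rx_ctrl_cmd() (lines 592-642): it no-ops when the requested state already matches props[].rx_on, builds an OCTNET_CMD_RX_CTL soft command, posts it on the first TX (instruction) queue, and latches rx_on on success. The wait for the firmware response and the error unwinding are elided; the soft-command helpers are assumptions from the LiquidIO driver:

static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	int retval;

	if (oct->props[lio->ifidx].rx_on == start_stop)
		return 0;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, 0);
	if (!sc) {
		netif_info(lio, rx_err, lio->netdev,
			   "Failed to allocate octeon_soft_command struct\n");
		return -ENOMEM;
	}

	ncmd = (union octnet_cmd *)sc->virtdptr;
	ncmd->u64 = 0;
	ncmd->s.cmd = OCTNET_CMD_RX_CTL;
	ncmd->s.param1 = start_stop;

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		netif_info(lio, rx_err, lio->netdev,
			   "Failed to send RX Control message\n");
		/* (real code also frees the soft command) */
		return -EBUSY;
	}

	/* (real code waits for the firmware response here) */
	oct->props[lio->ifidx].rx_on = start_stop;
	return 0;
}
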
662 struct lio *lio;
670 lio = GET_LIO(netdev);
674 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
677 if (oct->props[lio->ifidx].napi_enabled == 1) {
681 oct->props[lio->ifidx].napi_enabled = 0;
692 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
699 lio_delete_glists(lio);
714 struct lio *lio;
728 lio = GET_LIO(oct->props[i].netdev);
731 lio->linfo.rxpciq[j].s.q_no);
804 struct lio *lio;
808 lio = finfo->lio;
810 dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
826 struct lio *lio;
830 lio = finfo->lio;
834 dma_unmap_single(&lio->oct_dev->pci_dev->dev,
842 dma_unmap_page(&lio->oct_dev->pci_dev->dev,
848 iq = skb_iq(lio->oct_dev, skb);
850 spin_lock(&lio->glist_lock[iq]);
851 list_add_tail(&g->list, &lio->glist[iq]);
852 spin_unlock(&lio->glist_lock[iq]);
868 struct lio *lio;
874 lio = finfo->lio;
878 dma_unmap_single(&lio->oct_dev->pci_dev->dev,
886 dma_unmap_page(&lio->oct_dev->pci_dev->dev,
892 iq = skb_iq(lio->oct_dev, skb);
894 spin_lock(&lio->glist_lock[iq]);
895 list_add_tail(&g->list, &lio->glist[iq]);
896 spin_unlock(&lio->glist_lock[iq]);
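
The free_netbuf/free_netsgbuf variants above (lines 804-896) share one pattern: unmap the linear part of the skb with dma_unmap_single(), unmap each page fragment with dma_unmap_page(), then return the gather descriptor to the per-TX-queue free list under glist_lock. Assumed shape of that descriptor (struct octnic_gather in the driver; the exact field set is an assumption):

struct octnic_gather {
	struct list_head list;		/* links into lio->glist[iq]   */
	int sg_size;			/* allocated scatter-list size */
	int adjust;			/* alignment adjustment for sg */
	struct octeon_sg_entry *sg;	/* the scatter-gather entries  */
	dma_addr_t sg_dma_ptr;		/* DMA address of sg           */
};
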
907 struct lio *lio = GET_LIO(netdev);
908 struct octeon_device *oct = lio->oct_dev;
913 if (!oct->props[lio->ifidx].napi_enabled) {
919 oct->props[lio->ifidx].napi_enabled = 1;
924 ifstate_set(lio, LIO_IFSTATE_RUNNING);
927 lio->intf_open = 1;
929 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
932 INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
933 lio->stats_wk.ctxptr = lio;
934 schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies
938 ret = send_rx_ctrl_cmd(lio, 1);
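
The open sequence implied by lines 907-938: enable NAPI first, mark the interface RUNNING, start the TX queues, kick off the stats poller, and only then tell firmware to start RX, so packets cannot arrive before the host side is ready. Connective code and the poll-interval constant are assumptions:

static int liquidio_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	if (!oct->props[lio->ifidx].napi_enabled) {
		struct napi_struct *napi, *n;

		list_for_each_entry_safe(napi, n, &netdev->napi_list,
					 dev_list)
			napi_enable(napi);
		oct->props[lio->ifidx].napi_enabled = 1;
	}

	ifstate_set(lio, LIO_IFSTATE_RUNNING);
	netif_tx_start_all_queues(netdev);
	lio->intf_open = 1;

	netif_info(lio, ifup, lio->netdev,
		   "Interface Open, ready for traffic\n");

	INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
	lio->stats_wk.ctxptr = lio;
	schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies
			      (LIQUIDIO_NDEV_STATS_POLL_TIME_MS));

	/* Last step: ask firmware to start delivering RX traffic. */
	return send_rx_ctrl_cmd(lio, 1);
}
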
953 struct lio *lio = GET_LIO(netdev);
954 struct octeon_device *oct = lio->oct_dev;
960 ret = send_rx_ctrl_cmd(lio, 0);
964 netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
966 lio->intf_open = 0;
967 lio->linfo.link.s.link_up = 0;
970 lio->link_changes++;
972 ifstate_reset(lio, LIO_IFSTATE_RUNNING);
978 netif_info(lio, rx_err, lio->netdev,
981 if (oct->props[lio->ifidx].napi_enabled == 1) {
985 oct->props[lio->ifidx].napi_enabled = 0;
992 cancel_delayed_work_sync(&lio->stats_wk.work);
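
Stop (lines 953-992) mirrors open in reverse order: firmware RX is shut off first so nothing lands while the host tears down, then the interface is marked down, NAPI is disabled, and the stats work is cancelled. A condensed sketch; connective code is an assumption:

static int liquidio_stop(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	int ret;

	/* Stop firmware RX before touching anything else. */
	ret = send_rx_ctrl_cmd(lio, 0);
	if (ret)
		return ret;

	netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
	lio->intf_open = 0;
	lio->linfo.link.s.link_up = 0;
	lio->link_changes++;

	ifstate_reset(lio, LIO_IFSTATE_RUNNING);
	netif_tx_disable(netdev);

	if (oct->props[lio->ifidx].napi_enabled == 1) {
		struct napi_struct *napi, *n;

		list_for_each_entry_safe(napi, n, &netdev->napi_list,
					 dev_list)
			napi_disable(napi);
		oct->props[lio->ifidx].napi_enabled = 0;
	}

	cancel_delayed_work_sync(&lio->stats_wk.work);
	return 0;
}
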
1034 struct lio *lio = GET_LIO(netdev);
1035 struct octeon_device *oct = lio->oct_dev;
1040 if (lio->netdev_uc_count == netdev_uc_count(netdev))
1048 lio->netdev_uc_count = netdev_uc_count(netdev);
1052 nctrl.ncmd.s.more = lio->netdev_uc_count;
1054 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1065 octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1075 struct lio *lio = GET_LIO(netdev);
1076 struct octeon_device *oct = lio->oct_dev;
1103 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1109 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
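
Lines 1034-1109 (presumably liquidio_set_uc_list() and liquidio_set_mcast_list()) both use the driver's standard control-command idiom: fill an octnic_ctrl_pkt, aim it at the first TX queue, and hand it to octnet_send_nic_ctrl_pkt(). Sketch of the unicast case; copying the MAC list into the command payload is elided and the field names are assumptions:

static int liquidio_set_uc_list(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octnic_ctrl_pkt nctrl;

	/* Skip the firmware round trip if the list has not changed. */
	if (lio->netdev_uc_count == netdev_uc_count(netdev))
		return 0;
	lio->netdev_uc_count = netdev_uc_count(netdev);

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_UC_LIST;
	nctrl.ncmd.s.more = lio->netdev_uc_count;  /* trailing MAC words */
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; /* control rides IQ 0 */
	nctrl.netpndev = (u64)netdev;

	/* (real code copies the unicast addresses in here) */

	return octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
}
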
1126 struct lio *lio = GET_LIO(netdev);
1127 struct octeon_device *oct = lio->oct_dev;
1137 if (lio->linfo.macaddr_is_admin_asgnd)
1146 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1153 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1166 ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data);
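
About the "+ 2" at line 1166: linfo.hw_addr is an 8-byte field whose last six bytes hold the station address once octeon_swap_8B_data() (line 2137) has put it in host order, so reads and writes of the MAC start at byte offset 2. A sketch of reading it back out, mirroring line 2139:

u8 mac[ETH_ALEN];
int j;

for (j = 0; j < ETH_ALEN; j++)
	mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
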
1175 struct lio *lio = GET_LIO(netdev);
1182 oct = lio->oct_dev;
1184 if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
1188 iq_no = lio->linfo.txpciq[i].s.q_no;
1204 oq_no = lio->linfo.rxpciq[i].s.q_no;
1245 struct lio *lio = GET_LIO(netdev);
1284 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
1287 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
1314 struct lio *lio;
1317 lio = finfo->lio;
1319 oct = lio->oct_dev;
1334 netif_info(lio, tx_done, lio->netdev,
1337 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
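
The completion path around lines 1314-1337 hands a hardware TX timestamp back to the stack: the firmware's nanosecond stamp is offset by lio->ptp_adjust, converted with ns_to_ktime(), and delivered via skb_tstamp_tx(). Sketch; the resp/ns plumbing and the SKBTX_IN_PROGRESS check are assumptions:

if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
	struct skb_shared_hwtstamps ts;
	u64 ns = resp->timestamp;

	memset(&ts, 0, sizeof(ts));
	ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
	skb_tstamp_tx(skb, &ts);
}
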
1357 struct lio *lio;
1361 lio = finfo->lio;
1393 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
1417 struct lio *lio;
1423 lio = GET_LIO(netdev);
1424 oct = lio->oct_dev;
1426 q_idx = skb_iq(lio->oct_dev, skb);
1428 iq_no = lio->linfo.txpciq[q_idx].s.q_no;
1435 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
1436 (!lio->linfo.link.s.link_up) || (skb->len <= 0)) {
1437 netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n",
1438 lio->linfo.link.s.link_up);
1446 finfo->lio = lio;
1459 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
1506 spin_lock(&lio->glist_lock[q_idx]);
1508 lio_list_delete_head(&lio->glist[q_idx]);
1509 spin_unlock(&lio->glist_lock[q_idx]);
1512 netif_info(lio, tx_err, lio->netdev,
1598 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
1618 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
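
The xmit matches (lines 1417-1618) begin with liquidio_xmit()'s early-drop guards: the packet is freed, not queued, unless the interface is RUNNING, the link is up, and the skb is non-empty. Sketch of that guard; the drop bookkeeping is an assumption:

if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
    !lio->linfo.link.s.link_up || skb->len <= 0) {
	netif_info(lio, tx_err, lio->netdev,
		   "Transmit failed link_status : %d\n",
		   lio->linfo.link.s.link_up);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;	/* consumed, even though dropped */
}
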
1637 struct lio *lio;
1639 lio = GET_LIO(netdev);
1641 netif_info(lio, tx_err, lio->netdev,
1652 struct lio *lio = GET_LIO(netdev);
1653 struct octeon_device *oct = lio->oct_dev;
1662 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1666 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1680 struct lio *lio = GET_LIO(netdev);
1681 struct octeon_device *oct = lio->oct_dev;
1690 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1694 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1714 struct lio *lio = GET_LIO(netdev);
1715 struct octeon_device *oct = lio->oct_dev;
1724 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1728 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1749 struct lio *lio = GET_LIO(netdev);
1750 struct octeon_device *oct = lio->oct_dev;
1760 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1764 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1812 struct lio *lio = netdev_priv(netdev);
1815 !(lio->dev_capability & NETIF_F_RXCSUM))
1819 !(lio->dev_capability & NETIF_F_HW_CSUM))
1822 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
1825 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
1828 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
1833 (lio->dev_capability & NETIF_F_LRO))
1846 struct lio *lio = netdev_priv(netdev);
1851 if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
1855 (lio->dev_capability & NETIF_F_LRO))
1859 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
1864 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
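
Lines 1812-1833 are the ndo_fix_features hook: every requested offload bit that firmware did not advertise in lio->dev_capability gets masked off before the core applies the change. Condensed sketch covering three of the checked bits; the full list is in the matches above:

static netdev_features_t liquidio_fix_features(struct net_device *netdev,
					       netdev_features_t request)
{
	struct lio *lio = netdev_priv(netdev);

	if ((request & NETIF_F_RXCSUM) &&
	    !(lio->dev_capability & NETIF_F_RXCSUM))
		request &= ~NETIF_F_RXCSUM;

	if ((request & NETIF_F_TSO) &&
	    !(lio->dev_capability & NETIF_F_TSO))
		request &= ~NETIF_F_TSO;

	if ((request & NETIF_F_LRO) &&
	    !(lio->dev_capability & NETIF_F_LRO))
		request &= ~NETIF_F_LRO;

	return request;
}
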
1941 struct lio *lio = NULL;
2053 lio = GET_LIO(netdev);
2055 memset(lio, 0, sizeof(struct lio));
2057 lio->ifidx = ifidx_or_pfnum;
2063 lio->linfo.num_rxpciq = num_oqueues;
2064 lio->linfo.num_txpciq = num_iqueues;
2067 lio->linfo.rxpciq[j].u64 =
2071 lio->linfo.txpciq[j].u64 =
2075 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
2076 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
2077 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
2078 lio->linfo.macaddr_is_admin_asgnd =
2080 lio->linfo.macaddr_spoofchk =
2083 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
2085 lio->dev_capability = NETIF_F_HIGHDMA
2096 lio->enc_dev_capability = NETIF_F_IP_CSUM
2105 (lio->enc_dev_capability & ~NETIF_F_LRO);
2108 netdev->vlan_features = lio->dev_capability;
2110 lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
2114 netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
2116 netdev->hw_features = lio->dev_capability;
2128 lio->oct_dev = octeon_dev;
2129 lio->octprops = props;
2130 lio->netdev = netdev;
2134 lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
2137 octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
2139 mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
2145 lio->linfo.num_txpciq,
2146 lio->linfo.num_rxpciq)) {
2151 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
2162 lio->txq = lio->linfo.txpciq[0].s.q_no;
2163 lio->rxq = lio->linfo.rxpciq[0].s.q_no;
2165 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
2166 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
2168 if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
2176 if (lio->oct_dev->chip_id == OCTEON_CN23XX_VF_VID)
2201 lio->link_changes++;
2203 ifstate_set(lio, LIO_IFSTATE_REGISTERED);
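
For reference, the LIO_IFSTATE_* bits seen throughout (DROQ_OPS, REGISTERED, RUNNING, RX_TIMESTAMP_ENABLED, RESETTING) are single-bit flags in lio->ifstate, manipulated with the ifstate helpers sketched near the top. The values below are assumptions from the driver's octeon_network.h:

#define LIO_IFSTATE_DROQ_OPS			0x01
#define LIO_IFSTATE_REGISTERED			0x02
#define LIO_IFSTATE_RUNNING			0x04
#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED	0x08
#define LIO_IFSTATE_RESETTING			0x10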