Lines Matching refs:vsi

121 struct ixl_vsi *vsi = &pf->vsi;
122 struct ixl_queue *que = vsi->queues;
124 vsi->tx_itr_setting = pf->tx_itr;
126 for (int i = 0; i < vsi->num_queues; i++, que++) {
130 vsi->tx_itr_setting);
131 txr->itr = vsi->tx_itr_setting;
140 struct ixl_vsi *vsi = &pf->vsi;
141 struct ixl_queue *que = vsi->queues;
143 vsi->rx_itr_setting = pf->rx_itr;
145 for (int i = 0; i < vsi->num_queues; i++, que++) {
149 vsi->rx_itr_setting);
150 rxr->itr = vsi->rx_itr_setting;
180 struct ixl_vsi *vsi = &pf->vsi;
181 struct ifnet *ifp = vsi->ifp;
193 bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
197 ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
210 ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
231 if (ixl_initialize_vsi(vsi)) {
232 device_printf(dev, "initialize vsi failed!!\n");
240 ixl_init_filters(vsi);
243 ixl_setup_vlan_filters(vsi);
252 ixl_enable_rings(vsi);
254 i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
256 ixl_reconfigure_filters(vsi);
259 ixl_enable_intr(vsi);
344 ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
346 device_t dev = vsi->dev;
354 if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
355 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
361 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
374 vsi->flags |= IXL_FLAGS_KEEP_TSO4;
387 if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
388 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
393 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
406 vsi->flags |= IXL_FLAGS_KEEP_TSO6;
577 struct ixl_vsi *vsi = que->vsi;
578 struct i40e_hw *hw = vsi->hw;
580 struct ifnet *ifp = vsi->ifp;
612 struct ixl_vsi *vsi = &pf->vsi;
613 struct ixl_queue *que = vsi->queues;
614 struct ifnet *ifp = vsi->ifp;
647 if (!drbr_empty(vsi->ifp, txr->br))
679 struct ixl_vsi *vsi = que->vsi;
680 struct i40e_hw *hw = vsi->hw;
685 if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
699 if (!drbr_empty(vsi->ifp, txr->br))
804 ixl_set_promisc(struct ixl_vsi *vsi)
806 struct ifnet *ifp = vsi->ifp;
807 struct i40e_hw *hw = vsi->hw;
832 vsi->seid, uni, NULL, TRUE);
834 vsi->seid, multi, NULL);
845 ixl_add_multi(struct ixl_vsi *vsi)
848 struct ifnet *ifp = vsi->ifp;
849 struct i40e_hw *hw = vsi->hw;
868 ixl_del_hw_filters(vsi, mcnt);
870 vsi->seid, TRUE, NULL);
879 ixl_add_mc_filter(vsi,
886 ixl_add_hw_filters(vsi, flags, mcnt);
894 ixl_del_multi(struct ixl_vsi *vsi)
896 struct ifnet *ifp = vsi->ifp;
906 SLIST_FOREACH(f, &vsi->ftl, next) {
927 ixl_del_hw_filters(vsi, mcnt);
946 struct ixl_vsi *vsi = &pf->vsi;
947 struct ixl_queue *que = vsi->queues;
964 for (int i = 0; i < vsi->num_queues; i++, que++) {
987 if (hung == vsi->num_queues)
1006 struct ixl_vsi *vsi = &pf->vsi;
1008 struct ifnet *ifp = vsi->ifp;
1012 if (vsi->link_active == FALSE) {
1021 vsi->link_active = TRUE;
1025 if (vsi->link_active == TRUE) {
1029 vsi->link_active = FALSE;
1046 struct ixl_vsi *vsi = &pf->vsi;
1047 struct ifnet *ifp = vsi->ifp;
1056 ixl_disable_rings_intr(vsi);
1057 ixl_disable_rings(vsi);
1070 ixl_teardown_queue_msix(&pf->vsi);
1071 ixl_free_queue_tqs(&pf->vsi);
1083 struct ixl_vsi *vsi = &pf->vsi;
1084 struct ixl_queue *que = vsi->queues;
1093 " bus resource: vsi legacy/msi interrupt\n");
1154 ixl_setup_queue_tqs(struct ixl_vsi *vsi)
1156 struct ixl_queue *que = vsi->queues;
1157 device_t dev = vsi->dev;
1164 for (int i = 0; i < vsi->num_queues; i++, que++) {
1193 ixl_free_queue_tqs(struct ixl_vsi *vsi)
1195 struct ixl_queue *que = vsi->queues;
1197 for (int i = 0; i < vsi->num_queues; i++, que++) {
1247 ixl_setup_queue_msix(struct ixl_vsi *vsi)
1249 device_t dev = vsi->dev;
1250 struct ixl_queue *que = vsi->queues;
1255 for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1410 pf->vsi.num_queues = queues;
1434 pf->vsi.num_queues = 1;
1491 struct ixl_vsi *vsi = &pf->vsi;
1495 for (int i = 0; i < vsi->num_queues; i++, vector++) {
1635 ixl_teardown_queue_msix(struct ixl_vsi *vsi)
1637 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1638 struct ixl_queue *que = vsi->queues;
1639 device_t dev = vsi->dev;
1647 for (int i = 0; i < vsi->num_queues; i++, que++) {
1680 ixl_teardown_queue_msix(&pf->vsi);
1700 ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
1704 ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1707 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1709 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1711 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
1716 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
1719 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1721 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1723 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1730 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
1732 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
1734 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
1737 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1741 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
1743 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
1745 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
1747 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1749 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1752 ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
1755 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
1757 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
1766 ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
1769 struct i40e_hw *hw = vsi->hw;
1770 struct ixl_queue *que = vsi->queues;
1776 ifp = vsi->ifp = if_alloc(IFT_ETHER);
1785 ifp->if_softc = vsi;
1799 vsi->max_frame_size =
1840 ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
1861 ixl_add_ifmedia(vsi, abilities.phy_type);
1864 ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1865 ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
1909 struct ixl_vsi *vsi = &pf->vsi;
1910 device_t dev = vsi->dev;
1939 vsi->uplink_seid = sw_config->element[0].uplink_seid;
1940 vsi->downlink_seid = sw_config->element[0].downlink_seid;
1941 vsi->seid = sw_config->element[0].seid;
1953 ixl_initialize_vsi(struct ixl_vsi *vsi)
1955 struct ixl_pf *pf = vsi->back;
1956 struct ixl_queue *que = vsi->queues;
1957 device_t dev = vsi->dev;
1958 struct i40e_hw *hw = vsi->hw;
1964 ctxt.seid = vsi->seid;
2005 if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2011 vsi->vsi_num = ctxt.vsi_number;
2012 bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2015 ixl_vsi_reset_stats(vsi);
2016 vsi->hw_filters_add = 0;
2017 vsi->hw_filters_del = 0;
2028 for (int i = 0; i < vsi->num_queues; i++, que++) {
2043 tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2070 if (vsi->max_frame_size <= MCLBYTES)
2082 rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2083 vsi->max_frame_size : max_rxmax;
2118 if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
2119 struct netmap_adapter *na = NA(vsi->ifp);
2122 wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
2125 wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
2137 ixl_free_vsi(struct ixl_vsi *vsi)
2139 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2140 struct ixl_queue *que = vsi->queues;
2143 if (!vsi->queues)
2146 for (int i = 0; i < vsi->num_queues; i++, que++) {
2168 free(vsi->queues, M_DEVBUF);
2172 ixl_free_mac_filters(vsi);
2176 ixl_free_mac_filters(struct ixl_vsi *vsi)
2180 while (!SLIST_EMPTY(&vsi->ftl)) {
2181 f = SLIST_FIRST(&vsi->ftl);
2182 SLIST_REMOVE_HEAD(&vsi->ftl, next);
2195 struct ixl_vsi *vsi = &pf->vsi;
2204 que->vsi = vsi;
2299 struct ixl_vsi *vsi;
2303 vsi = &pf->vsi;
2304 vsi->back = (void *)pf;
2305 vsi->hw = &pf->hw;
2306 vsi->id = 0;
2307 vsi->num_vlans = 0;
2308 vsi->back = pf;
2311 if (!(vsi->queues =
2313 vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2319 for (int i = 0; i < vsi->num_queues; i++) {
2320 que = &vsi->queues[i];
2336 struct ixl_vsi *vsi = que->vsi;
2337 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2338 struct i40e_hw *hw = vsi->hw;
2388 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2389 vsi->rx_itr_setting = pf->rx_itr;
2391 if (rxr->itr != vsi->rx_itr_setting) {
2392 rxr->itr = vsi->rx_itr_setting;
2410 struct ixl_vsi *vsi = que->vsi;
2411 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2412 struct i40e_hw *hw = vsi->hw;
2463 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2464 vsi->tx_itr_setting = pf->tx_itr;
2466 if (txr->itr != vsi->tx_itr_setting) {
2467 txr->itr = vsi->tx_itr_setting;
2478 ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
2487 vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
2489 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
2491 ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
2510 val = rd32(que->vsi->hw, que->txr.tail);
2532 val = rd32(que->vsi->hw, que->rxr.tail);
2612 struct ixl_vsi *vsi = &pf->vsi;
2613 struct ixl_queue *queues = vsi->queues;
2636 ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
2637 vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
2640 for (int q = 0; q < vsi->num_queues; q++) {
2810 struct ixl_vsi *vsi = &pf->vsi;
2831 status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
2884 struct ixl_vsi *vsi = &pf->vsi;
2905 que_id = que_id % vsi->num_queues;
2907 que_id = i % vsi->num_queues;
2914 status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
2946 struct ixl_vsi *vsi = ifp->if_softc;
2947 struct i40e_hw *hw = vsi->hw;
2948 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2957 ++vsi->num_vlans;
2958 ixl_add_filter(vsi, hw->mac.addr, vtag);
2970 struct ixl_vsi *vsi = ifp->if_softc;
2971 struct i40e_hw *hw = vsi->hw;
2972 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2981 --vsi->num_vlans;
2982 ixl_del_filter(vsi, hw->mac.addr, vtag);
2992 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
2997 if (vsi->num_vlans == 0)
3004 SLIST_FOREACH(f, &vsi->ftl, next) {
3018 ixl_add_hw_filters(vsi, flags, cnt);
3029 ixl_init_filters(struct ixl_vsi *vsi)
3031 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3034 ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
3042 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
3049 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3054 f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3058 f = ixl_get_filter(vsi);
3072 ixl_reconfigure_filters(struct ixl_vsi *vsi)
3074 ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
3081 ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3089 pf = vsi->back;
3093 f = ixl_find_filter(vsi, macaddr, vlan);
3101 if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3102 tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3104 ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3105 ixl_add_filter(vsi, macaddr, 0);
3109 f = ixl_get_filter(vsi);
3120 vsi->num_macs++;
3122 ixl_add_hw_filters(vsi, f->flags, 1);
3127 ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3131 f = ixl_find_filter(vsi, macaddr, vlan);
3136 ixl_del_hw_filters(vsi, 1);
3137 vsi->num_macs--;
3140 if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3142 ixl_del_filter(vsi, macaddr, 0);
3143 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3152 ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3157 SLIST_FOREACH(f, &vsi->ftl, next) {
3172 ** This routine takes additions to the vsi filter
3177 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3186 pf = vsi->back;
3203 SLIST_FOREACH(f, &vsi->ftl, next) {
3222 err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3227 vsi->hw_filters_add += j;
3234 ** This routine takes removals in the vsi filter
3239 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3250 pf = vsi->back;
3261 SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3267 /* delete entry from vsi list */
3268 SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3276 err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3281 vsi->hw_filters_del += sc;
3286 vsi->hw_filters_del += j;
3379 ixl_enable_rings(struct ixl_vsi *vsi)
3381 struct ixl_pf *pf = vsi->back;
3384 for (int i = 0; i < vsi->num_queues; i++) {
3469 ixl_disable_rings(struct ixl_vsi *vsi)
3471 struct ixl_pf *pf = vsi->back;
3474 for (int i = 0; i < vsi->num_queues; i++) {
3555 ixl_enable_intr(struct ixl_vsi *vsi)
3557 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3558 struct i40e_hw *hw = vsi->hw;
3559 struct ixl_queue *que = vsi->queues;
3562 for (int i = 0; i < vsi->num_queues; i++, que++)
3569 ixl_disable_rings_intr(struct ixl_vsi *vsi)
3571 struct i40e_hw *hw = vsi->hw;
3572 struct ixl_queue *que = vsi->queues;
3574 for (int i = 0; i < vsi->num_queues; i++, que++)
3579 ixl_disable_intr(struct ixl_vsi *vsi)
3581 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3582 struct i40e_hw *hw = vsi->hw;
3655 struct ixl_vsi *vsi = &pf->vsi;
3817 /* Update vsi stats */
3818 ixl_update_vsi_stats(vsi);
3823 ixl_update_eth_stats(&pf->vfs[i].vsi);
3831 struct ixl_vsi *vsi = &pf->vsi;
3836 is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
3981 ixl_update_eth_stats(struct ixl_vsi *vsi)
3983 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3988 u16 stat_idx = vsi->info.stat_counter_idx;
3990 es = &vsi->eth_stats;
3991 oes = &vsi->eth_stats_offsets;
3996 vsi->stat_offsets_loaded,
3999 vsi->stat_offsets_loaded,
4004 vsi->stat_offsets_loaded,
4008 vsi->stat_offsets_loaded,
4012 vsi->stat_offsets_loaded,
4016 vsi->stat_offsets_loaded,
4021 vsi->stat_offsets_loaded,
4025 vsi->stat_offsets_loaded,
4029 vsi->stat_offsets_loaded,
4033 vsi->stat_offsets_loaded,
4035 vsi->stat_offsets_loaded = true;
4039 ixl_update_vsi_stats(struct ixl_vsi *vsi)
4048 pf = vsi->back;
4049 ifp = vsi->ifp;
4050 es = &vsi->eth_stats;
4053 ixl_update_eth_stats(vsi);
4056 for (int i = 0; i < vsi->num_queues; i++)
4057 tx_discards += vsi->queues[i].txr.br->br_drops;
4060 IXL_SET_IPACKETS(vsi, es->rx_unicast +
4063 IXL_SET_OPACKETS(vsi, es->tx_unicast +
4066 IXL_SET_IBYTES(vsi, es->rx_bytes);
4067 IXL_SET_OBYTES(vsi, es->tx_bytes);
4068 IXL_SET_IMCASTS(vsi, es->rx_multicast);
4069 IXL_SET_OMCASTS(vsi, es->tx_multicast);
4071 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
4074 IXL_SET_OERRORS(vsi, es->tx_errors);
4075 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4076 IXL_SET_OQDROPS(vsi, tx_discards);
4077 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4078 IXL_SET_COLLISIONS(vsi, 0);
4093 * Resets all stats of the given vsi
4096 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4098 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4099 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4100 vsi->stat_offsets_loaded = false;
4677 struct ixl_vsi *vsi = ifp->if_softc;
4678 struct ixl_pf *pf = vsi->back;
4800 struct ixl_vsi *vsi = &pf->vsi;
4824 ixl_teardown_queue_msix(vsi);
4825 ixl_free_queue_tqs(vsi);
4827 error = ixl_setup_queue_msix(vsi);
4831 error = ixl_setup_queue_tqs(vsi);
4856 struct ixl_vsi *vsi = ifp->if_softc;
4857 struct ifmedia *ifm = &vsi->media;
4881 struct ixl_vsi *vsi = ifp->if_softc;
4882 struct ixl_pf *pf = vsi->back;
4927 vsi->max_frame_size =
4941 ixl_set_promisc(vsi);
4973 ixl_disable_intr(vsi);
4974 ixl_add_multi(vsi);
4975 ixl_enable_intr(vsi);
4983 ixl_disable_intr(vsi);
4984 ixl_del_multi(vsi);
4985 ixl_enable_intr(vsi);
4993 error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
5000 ixl_cap_txcsum_tso(vsi, ifp, mask);
5195 struct ixl_vsi *vsi = &pf->vsi;
5205 SLIST_FOREACH(f, &vsi->ftl, next) {
5218 SLIST_FOREACH(f, &vsi->ftl, next) {
5497 status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
5538 status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));