Lines Matching refs:vsi

78 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);
114 struct ice_vsi *vsi = NULL;
121 if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
122 vsi = pf->vsi[v];
126 if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
129 if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
132 hw = &vsi->back->hw;
134 ice_for_each_txq(vsi, i) {
135 struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
182 struct ice_vsi *vsi;
185 vsi = ice_get_main_vsi(pf);
186 if (!vsi)
189 perm_addr = vsi->port_info->mac.perm_addr;
190 return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
206 struct ice_vsi *vsi = np->vsi;
208 if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
228 struct ice_vsi *vsi = np->vsi;
238 if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
247 * @vsi: VSI to be checked
251 static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
253 return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
254 test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
259 * @vsi: the VSI being configured
263 static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
267 if (vsi->type != ICE_VSI_PF)
270 if (ice_vsi_has_non_zero_vlans(vsi)) {
272 status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
275 status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
281 netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
282 vsi->vsi_num, promisc_m);
288 * @vsi: the VSI being configured
292 static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
296 if (vsi->type != ICE_VSI_PF)
299 if (ice_vsi_has_non_zero_vlans(vsi)) {
301 status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
304 status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
308 netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
309 vsi->vsi_num, promisc_m);
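The set/clear pair above (lines 263 and 292) is symmetric: both helpers bail out for non-PF VSIs, then pick the per-VLAN promiscuous call when the VSI carries non-zero VLANs. A minimal sketch of that shape, reconstructed from the matched lines (the trailing VLAN-id argument is an assumption; the matches truncate the argument lists):

        if (vsi->type != ICE_VSI_PF)
                return 0;

        if (ice_vsi_has_non_zero_vlans(vsi))
                /* one promiscuous rule per configured VLAN */
                status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
                                                       promisc_m);
        else
                /* single rule; VLAN id 0 assumed */
                status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
                                                  promisc_m, 0);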
315 * @vsi: ptr to the VSI
319 static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
321 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
322 struct device *dev = ice_pf_to_dev(vsi->back);
323 struct net_device *netdev = vsi->netdev;
325 struct ice_pf *pf = vsi->back;
330 if (!vsi->netdev)
333 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
336 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
337 vsi->current_netdev_flags = vsi->netdev->flags;
339 INIT_LIST_HEAD(&vsi->tmp_sync_list);
340 INIT_LIST_HEAD(&vsi->tmp_unsync_list);
342 if (ice_vsi_fltr_changed(vsi)) {
343 clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
344 clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
357 err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
358 ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
367 err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
368 ice_fltr_free_list(dev, &vsi->tmp_sync_list);
381 vsi->state)) {
384 vsi->vsi_num);
392 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
393 err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
395 vsi->current_netdev_flags &= ~IFF_ALLMULTI;
399 /* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
400 err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
402 vsi->current_netdev_flags |= IFF_ALLMULTI;
409 test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
410 clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
411 if (vsi->current_netdev_flags & IFF_PROMISC) {
413 if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
414 err = ice_set_dflt_vsi(vsi);
417 err, vsi->vsi_num);
418 vsi->current_netdev_flags &=
423 vlan_ops->dis_rx_filtering(vsi);
430 err = ice_set_promisc(vsi,
437 if (ice_is_vsi_dflt_vsi(vsi)) {
438 err = ice_clear_dflt_vsi(vsi);
441 err, vsi->vsi_num);
442 vsi->current_netdev_flags |=
446 if (vsi->netdev->features &
448 vlan_ops->ena_rx_filtering(vsi);
454 if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
455 err = ice_clear_promisc(vsi,
459 err, vsi->vsi_num);
467 set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
471 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
472 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
474 clear_bit(ICE_CFG_BUSY, vsi->state);
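Note the idiom bracketing ice_vsi_sync_fltr (lines 333 and 474): the VSI is serialized with the ICE_CFG_BUSY state bit rather than a mutex, a pattern the VLAN rx-vid handlers later in this listing reuse. Sketched, with the back-off interval being an assumption (only the test_and_set loop and the final clear_bit appear in the matches):

        while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
                usleep_range(1000, 2000);       /* back off; interval assumed */

        /* ... resync MAC filters and promiscuous state ... */

        clear_bit(ICE_CFG_BUSY, vsi->state);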
492 if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
493 ice_vsi_sync_fltr(pf->vsi[v])) {
511 if (pf->vsi[v])
512 ice_dis_vsi(pf->vsi[v], locked);
551 struct ice_vsi *vsi;
579 vsi = ice_get_main_vsi(pf);
580 if (!vsi)
586 vsi->orig_rss_size = 0;
590 vsi->old_ena_tc = vsi->all_enatc;
591 vsi->old_numtc = vsi->all_numtc;
593 ice_remove_q_channels(vsi, true);
598 vsi->old_ena_tc = 0;
599 vsi->all_enatc = 0;
600 vsi->old_numtc = 0;
601 vsi->all_numtc = 0;
602 vsi->req_txq = 0;
603 vsi->req_rxq = 0;
605 memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
753 * @vsi: the VSI whose topology status is being checked
755 static void ice_print_topo_conflict(struct ice_vsi *vsi)
757 switch (vsi->port_info->phy.link_info.topo_media_conflict) {
763 netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
766 if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
767 netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
769 netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
778 * @vsi: the VSI whose link status is being queried
781 void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
792 if (!vsi)
795 if (vsi->current_isup == isup)
798 vsi->current_isup = isup;
801 netdev_info(vsi->netdev, "NIC Link is Down\n");
805 switch (vsi->port_info->phy.link_info.link_speed) {
841 switch (vsi->port_info->fc.current_mode) {
860 switch (vsi->port_info->phy.link_info.fec_info) {
874 if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
887 status = ice_aq_get_phy_caps(vsi->port_info, false,
890 netdev_info(vsi->netdev, "Get phy capability failed.\n");
906 netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
908 ice_print_topo_conflict(vsi);
913 * @vsi: the VSI on which the link event occurred
916 static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
918 if (!vsi)
921 if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
924 if (vsi->type == ICE_VSI_PF) {
925 if (link_up == netif_carrier_ok(vsi->netdev))
929 netif_carrier_on(vsi->netdev);
930 netif_tx_wake_all_queues(vsi->netdev);
932 netif_carrier_off(vsi->netdev);
933 netif_tx_stop_all_queues(vsi->netdev);
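ice_vsi_link_event (lines 916-933) reduces to a carrier/queue toggle once the no-change case is filtered out; the matched lines reconstruct to:

        if (link_up == netif_carrier_ok(vsi->netdev))
                return;         /* no state change */

        if (link_up) {
                netif_carrier_on(vsi->netdev);
                netif_tx_wake_all_queues(vsi->netdev);
        } else {
                netif_carrier_off(vsi->netdev);
                netif_tx_stop_all_queues(vsi->netdev);
        }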
1110 struct ice_vsi *vsi;
1138 vsi = ice_get_main_vsi(pf);
1139 if (!vsi || !vsi->port_info)
1146 ice_set_link(vsi, false);
1162 ice_vsi_link_event(vsi, link_up);
1163 ice_print_link_msg(vsi, link_up);
1195 if (pf->vsi[i] && pf->vsi[i]->netdev)
1196 ice_update_vsi_stats(pf->vsi[i]);
1899 * @vsi: VSI to force the physical link state to up/down
1909 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1917 if (!vsi || !vsi->port_info || !vsi->back)
1919 if (vsi->type != ICE_VSI_PF)
1922 dev = ice_pf_to_dev(vsi->back);
1924 pi = vsi->port_info;
1934 vsi->vsi_num, retcode);
1960 retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1963 vsi->vsi_num, retcode);
2148 * @vsi: VSI of PHY
2154 static int ice_configure_phy(struct ice_vsi *vsi)
2156 struct device *dev = ice_pf_to_dev(vsi->back);
2157 struct ice_port_info *pi = vsi->port_info;
2161 struct ice_pf *pf = vsi->back;
2168 ice_print_topo_conflict(vsi);
2175 return ice_force_phys_link_state(vsi, true);
2186 vsi->vsi_num, err);
2207 vsi->vsi_num, err);
2223 vsi->back->state)) {
2263 vsi->vsi_num, err);
2281 struct ice_vsi *vsi;
2288 vsi = ice_get_main_vsi(pf);
2289 if (!vsi)
2293 pi = vsi->port_info;
2307 if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2308 test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2311 err = ice_configure_phy(vsi);
2500 * @vsi: the VSI being configured
2502 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2504 struct ice_hw *hw = &vsi->back->hw;
2507 ice_for_each_q_vector(vsi, i)
2508 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2516 * @vsi: the VSI being configured
2519 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2521 int q_vectors = vsi->num_q_vectors;
2522 struct ice_pf *pf = vsi->back;
2531 struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2549 if (vsi->type == ICE_VSI_CTRL && vsi->vf)
2550 err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2554 err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2557 netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2576 err = ice_set_cpu_rx_rmap(vsi);
2578 netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
2579 vsi->vsi_num, ERR_PTR(err));
2583 vsi->irqs_ready = true;
2588 irq_num = vsi->q_vectors[vector]->irq.virq;
2592 devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
2599 * @vsi: VSI to setup Tx rings used by XDP
2603 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2605 struct device *dev = ice_pf_to_dev(vsi->back);
2609 ice_for_each_xdp_txq(vsi, i) {
2610 u16 xdp_q_idx = vsi->alloc_txq + i;
2626 xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2627 xdp_ring->vsi = vsi;
2630 xdp_ring->count = vsi->num_tx_desc;
2631 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2646 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) {
2647 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2648 vsi->xdp_rings[i]->ring_stats = NULL;
2649 ice_free_tx_ring(vsi->xdp_rings[i]);
2657 * @vsi: VSI to set the bpf prog on
2660 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2665 old_prog = xchg(&vsi->xdp_prog, prog);
2666 ice_for_each_rxq(vsi, i)
2667 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
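ice_vsi_assign_bpf_prog (lines 2660-2667) publishes the new program with an atomic exchange and then pushes it to every Rx ring. A sketch, assuming the old program's reference is dropped here, as the comment matched at line 2777 ("calling bpf_prog_put") suggests:

static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
{
        struct bpf_prog *old_prog;
        int i;

        old_prog = xchg(&vsi->xdp_prog, prog);
        ice_for_each_rxq(vsi, i)
                WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);

        if (old_prog)
                bpf_prog_put(old_prog); /* assumed: released after rings see the new prog */
}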
2675 * @vsi: VSI to bring up Tx rings used by XDP
2680 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2683 int xdp_rings_rem = vsi->num_xdp_txq;
2684 struct ice_pf *pf = vsi->back;
2689 .q_count = vsi->num_xdp_txq,
2691 .vsi_map = vsi->txq_map,
2692 .vsi_map_offset = vsi->alloc_txq,
2700 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2701 sizeof(*vsi->xdp_rings), GFP_KERNEL);
2702 if (!vsi->xdp_rings)
2705 vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2710 netdev_warn(vsi->netdev,
2713 if (ice_xdp_alloc_setup_rings(vsi))
2717 ice_for_each_q_vector(vsi, v_idx) {
2718 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2722 vsi->num_q_vectors - v_idx);
2723 q_base = vsi->num_xdp_txq - xdp_rings_rem;
2726 struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
2735 ice_for_each_rxq(vsi, i) {
2737 vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
2739 struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
2744 vsi->rx_rings[i]->xdp_ring = ring;
2749 ice_tx_xsk_pool(vsi, i);
2762 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2763 max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2765 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2777 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put
2782 if (!ice_is_xdp_ena_vsi(vsi))
2783 ice_vsi_assign_bpf_prog(vsi, prog);
2787 ice_for_each_xdp_txq(vsi, i)
2788 if (vsi->xdp_rings[i]) {
2789 kfree_rcu(vsi->xdp_rings[i], rcu);
2790 vsi->xdp_rings[i] = NULL;
2795 ice_for_each_xdp_txq(vsi, i) {
2796 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2797 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2801 devm_kfree(dev, vsi->xdp_rings);
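The loop matched at line 2717 spreads the XDP Tx rings as evenly as possible across the queue vectors, consuming xdp_rings_rem as it goes. A compact sketch of the split arithmetic (DIV_ROUND_UP and the per-ring attach are assumptions; the matches show only the divisor and the q_base computation):

        int xdp_rings_rem = vsi->num_xdp_txq;
        int v_idx, q_id;

        ice_for_each_q_vector(vsi, v_idx) {
                struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
                int rings_per_v, q_base;

                /* split what remains over the vectors that remain */
                rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
                                           vsi->num_q_vectors - v_idx);
                q_base = vsi->num_xdp_txq - xdp_rings_rem;

                for (q_id = q_base; q_id < q_base + rings_per_v; q_id++)
                        vsi->xdp_rings[q_id]->q_vector = q_vector; /* attach (sketch) */

                xdp_rings_rem -= rings_per_v;
        }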
2807 * @vsi: VSI to remove XDP rings
2812 int ice_destroy_xdp_rings(struct ice_vsi *vsi)
2815 struct ice_pf *pf = vsi->back;
2823 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2826 ice_for_each_q_vector(vsi, v_idx) {
2827 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2840 ice_for_each_xdp_txq(vsi, i) {
2841 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2842 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2846 ice_for_each_xdp_txq(vsi, i)
2847 if (vsi->xdp_rings[i]) {
2848 if (vsi->xdp_rings[i]->desc) {
2850 ice_free_tx_ring(vsi->xdp_rings[i]);
2852 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2853 vsi->xdp_rings[i]->ring_stats = NULL;
2854 kfree_rcu(vsi->xdp_rings[i], rcu);
2855 vsi->xdp_rings[i] = NULL;
2858 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2859 vsi->xdp_rings = NULL;
2864 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2867 ice_vsi_assign_bpf_prog(vsi, NULL);
2872 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2873 max_txqs[i] = vsi->num_txq;
2876 vsi->num_xdp_txq = 0;
2878 return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2884 * @vsi: VSI to schedule napi on
2886 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2890 ice_for_each_rxq(vsi, i) {
2891 struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
2900 * @vsi: VSI to determine the count of XDP Tx qs
2905 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
2907 u16 avail = ice_get_avail_txq_count(vsi->back);
2913 vsi->num_xdp_txq = min_t(u16, avail, cpus);
2915 if (vsi->num_xdp_txq < cpus)
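ice_vsi_determine_xdp_res (lines 2905-2915) sizes the XDP Tx queue set as the smaller of the free Tx queue count and the CPU count. Sketched, with num_possible_cpus() and the -ENOMEM return being assumptions (the matches show only avail, the min_t, and the final comparison):

        u16 avail = ice_get_avail_txq_count(vsi->back);
        u16 cpus = num_possible_cpus();         /* assumed source of cpus */

        vsi->num_xdp_txq = min_t(u16, avail, cpus);
        if (vsi->num_xdp_txq < cpus)
                return -ENOMEM;                 /* error code assumed */

        return 0;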
2923 * @vsi: Pointer to VSI structure
2925 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
2927 if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
2935 * @vsi: VSI to setup XDP for
2940 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2943 unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2944 bool if_running = netif_running(vsi->netdev);
2948 if (frame_size > ice_max_xdp_frame_size(vsi)) {
2956 if (ice_is_xdp_ena_vsi(vsi) == !!prog) {
2957 ice_vsi_assign_bpf_prog(vsi, prog);
2962 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
2963 ret = ice_down(vsi);
2970 if (!ice_is_xdp_ena_vsi(vsi) && prog) {
2971 xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
2975 xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2979 xdp_features_set_redirect_target(vsi->netdev, true);
2981 xdp_ring_err = ice_realloc_zc_buf(vsi, true);
2984 } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2985 xdp_features_clear_redirect_target(vsi->netdev);
2986 xdp_ring_err = ice_destroy_xdp_rings(vsi);
2990 xdp_ring_err = ice_realloc_zc_buf(vsi, false);
2996 ret = ice_up(vsi);
2999 ice_vsi_rx_napi_schedule(vsi);
3026 struct ice_vsi *vsi = np->vsi;
3028 if (vsi->type != ICE_VSI_PF) {
3035 return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
3037 return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
3484 * @vsi: VSI for which NAPI handler is to be registered
3490 static void ice_napi_add(struct ice_vsi *vsi)
3494 if (!vsi->netdev)
3497 ice_for_each_q_vector(vsi, v_idx) {
3498 netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
3500 __ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false);
3506 * @vsi: the VSI associated with the new netdev
3508 static void ice_set_ops(struct ice_vsi *vsi)
3510 struct net_device *netdev = vsi->netdev;
3524 if (vsi->type != ICE_VSI_PF)
3724 struct ice_vsi *vsi = np->vsi;
3732 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3738 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3739 ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3746 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3752 ret = vlan_ops->add_vlan(vsi, &vlan);
3760 if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
3761 ice_vsi_num_non_zero_vlans(vsi) == 1) {
3762 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3764 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3769 clear_bit(ICE_CFG_BUSY, vsi->state);
3787 struct ice_vsi *vsi = np->vsi;
3795 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3798 ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3802 vsi->vsi_num);
3803 vsi->current_netdev_flags |= IFF_ALLMULTI;
3806 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3812 ret = vlan_ops->del_vlan(vsi, &vlan);
3819 if (vsi->current_netdev_flags & IFF_ALLMULTI)
3820 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3823 if (!ice_vsi_has_non_zero_vlans(vsi)) {
3828 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3829 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3832 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3838 clear_bit(ICE_CFG_BUSY, vsi->state);
3857 * @vsi: VSI struct which has the netdev
3859 static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
3861 struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
3869 * @vsi: VSI struct which has the netdev
3873 static int ice_tc_indir_block_register(struct ice_vsi *vsi)
3877 if (!vsi || !vsi->netdev)
3880 np = netdev_priv(vsi->netdev);
4070 * @vsi: VSI being changed
4079 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
4081 struct ice_pf *pf = vsi->back;
4095 vsi->req_txq = (u16)new_tx;
4097 vsi->req_rxq = (u16)new_rx;
4100 if (!netif_running(vsi->netdev)) {
4101 ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
4106 ice_vsi_close(vsi);
4107 ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
4109 ice_vsi_open(vsi);
4124 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4129 if (!vsi)
4137 ctxt->info = vsi->info;
4155 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
4157 dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
4160 vsi->info.sec_flags = ctxt->info.sec_flags;
4161 vsi->info.sw_flags2 = ctxt->info.sw_flags2;
4162 vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
4364 pf->vsi[pf->ctrl_vsi_idx] = NULL;
4372 struct ice_vsi *vsi = ice_get_ctrl_vsi(pf);
4374 if (!vsi)
4377 ice_vsi_manage_fdir(vsi, false);
4378 ice_vsi_release(vsi);
4380 pf->vsi[pf->ctrl_vsi_idx] = NULL;
4500 * @vsi: pointer to the VSI struct
4502 static int ice_register_netdev(struct ice_vsi *vsi)
4506 if (!vsi || !vsi->netdev)
4509 err = register_netdev(vsi->netdev);
4513 set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4514 netif_carrier_off(vsi->netdev);
4515 netif_tx_stop_all_queues(vsi->netdev);
4520 static void ice_unregister_netdev(struct ice_vsi *vsi)
4522 if (!vsi || !vsi->netdev)
4525 unregister_netdev(vsi->netdev);
4526 clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4531 * @vsi: the VSI associated with the new netdev
4535 static int ice_cfg_netdev(struct ice_vsi *vsi)
4541 netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
4542 vsi->alloc_rxq);
4546 set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4547 vsi->netdev = netdev;
4549 np->vsi = vsi;
4552 ice_set_ops(vsi);
4554 if (vsi->type == ICE_VSI_PF) {
4555 SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
4556 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4563 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
4570 static void ice_decfg_netdev(struct ice_vsi *vsi)
4572 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4573 free_netdev(vsi->netdev);
4574 vsi->netdev = NULL;
4812 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4814 if (vsi)
4815 ice_configure_phy(vsi);
4827 struct ice_vsi *vsi;
4849 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
4850 if (!vsi) {
4865 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4867 if (!vsi)
4870 ice_vsi_release(vsi);
4889 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4891 if (!pf->vsi)
4897 devm_kfree(dev, pf->vsi);
4910 devm_kfree(ice_pf_to_dev(pf), pf->vsi);
4911 pf->vsi = NULL;
5005 struct ice_vsi *vsi;
5010 vsi = ice_get_main_vsi(pf);
5013 INIT_LIST_HEAD(&vsi->ch_list);
5015 err = ice_cfg_netdev(vsi);
5020 ice_dcbnl_setup(vsi);
5030 SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
5032 err = ice_register_netdev(vsi);
5036 err = ice_tc_indir_block_register(vsi);
5040 ice_napi_add(vsi);
5054 ice_tc_indir_block_unregister(vsi);
5056 ice_unregister_netdev(vsi);
5061 ice_decfg_netdev(vsi);
5073 struct ice_vsi *vsi = ice_get_main_vsi(pf);
5079 ice_tc_indir_block_unregister(vsi);
5080 ice_unregister_netdev(vsi);
5082 ice_decfg_netdev(vsi);
5237 struct ice_vsi *vsi;
5244 vsi = ice_get_main_vsi(pf);
5245 if (!vsi)
5249 if (vsi->netdev)
5250 ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
5252 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
5346 if (pf->vsi[v])
5347 pf->vsi[v]->vsi_num = 0;
5379 if (!pf->vsi[v])
5382 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
5385 ice_vsi_map_rings_to_vectors(pf->vsi[v]);
5386 ice_vsi_set_napi_queues(pf->vsi[v]);
5400 if (pf->vsi[v])
5401 ice_vsi_free_q_vectors(pf->vsi[v]);
5464 if (!pf->vsi[v])
5466 ice_vsi_free_q_vectors(pf->vsi[v]);
5802 struct ice_vsi *vsi = np->vsi;
5803 struct ice_pf *pf = vsi->back;
5836 err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
5843 err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
5868 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
5888 struct ice_vsi *vsi = np->vsi;
5890 if (!vsi || ice_is_switchdev_running(vsi->back))
5897 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
5898 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
5899 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
5904 ice_service_task_schedule(vsi->back);
5917 struct ice_vsi *vsi = np->vsi;
5929 q_handle = vsi->tx_rings[queue_index]->q_handle;
5930 tc = ice_dcb_get_tc(vsi, queue_index);
5932 vsi = ice_locate_vsi_using_queue(vsi, queue_index);
5933 if (!vsi) {
5941 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
5944 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
6088 if (ice_is_dvm_ena(&np->vsi->back->hw)) {
6121 !ice_vsi_has_non_zero_vlans(np->vsi)) {
6131 * @vsi: PF's VSI
6138 ice_set_rx_rings_vlan_proto(struct ice_vsi *vsi, __be16 vlan_ethertype)
6142 ice_for_each_alloc_rxq(vsi, i)
6143 vsi->rx_rings[i]->pkt_ctx.vlan_proto = vlan_ethertype;
6148 * @vsi: PF's VSI
6156 ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
6163 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6176 strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
6178 strip_err = vlan_ops->dis_stripping(vsi);
6181 insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
6183 insert_err = vlan_ops->dis_insertion(vsi);
6188 ice_set_rx_rings_vlan_proto(vsi, enable_stripping ?
6196 * @vsi: PF's VSI
6203 ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
6205 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6213 err = vlan_ops->ena_rx_filtering(vsi);
6215 err = vlan_ops->dis_rx_filtering(vsi);
6233 struct ice_vsi *vsi = np->vsi;
6241 dev_err(ice_pf_to_dev(vsi->back),
6246 err = ice_set_vlan_offload_features(vsi, features);
6255 err = ice_set_vlan_filtering_features(vsi, features);
6265 * @vsi: ptr to VSI
6268 static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
6270 bool if_running = netif_running(vsi->netdev);
6273 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6274 ret = ice_down(vsi);
6276 netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
6280 ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
6282 netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
6284 ret = ice_up(vsi);
6299 struct ice_vsi *vsi = np->vsi;
6300 struct ice_pf *pf = vsi->back;
6321 ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));
6333 dev_err(ice_pf_to_dev(vsi->back),
6338 ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
6339 ret = ice_down_up(vsi);
6347 ice_vsi_manage_fdir(vsi, ena);
6348 ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
6365 ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
6372 * @vsi: VSI to setup VLAN properties for
6374 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
6378 err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
6382 err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
6386 return ice_vsi_add_vlan_zero(vsi);
6391 * @vsi: the VSI being configured
6395 int ice_vsi_cfg_lan(struct ice_vsi *vsi)
6399 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6400 ice_set_rx_mode(vsi->netdev);
6402 err = ice_vsi_vlan_setup(vsi);
6406 ice_vsi_cfg_dcb_rings(vsi);
6408 err = ice_vsi_cfg_lan_txqs(vsi);
6409 if (!err && ice_is_xdp_ena_vsi(vsi))
6410 err = ice_vsi_cfg_xdp_txqs(vsi);
6412 err = ice_vsi_cfg_rxqs(vsi);
6539 * @vsi: the VSI being configured
6541 static void ice_napi_enable_all(struct ice_vsi *vsi)
6545 if (!vsi->netdev)
6548 ice_for_each_q_vector(vsi, q_idx) {
6549 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6560 * @vsi: The VSI being configured
6564 static int ice_up_complete(struct ice_vsi *vsi)
6566 struct ice_pf *pf = vsi->back;
6569 ice_vsi_cfg_msix(vsi);
6575 err = ice_vsi_start_all_rx_rings(vsi);
6579 clear_bit(ICE_VSI_DOWN, vsi->state);
6580 ice_napi_enable_all(vsi);
6581 ice_vsi_ena_irq(vsi);
6583 if (vsi->port_info &&
6584 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
6585 vsi->netdev && vsi->type == ICE_VSI_PF) {
6586 ice_print_link_msg(vsi, true);
6587 netif_tx_start_all_queues(vsi->netdev);
6588 netif_carrier_on(vsi->netdev);
6595 ice_update_eth_stats(vsi);
6597 if (vsi->type == ICE_VSI_PF)
6605 * @vsi: VSI being configured
6607 int ice_up(struct ice_vsi *vsi)
6611 err = ice_vsi_cfg_lan(vsi);
6613 err = ice_up_complete(vsi);
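ice_up (lines 6607-6613) is just the compose of the two helpers above; reconstructed from the matched calls, with the error short-circuit assumed:

int ice_up(struct ice_vsi *vsi)
{
        int err;

        err = ice_vsi_cfg_lan(vsi);
        if (!err)
                err = ice_up_complete(vsi);

        return err;
}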
6643 * @vsi: the VSI to be updated
6649 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
6667 vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
6668 vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
6669 vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
6675 * @vsi: the VSI to be updated
6677 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
6681 struct ice_pf *pf = vsi->back;
6690 vsi->tx_restart = 0;
6691 vsi->tx_busy = 0;
6692 vsi->tx_linearize = 0;
6693 vsi->rx_buf_failed = 0;
6694 vsi->rx_page_failed = 0;
6699 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
6700 vsi->num_txq);
6703 ice_for_each_rxq(vsi, i) {
6704 struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
6713 vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
6714 vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
6718 if (ice_is_xdp_ena_vsi(vsi))
6719 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
6720 vsi->num_xdp_txq);
6724 net_stats = &vsi->net_stats;
6725 stats_prev = &vsi->net_stats_prev;
6749 * @vsi: the VSI to be updated
6751 void ice_update_vsi_stats(struct ice_vsi *vsi)
6753 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
6754 struct ice_eth_stats *cur_es = &vsi->eth_stats;
6755 struct ice_pf *pf = vsi->back;
6757 if (test_bit(ICE_VSI_DOWN, vsi->state) ||
6762 ice_update_vsi_ring_stats(vsi);
6765 ice_update_eth_stats(vsi);
6773 if (vsi->type == ICE_VSI_PF) {
6948 struct ice_vsi *vsi = np->vsi;
6950 vsi_stats = &vsi->net_stats;
6952 if (!vsi->num_txq || !vsi->num_rxq)
6960 if (!test_bit(ICE_VSI_DOWN, vsi->state))
6961 ice_update_vsi_ring_stats(vsi);
6982 * @vsi: VSI having NAPI disabled
6984 static void ice_napi_disable_all(struct ice_vsi *vsi)
6988 if (!vsi->netdev)
6991 ice_for_each_q_vector(vsi, q_idx) {
6992 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
7004 * @vsi: the VSI being un-configured
7006 static void ice_vsi_dis_irq(struct ice_vsi *vsi)
7008 struct ice_pf *pf = vsi->back;
7016 if (vsi->rx_rings) {
7017 ice_for_each_rxq(vsi, i) {
7018 if (vsi->rx_rings[i]) {
7021 reg = vsi->rx_rings[i]->reg_idx;
7030 ice_for_each_q_vector(vsi, i) {
7031 if (!vsi->q_vectors[i])
7033 wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
7039 if (vsi->type == ICE_VSI_VF)
7042 ice_for_each_q_vector(vsi, i)
7043 synchronize_irq(vsi->q_vectors[i]->irq.virq);
7048 * @vsi: The VSI being stopped
7050 * Caller of this function is expected to set the vsi->state ICE_DOWN bit
7052 int ice_down(struct ice_vsi *vsi)
7056 WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
7058 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
7059 vlan_err = ice_vsi_del_vlan_zero(vsi);
7060 ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
7061 netif_carrier_off(vsi->netdev);
7062 netif_tx_disable(vsi->netdev);
7063 } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
7064 ice_eswitch_stop_all_tx_queues(vsi->back);
7067 ice_vsi_dis_irq(vsi);
7069 tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
7071 netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
7072 vsi->vsi_num, tx_err);
7073 if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
7074 tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
7076 netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
7077 vsi->vsi_num, tx_err);
7080 rx_err = ice_vsi_stop_all_rx_rings(vsi);
7082 netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
7083 vsi->vsi_num, rx_err);
7085 ice_napi_disable_all(vsi);
7087 ice_for_each_txq(vsi, i)
7088 ice_clean_tx_ring(vsi->tx_rings[i]);
7090 if (ice_is_xdp_ena_vsi(vsi))
7091 ice_for_each_xdp_txq(vsi, i)
7092 ice_clean_tx_ring(vsi->xdp_rings[i]);
7094 ice_for_each_rxq(vsi, i)
7095 ice_clean_rx_ring(vsi->rx_rings[i]);
7098 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
7099 vsi->vsi_num, vsi->vsw->sw_id);
7108 * @vsi: the VSI to be reconnected
7110 int ice_down_up(struct ice_vsi *vsi)
7115 if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
7118 ret = ice_down(vsi);
7122 ret = ice_up(vsi);
7124 netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");
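ice_down_up (lines 7110-7124) brackets the restart with the ICE_VSI_DOWN bit so an already-down VSI is left alone; sketched from the matched lines (the early-return value is an assumption):

        if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
                return 0;       /* already down; assumed no-op */

        ret = ice_down(vsi);
        if (ret)
                return ret;

        ret = ice_up(vsi);
        if (ret)
                netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");

        return ret;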
7133 * @vsi: VSI having resources allocated
7137 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
7141 if (!vsi->num_txq) {
7142 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
7143 vsi->vsi_num);
7147 ice_for_each_txq(vsi, i) {
7148 struct ice_tx_ring *ring = vsi->tx_rings[i];
7153 if (vsi->netdev)
7154 ring->netdev = vsi->netdev;
7165 * @vsi: VSI having resources allocated
7169 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
7173 if (!vsi->num_rxq) {
7174 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
7175 vsi->vsi_num);
7179 ice_for_each_rxq(vsi, i) {
7180 struct ice_rx_ring *ring = vsi->rx_rings[i];
7185 if (vsi->netdev)
7186 ring->netdev = vsi->netdev;
7197 * @vsi: the VSI to open
7203 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
7206 struct ice_pf *pf = vsi->back;
7212 err = ice_vsi_setup_tx_rings(vsi);
7216 err = ice_vsi_setup_rx_rings(vsi);
7220 err = ice_vsi_cfg_lan(vsi);
7226 err = ice_vsi_req_irq_msix(vsi, int_name);
7230 ice_vsi_cfg_msix(vsi);
7232 err = ice_vsi_start_all_rx_rings(vsi);
7236 clear_bit(ICE_VSI_DOWN, vsi->state);
7237 ice_vsi_ena_irq(vsi);
7242 ice_down(vsi);
7244 ice_vsi_free_rx_rings(vsi);
7246 ice_vsi_free_tx_rings(vsi);
7253 * @vsi: the VSI to open
7259 int ice_vsi_open(struct ice_vsi *vsi)
7262 struct ice_pf *pf = vsi->back;
7266 err = ice_vsi_setup_tx_rings(vsi);
7270 err = ice_vsi_setup_rx_rings(vsi);
7274 err = ice_vsi_cfg_lan(vsi);
7279 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
7280 err = ice_vsi_req_irq_msix(vsi, int_name);
7284 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
7286 if (vsi->type == ICE_VSI_PF) {
7288 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
7292 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
7297 err = ice_up_complete(vsi);
7304 ice_down(vsi);
7306 ice_vsi_free_irq(vsi);
7308 ice_vsi_free_rx_rings(vsi);
7310 ice_vsi_free_tx_rings(vsi);
7323 if (!pf->vsi)
7327 if (!pf->vsi[i])
7330 if (pf->vsi[i]->type == ICE_VSI_CHNL)
7333 err = ice_vsi_release(pf->vsi[i]);
7335 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
7336 i, err, pf->vsi[i]->vsi_num);
7345 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
7353 struct ice_vsi *vsi = pf->vsi[i];
7355 if (!vsi || vsi->type != type)
7359 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
7362 err, vsi->idx, ice_vsi_type_str(type));
7367 err = ice_replay_vsi(&pf->hw, vsi->idx);
7370 err, vsi->idx, ice_vsi_type_str(type));
7377 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
7380 err = ice_ena_vsi(vsi, false);
7383 err, vsi->idx, ice_vsi_type_str(type));
7387 dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
7404 struct ice_vsi *vsi = pf->vsi[i];
7406 if (!vsi || vsi->type != ICE_VSI_PF)
7409 ice_get_link_status(pf->vsi[i]->port_info, &link_up);
7411 netif_carrier_on(pf->vsi[i]->netdev);
7412 netif_tx_wake_all_queues(pf->vsi[i]->netdev);
7414 netif_carrier_off(pf->vsi[i]->netdev);
7415 netif_tx_stop_all_queues(pf->vsi[i]->netdev);
7625 struct ice_vsi *vsi = np->vsi;
7626 struct ice_pf *pf = vsi->back;
7636 prog = vsi->xdp_prog;
7638 int frame_size = ice_max_xdp_frame_size(vsi);
7670 err = ice_down_up(vsi);
7689 struct ice_pf *pf = np->vsi->back;
7745 * @vsi: Pointer to VSI structure
7751 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7754 struct ice_hw *hw = &vsi->back->hw;
7760 params.vsi_handle = vsi->idx;
7762 params.lut_type = vsi->rss_lut_type;
7767 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
7775 * @vsi: Pointer to the VSI structure
7780 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
7782 struct ice_hw *hw = &vsi->back->hw;
7788 status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7790 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
7798 * @vsi: Pointer to VSI structure
7804 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7807 struct ice_hw *hw = &vsi->back->hw;
7813 params.vsi_handle = vsi->idx;
7815 params.lut_type = vsi->rss_lut_type;
7820 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
7828 * @vsi: Pointer to VSI structure
7833 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
7835 struct ice_hw *hw = &vsi->back->hw;
7841 status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7843 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
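The four RSS accessors above share one convention: the LUT calls fill an ice_aq_get_set_rss_lut_params keyed by vsi->idx, while the key calls hand the seed buffer straight to the AQ. A sketch of the LUT setter under that convention (the lut and lut_size field names are assumptions; the matches show only vsi_handle and lut_type):

        struct ice_aq_get_set_rss_lut_params params = {};
        struct ice_hw *hw = &vsi->back->hw;
        int status;

        params.vsi_handle = vsi->idx;
        params.lut_type = vsi->rss_lut_type;
        params.lut = lut;               /* assumed field */
        params.lut_size = lut_size;     /* assumed field */

        status = ice_aq_set_rss_lut(hw, &params);
        if (status)
                dev_err(ice_pf_to_dev(vsi->back),
                        "Cannot set RSS lut, err %d\n", status);

        return status;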
7851 * @vsi: Pointer to VSI structure
7856 int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc)
7858 struct ice_hw *hw = &vsi->back->hw;
7863 if (hfunc == vsi->rss_hfunc)
7875 ctx->info.q_opt_rss = vsi->info.q_opt_rss;
7879 ctx->info.q_opt_tc = vsi->info.q_opt_tc;
7880 ctx->info.q_opt_flags = vsi->info.q_opt_rss;
7882 err = ice_update_vsi(hw, vsi->idx, ctx, NULL);
7884 dev_err(ice_pf_to_dev(vsi->back), "Failed to configure RSS hash for VSI %d, error %d\n",
7885 vsi->vsi_num, err);
7887 vsi->info.q_opt_rss = ctx->info.q_opt_rss;
7888 vsi->rss_hfunc = hfunc;
7889 netdev_info(vsi->netdev, "Hash function set to: %sToeplitz\n",
7899 return ice_set_rss_cfg_symm(hw, vsi, symm);
7918 struct ice_vsi *vsi = np->vsi;
7919 struct ice_pf *pf = vsi->back;
7930 * @vsi: Pointer to VSI structure
7935 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
7938 struct ice_hw *hw = &vsi->back->hw;
7942 vsi_props = &vsi->info;
7948 ctxt->info = vsi->info;
7958 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
7960 dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
7990 struct ice_pf *pf = np->vsi->back;
8017 if (!pf->vsi[v])
8019 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
8053 struct ice_vsi *vsi = np->vsi;
8054 struct ice_pf *pf = vsi->back;
8070 ice_for_each_txq(vsi, i)
8071 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
8072 if (txqueue == vsi->tx_rings[i]->q_index) {
8073 tx_ring = vsi->tx_rings[i];
8091 rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])));
8096 vsi->vsi_num, txqueue, tx_ring->next_to_clean,
8117 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
8137 struct ice_vsi *vsi = np->vsi;
8144 return ice_add_cls_flower(filter_dev, vsi, cls_flower);
8146 return ice_del_cls_flower(vsi, cls_flower);
8165 return ice_setup_tc_cls_flower(np, np->vsi->netdev,
8174 * @vsi: Pointer to VSI
8182 ice_validate_mqprio_qopt(struct ice_vsi *vsi,
8186 struct ice_pf *pf = vsi->back;
8193 if (vsi->type != ICE_VSI_PF)
8202 vsi->ch_rss_size = 0;
8204 speed = ice_get_link_speed_kbps(vsi);
8293 if (vsi->num_rxq <
8296 if (vsi->num_txq <
8306 /* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
8307 vsi->ch_rss_size = max_rss_q_cnt;
8315 * @vsi: ptr to VSI
8317 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
8324 if (!(vsi->num_gfltr || vsi->num_bfltr))
8345 prof->vsi_h[0], vsi->idx,
8350 vsi->idx, flow);
8358 prof->vsi_h[prof->cnt] = vsi->idx;
8362 dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
8367 dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);
8383 struct ice_vsi *vsi;
8390 vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
8391 if (!vsi || vsi->type != ICE_VSI_CHNL) {
8396 ice_add_vsi_to_fdir(pf, vsi);
8399 ch->vsi_num = vsi->vsi_num;
8400 ch->info.mapping_flags = vsi->info.mapping_flags;
8401 ch->ch_vsi = vsi;
8403 vsi->ch = ch;
8405 memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
8406 sizeof(vsi->info.q_mapping));
8407 memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
8408 sizeof(vsi->info.tc_mapping));
8415 * @vsi: the VSI being setup
8420 static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
8430 tx_ring = vsi->tx_rings[ch->base_q + i];
8431 rx_ring = vsi->rx_rings[ch->base_q + i];
8466 ice_flush(&vsi->back->hw);
8471 * @vsi: ptr to main_vsi
8478 ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
8483 ice_chnl_cfg_res(vsi, ch);
8489 * @vsi: the VSI being setup
8498 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8504 ch->base_q = vsi->next_base_q;
8514 ice_cfg_chnl_all_res(vsi, ch);
8519 vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
8529 * @vsi: the VSI being setup
8536 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8543 if (vsi->type != ICE_VSI_PF) {
8544 dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
8551 ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
8563 * @vsi: VSI to be configured
8568 ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
8572 err = ice_set_min_bw_limit(vsi, min_tx_rate);
8576 return ice_set_max_bw_limit(vsi, max_tx_rate);
8581 * @vsi: VSI to be configured
8587 static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
8589 struct ice_pf *pf = vsi->back;
8601 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
8603 vsi->cnt_q_avail, ch->num_txq);
8607 if (!ice_setup_channel(pf, vsi, ch)) {
8625 vsi->cnt_q_avail -= ch->num_txq;
8683 * @vsi: VSI to be configured
8688 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
8691 struct ice_pf *pf = vsi->back;
8699 if (vsi->netdev->features & NETIF_F_NTUPLE) {
8703 ice_fdir_del_all_fltrs(vsi);
8708 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
8723 tx_ring = vsi->tx_rings[ch->base_q + i];
8724 rx_ring = vsi->rx_rings[ch->base_q + i];
8752 vsi->tc_map_vsi[i] = NULL;
8755 vsi->all_enatc = 0;
8756 vsi->all_numtc = 0;
8771 struct ice_vsi *vsi;
8797 vsi = pf->vsi[i];
8798 if (!vsi || vsi->type != ICE_VSI_CHNL)
8801 type = vsi->type;
8804 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
8807 ice_vsi_type_str(type), vsi->idx, err);
8814 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
8817 err = ice_replay_vsi(&pf->hw, vsi->idx);
8820 ice_vsi_type_str(type), err, vsi->idx);
8825 ice_vsi_type_str(type), vsi->idx);
8830 main_vsi->tc_map_vsi[tc_idx++] = vsi;
8875 * @vsi: VSI to be configured
8879 static int ice_create_q_channels(struct ice_vsi *vsi)
8881 struct ice_pf *pf = vsi->back;
8886 if (!(vsi->all_enatc & BIT(i)))
8895 ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
8896 ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
8897 ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
8898 ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
8899 ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
8909 ret = ice_create_q_channel(vsi, ch);
8916 list_add_tail(&ch->list, &vsi->ch_list);
8917 vsi->tc_map_vsi[i] = ch->ch_vsi;
8924 ice_remove_q_channels(vsi, false);
8938 struct ice_vsi *vsi = np->vsi;
8939 struct ice_pf *pf = vsi->back;
8952 vsi->ch_rss_size = 0;
8953 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8970 ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
8976 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8982 if (vsi->netdev->features & NETIF_F_HW_TC)
8992 if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
8997 ice_dis_vsi(vsi, true);
9000 ice_remove_q_channels(vsi, true);
9003 vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
9005 vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
9015 offset = vsi->mqprio_qopt.qopt.offset[i];
9016 qcount_rx = vsi->mqprio_qopt.qopt.count[i];
9017 qcount_tx = vsi->mqprio_qopt.qopt.count[i];
9019 vsi->req_txq = offset + qcount_tx;
9020 vsi->req_rxq = offset + qcount_rx;
9026 vsi->orig_rss_size = vsi->rss_size;
9032 cur_txq = vsi->num_txq;
9033 cur_rxq = vsi->num_rxq;
9036 ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
9040 vsi->req_txq = cur_txq;
9041 vsi->req_rxq = cur_rxq;
9043 if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
9049 vsi->all_numtc = num_tcf;
9050 vsi->all_enatc = ena_tc_qdisc;
9051 ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
9054 vsi->vsi_num);
9059 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
9060 u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];
9070 ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
9073 max_tx_rate, min_tx_rate, vsi->vsi_num);
9076 max_tx_rate, min_tx_rate, vsi->vsi_num);
9080 ret = ice_create_q_channels(vsi);
9089 if (vsi->ch_rss_size)
9090 ice_vsi_cfg_rss_lut_key(vsi);
9095 vsi->all_numtc = 0;
9096 vsi->all_enatc = 0;
9099 ice_ena_vsi(vsi, true);
9111 struct ice_pf *pf = np->vsi->back;
9198 vlan_dev_real_dev(netdev) == np->vsi->netdev))
9285 struct ice_pf *pf = np->vsi->back;
9307 struct ice_vsi *vsi = np->vsi;
9308 struct ice_pf *pf = vsi->back;
9319 pi = vsi->port_info;
9340 err = ice_configure_phy(vsi);
9348 ice_set_link(vsi, false);
9351 err = ice_vsi_open(vsi);
9354 vsi->vsi_num, vsi->vsw->sw_id);
9375 struct ice_vsi *vsi = np->vsi;
9376 struct ice_pf *pf = vsi->back;
9383 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
9384 int link_err = ice_force_phys_link_state(vsi, false);
9388 netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n",
9389 vsi->vsi_num);
9391 netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
9392 vsi->vsi_num, link_err);
9394 ice_vsi_close(vsi);
9399 ice_vsi_close(vsi);