Lines Matching refs:vsi

39  * @vsi: the VSI being configured
47 static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
52 ice_for_each_rxq(vsi, i)
53 ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);
55 ice_flush(&vsi->back->hw);
57 ice_for_each_rxq(vsi, i) {
58 ret = ice_vsi_wait_one_rx_ring(vsi, ena, i);
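A note on the shape visible above: ice_vsi_ctrl_all_rx_rings() requests the state change on every ring first, flushes posted register writes once, and only then polls each ring, so the per-ring settling delays overlap instead of adding up. A minimal user-space model of that kick-all-then-wait pattern; ring_request, ring_poll, flush_writes, and NUM_RINGS are illustrative stand-ins:

    #include <stdbool.h>

    #define NUM_RINGS 4

    static void ring_request(int idx, bool ena) { (void)idx; (void)ena; /* post HW write */ }
    static int ring_poll(int idx, bool ena) { (void)idx; (void)ena; return 0; }
    static void flush_writes(void) { /* read back a register to flush */ }

    static int ctrl_all_rx_rings(bool ena)
    {
        int i, ret = 0;

        /* kick every ring before waiting on any of them */
        for (i = 0; i < NUM_RINGS; i++)
            ring_request(i, ena);
        flush_writes();

        for (i = 0; i < NUM_RINGS; i++) {
            ret = ring_poll(i, ena);
            if (ret)
                break;
        }
        return ret;
    }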
68 * @vsi: VSI pointer
73 static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
75 struct ice_pf *pf = vsi->back;
79 if (vsi->type == ICE_VSI_CHNL)
83 vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,
84 sizeof(*vsi->tx_rings), GFP_KERNEL);
85 if (!vsi->tx_rings)
88 vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq,
89 sizeof(*vsi->rx_rings), GFP_KERNEL);
90 if (!vsi->rx_rings)
94 * and XDP rings; at this point vsi->num_xdp_txq might not be set,
99 vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()),
100 sizeof(*vsi->txq_map), GFP_KERNEL);
102 if (!vsi->txq_map)
105 vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq,
106 sizeof(*vsi->rxq_map), GFP_KERNEL);
107 if (!vsi->rxq_map)
111 if (vsi->type == ICE_VSI_LB)
115 vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors,
116 sizeof(*vsi->q_vectors), GFP_KERNEL);
117 if (!vsi->q_vectors)
120 vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
121 if (!vsi->af_xdp_zc_qps)
127 devm_kfree(dev, vsi->q_vectors);
129 devm_kfree(dev, vsi->rxq_map);
131 devm_kfree(dev, vsi->txq_map);
133 devm_kfree(dev, vsi->rx_rings);
135 devm_kfree(dev, vsi->tx_rings);
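The devm_kfree() ladder above undoes the allocations in reverse order. A compact sketch of that goto-unwind idiom, using plain calloc/free in place of devm_kcalloc/devm_kfree; struct and field names are stand-ins:

    #include <stdlib.h>

    struct arrays { void *tx, *rx, *txq_map, *rxq_map, *vectors; };

    static int alloc_arrays(struct arrays *a, size_t ntx, size_t nrx)
    {
        a->tx = calloc(ntx, sizeof(void *));
        if (!a->tx)
            return -1;
        a->rx = calloc(nrx, sizeof(void *));
        if (!a->rx)
            goto err_rx;
        a->txq_map = calloc(ntx, sizeof(unsigned short));
        if (!a->txq_map)
            goto err_txq;
        a->rxq_map = calloc(nrx, sizeof(unsigned short));
        if (!a->rxq_map)
            goto err_rxq;
        a->vectors = calloc(ntx > nrx ? ntx : nrx, sizeof(void *));
        if (!a->vectors)
            goto err_vec;
        return 0;

    err_vec:        /* each label frees what the step above it made */
        free(a->rxq_map);
    err_rxq:
        free(a->txq_map);
    err_txq:
        free(a->rx);
    err_rx:
        free(a->tx);
        return -1;
    }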
141 * @vsi: the VSI being configured
143 static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
145 switch (vsi->type) {
154 if (!vsi->num_rx_desc)
155 vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
156 if (!vsi->num_tx_desc)
157 vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
160 dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n",
161 vsi->type);
168 * @vsi: the VSI being configured
172 static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
174 enum ice_vsi_type vsi_type = vsi->type;
175 struct ice_pf *pf = vsi->back;
176 struct ice_vf *vf = vsi->vf;
183 if (vsi->req_txq) {
184 vsi->alloc_txq = vsi->req_txq;
185 vsi->num_txq = vsi->req_txq;
187 vsi->alloc_txq = min3(pf->num_lan_msix,
192 pf->num_lan_tx = vsi->alloc_txq;
196 vsi->alloc_rxq = 1;
198 if (vsi->req_rxq) {
199 vsi->alloc_rxq = vsi->req_rxq;
200 vsi->num_rxq = vsi->req_rxq;
202 vsi->alloc_rxq = min3(pf->num_lan_msix,
208 pf->num_lan_rx = vsi->alloc_rxq;
210 vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
211 max_t(int, vsi->alloc_rxq,
212 vsi->alloc_txq));
219 if (vsi->req_txq && vsi->req_rxq) {
220 vsi->alloc_txq = vsi->req_txq;
221 vsi->alloc_rxq = vsi->req_rxq;
223 vsi->alloc_txq = 1;
224 vsi->alloc_rxq = 1;
227 vsi->num_q_vectors = 1;
232 vsi->alloc_txq = vf->num_vf_qs;
233 vsi->alloc_rxq = vf->num_vf_qs;
235 * data queue interrupts). Since vsi->num_q_vectors is number
239 vsi->num_q_vectors = vf->num_msix - ICE_NONQ_VECS_VF;
242 vsi->alloc_txq = 1;
243 vsi->alloc_rxq = 1;
244 vsi->num_q_vectors = 1;
247 vsi->alloc_txq = 0;
248 vsi->alloc_rxq = 0;
251 vsi->alloc_txq = 1;
252 vsi->alloc_rxq = 1;
259 ice_vsi_set_num_desc(vsi);
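The sizing rules visible here let an explicit request (req_txq/req_rxq) win outright, otherwise clamp the queue count with min3(), and bound the vector count by the MSI-X budget. A small model, under the assumption that the elided min3() arguments are the interrupt budget, the CPU count, and a hardware maximum:

    static unsigned int min3u(unsigned int a, unsigned int b, unsigned int c)
    {
        unsigned int m = a < b ? a : b;

        return m < c ? m : c;
    }

    /* an explicit request wins; else clamp by MSI-X, CPUs, and HW max */
    static void set_num_qs(unsigned int req_q, unsigned int msix,
                           unsigned int ncpus, unsigned int hw_max,
                           unsigned int *alloc_q, unsigned int *nvec)
    {
        *alloc_q = req_q ? req_q : min3u(msix, ncpus, hw_max);
        /* one vector serves a Tx/Rx pair; upstream uses
         * min(msix, max(alloc_txq, alloc_rxq)) */
        *nvec = msix < *alloc_q ? msix : *alloc_q;
    }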
293 * @vsi: pointer to VSI being removed
295 static void ice_vsi_delete_from_hw(struct ice_vsi *vsi)
297 struct ice_pf *pf = vsi->back;
301 ice_fltr_remove_all(vsi);
306 if (vsi->type == ICE_VSI_VF)
307 ctxt->vf_num = vsi->vf->vf_id;
308 ctxt->vsi_num = vsi->vsi_num;
310 memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));
312 status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
315 vsi->vsi_num, status);
322 * @vsi: pointer to VSI being cleared
324 static void ice_vsi_free_arrays(struct ice_vsi *vsi)
326 struct ice_pf *pf = vsi->back;
331 bitmap_free(vsi->af_xdp_zc_qps);
332 vsi->af_xdp_zc_qps = NULL;
334 devm_kfree(dev, vsi->q_vectors);
335 vsi->q_vectors = NULL;
336 devm_kfree(dev, vsi->tx_rings);
337 vsi->tx_rings = NULL;
338 devm_kfree(dev, vsi->rx_rings);
339 vsi->rx_rings = NULL;
340 devm_kfree(dev, vsi->txq_map);
341 vsi->txq_map = NULL;
342 devm_kfree(dev, vsi->rxq_map);
343 vsi->rxq_map = NULL;
348 * @vsi: VSI pointer
350 static void ice_vsi_free_stats(struct ice_vsi *vsi)
353 struct ice_pf *pf = vsi->back;
356 if (vsi->type == ICE_VSI_CHNL)
361 vsi_stat = pf->vsi_stats[vsi->idx];
365 ice_for_each_alloc_txq(vsi, i) {
372 ice_for_each_alloc_rxq(vsi, i) {
382 pf->vsi_stats[vsi->idx] = NULL;
387 * @vsi: VSI which is having stats allocated
389 static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
394 struct ice_pf *pf = vsi->back;
397 vsi_stats = pf->vsi_stats[vsi->idx];
402 ice_for_each_alloc_txq(vsi, i) {
406 ring = vsi->tx_rings[i];
421 ice_for_each_alloc_rxq(vsi, i) {
425 ring = vsi->rx_rings[i];
442 ice_vsi_free_stats(vsi);
448 * @vsi: pointer to VSI being cleared
453 static void ice_vsi_free(struct ice_vsi *vsi)
458 if (!vsi || !vsi->back)
461 pf = vsi->back;
464 if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
465 dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx);
472 pf->vsi[vsi->idx] = NULL;
473 pf->next_vsi = vsi->idx;
475 ice_vsi_free_stats(vsi);
476 ice_vsi_free_arrays(vsi);
478 devm_kfree(dev, vsi);
481 void ice_vsi_delete(struct ice_vsi *vsi)
483 ice_vsi_delete_from_hw(vsi);
484 ice_vsi_free(vsi);
528 struct ice_pf *pf = q_vector->vsi->back;
543 * @vsi: VSI pointer
545 static int ice_vsi_alloc_stat_arrays(struct ice_vsi *vsi)
548 struct ice_pf *pf = vsi->back;
550 if (vsi->type == ICE_VSI_CHNL)
555 if (pf->vsi_stats[vsi->idx])
564 kcalloc(vsi->alloc_txq, sizeof(*vsi_stat->tx_ring_stats),
570 kcalloc(vsi->alloc_rxq, sizeof(*vsi_stat->rx_ring_stats),
575 pf->vsi_stats[vsi->idx] = vsi_stat;
584 pf->vsi_stats[vsi->idx] = NULL;
590 * @vsi: ptr to VSI
594 ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
596 if (vsi->type != ICE_VSI_CHNL) {
597 ice_vsi_set_num_qs(vsi);
598 if (ice_vsi_alloc_arrays(vsi))
602 switch (vsi->type) {
605 vsi->irq_handler = ice_eswitch_msix_clean_rings;
609 vsi->irq_handler = ice_msix_clean_rings;
613 vsi->irq_handler = ice_msix_clean_ctrl_vsi;
619 vsi->num_rxq = ch->num_rxq;
620 vsi->num_txq = ch->num_txq;
621 vsi->next_base_q = ch->base_q;
627 ice_vsi_free_arrays(vsi);
647 struct ice_vsi *vsi = NULL;
661 vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL);
662 if (!vsi)
665 vsi->back = pf;
666 set_bit(ICE_VSI_DOWN, vsi->state);
669 vsi->idx = pf->next_vsi;
670 pf->vsi[pf->next_vsi] = vsi;
673 pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
678 return vsi;
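Allocation keys off pf->next_vsi as a cached free-slot hint into the pf->vsi[] table, so the common case is O(1). A sketch of that bookkeeping, assuming ice_get_free_slot() prefers the slot after the one just used and otherwise scans for the first NULL entry:

    #include <stdlib.h>

    #define MAX_VSI 16

    struct vsi { int idx; };

    struct pf {
        struct vsi *vsi[MAX_VSI];
        int next_vsi;          /* cached free slot, -1 if table full */
    };

    static int get_free_slot(struct vsi **tbl, int size, int curr)
    {
        int i;

        if (curr + 1 < size && !tbl[curr + 1])
            return curr + 1;   /* fast path: very next slot is free */
        for (i = 0; i < size; i++)
            if (!tbl[i])
                return i;
        return -1;             /* table full */
    }

    static struct vsi *vsi_alloc(struct pf *pf)
    {
        struct vsi *v;

        if (pf->next_vsi < 0)
            return NULL;
        v = calloc(1, sizeof(*v));
        if (!v)
            return NULL;
        v->idx = pf->next_vsi;
        pf->vsi[v->idx] = v;
        pf->next_vsi = get_free_slot(pf->vsi, MAX_VSI, v->idx);
        return v;
    }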
683 * @vsi: pointer to the ice_vsi
689 static int ice_alloc_fd_res(struct ice_vsi *vsi)
691 struct ice_pf *pf = vsi->back;
701 if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF ||
702 vsi->type == ICE_VSI_CHNL))
723 if (vsi->type == ICE_VSI_PF) {
724 vsi->num_gfltr = g_val;
732 vsi->num_gfltr = ICE_PF_VSI_GFLTR;
736 vsi->num_bfltr = b_val;
737 } else if (vsi->type == ICE_VSI_VF) {
738 vsi->num_gfltr = 0;
741 vsi->num_bfltr = b_val;
764 vsi->num_gfltr = g_val / numtc;
767 vsi->num_bfltr = b_val;
775 * @vsi: the VSI to assign queues to
779 static int ice_vsi_get_qs(struct ice_vsi *vsi)
781 struct ice_pf *pf = vsi->back;
786 .q_count = vsi->alloc_txq,
788 .vsi_map = vsi->txq_map,
796 .q_count = vsi->alloc_rxq,
798 .vsi_map = vsi->rxq_map,
804 if (vsi->type == ICE_VSI_CHNL)
810 vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode;
815 vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode;
822 * @vsi: the VSI that is going to release queues
824 static void ice_vsi_put_qs(struct ice_vsi *vsi)
826 struct ice_pf *pf = vsi->back;
831 ice_for_each_alloc_txq(vsi, i) {
832 clear_bit(vsi->txq_map[i], pf->avail_txqs);
833 vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
836 ice_for_each_alloc_rxq(vsi, i) {
837 clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
838 vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
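Releasing queues clears each queue's bit in the PF-wide bitmap and poisons the per-VSI map entry with ICE_INVAL_Q_INDEX so a stale index cannot silently alias a live queue. A minimal model, on the assumption that the bitmap marks queues currently owned:

    #include <limits.h>

    #define NUM_HW_QS     64
    #define INVAL_Q_INDEX 0xFFFF
    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    static unsigned long owned_qs[(NUM_HW_QS + BITS_PER_LONG - 1) / BITS_PER_LONG];

    static void release_q(unsigned int q)
    {
        owned_qs[q / BITS_PER_LONG] &= ~(1UL << (q % BITS_PER_LONG));
    }

    static void put_qs(unsigned short *qmap, int nq)
    {
        int i;

        for (i = 0; i < nq; i++) {
            release_q(qmap[i]);
            qmap[i] = INVAL_Q_INDEX;   /* poison the stale mapping */
        }
    }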
868 * @vsi: the VSI being cleaned up
873 static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi)
875 struct ice_pf *pf = vsi->back;
881 status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
883 dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n",
884 vsi->vsi_num, status);
889 * @vsi: the VSI being removed
891 static void ice_rss_clean(struct ice_vsi *vsi)
893 struct ice_pf *pf = vsi->back;
898 devm_kfree(dev, vsi->rss_hkey_user);
899 devm_kfree(dev, vsi->rss_lut_user);
901 ice_vsi_clean_rss_flow_fld(vsi);
904 ice_rem_vsi_rss_list(&pf->hw, vsi->idx);
909 * @vsi: the VSI being configured
911 static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
914 struct ice_pf *pf = vsi->back;
918 vsi->rss_size = 1;
924 switch (vsi->type) {
928 vsi->rss_table_size = (u16)cap->rss_table_size;
929 if (vsi->type == ICE_VSI_CHNL)
930 vsi->rss_size = min_t(u16, vsi->num_rxq, max_rss_size);
932 vsi->rss_size = min_t(u16, num_online_cpus(),
934 vsi->rss_lut_type = ICE_LUT_PF;
937 vsi->rss_table_size = ICE_LUT_VSI_SIZE;
938 vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size);
939 vsi->rss_lut_type = ICE_LUT_VSI;
945 vsi->rss_table_size = ICE_LUT_VSI_SIZE;
946 vsi->rss_size = ICE_MAX_RSS_QS_PER_VF;
947 vsi->rss_lut_type = ICE_LUT_VSI;
953 ice_vsi_type_str(vsi->type));
1016 * @vsi: the VSI being configured
1019 static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
1023 u16 qcount_tx = vsi->alloc_txq;
1024 u16 qcount_rx = vsi->alloc_rxq;
1028 if (!vsi->tc_cfg.numtc) {
1030 vsi->tc_cfg.numtc = 1;
1031 vsi->tc_cfg.ena_tc = 1;
1034 num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC);
1037 num_txq_per_tc = qcount_tx / vsi->tc_cfg.numtc;
1056 if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
1058 vsi->tc_cfg.tc_info[i].qoffset = 0;
1059 vsi->tc_cfg.tc_info[i].qcount_rx = 1;
1060 vsi->tc_cfg.tc_info[i].qcount_tx = 1;
1061 vsi->tc_cfg.tc_info[i].netdev_tc = 0;
1067 vsi->tc_cfg.tc_info[i].qoffset = offset;
1068 vsi->tc_cfg.tc_info[i].qcount_rx = num_rxq_per_tc;
1069 vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc;
1070 vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
1090 if (rx_count > vsi->alloc_rxq) {
1091 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
1092 rx_count, vsi->alloc_rxq);
1096 if (tx_count > vsi->alloc_txq) {
1097 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
1098 tx_count, vsi->alloc_txq);
1102 vsi->num_txq = tx_count;
1103 vsi->num_rxq = rx_count;
1105 if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
1106 dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
1110 vsi->num_txq = vsi->num_rxq;
1119 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
1120 ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
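The mapping math above splits the allocated queues evenly across enabled TCs, gives each disabled TC a one-queue placeholder at offset 0, and then checks the totals against alloc_rxq/alloc_txq. A runnable worked example of that split, 16 Rx queues across 4 enabled TCs:

    #include <stdio.h>

    #define MAX_TC 8

    int main(void)
    {
        unsigned int ena_tc = 0x0F;               /* TCs 0-3 enabled */
        unsigned int numtc = 4, qcount_rx = 16;
        unsigned int per_tc = qcount_rx / numtc;  /* 4 queues per TC */
        unsigned int offset = 0;
        int i;

        for (i = 0; i < MAX_TC; i++) {
            if (!(ena_tc & (1u << i))) {
                printf("TC%d: disabled placeholder, qoffset=0 qcount=1\n", i);
                continue;
            }
            printf("TC%d: qoffset=%u qcount=%u\n", i, offset, per_tc);
            offset += per_tc;
        }
        printf("rx_count=%u (must not exceed alloc_rxq)\n", offset);
        return 0;
    }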
1128 * @vsi: the VSI being configured
1130 static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
1135 if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL &&
1136 vsi->type != ICE_VSI_VF && vsi->type != ICE_VSI_CHNL)
1151 cpu_to_le16(vsi->num_gfltr);
1154 cpu_to_le16(vsi->num_bfltr);
1170 * @vsi: the VSI being configured
1172 static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
1178 pf = vsi->back;
1181 switch (vsi->type) {
1193 ice_vsi_type_str(vsi->type));
1198 vsi->rss_hfunc = hash_type;
1206 ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
1208 struct ice_pf *pf = vsi->back;
1213 qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix);
1221 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->next_base_q);
1227 * @vsi: VSI to check whether or not VLAN pruning is enabled.
1231 static bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
1233 return vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
1238 * @vsi: the VSI being configured
1247 static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags)
1249 struct ice_pf *pf = vsi->back;
1260 switch (vsi->type) {
1273 ctxt->vf_num = vsi->vf->vf_id + hw->func_caps.vf_base_id;
1283 if (vsi->type == ICE_VSI_CHNL) {
1297 ice_set_fd_vsi_ctx(ctxt, vsi);
1299 if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
1304 vsi->type != ICE_VSI_CTRL) {
1305 ice_set_rss_vsi_ctx(ctxt, vsi);
1314 ctxt->info.sw_id = vsi->port_info->sw_id;
1315 if (vsi->type == ICE_VSI_CHNL) {
1316 ice_chnl_vsi_setup_q_map(vsi, ctxt);
1318 ret = ice_vsi_setup_q_map(vsi, ctxt);
1332 if (vsi->type == ICE_VSI_PF) {
1339 ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
1346 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
1355 vsi->info = ctxt->info;
1358 vsi->vsi_num = ctxt->vsi_num;
1367 * @vsi: the VSI having rings deallocated
1369 static void ice_vsi_clear_rings(struct ice_vsi *vsi)
1374 if (vsi->q_vectors) {
1375 ice_for_each_q_vector(vsi, i) {
1376 struct ice_q_vector *q_vector = vsi->q_vectors[i];
1385 if (vsi->tx_rings) {
1386 ice_for_each_alloc_txq(vsi, i) {
1387 if (vsi->tx_rings[i]) {
1388 kfree_rcu(vsi->tx_rings[i], rcu);
1389 WRITE_ONCE(vsi->tx_rings[i], NULL);
1393 if (vsi->rx_rings) {
1394 ice_for_each_alloc_rxq(vsi, i) {
1395 if (vsi->rx_rings[i]) {
1396 kfree_rcu(vsi->rx_rings[i], rcu);
1397 WRITE_ONCE(vsi->rx_rings[i], NULL);
1405 * @vsi: VSI which is having rings allocated
1407 static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
1409 bool dvm_ena = ice_is_dvm_ena(&vsi->back->hw);
1410 struct ice_pf *pf = vsi->back;
1416 ice_for_each_alloc_txq(vsi, i) {
1426 ring->reg_idx = vsi->txq_map[i];
1427 ring->vsi = vsi;
1430 ring->count = vsi->num_tx_desc;
1436 WRITE_ONCE(vsi->tx_rings[i], ring);
1440 ice_for_each_alloc_rxq(vsi, i) {
1449 ring->reg_idx = vsi->rxq_map[i];
1450 ring->vsi = vsi;
1451 ring->netdev = vsi->netdev;
1453 ring->count = vsi->num_rx_desc;
1455 WRITE_ONCE(vsi->rx_rings[i], ring);
1461 ice_vsi_clear_rings(vsi);
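Rings are published with WRITE_ONCE() only after they are fully initialized, and retired with kfree_rcu() so in-flight readers finish before the memory is reclaimed. A user-space model of that publish/retire protocol using C11 release stores in place of WRITE_ONCE; the RCU grace period has no direct stand-in here, so a comment marks where it belongs:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct ring { int count; };

    static _Atomic(struct ring *) rings[4];

    static int ring_publish(int i, int count)
    {
        struct ring *r = calloc(1, sizeof(*r));

        if (!r)
            return -1;
        r->count = count;   /* fully initialize before publishing */
        atomic_store_explicit(&rings[i], r, memory_order_release);
        return 0;
    }

    static void ring_retire(int i)
    {
        struct ring *r = atomic_exchange(&rings[i], NULL);

        /* kernel: kfree_rcu() here waits out concurrent readers */
        free(r);
    }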
1467 * @vsi: the VSI being changed
1474 void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
1478 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1483 if (vsi->rss_lut_user)
1484 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1486 ice_fill_rss_lut(lut, vsi->rss_table_size,
1487 vsi->rss_size);
1490 ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
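When no user-supplied LUT exists, ice_fill_rss_lut() spreads rss_size queues across the whole table. Judging from the call sites above, the assumed behavior is a simple round-robin fill:

    /* assumed round-robin fill, inferred from the call sites above */
    static void fill_rss_lut(unsigned char *lut, unsigned int table_size,
                             unsigned int rss_size)
    {
        unsigned int i;

        for (i = 0; i < table_size; i++)
            lut[i] = i % rss_size;   /* hash buckets spread over queues */
    }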
1496 * @vsi: VSI to be configured
1499 void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable)
1503 ice_for_each_rxq(vsi, i)
1505 vsi->rx_rings[i]->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
1507 vsi->rx_rings[i]->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
1512 * @vsi: VSI to be configured
1514 int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
1516 struct ice_pf *pf = vsi->back;
1522 if (vsi->type == ICE_VSI_PF && vsi->ch_rss_size &&
1524 vsi->rss_size = min_t(u16, vsi->rss_size, vsi->ch_rss_size);
1526 vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);
1534 if (vsi->orig_rss_size && vsi->rss_size < vsi->orig_rss_size &&
1535 vsi->orig_rss_size <= vsi->num_rxq) {
1536 vsi->rss_size = vsi->orig_rss_size;
1538 vsi->orig_rss_size = 0;
1542 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1546 if (vsi->rss_lut_user)
1547 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1549 ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
1551 err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
1563 if (vsi->rss_hkey_user)
1564 memcpy(key, vsi->rss_hkey_user, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
1568 err = ice_set_rss_key(vsi, key);
1580 * @vsi: VSI to be configured
1586 static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
1588 struct ice_pf *pf = vsi->back;
1594 dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
1595 vsi->vsi_num);
1599 status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HENA);
1601 dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
1602 vsi->vsi_num, status);
1676 * @vsi: VSI to be configured
1685 static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
1687 u16 vsi_num = vsi->vsi_num;
1688 struct ice_pf *pf = vsi->back;
1696 dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
1703 status = ice_add_rss_cfg(hw, vsi, cfg);
1737 * @vsi: the VSI to be updated
1739 void ice_update_eth_stats(struct ice_vsi *vsi)
1742 struct ice_hw *hw = &vsi->back->hw;
1743 struct ice_pf *pf = vsi->back;
1744 u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */
1746 prev_es = &vsi->eth_stats_prev;
1747 cur_es = &vsi->eth_stats;
1750 vsi->stat_offsets_loaded = false;
1752 ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
1755 ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded,
1758 ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded,
1761 ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded,
1764 ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
1767 ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded,
1770 ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded,
1773 ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded,
1776 ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded,
1779 ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
1782 vsi->stat_offsets_loaded = true;
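Each ice_stat_update40() call above turns a free-running 40-bit hardware counter into a monotonic software total: the first read only seeds the snapshot, and every later read accumulates a wrap-safe delta. A minimal model of that update step, assuming the counter wraps modulo 2^40:

    #include <stdbool.h>
    #include <stdint.h>

    #define CNT40_MASK ((1ULL << 40) - 1)

    static void stat_update40(uint64_t hw_val, bool offsets_loaded,
                              uint64_t *prev, uint64_t *cur)
    {
        if (!offsets_loaded)
            *prev = hw_val;                    /* first read seeds only */
        *cur += (hw_val - *prev) & CNT40_MASK; /* wrap-safe delta */
        *prev = hw_val;
    }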
1838 struct ice_hw *hw = &q_vector->vsi->back->hw;
1871 struct ice_hw *hw = &q_vector->vsi->back->hw;
1920 * @vsi: the VSI being configured
1925 void ice_vsi_cfg_msix(struct ice_vsi *vsi)
1927 struct ice_pf *pf = vsi->back;
1932 ice_for_each_q_vector(vsi, i) {
1933 struct ice_q_vector *q_vector = vsi->q_vectors[i];
1950 ice_cfg_txq_interrupt(vsi, txq, reg_idx,
1956 ice_cfg_rxq_interrupt(vsi, rxq, reg_idx,
1965 * @vsi: the VSI whose rings are to be enabled
1969 int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi)
1971 return ice_vsi_ctrl_all_rx_rings(vsi, true);
1976 * @vsi: the VSI whose rings are to be disabled
1980 int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
1982 return ice_vsi_ctrl_all_rx_rings(vsi, false);
1987 * @vsi: the VSI being configured
1994 ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
1999 if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
2009 ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
2010 status = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num,
2022 * @vsi: the VSI being configured
2027 ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
2030 return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq);
2035 * @vsi: the VSI being configured
2037 int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
2039 return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq);
2044 * @vsi: the VSI being configured
2048 bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi)
2050 struct ice_pf *pf = vsi->back;
2054 ice_for_each_rxq(vsi, i) {
2058 pf_q = vsi->rxq_map[i];
2067 static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
2069 if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
2070 vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
2071 vsi->tc_cfg.numtc = 1;
2076 ice_vsi_set_dcb_tc_cfg(vsi);
2081 * @vsi: the VSI being configured
2085 void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
2089 struct ice_pf *pf = vsi->back;
2097 status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX,
2101 status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num,
2104 status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX,
2112 vsi->vsi_num, status);
2117 * @vsi: pointer to the VSI
2122 static void ice_set_agg_vsi(struct ice_vsi *vsi)
2124 struct device *dev = ice_pf_to_dev(vsi->back);
2130 struct ice_pf *pf = vsi->back;
2143 switch (vsi->type) {
2167 ice_vsi_type_str(vsi->type));
2205 (u8)vsi->tc_cfg.ena_tc);
2217 status = ice_move_vsi_to_agg(port_info, agg_id, vsi->idx,
2218 (u8)vsi->tc_cfg.ena_tc);
2221 vsi->idx, agg_id);
2231 vsi->agg_node = agg_node;
2233 vsi->idx, vsi->tc_cfg.ena_tc, vsi->agg_node->agg_id,
2234 vsi->agg_node->num_vsis);
2237 static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi)
2245 if (!(vsi->tc_cfg.ena_tc & BIT(i)))
2248 if (vsi->type == ICE_VSI_CHNL) {
2249 if (!vsi->alloc_txq && vsi->num_txq)
2250 max_txqs[i] = vsi->num_txq;
2254 max_txqs[i] = vsi->alloc_txq;
2257 if (vsi->type == ICE_VSI_PF)
2258 max_txqs[i] += vsi->num_xdp_txq;
2261 dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
2262 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2266 vsi->vsi_num, ret);
2275 * @vsi: pointer to VSI
2279 ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
2281 struct device *dev = ice_pf_to_dev(vsi->back);
2282 struct ice_pf *pf = vsi->back;
2285 vsi->vsw = pf->first_sw;
2287 ret = ice_vsi_alloc_def(vsi, params->ch);
2292 ret = ice_vsi_alloc_stat_arrays(vsi);
2296 ice_alloc_fd_res(vsi);
2298 ret = ice_vsi_get_qs(vsi);
2300 dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
2301 vsi->idx);
2306 ice_vsi_set_rss_params(vsi);
2309 ice_vsi_set_tc_cfg(vsi);
2312 ret = ice_vsi_init(vsi, params->flags);
2316 ice_vsi_init_vlan_ops(vsi);
2318 switch (vsi->type) {
2322 ret = ice_vsi_alloc_q_vectors(vsi);
2326 ret = ice_vsi_alloc_rings(vsi);
2330 ret = ice_vsi_alloc_ring_stats(vsi);
2334 ice_vsi_map_rings_to_vectors(vsi);
2337 ice_vsi_set_napi_queues(vsi);
2339 vsi->stat_offsets_loaded = false;
2341 if (ice_is_xdp_ena_vsi(vsi)) {
2342 ret = ice_vsi_determine_xdp_res(vsi);
2345 ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
2351 if (vsi->type != ICE_VSI_CTRL)
2357 ice_vsi_cfg_rss_lut_key(vsi);
2358 ice_vsi_set_rss_flow_fld(vsi);
2360 ice_init_arfs(vsi);
2364 ice_vsi_cfg_rss_lut_key(vsi);
2365 ice_vsi_set_rss_flow_fld(vsi);
2374 ret = ice_vsi_alloc_q_vectors(vsi);
2378 ret = ice_vsi_alloc_rings(vsi);
2382 ret = ice_vsi_alloc_ring_stats(vsi);
2386 vsi->stat_offsets_loaded = false;
2393 ice_vsi_cfg_rss_lut_key(vsi);
2394 ice_vsi_set_vf_rss_flow_fld(vsi);
2398 ret = ice_vsi_alloc_rings(vsi);
2402 ret = ice_vsi_alloc_ring_stats(vsi);
2418 ice_vsi_free_q_vectors(vsi);
2420 ice_vsi_delete_from_hw(vsi);
2422 ice_vsi_put_qs(vsi);
2424 ice_vsi_free_stats(vsi);
2426 ice_vsi_free_arrays(vsi);
2432 * @vsi: pointer to VSI
2435 int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
2437 struct ice_pf *pf = vsi->back;
2443 vsi->type = params->type;
2444 vsi->port_info = params->pi;
2447 vsi->vf = params->vf;
2449 ret = ice_vsi_cfg_def(vsi, params);
2453 ret = ice_vsi_cfg_tc_lan(vsi->back, vsi);
2455 ice_vsi_decfg(vsi);
2457 if (vsi->type == ICE_VSI_CTRL) {
2458 if (vsi->vf) {
2459 WARN_ON(vsi->vf->ctrl_vsi_idx != ICE_NO_VSI);
2460 vsi->vf->ctrl_vsi_idx = vsi->idx;
2463 pf->ctrl_vsi_idx = vsi->idx;
2472 * @vsi: pointer to VSI
2474 void ice_vsi_decfg(struct ice_vsi *vsi)
2476 struct ice_pf *pf = vsi->back;
2482 if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
2484 ice_cfg_sw_lldp(vsi, false, false);
2486 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
2487 err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
2490 vsi->vsi_num, err);
2492 if (ice_is_xdp_ena_vsi(vsi))
2496 ice_destroy_xdp_rings(vsi);
2498 ice_vsi_clear_rings(vsi);
2499 ice_vsi_free_q_vectors(vsi);
2500 ice_vsi_put_qs(vsi);
2501 ice_vsi_free_arrays(vsi);
2509 if (vsi->type == ICE_VSI_VF &&
2510 vsi->agg_node && vsi->agg_node->valid)
2511 vsi->agg_node->num_vsis--;
2528 struct ice_vsi *vsi;
2538 vsi = ice_vsi_alloc(pf);
2539 if (!vsi) {
2544 ret = ice_vsi_cfg(vsi, params);
2557 if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) {
2558 ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
2560 ice_cfg_sw_lldp(vsi, true, true);
2563 if (!vsi->agg_node)
2564 ice_set_agg_vsi(vsi);
2566 return vsi;
2569 ice_vsi_free(vsi);
2576 * @vsi: the VSI being cleaned up
2578 static void ice_vsi_release_msix(struct ice_vsi *vsi)
2580 struct ice_pf *pf = vsi->back;
2586 ice_for_each_q_vector(vsi, i) {
2587 struct ice_q_vector *q_vector = vsi->q_vectors[i];
2592 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
2593 if (ice_is_xdp_ena_vsi(vsi)) {
2594 u32 xdp_txq = txq + vsi->num_xdp_txq;
2596 wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0);
2603 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
2613 * @vsi: the VSI being configured
2615 void ice_vsi_free_irq(struct ice_vsi *vsi)
2617 struct ice_pf *pf = vsi->back;
2620 if (!vsi->q_vectors || !vsi->irqs_ready)
2623 ice_vsi_release_msix(vsi);
2624 if (vsi->type == ICE_VSI_VF)
2627 vsi->irqs_ready = false;
2628 ice_free_cpu_rx_rmap(vsi);
2630 ice_for_each_q_vector(vsi, i) {
2633 irq_num = vsi->q_vectors[i]->irq.virq;
2636 if (!vsi->q_vectors[i] ||
2637 !(vsi->q_vectors[i]->num_ring_tx ||
2638 vsi->q_vectors[i]->num_ring_rx))
2648 devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
2654 * @vsi: the VSI having resources freed
2656 void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
2660 if (!vsi->tx_rings)
2663 ice_for_each_txq(vsi, i)
2664 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2665 ice_free_tx_ring(vsi->tx_rings[i]);
2670 * @vsi: the VSI having resources freed
2672 void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
2676 if (!vsi->rx_rings)
2679 ice_for_each_rxq(vsi, i)
2680 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2681 ice_free_rx_ring(vsi->rx_rings[i]);
2686 * @vsi: the VSI being shut down
2688 void ice_vsi_close(struct ice_vsi *vsi)
2690 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state))
2691 ice_down(vsi);
2693 ice_vsi_free_irq(vsi);
2694 ice_vsi_free_tx_rings(vsi);
2695 ice_vsi_free_rx_rings(vsi);
2700 * @vsi: the VSI being resumed
2703 int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
2707 if (!test_bit(ICE_VSI_NEEDS_RESTART, vsi->state))
2710 clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
2712 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
2713 if (netif_running(vsi->netdev)) {
2717 err = ice_open_internal(vsi->netdev);
2722 } else if (vsi->type == ICE_VSI_CTRL) {
2723 err = ice_vsi_open_ctrl(vsi);
2731 * @vsi: the VSI being paused
2734 void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
2736 if (test_bit(ICE_VSI_DOWN, vsi->state))
2739 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
2741 if (vsi->type == ICE_VSI_PF && vsi->netdev) {
2742 if (netif_running(vsi->netdev)) {
2746 ice_vsi_close(vsi);
2751 ice_vsi_close(vsi);
2753 } else if (vsi->type == ICE_VSI_CTRL ||
2754 vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
2755 ice_vsi_close(vsi);
2783 * @vsi: VSI being configured
2792 ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
2795 struct ice_pf *pf = vsi->back;
2797 if (!vsi->netdev)
2804 __ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
2807 __ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
2825 __ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
2830 __ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
2849 ice_queue_set_napi(q_vector->vsi, rx_ring->q_index,
2853 ice_queue_set_napi(q_vector->vsi, tx_ring->q_index,
2861 * @vsi: VSI pointer
2865 void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
2869 if (!vsi->netdev)
2872 ice_for_each_q_vector(vsi, i)
2873 ice_q_vector_set_napi_queues(vsi->q_vectors[i]);
2878 * @vsi: the VSI being removed
2882 int ice_vsi_release(struct ice_vsi *vsi)
2886 if (!vsi->back)
2888 pf = vsi->back;
2891 ice_rss_clean(vsi);
2893 ice_vsi_close(vsi);
2894 ice_vsi_decfg(vsi);
2901 ice_vsi_delete(vsi);
2908 * @vsi: VSI connected with q_vectors
2914 ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
2919 ice_for_each_q_vector(vsi, i) {
2920 struct ice_q_vector *q_vector = vsi->q_vectors[i];
2926 if (i < vsi->num_txq)
2928 if (i < vsi->num_rxq)
2932 return vsi->num_q_vectors;
2937 * @vsi: VSI connected with q_vectors
2946 ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
2952 if ((size && !coalesce) || !vsi)
2961 for (i = 0; i < size && i < vsi->num_q_vectors; i++) {
2977 if (i < vsi->alloc_rxq && coalesce[i].rx_valid) {
2978 rc = &vsi->q_vectors[i]->rx;
2981 } else if (i < vsi->alloc_rxq) {
2982 rc = &vsi->q_vectors[i]->rx;
2987 if (i < vsi->alloc_txq && coalesce[i].tx_valid) {
2988 rc = &vsi->q_vectors[i]->tx;
2991 } else if (i < vsi->alloc_txq) {
2992 rc = &vsi->q_vectors[i]->tx;
2997 vsi->q_vectors[i]->intrl = coalesce[i].intrl;
2998 ice_set_q_vector_intrl(vsi->q_vectors[i]);
3004 for (; i < vsi->num_q_vectors; i++) {
3006 rc = &vsi->q_vectors[i]->tx;
3011 rc = &vsi->q_vectors[i]->rx;
3015 vsi->q_vectors[i]->intrl = coalesce[0].intrl;
3016 ice_set_q_vector_intrl(vsi->q_vectors[i]);
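Across a rebuild, the driver snapshots each vector's coalesce settings, then replays them onto the new vector set, copying vector 0's values to any vectors that did not exist before. A sketch of that save/restore shape, with itr_tx, itr_rx, and intrl as assumed field names:

    struct coalesce_stored { unsigned short itr_tx, itr_rx, intrl; };
    struct q_vector { unsigned short itr_tx, itr_rx, intrl; };

    static int save_coalesce(const struct q_vector *vec, int nvec,
                             struct coalesce_stored *out)
    {
        int i;

        for (i = 0; i < nvec; i++) {
            out[i].itr_tx = vec[i].itr_tx;
            out[i].itr_rx = vec[i].itr_rx;
            out[i].intrl = vec[i].intrl;
        }
        return nvec;
    }

    static void restore_coalesce(struct q_vector *vec, int nvec,
                                 const struct coalesce_stored *in, int nsaved)
    {
        int i;

        if (nsaved <= 0)
            return;
        for (i = 0; i < nvec; i++) {
            /* vectors added by the rebuild inherit vector 0's settings */
            const struct coalesce_stored *src = i < nsaved ? &in[i] : &in[0];

            vec[i].itr_tx = src->itr_tx;
            vec[i].itr_rx = src->itr_rx;
            vec[i].intrl = src->intrl;
        }
    }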
3022 * @vsi: VSI pointer
3025 ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi)
3027 u16 req_txq = vsi->req_txq ? vsi->req_txq : vsi->alloc_txq;
3028 u16 req_rxq = vsi->req_rxq ? vsi->req_rxq : vsi->alloc_rxq;
3032 struct ice_pf *pf = vsi->back;
3033 u16 prev_txq = vsi->alloc_txq;
3034 u16 prev_rxq = vsi->alloc_rxq;
3037 vsi_stat = pf->vsi_stats[vsi->idx];
3082 * @vsi: VSI to be rebuilt
3090 int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
3098 if (!vsi)
3101 params = ice_vsi_to_params(vsi);
3104 pf = vsi->back;
3105 if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
3108 ret = ice_vsi_realloc_stat_arrays(vsi);
3112 ice_vsi_decfg(vsi);
3113 ret = ice_vsi_cfg_def(vsi, &params);
3117 coalesce = kcalloc(vsi->num_q_vectors,
3122 prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
3124 ret = ice_vsi_cfg_tc_lan(pf, vsi);
3135 ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
3141 ice_vsi_decfg(vsi);
3189 * @vsi: VSI being configured
3192 static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
3194 vsi->info.mapping_flags = ctx->info.mapping_flags;
3195 memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping,
3196 sizeof(vsi->info.q_mapping));
3197 memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping,
3198 sizeof(vsi->info.tc_mapping));
3203 * @vsi: the VSI being configured
3206 void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
3208 struct net_device *netdev = vsi->netdev;
3209 struct ice_pf *pf = vsi->back;
3210 int numtc = vsi->tc_cfg.numtc;
3219 if (vsi->type == ICE_VSI_CHNL)
3227 if (vsi->type == ICE_VSI_PF && ice_is_adq_active(pf))
3228 numtc = vsi->all_numtc;
3236 if (vsi->tc_cfg.ena_tc & BIT(i))
3238 vsi->tc_cfg.tc_info[i].netdev_tc,
3239 vsi->tc_cfg.tc_info[i].qcount_tx,
3240 vsi->tc_cfg.tc_info[i].qoffset);
3243 if (!(vsi->all_enatc & BIT(i)))
3245 if (!vsi->mqprio_qopt.qopt.count[i])
3248 vsi->mqprio_qopt.qopt.count[i],
3249 vsi->mqprio_qopt.qopt.offset[i]);
3259 netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
3266 * @vsi: the VSI being configured

3273 ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
3277 u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0];
3278 int tc0_qcount = vsi->mqprio_qopt.qopt.count[0];
3283 vsi->tc_cfg.ena_tc = ena_tc ? ena_tc : 1;
3290 if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
3292 vsi->tc_cfg.tc_info[i].qoffset = 0;
3293 vsi->tc_cfg.tc_info[i].qcount_rx = 1;
3294 vsi->tc_cfg.tc_info[i].qcount_tx = 1;
3295 vsi->tc_cfg.tc_info[i].netdev_tc = 0;
3300 offset = vsi->mqprio_qopt.qopt.offset[i];
3301 qcount_rx = vsi->mqprio_qopt.qopt.count[i];
3302 qcount_tx = vsi->mqprio_qopt.qopt.count[i];
3303 vsi->tc_cfg.tc_info[i].qoffset = offset;
3304 vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
3305 vsi->tc_cfg.tc_info[i].qcount_tx = qcount_tx;
3306 vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
3309 if (vsi->all_numtc && vsi->all_numtc != vsi->tc_cfg.numtc) {
3311 if (!(vsi->all_enatc & BIT(i)))
3313 offset = vsi->mqprio_qopt.qopt.offset[i];
3314 qcount_rx = vsi->mqprio_qopt.qopt.count[i];
3315 qcount_tx = vsi->mqprio_qopt.qopt.count[i];
3320 if (new_txq > vsi->alloc_txq) {
3321 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
3322 new_txq, vsi->alloc_txq);
3327 if (new_rxq > vsi->alloc_rxq) {
3328 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
3329 new_rxq, vsi->alloc_rxq);
3334 vsi->num_txq = new_txq;
3335 vsi->num_rxq = new_rxq;
3339 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
3345 if (tc0_qcount && tc0_qcount < vsi->num_rxq) {
3346 vsi->cnt_q_avail = vsi->num_rxq - tc0_qcount;
3347 vsi->next_base_q = tc0_qcount;
3349 dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_txq = %d\n", vsi->num_txq);
3350 dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq);
3351 dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n",
3352 vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc);
3359 * @vsi: VSI to be configured
3364 int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
3367 struct ice_pf *pf = vsi->back;
3375 if (vsi->tc_cfg.ena_tc == ena_tc &&
3376 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
3384 max_txqs[i] = vsi->alloc_txq;
3388 if (vsi->type == ICE_VSI_CHNL &&
3390 max_txqs[i] = vsi->num_txq;
3393 memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg));
3394 vsi->tc_cfg.ena_tc = ena_tc;
3395 vsi->tc_cfg.numtc = num_tc;
3402 ctx->info = vsi->info;
3404 if (vsi->type == ICE_VSI_PF &&
3406 ret = ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc);
3408 ret = ice_vsi_setup_q_map(vsi, ctx);
3411 memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg));
3417 ret = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
3423 if (vsi->type == ICE_VSI_PF &&
3425 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
3427 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
3428 vsi->tc_cfg.ena_tc, max_txqs);
3432 vsi->vsi_num, ret);
3435 ice_vsi_update_q_map(vsi, ctx);
3436 vsi->info.valid_sections = 0;
3438 ice_vsi_cfg_netdev_tc(vsi, ena_tc);
3500 * @vsi: VSI to compare against default forwarding VSI
3505 bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi)
3507 return ice_check_if_dflt_vsi(vsi->port_info, vsi->idx, NULL);
3512 * @vsi: VSI getting set as the default forwarding VSI on the switch
3520 int ice_set_dflt_vsi(struct ice_vsi *vsi)
3525 if (!vsi)
3528 dev = ice_pf_to_dev(vsi->back);
3530 if (ice_lag_is_switchdev_running(vsi->back)) {
3532 vsi->vsi_num);
3537 if (ice_is_vsi_dflt_vsi(vsi)) {
3539 vsi->vsi_num);
3543 status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, true, ICE_FLTR_RX);
3546 vsi->vsi_num, status);
3555 * @vsi: VSI to remove from filter list
3561 int ice_clear_dflt_vsi(struct ice_vsi *vsi)
3566 if (!vsi)
3569 dev = ice_pf_to_dev(vsi->back);
3572 if (!ice_is_dflt_vsi_in_use(vsi->port_info))
3575 status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, false,
3579 vsi->vsi_num, status);
3588 * @vsi: the VSI whose link speed is being queried
3592 int ice_get_link_speed_mbps(struct ice_vsi *vsi)
3596 link_speed = vsi->port_info->phy.link_info.link_speed;
3603 * @vsi: the VSI whose link speed is being queried
3607 int ice_get_link_speed_kbps(struct ice_vsi *vsi)
3611 speed_mbps = ice_get_link_speed_mbps(vsi);
3618 * @vsi: VSI to be configured
3625 int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
3627 struct ice_pf *pf = vsi->back;
3633 if (!vsi->port_info) {
3635 vsi->idx, vsi->type);
3639 speed = ice_get_link_speed_kbps(vsi);
3642 min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
3649 status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
3653 min_tx_rate, ice_vsi_type_str(vsi->type),
3654 vsi->idx);
3659 min_tx_rate, ice_vsi_type_str(vsi->type));
3661 status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
3662 vsi->idx, 0,
3666 ice_vsi_type_str(vsi->type), vsi->idx);
3671 ice_vsi_type_str(vsi->type), vsi->idx);
3679 * @vsi: VSI to be configured
3686 int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate)
3688 struct ice_pf *pf = vsi->back;
3694 if (!vsi->port_info) {
3696 vsi->idx, vsi->type);
3700 speed = ice_get_link_speed_kbps(vsi);
3703 max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
3710 status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
3714 max_tx_rate, ice_vsi_type_str(vsi->type),
3715 vsi->idx);
3720 max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx);
3722 status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
3723 vsi->idx, 0,
3727 ice_vsi_type_str(vsi->type), vsi->idx);
3732 ice_vsi_type_str(vsi->type), vsi->idx);
3740 * @vsi: VSI to modify physical link on
3743 int ice_set_link(struct ice_vsi *vsi, bool ena)
3745 struct device *dev = ice_pf_to_dev(vsi->back);
3746 struct ice_port_info *pi = vsi->port_info;
3750 if (vsi->type != ICE_VSI_PF)
3777 * @vsi: VSI used to add VLAN filters
3792 int ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
3794 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3799 err = vlan_ops->add_vlan(vsi, &vlan);
3804 if (!ice_is_dvm_ena(&vsi->back->hw))
3808 err = vlan_ops->add_vlan(vsi, &vlan);
3817 * @vsi: VSI used to add VLAN filters
3822 int ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
3824 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3829 err = vlan_ops->del_vlan(vsi, &vlan);
3834 if (!ice_is_dvm_ena(&vsi->back->hw))
3838 err = vlan_ops->del_vlan(vsi, &vlan);
3845 return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
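Untagged traffic requires a VLAN 0 filter, and when double VLAN mode (DVM) is active a second VLAN 0 filter with an explicit TPID is added as well. A sketch of that branch; the TPID placement follows the pattern the lines above suggest and should be read as an assumption, with add_vlan() standing in for the vlan_ops callback:

    #include <stdint.h>

    #define ETH_P_8021Q 0x8100

    struct vlan { uint16_t tpid, vid, prio; };

    static int add_vlan(struct vlan v) { (void)v; return 0; /* stand-in */ }

    static int add_vlan_zero(int dvm_ena)
    {
        /* VLAN 0, no TPID: covers untagged/priority-tagged frames */
        int err = add_vlan((struct vlan){ 0, 0, 0 });

        if (err)
            return err;
        if (!dvm_ena)
            return 0;   /* single VLAN mode needs only the one filter */
        /* DVM: also match VLAN 0 with an explicit 802.1Q TPID */
        return add_vlan((struct vlan){ ETH_P_8021Q, 0, 0 });
    }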
3851 * @vsi: VSI used to get the VLAN mode
3856 static u16 ice_vsi_num_zero_vlans(struct ice_vsi *vsi)
3861 if (vsi->type == ICE_VSI_VF) {
3862 if (WARN_ON(!vsi->vf))
3865 if (ice_vf_is_port_vlan_ena(vsi->vf))
3869 if (ice_is_dvm_ena(&vsi->back->hw))
3877 * @vsi: VSI used to determine if any non-zero VLANs have been added
3879 bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi)
3881 return (vsi->num_vlan > ice_vsi_num_zero_vlans(vsi));
3886 * @vsi: VSI used to get the number of non-zero VLANs added
3888 u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi)
3890 return (vsi->num_vlan - ice_vsi_num_zero_vlans(vsi));
3969 * @vsi: pointer to VSI structure
3973 ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *))
3977 ctx.info = vsi->info;
3981 if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
3984 vsi->info = ctx.info;
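ice_vsi_update_security() follows a read-modify-write shape: copy the cached context, let the caller's fill() mutate the copy, push it to hardware, and commit the copy back to vsi->info only on success, so the cache never diverges from the device. A minimal model with stand-in types:

    struct vsi_ctx { int info; };
    struct vsi { struct vsi_ctx info; };

    static int hw_update_vsi(const struct vsi_ctx *ctx) { (void)ctx; return 0; }

    static int vsi_update(struct vsi *v, void (*fill)(struct vsi_ctx *))
    {
        struct vsi_ctx ctx = v->info;   /* work on a copy */

        fill(&ctx);
        if (hw_update_vsi(&ctx))
            return -1;                  /* failure: cache untouched */
        v->info = ctx;                  /* commit only on success */
        return 0;
    }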
4030 * @vsi: pointer to VSI structure
4034 ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set)
4037 .info = vsi->info,
4046 if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
4049 vsi->info = ctx.info;