Lines Matching refs:pf

19  * @pf: pointer to the PF structure
24 static void ice_free_vf_entries(struct ice_pf *pf)
26 struct ice_vfs *vfs = &pf->vfs;
49 struct ice_pf *pf = vf->pf;
75 wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
76 ice_flush(&pf->hw);
89 struct ice_pf *pf = vf->pf;
95 hw = &pf->hw;
100 dev = ice_pf_to_dev(pf);
127 * @pf: pointer to the PF structure
129 * Since no MSIX entries are taken from the pf->irq_tracker, just clear
130 * the pf->sriov_base_vector.
134 static int ice_sriov_free_msix_res(struct ice_pf *pf)
136 if (!pf)
139 bitmap_free(pf->sriov_irq_bm);
140 pf->sriov_irq_size = 0;
141 pf->sriov_base_vector = 0;
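Taken together, the matches at 134-141 cover essentially the whole helper. A minimal sketch of how the fragments fit, assuming the usual kernel error-code style (-EINVAL for a NULL pf, 0 on success; neither return appears in the matches):

    static int ice_sriov_free_msix_res(struct ice_pf *pf)
    {
            if (!pf)
                    return -EINVAL;         /* assumed error code for a NULL pf */

            /* no entries were taken from pf->irq_tracker, so only the
             * SR-IOV bookkeeping has to be cleared
             */
            bitmap_free(pf->sriov_irq_bm);
            pf->sriov_irq_size = 0;
            pf->sriov_base_vector = 0;

            return 0;
    }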
148 * @pf: pointer to the PF structure
150 void ice_free_vfs(struct ice_pf *pf)
152 struct device *dev = ice_pf_to_dev(pf);
153 struct ice_vfs *vfs = &pf->vfs;
154 struct ice_hw *hw = &pf->hw;
158 if (!ice_has_vfs(pf))
161 while (test_and_set_bit(ICE_VF_DIS, pf->state))
168 if (!pci_vfs_assigned(pf->pdev))
169 pci_disable_sriov(pf->pdev);
173 ice_eswitch_reserve_cp_queues(pf, -ice_get_num_vfs(pf));
177 ice_for_each_vf(pf, bkt, vf) {
180 ice_eswitch_detach(pf, vf);
190 if (!pci_vfs_assigned(pf->pdev)) {
204 if (ice_sriov_free_msix_res(pf))
208 ice_free_vf_entries(pf);
212 clear_bit(ICE_VF_DIS, pf->state);
213 clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
226 struct ice_pf *pf = vf->pf;
234 vsi = ice_vsi_setup(pf, &params);
237 dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
260 struct ice_pf *pf = vf->pf;
265 hw = &pf->hw;
270 pf->hw.func_caps.common_cap.msix_vector_first_id;
304 struct device *dev = ice_pf_to_dev(vf->pf);
306 struct ice_hw *hw = &vf->pf->hw;
376 * @pf: pointer to PF structure
381 * just set the pf->sriov_base_vector and return success.
389 static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
391 u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
392 int vectors_used = ice_get_max_used_msix_vector(pf);
403 pf->sriov_base_vector = sriov_base_vector;
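The matches at 389-403 outline the vector-budgeting helper. A sketch of how they likely combine; the base-vector computation and the -EINVAL failure path are assumptions inferred from the surrounding fragments, not shown matches:

    static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
    {
            u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
            int vectors_used = ice_get_max_used_msix_vector(pf);
            int sriov_base_vector;

            /* carve the SR-IOV vectors from the end of the MSIX space */
            sriov_base_vector = total_vectors - num_msix_needed;

            /* assumed check: the SR-IOV range must not collide with
             * vectors already used by the PF
             */
            if (sriov_base_vector < vectors_used)
                    return -EINVAL;

            pf->sriov_base_vector = sriov_base_vector;

            return 0;
    }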
410 * @pf: pointer to the PF structure
430 static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
432 int vectors_used = ice_get_max_used_msix_vector(pf);
435 struct device *dev = ice_pf_to_dev(pf);
438 lockdep_assert_held(&pf->vfs.table_lock);
444 msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
464 avail_qs = ice_get_avail_txq_count(pf) / num_vfs;
472 avail_qs = ice_get_avail_rxq_count(pf) / num_vfs;
484 err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs);
492 pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);
493 pf->vfs.num_msix_per = num_msix_per_vf;
495 num_vfs, pf->vfs.num_msix_per, pf->vfs.num_qps_per);
502 * @pf: pointer to PF structure
518 static int ice_sriov_get_irqs(struct ice_pf *pf, u16 needed)
520 int res = bitmap_find_next_zero_area(pf->sriov_irq_bm,
521 pf->sriov_irq_size, 0, needed, 0);
523 int index = pf->sriov_irq_size - res - needed;
525 if (res >= pf->sriov_irq_size || index < pf->sriov_base_vector)
528 bitmap_set(pf->sriov_irq_bm, res, needed);
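Matches 518-528 show the allocation side of the per-VF IRQ bitmap. A sketch, assuming the helper returns the computed global vector index and -ENOENT on failure (neither return statement is among the matches):

    static int ice_sriov_get_irqs(struct ice_pf *pf, u16 needed)
    {
            int res = bitmap_find_next_zero_area(pf->sriov_irq_bm,
                                                 pf->sriov_irq_size, 0, needed, 0);
            /* the bitmap is indexed from the end of the MSIX space, so
             * translate the bitmap position into a global vector index
             */
            int index = pf->sriov_irq_size - res - needed;

            if (res >= pf->sriov_irq_size || index < pf->sriov_base_vector)
                    return -ENOENT;         /* assumed error code */

            bitmap_set(pf->sriov_irq_bm, res, needed);
            return index;                   /* assumed return value */
    }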
534 * @pf: pointer to PF structure
537 static void ice_sriov_free_irqs(struct ice_pf *pf, struct ice_vf *vf)
540 int bm_i = pf->sriov_irq_size - vf->first_vector_idx - vf->num_msix;
542 bitmap_clear(pf->sriov_irq_bm, bm_i, vf->num_msix);
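The free path at 537-542 mirrors the allocation above. A sketch; resetting vf->first_vector_idx at the end is an assumption, not a shown match:

    static void ice_sriov_free_irqs(struct ice_pf *pf, struct ice_vf *vf)
    {
            /* translate the VF's first global vector back into a bitmap offset */
            int bm_i = pf->sriov_irq_size - vf->first_vector_idx - vf->num_msix;

            bitmap_clear(pf->sriov_irq_bm, bm_i, vf->num_msix);
            vf->first_vector_idx = 0;       /* assumed reset of the stale index */
    }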
555 struct ice_pf *pf = vf->pf;
559 vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
580 * @pf: PF the VFs are associated with
582 static int ice_start_vfs(struct ice_pf *pf)
584 struct ice_hw *hw = &pf->hw;
589 lockdep_assert_held(&pf->vfs.table_lock);
592 ice_for_each_vf(pf, bkt, vf) {
597 dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
602 retval = ice_eswitch_attach(pf, vf);
604 dev_err(ice_pf_to_dev(pf), "Failed to attach VF %d to eswitch, error %d\n",
620 ice_for_each_vf(pf, bkt, vf) {
652 struct ice_hw *hw = &vf->pf->hw;
667 struct ice_pf *pf = vf->pf;
669 wr32(&pf->hw, VF_MBX_ARQLEN(vf->vf_id), 0);
670 wr32(&pf->hw, VF_MBX_ATQLEN(vf->vf_id), 0);
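Matches 667-670 zero the VF mailbox ARQ/ATQ length registers during reset handling. A sketch of the surrounding helper; its name and void signature are assumptions, only the body writes appear in the matches:

    static void ice_sriov_clear_mbx_register(struct ice_vf *vf)   /* assumed name */
    {
            struct ice_pf *pf = vf->pf;

            /* clearing the queue lengths disables the VF mailbox queues */
            wr32(&pf->hw, VF_MBX_ARQLEN(vf->vf_id), 0);
            wr32(&pf->hw, VF_MBX_ATQLEN(vf->vf_id), 0);
    }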
682 struct ice_pf *pf = vf->pf;
688 dev = ice_pf_to_dev(pf);
689 hw = &pf->hw;
729 struct ice_pf *pf = vf->pf;
738 reg = rd32(&pf->hw, VPGEN_VFRSTAT(vf->vf_id));
754 struct ice_hw *hw = &vf->pf->hw;
770 wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
787 * @pf: pointer to the PF structure
798 static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
800 struct pci_dev *pdev = pf->pdev;
801 struct ice_vfs *vfs = &pf->vfs;
820 vf->pf = pf;
832 vf->vf_sw_id = pf->first_sw;
837 vf->num_msix = pf->vfs.num_msix_per;
838 vf->num_vf_qs = pf->vfs.num_qps_per;
853 ice_free_vf_entries(pf);
859 * @pf: pointer to the PF structure
862 static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
864 int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
865 struct device *dev = ice_pf_to_dev(pf);
866 struct ice_hw *hw = &pf->hw;
869 pf->sriov_irq_bm = bitmap_zalloc(total_vectors, GFP_KERNEL);
870 if (!pf->sriov_irq_bm)
872 pf->sriov_irq_size = total_vectors;
875 wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
877 set_bit(ICE_OICR_INTR_DIS, pf->state);
880 ret = pci_enable_sriov(pf->pdev, num_vfs);
884 mutex_lock(&pf->vfs.table_lock);
886 ret = ice_set_per_vf_res(pf, num_vfs);
893 ret = ice_create_vf_entries(pf, num_vfs);
900 ice_eswitch_reserve_cp_queues(pf, num_vfs);
901 ret = ice_start_vfs(pf);
908 clear_bit(ICE_VF_DIS, pf->state);
911 if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
914 mutex_unlock(&pf->vfs.table_lock);
919 ice_free_vf_entries(pf);
921 mutex_unlock(&pf->vfs.table_lock);
922 pci_disable_sriov(pf->pdev);
926 clear_bit(ICE_OICR_INTR_DIS, pf->state);
927 bitmap_free(pf->sriov_irq_bm);
933 * @pf: pointer to the PF structure
938 static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
940 struct device *dev = ice_pf_to_dev(pf);
944 ice_free_vfs(pf);
948 if (num_vfs > pf->vfs.num_supported) {
950 num_vfs, pf->vfs.num_supported);
955 err = ice_ena_vfs(pf, num_vfs);
961 set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
967 * @pf: PF to enable SR-IOV on
969 static int ice_check_sriov_allowed(struct ice_pf *pf)
971 struct device *dev = ice_pf_to_dev(pf);
973 if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
978 if (ice_is_safe_mode(pf)) {
983 if (!ice_pf_state_is_nominal(pf)) {
999 struct ice_pf *pf = pci_get_drvdata(pdev);
1001 return pf->sriov_irq_size - ice_get_max_used_msix_vector(pf);
1004 static int ice_sriov_move_base_vector(struct ice_pf *pf, int move)
1006 if (pf->sriov_base_vector - move < ice_get_max_used_msix_vector(pf))
1009 pf->sriov_base_vector -= move;
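Matches 1004-1009 cover the base-vector adjustment used when a VF's MSIX count changes. A sketch; the -ENOMEM error code and the final return 0 are assumptions:

    static int ice_sriov_move_base_vector(struct ice_pf *pf, int move)
    {
            /* the SR-IOV range grows downward; refuse to overlap vectors
             * already used by the PF
             */
            if (pf->sriov_base_vector - move < ice_get_max_used_msix_vector(pf))
                    return -ENOMEM;         /* assumed error code */

            pf->sriov_base_vector -= move;
            return 0;
    }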
1013 static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id)
1022 ice_for_each_vf(pf, bkt, tmp_vf) {
1029 ice_sriov_free_irqs(pf, tmp_vf);
1036 tmp_vf = ice_get_vf_by_id(pf, vf_ids[i]);
1041 ice_sriov_get_irqs(pf, tmp_vf->num_msix);
1067 struct ice_pf *pf = pci_get_drvdata(pdev);
1074 if (!ice_get_num_vfs(pf))
1084 if (queues > min(ice_get_avail_txq_count(pf),
1085 ice_get_avail_rxq_count(pf)))
1100 vf = ice_get_vf_by_id(pf, id);
1112 if (ice_sriov_move_base_vector(pf, msix_vec_count - prev_msix)) {
1118 ice_sriov_free_irqs(pf, vf);
1121 ice_sriov_remap_vectors(pf, vf->vf_id);
1125 vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
1135 dev_info(ice_pf_to_dev(pf),
1145 dev_info(ice_pf_to_dev(pf),
1151 vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
1177 struct ice_pf *pf = pci_get_drvdata(pdev);
1178 struct device *dev = ice_pf_to_dev(pf);
1181 err = ice_check_sriov_allowed(pf);
1187 ice_free_vfs(pf);
1195 err = ice_pci_sriov_ena(pf, num_vfs);
1204 * @pf: pointer to the PF structure
1209 void ice_process_vflr_event(struct ice_pf *pf)
1211 struct ice_hw *hw = &pf->hw;
1216 if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
1217 !ice_has_vfs(pf))
1220 mutex_lock(&pf->vfs.table_lock);
1221 ice_for_each_vf(pf, bkt, vf) {
1232 mutex_unlock(&pf->vfs.table_lock);
1237 * @pf: PF used to index all VFs
1247 static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
1253 ice_for_each_vf_rcu(pf, bkt, vf) {
1280 * @pf: PF used for conversion
1283 static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
1285 return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
1290 * @pf: PF that the LAN overflow event happened on
1298 ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1304 dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
1309 vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
1328 struct ice_pf *pf = np->vsi->back;
1334 dev = ice_pf_to_dev(pf);
1336 vf = ice_get_vf_by_id(pf, vf_id);
1388 struct ice_pf *pf = ice_netdev_to_pf(netdev);
1392 vf = ice_get_vf_by_id(pf, vf_id);
1435 struct ice_pf *pf = ice_netdev_to_pf(netdev);
1444 vf = ice_get_vf_by_id(pf, vf_id);
1496 struct ice_pf *pf = ice_netdev_to_pf(netdev);
1500 vf = ice_get_vf_by_id(pf, vf_id);
1504 if (ice_is_eswitch_mode_switchdev(pf)) {
1505 dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
1523 dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
1543 struct ice_pf *pf = ice_netdev_to_pf(netdev);
1547 vf = ice_get_vf_by_id(pf, vf_id);
1581 * @pf: PF associated with VFs
1583 static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
1590 ice_for_each_vf_rcu(pf, bkt, vf)
1620 all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);
1626 dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n",
1647 struct ice_pf *pf = ice_netdev_to_pf(netdev);
1653 dev = ice_pf_to_dev(pf);
1655 vf = ice_get_vf_by_id(pf, vf_id);
1669 if (min_tx_rate && ice_is_dcb_active(pf)) {
1716 struct ice_pf *pf = ice_netdev_to_pf(netdev);
1722 vf = ice_get_vf_by_id(pf, vf_id);
1798 struct ice_pf *pf = ice_netdev_to_pf(netdev);
1804 dev = ice_pf_to_dev(pf);
1812 if (!ice_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) {
1818 vf = ice_get_vf_by_id(pf, vf_id);
1859 struct ice_pf *pf = vf->pf;
1862 dev = ice_pf_to_dev(pf);
1865 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
1867 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
1873 * @pf: pointer to the PF structure
1877 void ice_print_vfs_mdd_events(struct ice_pf *pf)
1879 struct device *dev = ice_pf_to_dev(pf);
1880 struct ice_hw *hw = &pf->hw;
1885 if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
1889 if (time_is_after_jiffies(pf->vfs.last_printed_mdd_jiffies + HZ * 1))
1892 pf->vfs.last_printed_mdd_jiffies = jiffies;
1894 mutex_lock(&pf->vfs.table_lock);
1895 ice_for_each_vf(pf, bkt, vf) {
1913 mutex_unlock(&pf->vfs.table_lock);
1918 * @pf: pointer to the PF structure
1923 void ice_restore_all_vfs_msi_state(struct ice_pf *pf)
1928 ice_for_each_vf(pf, bkt, vf)