Lines matching references to pf; the leading number on each entry is the source file's own line number.

16 struct ice_pf *pf = ice_netdev_to_pf(netdev);
18 while (ice_is_reset_in_progress(pf->state))
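File lines 16-18 above come from the devreset handler and show the entry pattern every callback in this listing starts with: recover the PF from the netdev, then poll until no reset is in flight. A minimal sketch of that shape; the function name, return value, and sleep interval are assumptions, not taken from the listing:

    static u8 ice_dcbnl_devreset_sketch(struct net_device *netdev)
    {
        struct ice_pf *pf = ice_netdev_to_pf(netdev);

        /* Do not touch DCB state while a PF reset is still running */
        while (ice_is_reset_in_progress(pf->state))
            usleep_range(1000, 2000);   /* interval is an assumption */

        /* ... perform the actual reset work here ... */
        return 0;
    }
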
35 struct ice_pf *pf;
37 pf = ice_netdev_to_pf(netdev);
38 dcbxcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
64 struct ice_pf *pf = ice_netdev_to_pf(netdev);
69 if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
70 !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
73 if (pf->lag && pf->lag->bonded) {
78 new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
80 mutex_lock(&pf->tc_mutex);
98 if (ice_dcb_bwchk(pf, new_cfg)) {
103 new_cfg->etscfg.maxtcs = pf->hw.func_caps.common_cap.maxtc;
111 err = ice_pf_dcb_cfg(pf, new_cfg, true);
119 mutex_unlock(&pf->tc_mutex);
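File lines 64-119 are the IEEE ETS set handler and show the guard-and-apply skeleton that every "set" callback in this listing repeats: bail out when a firmware LLDP agent owns DCBX or IEEE mode is off, bail out while the port is in a bonded LAG, then stage the change in desired_dcbx_cfg under pf->tc_mutex, sanity-check the bandwidth split, and push it with ice_pf_dcb_cfg(). A condensed sketch assembled from those fragments; the error codes, the ETS field copy, and the unwind label are assumptions:

    static int ice_dcbnl_setets_sketch(struct net_device *netdev,
                                       struct ieee_ets *ets)
    {
        struct ice_pf *pf = ice_netdev_to_pf(netdev);
        struct ice_dcbx_cfg *new_cfg;
        int err;

        /* Host may only change ETS when it owns DCBX in IEEE mode */
        if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
            !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
            return -EINVAL;                 /* assumption */

        /* Reconfiguration is rejected while the port is bonded */
        if (pf->lag && pf->lag->bonded)
            return -EINVAL;                 /* assumption */

        mutex_lock(&pf->tc_mutex);
        new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;

        /* ... copy *ets into new_cfg->etscfg here (elided) ... */

        if (ice_dcb_bwchk(pf, new_cfg)) {
            err = -EINVAL;                  /* assumption */
            goto out;
        }
        new_cfg->etscfg.maxtcs = pf->hw.func_caps.common_cap.maxtc;

        err = ice_pf_dcb_cfg(pf, new_cfg, true);
    out:
        mutex_unlock(&pf->tc_mutex);
        return err;
    }
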
134 struct ice_pf *pf = ice_netdev_to_pf(dev);
136 if (!test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
139 *num = pf->hw.func_caps.common_cap.maxtc;
149 struct ice_pf *pf = ice_netdev_to_pf(netdev);
151 return pf->dcbx_cap;
161 struct ice_pf *pf = ice_netdev_to_pf(netdev);
165 if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
175 if (mode == pf->dcbx_cap)
178 if (pf->lag && pf->lag->bonded) {
183 qos_cfg = &pf->hw.port_info->qos_cfg;
189 pf->dcbx_cap = mode;
196 dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode);
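File lines 161-196 are the DCBX mode setter. The fragments show three gates before the mode is recorded: a running firmware LLDP agent blocks host changes, a no-op request returns early, and bonded LAG ports are refused; only then is pf->dcbx_cap updated. A sketch of that flow; the return values, the qos_cfg type name, and the CEE/IEEE bookkeeping are assumptions:

    static u8 ice_dcbnl_setdcbx_sketch(struct net_device *netdev, u8 mode)
    {
        struct ice_pf *pf = ice_netdev_to_pf(netdev);
        struct ice_qos_cfg *qos_cfg;    /* type name is an assumption */

        /* A firmware LLDP agent owns DCBX; refuse host mode changes */
        if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
            return 1;                   /* non-zero = rejected (assumption) */

        /* Nothing to do if the requested mode is already active */
        if (mode == pf->dcbx_cap)
            return 0;

        if (pf->lag && pf->lag->bonded)
            return 1;                   /* assumption */

        qos_cfg = &pf->hw.port_info->qos_cfg;
        /* ... record CEE vs. IEEE mode in qos_cfg here (elided) ... */

        pf->dcbx_cap = mode;
        dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode);
        return 0;
    }
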
207 struct ice_pf *pf = ice_netdev_to_pf(netdev);
208 struct ice_port_info *pi = pf->hw.port_info;
240 struct ice_pf *pf = ice_netdev_to_pf(netdev);
241 struct ice_port_info *pi = pf->hw.port_info;
249 ice_get_pfc_delay(&pf->hw, &pfc->delay);
252 pfc->requests[i] = pf->stats.priority_xoff_tx[i];
253 pfc->indications[i] = pf->stats.priority_xoff_rx[i];
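File lines 240-253 are from the IEEE PFC get handler: the link-delay allowance is read from the hardware, and the per-priority XOFF counters are exported through the standard dcbnl requests/indications statistics. A sketch, assuming the loop bound IEEE_8021QAZ_MAX_TCS and omitting the pfc_cap/pfc_en fields the listing does not show:

    static int ice_dcbnl_getpfc_sketch(struct net_device *netdev,
                                       struct ieee_pfc *pfc)
    {
        struct ice_pf *pf = ice_netdev_to_pf(netdev);
        int i;

        ice_get_pfc_delay(&pf->hw, &pfc->delay);

        /* XOFF frames sent/received map to dcbnl requests/indications */
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
            pfc->requests[i] = pf->stats.priority_xoff_tx[i];
            pfc->indications[i] = pf->stats.priority_xoff_rx[i];
        }

        return 0;
    }
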
266 struct ice_pf *pf = ice_netdev_to_pf(netdev);
270 if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
271 !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
274 if (pf->lag && pf->lag->bonded) {
279 mutex_lock(&pf->tc_mutex);
281 new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
286 new_cfg->pfc.pfccap = pf->hw.func_caps.common_cap.maxtc;
290 err = ice_pf_dcb_cfg(pf, new_cfg, true);
295 mutex_unlock(&pf->tc_mutex);
308 struct ice_pf *pf = ice_netdev_to_pf(netdev);
309 struct ice_port_info *pi = pf->hw.port_info;
311 if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
312 !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
319 dev_dbg(ice_pf_to_dev(pf), "Get PFC Config up=%d, setting=%d, pfcenable=0x%x\n",
331 struct ice_pf *pf = ice_netdev_to_pf(netdev);
334 if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
335 !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
341 if (pf->lag && pf->lag->bonded) {
346 new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
348 new_cfg->pfc.pfccap = pf->hw.func_caps.common_cap.maxtc;
354 dev_dbg(ice_pf_to_dev(pf), "Set PFC config UP:%d set:%d pfcena:0x%x\n",
364 struct ice_pf *pf = ice_netdev_to_pf(netdev);
365 struct ice_port_info *pi = pf->hw.port_info;
380 struct ice_pf *pf = ice_netdev_to_pf(netdev);
383 state = test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
385 dev_dbg(ice_pf_to_dev(pf), "DCB enabled state = %d\n", state);
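File lines 380-385 are the CEE getstate callback, which simply reports whether the function is DCB-capable. Reassembled nearly verbatim from the fragments:

    static u8 ice_dcbnl_getstate_sketch(struct net_device *netdev)
    {
        struct ice_pf *pf = ice_netdev_to_pf(netdev);
        u8 state;

        state = test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
        dev_dbg(ice_pf_to_dev(pf), "DCB enabled state = %d\n", state);
        return state;
    }
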
396 struct ice_pf *pf = ice_netdev_to_pf(netdev);
398 if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
399 !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
402 if (pf->lag && pf->lag->bonded) {
408 if (!!state == test_bit(ICE_FLAG_DCB_ENA, pf->flags))
412 set_bit(ICE_FLAG_DCB_ENA, pf->flags);
413 memcpy(&pf->hw.port_info->qos_cfg.desired_dcbx_cfg,
414 &pf->hw.port_info->qos_cfg.local_dcbx_cfg,
417 clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
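File lines 396-417 are the CEE setstate callback. Note the !!state normalization at line 408, which lets a no-op toggle be detected against the flag bit, and that enabling DCB seeds desired_dcbx_cfg from the current local config. A sketch; the return codes and the memcpy size argument are assumptions:

    static u8 ice_dcbnl_setstate_sketch(struct net_device *netdev, u8 state)
    {
        struct ice_pf *pf = ice_netdev_to_pf(netdev);

        if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
            !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
            return 1;                   /* assumption */

        if (pf->lag && pf->lag->bonded)
            return 1;                   /* assumption */

        /* Nothing to do if the flag already matches the request */
        if (!!state == test_bit(ICE_FLAG_DCB_ENA, pf->flags))
            return 0;

        if (state) {
            set_bit(ICE_FLAG_DCB_ENA, pf->flags);
            memcpy(&pf->hw.port_info->qos_cfg.desired_dcbx_cfg,
                   &pf->hw.port_info->qos_cfg.local_dcbx_cfg,
                   sizeof(struct ice_dcbx_cfg)); /* size assumed */
        } else {
            clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
        }

        return 0;
    }
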
438 struct ice_pf *pf = ice_netdev_to_pf(netdev);
439 struct ice_port_info *pi = pf->hw.port_info;
441 if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
442 !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
449 dev_dbg(ice_pf_to_dev(pf), "Get PG config prio=%d tc=%d\n", prio,
468 struct ice_pf *pf = ice_netdev_to_pf(netdev);
472 if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
473 !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
479 if (pf->lag && pf->lag->bonded) {
484 new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
504 struct ice_pf *pf = ice_netdev_to_pf(netdev);
505 struct ice_port_info *pi = pf->hw.port_info;
507 if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
508 !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
515 dev_dbg(ice_pf_to_dev(pf), "Get PG BW config tc=%d bw_pct=%d\n",
528 struct ice_pf *pf = ice_netdev_to_pf(netdev);
531 if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
532 !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
538 if (pf->lag && pf->lag->bonded) {
543 new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
563 struct ice_pf *pf = ice_netdev_to_pf(netdev);
564 struct ice_port_info *pi = pf->hw.port_info;
566 if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
567 !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
595 struct ice_pf *pf = ice_netdev_to_pf(netdev);
597 dev_dbg(ice_pf_to_dev(pf), "Rx TC PG Config Not Supported.\n");
610 struct ice_pf *pf = ice_netdev_to_pf(netdev);
612 if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
613 !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
631 struct ice_pf *pf = ice_netdev_to_pf(netdev);
633 dev_dbg(ice_pf_to_dev(pf), "Rx BWG PG Config Not Supported.\n");
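File lines 595-597 and 631-633 are the Rx priority-group set callbacks; the hardware has no Rx PG configuration, so both are stubs that only log. The shape, per the fragments, with the parameter list trimmed to what the listing shows:

    static void ice_dcbnl_set_pg_bwg_cfg_rx_sketch(struct net_device *netdev)
    {
        struct ice_pf *pf = ice_netdev_to_pf(netdev);

        /* Rx PG configuration is not supported by this hardware */
        dev_dbg(ice_pf_to_dev(pf), "Rx BWG PG Config Not Supported.\n");
    }
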
644 struct ice_pf *pf = ice_netdev_to_pf(netdev);
646 if (!(test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)))
672 *cap = pf->dcbx_cap;
679 dev_dbg(ice_pf_to_dev(pf), "DCBX Get Capability cap=%d capval=0x%x\n",
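File lines 644-679 are the CEE capability query: it gates on ICE_FLAG_DCB_CAPABLE and, for the DCBX attribute, reports pf->dcbx_cap back through *cap. A sketch reduced to the one case the listing shows; the capid switch, the DCB_CAP_ATTR_DCBX case from the dcbnl UAPI, and the return codes are assumptions:

    static u8 ice_dcbnl_get_cap_sketch(struct net_device *netdev,
                                       int capid, u8 *cap)
    {
        struct ice_pf *pf = ice_netdev_to_pf(netdev);

        if (!(test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)))
            return 1;                   /* assumption */

        switch (capid) {                /* switch is an assumption */
        case DCB_CAP_ATTR_DCBX:
            *cap = pf->dcbx_cap;
            break;
        default:
            *cap = 0;
            break;
        }

        dev_dbg(ice_pf_to_dev(pf), "DCBX Get Capability cap=%d capval=0x%x\n",
                capid, *cap);
        return 0;
    }
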
692 struct ice_pf *pf = ice_netdev_to_pf(netdev);
698 if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
699 !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
735 struct ice_pf *pf = ice_netdev_to_pf(netdev);
746 if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) {
751 if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
754 if (!ice_is_feature_supported(pf, ICE_F_DSCP))
763 if (pf->lag && pf->lag->bonded) {
768 max_tc = pf->hw.func_caps.common_cap.maxtc;
776 mutex_lock(&pf->tc_mutex);
778 new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
779 old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
804 ret = ice_aq_set_pfc_mode(&pf->hw, ICE_AQC_PFC_DSCP_BASED_PFC,
848 ret = ice_pf_dcb_cfg(pf, new_cfg, true);
856 mutex_unlock(&pf->tc_mutex);
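File lines 735-856 are the IEEE setapp handler, which in this driver carries the DSCP-to-TC mapping. Beyond the usual guards it also requires the ICE_F_DSCP feature and bounds the priority by the function's maxtc; a DSCP entry flips the firmware to DSCP-based PFC via the admin queue before the staged config is applied. A condensed sketch; the error codes, the priority bound, the unconditional mode switch (the full handler does this only for the first DSCP entry), and the NULL admin-queue descriptor are assumptions:

    static int ice_dcbnl_setapp_sketch(struct net_device *netdev,
                                       struct dcb_app *app)
    {
        struct ice_pf *pf = ice_netdev_to_pf(netdev);
        struct ice_dcbx_cfg *new_cfg;
        u8 max_tc;
        int ret;

        if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
            return -EINVAL;             /* assumption */
        if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
            return -EINVAL;             /* assumption */
        if (!ice_is_feature_supported(pf, ICE_F_DSCP))
            return -EOPNOTSUPP;         /* assumption */
        if (pf->lag && pf->lag->bonded)
            return -EINVAL;             /* assumption */

        /* The APP priority must map to an existing traffic class */
        max_tc = pf->hw.func_caps.common_cap.maxtc;
        if (app->priority >= max_tc)
            return -EINVAL;             /* assumption */

        mutex_lock(&pf->tc_mutex);
        new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;

        /* Switch firmware PFC to DSCP-based mode (cd arg assumed NULL) */
        ret = ice_aq_set_pfc_mode(&pf->hw, ICE_AQC_PFC_DSCP_BASED_PFC, NULL);
        if (ret)
            goto out;

        /* ... record the DSCP->TC mapping in new_cfg (elided) ... */

        ret = ice_pf_dcb_cfg(pf, new_cfg, true);
    out:
        mutex_unlock(&pf->tc_mutex);
        return ret;
    }
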
869 struct ice_pf *pf = ice_netdev_to_pf(netdev);
874 if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) {
879 if (pf->lag && pf->lag->bonded) {
884 mutex_lock(&pf->tc_mutex);
885 old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
891 new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
920 !ice_is_feature_supported(pf, ICE_F_DSCP)) {
936 ret = ice_aq_set_pfc_mode(&pf->hw,
948 ret = ice_dcb_sw_dflt_cfg(pf, true, true);
950 ret = ice_pf_dcb_cfg(pf, new_cfg, true);
967 mutex_unlock(&pf->tc_mutex);
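File lines 869-967 are the matching delapp handler. The fragments show the same guard chain, then a choice on the apply path: either fall back to the software default config (ice_dcb_sw_dflt_cfg) or push the staged config (ice_pf_dcb_cfg); removing a DSCP entry also switches the firmware PFC mode back through ice_aq_set_pfc_mode(), whose mode constant is elided in the listing. A sketch of just that tail end; the selection condition is elided above and left symbolic here:

    static int ice_dcbnl_delapp_tail_sketch(struct ice_pf *pf,
                                            struct ice_dcbx_cfg *new_cfg,
                                            bool use_sw_dflt)
    {
        int ret;

        /* use_sw_dflt stands in for a condition the listing elides */
        if (use_sw_dflt)
            ret = ice_dcb_sw_dflt_cfg(pf, true, true);
        else
            ret = ice_pf_dcb_cfg(pf, new_cfg, true);

        return ret;
    }

As in setapp, the whole sequence runs under pf->tc_mutex (lines 884/967).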
977 struct ice_pf *pf = ice_netdev_to_pf(netdev);
981 if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
982 !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
985 if (pf->lag && pf->lag->bonded) {
990 new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
992 mutex_lock(&pf->tc_mutex);
994 err = ice_pf_dcb_cfg(pf, new_cfg, true);
996 mutex_unlock(&pf->tc_mutex);
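File lines 977-996 are the CEE setall callback. Unlike the IEEE path, the individual CEE setters above (e.g. lines 331-354 and 468-484) only stage values in desired_dcbx_cfg; it is this callback that finally applies them with ice_pf_dcb_cfg() under the TC mutex. A sketch; the return codes are assumptions:

    static u8 ice_dcbnl_cee_set_all_sketch(struct net_device *netdev)
    {
        struct ice_pf *pf = ice_netdev_to_pf(netdev);
        struct ice_dcbx_cfg *new_cfg;
        int err;

        if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
            !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
            return 1;                   /* assumption */

        if (pf->lag && pf->lag->bonded)
            return 1;                   /* assumption */

        new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;

        mutex_lock(&pf->tc_mutex);
        err = ice_pf_dcb_cfg(pf, new_cfg, true);
        mutex_unlock(&pf->tc_mutex);

        return err ? 1 : 0;             /* assumption */
    }
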
1044 struct ice_pf *pf;
1050 pf = ice_netdev_to_pf(netdev);
1051 pi = pf->hw.port_info;
1054 if (pf->dcbx_cap & DCB_CAP_DCBX_HOST)
1058 if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
1105 * @pf: the corresponding PF
1113 ice_dcbnl_flush_apps(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
1116 struct ice_vsi *main_vsi = ice_get_main_vsi(pf);
1138 struct ice_pf *pf;
1140 pf = ice_netdev_to_pf(netdev);
1141 if (!test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
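File lines 1138-1141 close the listing with the setup hook: the dcbnl ops are only wired to the netdev when the function reports DCB capability. A sketch, assuming the enclosing function receives the netdev directly and that the ops table is named dcbnl_ops (neither appears in the fragments):

    static void ice_dcbnl_setup_sketch(struct net_device *netdev)
    {
        struct ice_pf *pf;

        pf = ice_netdev_to_pf(netdev);

        /* Only advertise DCB netlink support on DCB-capable functions */
        if (!test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
            return;

        netdev->dcbnl_ops = &dcbnl_ops; /* table name is an assumption */
    }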