Lines Matching refs:bp

27 static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
34 rc = hwrm_req_init(bp, req, HWRM_FWD_ASYNC_EVENT_CMPL);
48 rc = hwrm_req_send(bp, req);
51 netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
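
The three matches above trace the usual HWRM request lifecycle for this helper: init the typed request, send it, and log on failure. A minimal sketch of that pattern, assuming the driver's hwrm_req_* helpers and the conventional hwrm_fwd_async_event_cmpl_input request type; anything not shown in the matches is illustrative only.

        static int example_fwd_async_event(struct bnxt *bp, u16 event_id)
        {
                struct hwrm_fwd_async_event_cmpl_input *req;
                int rc;

                /* Allocate and fill a typed HWRM request. */
                rc = hwrm_req_init(bp, req, HWRM_FWD_ASYNC_EVENT_CMPL);
                if (rc)
                        return rc;

                /* ... encapsulate the async event completion (event_id) here ... */

                rc = hwrm_req_send(bp, req);    /* the request is released by send */
                if (rc)
                        netdev_err(bp->dev,
                                   "hwrm_fwd_async_event_cmpl failed. rc:%d\n", rc);
                return rc;
        }
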
56 static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
58 if (!bp->pf.active_vfs) {
59 netdev_err(bp->dev, "vf ndo called while sriov is disabled\n");
62 if (vf_id >= bp->pf.active_vfs) {
63 netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
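
The two checks above form the guard used by every VF ndo in this file. A compact sketch, using only the bp->pf.active_vfs count shown in the matches (the -EINVAL error codes are assumed):

        static int example_vf_ndo_prep(struct bnxt *bp, int vf_id)
        {
                /* Reject ndo calls while SR-IOV is disabled. */
                if (!bp->pf.active_vfs) {
                        netdev_err(bp->dev, "VF ndo called while SR-IOV is disabled\n");
                        return -EINVAL;
                }
                /* Reject VF ids beyond the number of active VFs. */
                if (vf_id >= bp->pf.active_vfs) {
                        netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
                        return -EINVAL;
                }
                return 0;
        }
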
71 struct bnxt *bp = netdev_priv(dev);
78 if (bp->hwrm_spec_code < 0x10701)
81 rc = bnxt_vf_ndo_prep(bp, vf_id);
85 vf = &bp->pf.vf[vf_id];
98 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
102 rc = hwrm_req_send(bp, req);
113 static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
119 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
123 req->fid = cpu_to_le16(BNXT_PF(bp) ? vf->fw_fid : 0xffff);
124 resp = hwrm_req_hold(bp, req);
125 rc = hwrm_req_send(bp, req);
128 hwrm_req_drop(bp, req);
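
The matches above show the hold/send/drop variant of the HWRM lifecycle: the response buffer stays valid only while the request is held. A sketch of that pattern; the resp->flags field and the flags out-parameter are assumptions based on the FUNC_QCFG response layout.

        static int example_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf,
                                           u16 *flags)
        {
                struct hwrm_func_qcfg_output *resp;
                struct hwrm_func_qcfg_input *req;
                int rc;

                rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
                if (rc)
                        return rc;

                /* The PF queries the VF's fid; a VF queries itself with 0xffff. */
                req->fid = cpu_to_le16(BNXT_PF(bp) ? vf->fw_fid : 0xffff);

                resp = hwrm_req_hold(bp, req);  /* keep resp valid across send */
                rc = hwrm_req_send(bp, req);
                if (!rc)
                        *flags = le16_to_cpu(resp->flags);
                hwrm_req_drop(bp, req);         /* release both req and resp */
                return rc;
        }
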
132 bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
134 if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
137 bnxt_hwrm_func_qcfg_flags(bp, vf);
141 static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
146 if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
149 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
158 return hwrm_req_send(bp, req);
163 struct bnxt *bp = netdev_priv(dev);
166 if (bnxt_vf_ndo_prep(bp, vf_id))
169 vf = &bp->pf.vf[vf_id];
175 bnxt_hwrm_set_trusted_vf(bp, vf);
182 struct bnxt *bp = netdev_priv(dev);
186 rc = bnxt_vf_ndo_prep(bp, vf_id);
191 vf = &bp->pf.vf[vf_id];
205 ivi->trusted = bnxt_is_trusted_vf(bp, vf);
218 struct bnxt *bp = netdev_priv(dev);
223 rc = bnxt_vf_ndo_prep(bp, vf_id);
233 vf = &bp->pf.vf[vf_id];
235 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
244 return hwrm_req_send(bp, req);
250 struct bnxt *bp = netdev_priv(dev);
256 if (bp->hwrm_spec_code < 0x10201)
262 rc = bnxt_vf_ndo_prep(bp, vf_id);
272 vf = &bp->pf.vf[vf_id];
277 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
282 rc = hwrm_req_send(bp, req);
292 struct bnxt *bp = netdev_priv(dev);
298 rc = bnxt_vf_ndo_prep(bp, vf_id);
302 vf = &bp->pf.vf[vf_id];
303 pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
305 netdev_info(bp->dev, "max tx rate %d exceeds PF link speed for VF %d\n",
311 netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
317 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
324 rc = hwrm_req_send(bp, req);
335 struct bnxt *bp = netdev_priv(dev);
339 rc = bnxt_vf_ndo_prep(bp, vf_id);
343 vf = &bp->pf.vf[vf_id];
357 netdev_err(bp->dev, "Invalid link option\n");
362 rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
367 static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
373 vf = &bp->pf.vf[i];
379 static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
382 struct bnxt_pf_info *pf = &bp->pf;
385 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESC_FREE);
389 hwrm_req_hold(bp, req);
392 rc = hwrm_req_send(bp, req);
396 hwrm_req_drop(bp, req);
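
Here a single held request is re-sent once per VF before being dropped. A sketch of that loop; the first_vf_id parameter and the req->vf_id field are assumptions, not taken from the matches.

        static int example_vf_resc_free(struct bnxt *bp, u16 first_vf_id, int num_vfs)
        {
                struct hwrm_func_vf_resc_free_input *req;
                int i, rc;

                rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESC_FREE);
                if (rc)
                        return rc;

                /* Holding the request allows it to be sent repeatedly. */
                hwrm_req_hold(bp, req);
                for (i = 0; i < num_vfs; i++) {
                        req->vf_id = cpu_to_le16(first_vf_id + i);
                        rc = hwrm_req_send(bp, req);
                        if (rc)
                                break;
                }
                hwrm_req_drop(bp, req);
                return rc;
        }
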
400 static void bnxt_free_vf_resources(struct bnxt *bp)
402 struct pci_dev *pdev = bp->pdev;
405 kfree(bp->pf.vf_event_bmap);
406 bp->pf.vf_event_bmap = NULL;
409 if (bp->pf.hwrm_cmd_req_addr[i]) {
411 bp->pf.hwrm_cmd_req_addr[i],
412 bp->pf.hwrm_cmd_req_dma_addr[i]);
413 bp->pf.hwrm_cmd_req_addr[i] = NULL;
417 bp->pf.active_vfs = 0;
418 kfree(bp->pf.vf);
419 bp->pf.vf = NULL;
422 static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
424 struct pci_dev *pdev = bp->pdev;
427 bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
428 if (!bp->pf.vf)
431 bnxt_set_vf_attr(bp, num_vfs);
439 bp->pf.hwrm_cmd_req_addr[i] =
441 &bp->pf.hwrm_cmd_req_dma_addr[i],
444 if (!bp->pf.hwrm_cmd_req_addr[i])
448 struct bnxt_vf_info *vf = &bp->pf.vf[k];
450 vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
453 bp->pf.hwrm_cmd_req_dma_addr[i] + j *
460 bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
461 if (!bp->pf.vf_event_bmap)
464 bp->pf.hwrm_cmd_req_pages = nr_pages;
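
The allocation path above sets up a bnxt_vf_info array, DMA-coherent pages carved into per-VF HWRM command buffers, and a small event bitmap. A simplified sketch; the per-VF buffer size, the pages-per-VF math and the vf->hwrm_cmd_req_dma_addr field are assumptions.

        #define EX_VF_REQ_SIZE          128     /* illustrative per-VF buffer size */
        #define EX_REQS_PER_PAGE        (PAGE_SIZE / EX_VF_REQ_SIZE)

        static int example_alloc_vf_resources(struct bnxt *bp, int num_vfs)
        {
                struct pci_dev *pdev = bp->pdev;
                int nr_pages = DIV_ROUND_UP(num_vfs, EX_REQS_PER_PAGE);
                int i, j, k = 0;

                bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
                if (!bp->pf.vf)
                        return -ENOMEM;

                /* At most four pages get registered with firmware (addr0..addr3). */
                for (i = 0; i < nr_pages; i++) {
                        bp->pf.hwrm_cmd_req_addr[i] =
                                dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
                                                   &bp->pf.hwrm_cmd_req_dma_addr[i],
                                                   GFP_KERNEL);
                        if (!bp->pf.hwrm_cmd_req_addr[i])
                                return -ENOMEM; /* caller unwinds partial allocations */

                        /* Carve each page into per-VF command buffers. */
                        for (j = 0; j < EX_REQS_PER_PAGE && k < num_vfs; j++, k++) {
                                struct bnxt_vf_info *vf = &bp->pf.vf[k];

                                vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
                                                        j * EX_VF_REQ_SIZE;
                                vf->hwrm_cmd_req_dma_addr =
                                        bp->pf.hwrm_cmd_req_dma_addr[i] +
                                        j * EX_VF_REQ_SIZE;
                        }
                }

                bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
                if (!bp->pf.vf_event_bmap)
                        return -ENOMEM;

                bp->pf.hwrm_cmd_req_pages = nr_pages;
                return 0;
        }
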
468 static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
473 rc = hwrm_req_init(bp, req, HWRM_FUNC_BUF_RGTR);
477 req->req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
480 req->req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
481 req->req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
482 req->req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
483 req->req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);
485 return hwrm_req_send(bp, req);
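
FUNC_BUF_RGTR then hands those page addresses to firmware so forwarded VF commands can be placed in them. A trimmed sketch; the page-size and length fields the real request also carries are omitted here.

        static int example_func_buf_rgtr(struct bnxt *bp)
        {
                struct hwrm_func_buf_rgtr_input *req;
                int rc;

                rc = hwrm_req_init(bp, req, HWRM_FUNC_BUF_RGTR);
                if (rc)
                        return rc;

                /* Tell firmware where the per-VF command buffers live. */
                req->req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
                req->req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
                /* req_buf_page_addr1..3 are filled the same way when present. */

                return hwrm_req_send(bp, req);
        }
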
488 static int __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
494 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
498 vf = &bp->pf.vf[vf_id];
518 return hwrm_req_send(bp, req);
524 static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
527 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
530 struct bnxt_pf_info *pf = &bp->pf;
535 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESOURCE_CFG);
539 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
540 vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
543 vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
545 vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp);
546 vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp);
547 if (bp->flags & BNXT_FLAG_AGG_RINGS)
548 vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
550 vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
551 vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
552 vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
553 vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs;
568 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
574 if ((bp->fw_cap & BNXT_FW_CAP_PRE_RESV_VNICS) &&
605 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
608 hwrm_req_hold(bp, req);
611 __bnxt_set_vf_params(bp, i);
614 rc = hwrm_req_send(bp, req);
633 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
638 hwrm_req_drop(bp, req);
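
Most of the arithmetic in this function computes what is left for VFs after the PF's own usage is subtracted; with aggregation rings enabled each RX ring costs two hardware RX rings, and on P5+ chips the MSI-X budget comes from the spare NQs. A sketch of just that availability math, using only fields visible in the matches:

        static void example_vf_resc_avail(struct bnxt *bp, u16 *rx, u16 *tx, u16 *vnics)
        {
                struct bnxt_hw_resc *hw_resc = &bp->hw_resc;

                /* Aggregation rings double the HW RX rings consumed by the PF. */
                if (bp->flags & BNXT_FLAG_AGG_RINGS)
                        *rx = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
                else
                        *rx = hw_resc->max_rx_rings - bp->rx_nr_rings;

                *tx = hw_resc->max_tx_rings - bp->tx_nr_rings;
                *vnics = hw_resc->max_vnics - bp->nr_vnics;
        }
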
645 static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
648 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
649 struct bnxt_pf_info *pf = &bp->pf;
656 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
661 vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs;
662 vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp) / num_vfs;
663 if (bp->flags & BNXT_FLAG_AGG_RINGS)
664 vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
667 vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
669 vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
670 vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
671 vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
685 mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
700 hwrm_req_hold(bp, req);
705 rc = hwrm_req_send(bp, req);
710 rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
716 hwrm_req_drop(bp, req);
730 static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset)
732 if (BNXT_NEW_RM(bp))
733 return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, reset);
735 return bnxt_hwrm_func_cfg(bp, num_vfs);
738 int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
743 rc = bnxt_hwrm_func_buf_rgtr(bp);
748 rc = bnxt_func_cfg(bp, *num_vfs, reset);
751 netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
755 netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n",
763 static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
767 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
777 avail_cp = bnxt_get_avail_cp_rings_for_en(bp);
778 avail_stat = bnxt_get_avail_stat_ctxs_for_en(bp);
786 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
787 if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
791 if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
795 if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
799 if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
803 if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
814 netdev_err(bp->dev, "Cannot enable VFs as all resources are used by PF\n");
819 netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
824 rc = bnxt_alloc_vf_resources(bp, *num_vfs);
828 rc = bnxt_cfg_hw_sriov(bp, num_vfs, false);
832 rc = pci_enable_sriov(bp->pdev, *num_vfs);
836 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
840 devl_lock(bp->dl);
841 rc = bnxt_vf_reps_create(bp);
842 devl_unlock(bp->dl);
844 netdev_info(bp->dev, "Cannot enable VFs as representors cannot be created\n");
852 pci_disable_sriov(bp->pdev);
856 bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);
859 bnxt_hwrm_func_qcaps(bp);
862 bnxt_free_vf_resources(bp);
867 void bnxt_sriov_disable(struct bnxt *bp)
869 u16 num_vfs = pci_num_vf(bp->pdev);
875 devl_lock(bp->dl);
876 bnxt_vf_reps_destroy(bp);
878 if (pci_vfs_assigned(bp->pdev)) {
880 bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
881 netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
884 pci_disable_sriov(bp->pdev);
886 bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
888 devl_unlock(bp->dl);
890 bnxt_free_vf_resources(bp);
894 bnxt_restore_pf_fw_resources(bp);
901 struct bnxt *bp = netdev_priv(dev);
903 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
914 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
919 bp->sriov_cfg = true;
922 if (pci_vfs_assigned(bp->pdev)) {
929 if (num_vfs && num_vfs == bp->pf.active_vfs)
933 bnxt_sriov_disable(bp);
937 bnxt_sriov_enable(bp, &num_vfs);
940 bp->sriov_cfg = false;
941 wake_up(&bp->sriov_cfg_wait);
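
The configure callback above brackets the whole operation with bp->sriov_cfg and a wait queue so other paths can wait for SR-IOV configuration to finish. A trimmed sketch of that flow (the locking, MSI-X and FW-reset checks visible in the matches are omitted):

        static int example_sriov_configure(struct net_device *dev, int num_vfs)
        {
                struct bnxt *bp = netdev_priv(dev);
                int rc = 0;

                bp->sriov_cfg = true;           /* signal config in progress */

                if (pci_vfs_assigned(bp->pdev)) {
                        /* VFs attached to guests cannot be reconfigured. */
                        rc = -EBUSY;
                        goto done;
                }

                if (num_vfs && num_vfs == bp->pf.active_vfs)
                        goto done;              /* nothing to change */

                /* Tear down any existing VFs, then (re)enable if requested. */
                bnxt_sriov_disable(bp);
                if (num_vfs)
                        bnxt_sriov_enable(bp, &num_vfs);

        done:
                bp->sriov_cfg = false;
                wake_up(&bp->sriov_cfg_wait);   /* release anyone waiting on the flag */
                return rc ? rc : num_vfs;
        }
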
946 static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
956 rc = hwrm_req_init(bp, req, HWRM_FWD_RESP);
966 rc = hwrm_req_send(bp, req);
969 netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
973 static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
982 rc = hwrm_req_init(bp, req, HWRM_REJECT_FWD_RESP);
989 rc = hwrm_req_send(bp, req);
992 netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
996 static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
1005 rc = hwrm_req_init(bp, req, HWRM_EXEC_FWD_RESP);
1012 rc = hwrm_req_send(bp, req);
1015 netdev_err(bp->dev, "hwrm_exec_fwd_resp failed. rc:%d\n", rc);
1019 static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
1029 bool trust = bnxt_is_trusted_vf(bp, vf);
1035 return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
1037 return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
1039 return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
1042 static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
1050 return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
1057 if (bnxt_is_trusted_vf(bp, vf)) {
1075 return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
1076 return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
1079 static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
1086 bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
1093 mutex_lock(&bp->link_lock);
1094 memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
1096 mutex_unlock(&bp->link_lock);
1125 rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
1133 static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
1141 rc = bnxt_vf_configure_mac(bp, vf);
1144 rc = bnxt_vf_validate_set_mac(bp, vf);
1151 bp, vf, sizeof(struct hwrm_func_cfg_input));
1154 rc = bnxt_vf_set_link(bp, vf);
1162 void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
1164 u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;
1168 vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
1172 clear_bit(vf_id, bp->pf.vf_event_bmap);
1173 bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
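
The handler above walks the VF event bitmap set by the async-event path, clearing each bit before forwarding the pending request. A sketch of that scan, built from the find_next_bit/clear_bit calls shown in the matches:

        static void example_exec_fwd_req(struct bnxt *bp)
        {
                u32 active_vfs = bp->pf.active_vfs;
                u32 i = 0, vf_id;

                while (i < active_vfs) {
                        vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
                        if (vf_id >= active_vfs)
                                break;          /* no more pending VF requests */

                        clear_bit(vf_id, bp->pf.vf_event_bmap);
                        bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
                        i = vf_id + 1;
                }
        }
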
1178 int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict)
1183 if (!BNXT_VF(bp))
1186 if (bp->hwrm_spec_code < 0x10202) {
1187 if (is_valid_ether_addr(bp->vf.mac_addr))
1192 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
1199 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT);
1200 rc = hwrm_req_send(bp, req);
1204 netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
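
On the VF side, bnxt_approve_mac asks the PF (via FUNC_VF_CFG) to accept a MAC address, sending the request silently so a rejection surfaces as a warning rather than an HWRM error. A sketch; the enables/dflt_mac_addr request fields are assumed from the FUNC_VF_CFG layout.

        static int example_approve_mac(struct bnxt *bp, const u8 *mac)
        {
                struct hwrm_func_vf_cfg_input *req;
                int rc;

                if (!BNXT_VF(bp))
                        return 0;               /* only VFs need PF approval */

                rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
                if (rc)
                        return rc;

                req->enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
                memcpy(req->dflt_mac_addr, mac, ETH_ALEN);

                /* Silent: a PF rejection is expected and handled by the caller. */
                hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT);
                rc = hwrm_req_send(bp, req);
                if (rc)
                        netdev_warn(bp->dev,
                                    "VF MAC address %pM not approved by the PF\n", mac);
                return rc;
        }
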
1211 void bnxt_update_vf_mac(struct bnxt *bp)
1217 if (hwrm_req_init(bp, req, HWRM_FUNC_QCAPS))
1222 resp = hwrm_req_hold(bp, req);
1223 if (hwrm_req_send(bp, req))
1233 if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr)) {
1234 memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);
1238 if (!is_valid_ether_addr(bp->vf.mac_addr))
1243 if (is_valid_ether_addr(bp->vf.mac_addr))
1244 eth_hw_addr_set(bp->dev, bp->vf.mac_addr);
1246 hwrm_req_drop(bp, req);
1248 bnxt_approve_mac(bp, bp->dev->dev_addr, false);
1253 int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
1260 void bnxt_sriov_disable(struct bnxt *bp)
1264 void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
1266 netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
1269 void bnxt_update_vf_mac(struct bnxt *bp)
1273 int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict)