Lines Matching refs:rc

1727 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1729 if (rc < 0)
1983 int rc = 0;
2026 rc = -ENOMEM;
2029 rc = 1;
2074 rc = -EIO;
2105 rc = 1;
2210 rc = 1;
2223 return rc;
2227 rc = -ENOMEM;
2243 int rc;
2273 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2274 if (rc && rc != -EBUSY)
2276 return rc;
2834 int rc;
2872 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2874 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2876 if (likely(rc >= 0))
2877 rx_pkts += rc;
2878 /* Increment rx_pkts when rc is -ENOMEM to count towards
2883 else if (rc == -ENOMEM && budget)
2885 else if (rc == -EBUSY) /* partial completion */
2970 int rc;
2995 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2996 if (likely(rc == -EIO) && budget)
2998 else if (rc == -EBUSY) /* partial completion */
3581 int i, rc = 0, agg_rings = 0;
3595 rc = bnxt_alloc_rx_page_pool(bp, rxr);
3596 if (rc)
3597 return rc;
3599 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3600 if (rc < 0)
3601 return rc;
3603 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3606 if (rc) {
3608 return rc;
3611 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3612 if (rc)
3613 return rc;
3620 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3621 if (rc)
3622 return rc;
3633 rc = bnxt_alloc_tpa_info(bp);
3634 return rc;
3672 int i, j, rc;
3697 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3698 if (rc)
3699 return rc;
3776 int rc;
3780 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
3781 if (rc)
3782 return rc;
3829 int rc;
3831 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
3832 if (rc) {
3843 rc = bnxt_alloc_ring(bp, rmem);
3844 if (rc) {
3848 return rc;
3854 int i, j, rc, ulp_base_vec, ulp_msix;
3875 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3876 if (rc)
3877 return rc;
3908 rc = bnxt_alloc_cp_sub_ring(bp, cpr2);
3909 if (rc)
3910 return rc;
4125 int i, rc = 0;
4136 rc = bnxt_init_one_rx_ring(bp, i);
4137 if (rc)
4141 return rc;
4473 int i, rc = 0, size;
4487 rc = -ENOMEM;
4501 rc = -ENOMEM;
4516 rc = -ENOMEM;
4535 rc = -ENOMEM;
4545 return rc;
4633 int rc;
4639 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
4640 if (rc)
4641 return rc;
4647 rc = hwrm_req_send(bp, req);
4648 if (!rc) {
4653 return rc;
4665 int rc, rx_count, tx_count;
4672 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
4673 if (rc) {
4690 rc = bnxt_hwrm_port_qstats(bp, flags);
4691 if (rc) {
4713 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4714 if (rc) {
4758 int rc;
4767 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4768 if (rc)
4769 return rc;
4781 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
4782 if (rc)
4783 return rc;
4797 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
4799 if (rc)
4809 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4811 if (rc)
4923 int i, rc = 0;
4935 rc = -ENOMEM;
4937 return rc;
4999 int i, j, rc, size, arr_size;
5095 rc = bnxt_alloc_stats(bp);
5096 if (rc)
5100 rc = bnxt_alloc_ntp_fltrs(bp);
5101 if (rc)
5104 rc = bnxt_alloc_vnics(bp);
5105 if (rc)
5109 rc = bnxt_alloc_all_cp_arrays(bp);
5110 if (rc)
5115 rc = bnxt_alloc_rx_rings(bp);
5116 if (rc)
5119 rc = bnxt_alloc_tx_rings(bp);
5120 if (rc)
5123 rc = bnxt_alloc_cp_rings(bp);
5124 if (rc)
5134 rc = bnxt_alloc_vnic_attributes(bp);
5135 if (rc)
5141 return rc;
5208 int rc, i;
5210 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
5211 if (rc)
5212 return rc;
5284 rc = hwrm_req_send(bp, req);
5285 if (!rc) {
5292 return rc;
5298 int rc;
5303 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
5304 if (rc)
5305 return rc;
5314 int rc;
5323 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
5324 if (rc)
5325 return rc;
5349 rc = hwrm_req_send(bp, req);
5350 if (rc)
5351 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
5352 rc);
5355 return rc;
5363 int rc;
5365 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
5366 if (rc)
5367 return rc;
5373 rc = hwrm_req_send(bp, req);
5374 if (rc) {
5375 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
5376 rc);
5403 return rc;
5410 int rc;
5412 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
5413 if (rc)
5414 return rc;
5594 int rc;
5606 rc = bnxt_init_l2_filter(bp, fltr, key, idx);
5608 if (rc) {
5610 fltr = ERR_PTR(rc);
5621 int rc;
5637 rc = bnxt_init_l2_filter(bp, fltr, key, idx);
5638 if (rc) {
5641 return ERR_PTR(rc);
5664 int rc;
5677 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
5678 if (rc)
5679 return rc;
5691 int rc;
5701 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
5702 if (rc)
5703 return rc;
5730 rc = hwrm_req_send(bp, req);
5731 if (!rc) {
5736 return rc;
5743 int rc;
5746 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
5747 if (rc)
5748 return rc;
5812 int rc;
5814 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
5815 if (rc)
5816 return rc;
5862 rc = hwrm_req_send(bp, req);
5863 if (!rc)
5866 return rc;
5874 int rc;
5883 rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
5884 if (rc)
5888 return rc;
5938 int rc;
5943 rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
5944 if (rc)
5945 return rc;
6153 int rc;
6159 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6160 if (rc)
6161 return rc;
6175 int rc;
6177 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6178 if (rc)
6179 return rc;
6194 rc = hwrm_req_send(bp, req);
6195 if (rc)
6201 return rc;
6228 int rc;
6230 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
6231 if (rc)
6232 return rc;
6285 int rc;
6287 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
6288 if (rc)
6289 return rc;
6292 rc = hwrm_req_send(bp, req);
6293 if (!rc)
6298 return rc;
6315 int rc;
6317 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
6318 if (rc)
6319 return rc;
6414 int rc;
6416 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
6417 if (rc)
6418 return rc;
6441 rc = hwrm_req_send(bp, req);
6442 if (!rc)
6445 return rc;
6452 int rc;
6460 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
6461 if (rc)
6462 return rc;
6465 rc = hwrm_req_send(bp, req);
6466 if (!rc) {
6506 return rc;
6513 int rc;
6519 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
6520 if (rc)
6521 return rc;
6532 rc = hwrm_req_send(bp, req);
6534 if (rc)
6541 return rc;
6576 int rc, err = 0;
6579 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
6580 if (rc)
6674 rc = hwrm_req_send(bp, req);
6680 if (rc || err) {
6681 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
6682 ring_type, rc, err);
6686 return rc;
6691 int rc;
6696 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
6697 if (rc)
6698 return rc;
6707 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
6708 if (rc)
6709 return rc;
6788 int i, rc = 0;
6804 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6805 if (rc) {
6815 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
6816 if (rc)
6835 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
6836 if (rc)
6844 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6845 if (rc)
6857 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6858 if (rc)
6871 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
6872 if (rc)
6889 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6890 if (rc)
6901 return rc;
6911 int rc;
6916 rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
6917 if (rc)
6925 rc = hwrm_req_send(bp, req);
6929 if (rc || error_code) {
6930 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
6931 ring_type, rc, error_code);
7046 int rc;
7051 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7052 if (rc)
7053 return rc;
7057 rc = hwrm_req_send(bp, req);
7058 if (rc) {
7060 return rc;
7082 rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
7083 if (rc)
7098 return rc;
7105 int rc;
7110 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7111 if (rc)
7112 return rc;
7116 rc = hwrm_req_send(bp, req);
7117 if (!rc)
7121 return rc;
7216 int rc;
7227 rc = hwrm_req_send(bp, req);
7228 if (rc)
7229 return rc;
7241 int rc;
7252 rc = hwrm_req_send(bp, req);
7253 if (rc)
7254 return rc;
7415 int rx_rings, rc;
7438 rc = bnxt_hwrm_reserve_rings(bp, &hwr);
7439 if (rc)
7440 return rc;
7464 rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
7494 return rc;
7558 int rc;
7577 rc = hwrm_req_send_silent(bp, req);
7578 if (!rc) {
7671 int rc;
7676 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
7677 if (rc)
7678 return rc;
7696 int rc;
7709 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
7710 if (rc)
7711 return rc;
7735 int i, rc;
7742 rc = hwrm_req_send(bp, req);
7743 if (rc)
7744 return rc;
7754 int i, rc;
7756 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
7757 if (rc)
7758 return rc;
7760 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
7761 if (rc) {
7763 return rc;
7776 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
7778 rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx);
7779 if (rc)
7786 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
7787 if (rc)
7798 return rc;
7847 int rc, i;
7852 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
7853 if (rc)
7854 return rc;
7866 rc = hwrm_req_send(bp, req);
7867 if (rc)
7875 return rc;
7883 int rc;
7885 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7886 if (rc)
7887 return rc;
7891 rc = hwrm_req_send(bp, req);
7892 if (rc)
7954 return rc;
8000 int rc;
8002 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
8003 if (rc)
8004 return rc;
8020 rc = hwrm_req_send(bp, req);
8021 if (rc)
8045 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX);
8049 return rc;
8056 int rc;
8064 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
8065 if (rc)
8066 return rc;
8069 rc = hwrm_req_send_silent(bp, req);
8070 if (!rc) {
8080 rc = -ENOMEM;
8160 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX);
8162 rc = 0;
8166 return rc;
8207 int rc;
8215 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
8216 if (rc)
8217 return rc;
8343 int rc;
8363 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
8364 if (rc)
8365 return rc;
8385 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
8386 if (rc)
8394 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
8396 return rc;
8433 int i, rc = 0, n = 1;
8444 for (i = 0; i < n && !rc; i++) {
8446 rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
8449 return rc;
8458 int i, j, rc = 0, n = 1;
8469 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
8470 if (rc)
8471 return rc;
8478 for (i = 0, j = 0; j < n && !rc; i++) {
8494 rc = hwrm_req_send(bp, req);
8497 return rc;
8505 int rc = 0;
8519 rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
8520 if (rc)
8521 return rc;
8567 int i, rc;
8569 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
8570 if (rc) {
8571 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
8572 rc);
8573 return rc;
8599 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps,
8601 if (rc)
8602 return rc;
8605 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl);
8606 if (rc)
8607 return rc;
8610 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries +
8612 if (rc)
8613 return rc;
8616 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
8617 if (rc)
8618 return rc;
8621 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
8622 if (rc)
8623 return rc;
8638 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
8639 if (rc)
8640 return rc;
8644 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1);
8645 if (rc)
8646 return rc;
8654 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2);
8655 if (rc)
8656 return rc;
8660 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2);
8661 if (rc)
8662 return rc;
8668 rc = bnxt_backing_store_cfg_v2(bp, ena);
8670 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
8671 if (rc) {
8672 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
8673 rc);
8674 return rc;
8685 int rc;
8687 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
8688 if (rc)
8689 return rc;
8693 rc = hwrm_req_send_silent(bp, req);
8694 if (rc)
8735 return rc;
8745 int rc;
8748 rc = -ENODEV;
8752 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
8753 if (rc)
8758 rc = hwrm_req_send(bp, req);
8759 if (rc)
8764 rc = -ENODEV;
8770 rc = -ENOMEM;
8783 rc = -ENODEV;
8787 rc = bnxt_ptp_init(bp, phc_cfg);
8788 if (rc)
8792 if (!rc)
8799 return rc;
8808 int rc;
8810 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
8811 if (rc)
8812 return rc;
8816 rc = hwrm_req_send(bp, req);
8817 if (rc)
8914 return rc;
8921 int rc;
8927 rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
8928 if (rc)
8933 rc = hwrm_req_send(bp, req);
8934 if (rc)
8947 int rc;
8949 rc = __bnxt_hwrm_func_qcaps(bp);
8950 if (rc)
8951 return rc;
8955 rc = bnxt_hwrm_queue_qportcfg(bp);
8956 if (rc) {
8957 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
8958 return rc;
8961 rc = bnxt_alloc_ctx_mem(bp);
8962 if (rc)
8963 return rc;
8964 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
8965 if (!rc)
8976 int rc;
8981 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
8982 if (rc)
8983 return rc;
8986 rc = hwrm_req_send(bp, req);
8987 if (rc)
9005 return rc;
9023 int rc;
9029 rc = __bnxt_alloc_fw_health(bp);
9030 if (rc) {
9033 return rc;
9157 int rc, i;
9162 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
9163 if (rc)
9164 return rc;
9167 rc = hwrm_req_send(bp, req);
9168 if (rc)
9173 rc = -EINVAL;
9197 rc = -EINVAL;
9210 if (!rc)
9211 rc = bnxt_map_fw_health_regs(bp);
9212 if (rc)
9214 return rc;
9220 int rc;
9222 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
9223 if (rc)
9224 return rc;
9247 int rc = 0;
9249 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
9250 if (rc)
9251 return rc;
9254 rc = hwrm_req_send(bp, req);
9255 if (rc)
9259 rc = -EINVAL;
9289 return rc;
9295 int rc;
9297 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
9298 if (rc)
9299 return rc;
9306 rc = hwrm_req_send(bp, req);
9307 return rc;
9316 int rc, len;
9318 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
9319 if (rc)
9320 return rc;
9329 rc = hwrm_req_send(bp, req);
9330 if (rc)
9428 return rc;
9436 int rc;
9443 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
9444 if (rc)
9445 return rc;
9543 int rc;
9551 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
9552 if (rc)
9553 return rc;
9571 int rc;
9579 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
9580 if (rc)
9581 return rc;
9592 rc = hwrm_req_send(bp, req_qs);
9593 if (!rc) {
9609 return rc;
9614 return rc;
9617 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
9618 if (rc)
9619 return rc;
9624 rc = hwrm_req_send(bp, req_qc);
9625 if (!rc) {
9639 return rc;
9650 return rc;
9663 int rc, i;
9671 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
9672 if (rc) {
9673 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
9674 i, rc);
9675 return rc;
9724 int rc;
9733 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
9734 if (rc)
9735 return rc;
9746 int rc;
9751 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
9752 if (rc)
9753 return rc;
9767 int rc;
9773 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
9774 if (rc) {
9775 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
9776 vnic_id, rc);
9782 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
9783 if (rc) {
9784 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
9785 vnic_id, rc);
9793 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
9794 if (rc) {
9795 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
9796 vnic_id, rc);
9801 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
9802 if (rc) {
9803 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
9804 vnic_id, rc);
9809 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
9810 if (rc) {
9811 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
9812 vnic_id, rc);
9817 return rc;
9822 int rc, i, nr_ctxs;
9826 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
9827 if (rc) {
9828 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
9829 vnic_id, i, rc);
9837 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
9838 if (rc) {
9839 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
9840 vnic_id, rc);
9841 return rc;
9843 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
9844 if (rc) {
9845 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
9846 vnic_id, rc);
9847 return rc;
9850 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
9851 if (rc) {
9852 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
9853 vnic_id, rc);
9856 return rc;
9870 int rc;
9872 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, start_rx_ring_idx, rx_rings);
9873 if (rc) {
9874 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
9875 vnic_id, rc);
9876 return rc;
9883 int i, rc = 0;
9907 return rc;
9922 unsigned int rc = 0;
9924 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
9925 if (rc) {
9927 rc);
9928 return rc;
9931 rc = bnxt_hwrm_vnic_cfg(bp, 1);
9932 if (rc) {
9934 rc);
9935 return rc;
9937 return rc;
9946 int rc = 0;
9950 rc = bnxt_hwrm_stat_ctx_alloc(bp);
9951 if (rc) {
9952 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
9953 rc);
9958 rc = bnxt_hwrm_ring_alloc(bp);
9959 if (rc) {
9960 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
9964 rc = bnxt_hwrm_ring_grp_alloc(bp);
9965 if (rc) {
9966 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
9974 rc = bnxt_hwrm_vnic_alloc(bp, BNXT_VNIC_DEFAULT, 0, rx_nr_rings);
9975 if (rc) {
9976 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
9983 rc = bnxt_setup_vnic(bp, BNXT_VNIC_DEFAULT);
9984 if (rc)
9990 rc = bnxt_alloc_rfs_vnics(bp);
9991 if (rc)
9996 rc = bnxt_set_tpa(bp, true);
9997 if (rc)
10005 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
10006 if (rc) {
10007 if (BNXT_VF(bp) && rc == -ENODEV)
10010 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
10035 rc = bnxt_cfg_rx_mode(bp);
10036 if (rc)
10040 rc = bnxt_hwrm_set_coal(bp);
10041 if (rc)
10042 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
10043 rc);
10046 rc = bnxt_setup_nitroa0_vnic(bp);
10047 if (rc)
10048 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
10049 rc);
10062 return rc;
10084 int rc;
10087 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
10089 if (rc)
10090 return rc;
10092 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
10093 if (rc)
10094 return rc;
10101 return rc;
10156 int tx_saved = tx_cp, rc;
10158 rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh);
10159 if (rc)
10160 return rc;
10220 int rc;
10223 rc = bnxt_init_int_mode(bp);
10224 if (rc || !bp->irq_tbl)
10225 return rc ?: -ENODEV;
10233 rc = bnxt_set_real_num_queues(bp);
10234 return rc;
10330 int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp;
10356 rc = -ENODEV;
10367 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
10369 if (rc)
10378 rc = -ENOMEM;
10386 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
10391 return rc;
10411 int rc = -ENODEV;
10414 rc = bnxt_init_msix(bp);
10418 rc = bnxt_init_inta(bp);
10420 return rc;
10437 int rc;
10448 rc = __bnxt_reserve_rings(bp);
10450 if (!rc)
10451 rc = bnxt_init_int_mode(bp);
10452 bnxt_ulp_irq_restart(bp, rc);
10454 if (rc) {
10455 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
10456 return rc;
10503 int i, j, rc = 0;
10509 rc = bnxt_setup_int_mode(bp);
10510 if (rc) {
10512 rc);
10513 return rc;
10527 rc = irq_cpu_rmap_add(rmap, irq->vector);
10528 if (rc)
10534 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
10536 if (rc)
10548 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
10549 if (rc) {
10557 return rc;
10794 int rc = 0;
10799 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
10800 if (rc)
10801 return rc;
10804 rc = hwrm_req_send(bp, req);
10805 if (rc)
10847 return rc;
10892 int rc;
10894 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
10895 if (rc)
10896 return rc;
10899 rc = hwrm_req_send(bp, req);
10900 if (rc) {
10902 if (BNXT_VF(bp) && rc == -ENODEV) {
10904 rc = 0;
10906 return rc;
11113 int rc;
11115 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
11116 if (rc)
11117 return rc;
11125 rc = hwrm_req_send(bp, req);
11126 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
11138 return rc;
11167 int rc;
11169 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
11170 if (rc)
11171 return rc;
11186 int rc;
11195 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
11196 if (rc)
11197 return rc;
11200 rc = hwrm_req_send(bp, req);
11201 if (!rc) {
11211 return rc;
11217 int rc = tee_bnxt_fw_load();
11219 if (rc)
11220 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
11222 return rc;
11232 int retry = 0, rc;
11237 rc = bnxt_hwrm_poll(bp);
11242 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
11248 rc = -ENODEV;
11254 return rc;
11283 int rc;
11288 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
11289 if (rc)
11294 return rc;
11303 int rc, retry = 0;
11309 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
11310 if (rc)
11311 return rc;
11319 rc = hwrm_req_send(bp, req);
11320 if (rc != -EAGAIN)
11327 if (rc == -EAGAIN) {
11329 return rc;
11330 } else if (!rc) {
11333 rc = bnxt_try_recover_fw(bp);
11337 if (rc)
11338 return rc;
11365 rc = bnxt_fw_init_one(bp);
11366 if (rc) {
11369 return rc;
11372 rc = bnxt_init_int_mode(bp);
11373 if (rc) {
11376 return rc;
11379 rc = bnxt_cancel_reservations(bp, fw_reset);
11381 return rc;
11389 int rc;
11395 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
11396 if (rc)
11397 return rc;
11401 rc = hwrm_req_send(bp, req);
11402 if (rc) {
11404 return rc;
11431 int rc;
11433 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
11434 if (rc)
11435 return rc;
11443 rc = hwrm_req_send(bp, req);
11444 if (!rc)
11447 return rc;
11453 int rc;
11455 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
11456 if (rc)
11457 return rc;
11471 int rc;
11473 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
11474 if (rc)
11475 return rc;
11480 rc = hwrm_req_send(bp, req);
11481 if (!rc) {
11537 int rc;
11543 rc = bnxt_update_link(bp, true);
11544 if (rc) {
11545 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
11546 rc);
11547 return rc;
11583 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
11585 rc = bnxt_hwrm_set_pause(bp);
11586 if (rc) {
11587 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
11588 rc);
11589 return rc;
11592 return rc;
11613 int rc;
11621 rc = bnxt_fw_init_one(bp);
11622 if (!rc) {
11624 rc = bnxt_init_int_mode(bp);
11625 if (!rc) {
11630 return rc;
11671 int rc = 0;
11677 rc = bnxt_init_dflt_ring_mode(bp);
11678 if (rc) {
11680 return rc;
11683 rc = bnxt_reserve_rings(bp, irq_re_init);
11684 if (rc)
11685 return rc;
11693 rc = bnxt_alloc_mem(bp, irq_re_init);
11694 if (rc) {
11695 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
11701 rc = bnxt_request_irq(bp);
11702 if (rc) {
11703 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
11708 rc = bnxt_init_nic(bp, irq_re_init);
11709 if (rc) {
11710 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
11719 rc = bnxt_update_phy_setting(bp);
11721 if (rc) {
11767 return rc;
11773 int rc = 0;
11776 rc = -EIO;
11777 if (!rc)
11778 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
11779 if (rc) {
11780 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
11783 return rc;
11792 int rc = 0;
11796 rc = -ENODEV;
11800 rc = bnxt_alloc_mem(bp, true);
11801 if (rc) {
11802 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
11807 rc = bnxt_init_nic(bp, true);
11808 if (rc) {
11811 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
11820 return rc;
11849 int rc;
11852 rc = bnxt_reinit_after_abort(bp);
11853 if (rc) {
11854 if (rc == -EBUSY)
11862 rc = bnxt_hwrm_if_change(bp, true);
11863 if (rc)
11864 return rc;
11866 rc = __bnxt_open_nic(bp, true, true);
11867 if (rc) {
11878 return rc;
11943 int rc;
11945 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
11948 if (!rc)
11950 else if (rc < 0)
11972 int rc;
11977 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
11978 if (rc)
11979 return rc;
11992 rc = hwrm_req_send(bp, req);
11993 if (!rc)
11996 return rc;
12003 int rc;
12008 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
12009 if (rc)
12010 return rc;
12031 int rc;
12044 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
12047 return rc;
12294 int i, off = 0, rc;
12326 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
12327 if (rc) {
12328 if (BNXT_VF(bp) && rc == -ENODEV) {
12333 rc = 0;
12335 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
12338 return rc;
12348 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
12349 if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
12350 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
12351 rc);
12355 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
12357 if (rc)
12358 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
12359 rc);
12361 return rc;
12507 int rc = 0;
12549 return rc;
12560 rc = bnxt_set_tpa(bp,
12563 if (rc)
12567 return rc;
12724 int rc, i;
12726 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
12727 if (rc)
12728 return rc;
12733 rc = -ENOMEM;
12743 rc = hwrm_req_send(bp, req);
12744 if (rc || resp->error_code) {
12745 rc = -EIO;
12753 return rc;
12761 int rc;
12763 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
12764 if (rc)
12765 return rc;
12770 rc = hwrm_req_send(bp, req);
12771 if (!rc) {
12776 return rc;
12835 int rc;
12837 rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
12838 if (rc)
12839 return rc;
12854 int rc;
12862 rc = bnxt_open_nic(bp, true, false);
12863 bnxt_ulp_start(bp, rc);
12999 int rc;
13004 rc = bnxt_hwrm_rx_ring_reset(bp, i);
13005 if (rc) {
13006 if (rc == -EINVAL || rc == -EOPNOTSUPP)
13009 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
13010 rc);
13136 int rc;
13141 rc = bnxt_hwrm_func_qcfg(bp);
13142 if (rc) {
13143 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
13144 return rc;
13173 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
13270 int rc;
13272 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
13273 if (rc)
13307 int rc;
13314 rc = bnxt_update_link(bp, true);
13315 if (rc)
13316 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
13317 rc);
13325 int rc;
13328 rc = bnxt_update_phy_setting(bp);
13330 if (rc) {
13505 int rc;
13508 rc = bnxt_hwrm_ver_get(bp);
13512 if (rc)
13515 if (rc) {
13516 rc = bnxt_try_recover_fw(bp);
13517 if (rc)
13518 return rc;
13519 rc = bnxt_hwrm_ver_get(bp);
13520 if (rc)
13521 return rc;
13526 rc = bnxt_hwrm_func_reset(bp);
13527 if (rc)
13536 int rc;
13539 rc = bnxt_hwrm_func_qcaps(bp);
13540 if (rc) {
13541 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
13542 rc);
13546 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
13547 if (rc)
13548 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
13549 rc);
13554 rc = bnxt_hwrm_error_recovery_qcfg(bp);
13555 if (rc)
13556 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
13557 rc);
13560 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
13561 if (rc)
13631 int rc;
13633 rc = bnxt_fw_init_one_p1(bp);
13634 if (rc) {
13636 return rc;
13638 rc = bnxt_fw_init_one_p2(bp);
13639 if (rc) {
13641 return rc;
13643 rc = bnxt_probe_phy(bp, false);
13644 if (rc)
13645 return rc;
13646 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
13647 if (rc)
13648 return rc;
13710 int i, rc;
13724 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
13725 if (!rc) {
13730 rc = hwrm_req_send(bp, req);
13732 if (rc != -ENODEV)
13733 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
13744 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
13748 bnxt_ulp_start(bp, rc);
13758 int rc = 0;
13771 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
13789 bnxt_fw_reset_abort(bp, rc);
13840 rc = -ETIMEDOUT;
13854 rc = -ENODEV;
13862 rc = bnxt_hwrm_poll(bp);
13863 if (rc) {
13879 rc = bnxt_open(bp->dev);
13880 if (rc) {
13882 bnxt_fw_reset_abort(bp, rc);
13920 bnxt_fw_reset_abort(bp, rc);
13926 int rc;
13932 rc = pci_enable_device(pdev);
13933 if (rc) {
13941 rc = -ENODEV;
13945 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
13946 if (rc) {
13954 rc = -EIO;
13969 rc = -ENOMEM;
13976 rc = -ENOMEM;
14008 return rc;
14016 int rc = 0;
14024 rc = bnxt_approve_mac(bp, addr->sa_data, true);
14025 if (rc)
14026 return rc;
14032 rc = bnxt_open_nic(bp, false, false);
14035 return rc;
14059 int rc, tx_cp;
14073 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
14075 if (rc)
14076 return rc;
14243 int rc = 0, idx;
14270 rc = -EPROTONOSUPPORT;
14278 rc = -EPROTONOSUPPORT;
14284 rc = -EPROTONOSUPPORT;
14292 rc = -EPROTONOSUPPORT;
14301 rc = fltr->base.sw_id;
14309 rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
14310 if (!rc) {
14318 return rc;
14347 int rc;
14364 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
14366 if (rc)
14447 int rem, rc = 0;
14466 rc = bnxt_hwrm_set_br_mode(bp, mode);
14467 if (!rc)
14471 return rc;
14633 int rc = 0;
14637 rc = bnxt_hwrm_phy_qcaps(bp);
14638 if (rc) {
14639 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
14640 rc);
14641 return rc;
14651 rc = bnxt_update_link(bp, false);
14652 if (rc) {
14654 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
14655 rc);
14656 return rc;
14703 int rc;
14705 rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
14706 if (rc) {
14732 int rc;
14734 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
14735 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
14738 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
14739 if (rc) {
14742 return rc;
14767 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
14768 if (rc)
14769 rc = 0;
14771 return rc;
14787 int dflt_rings, max_rx_rings, max_tx_rings, rc;
14804 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
14805 if (rc)
14806 return rc;
14815 rc = __bnxt_reserve_rings(bp);
14816 if (rc && rc != -ENODEV)
14824 rc = __bnxt_reserve_rings(bp);
14825 if (rc && rc != -ENODEV)
14833 if (rc) {
14837 return rc;
14842 int rc;
14849 rc = bnxt_set_dflt_rings(bp, true);
14850 if (rc) {
14851 if (BNXT_VF(bp) && rc == -ENODEV)
14857 rc = bnxt_init_int_mode(bp);
14858 if (rc)
14866 bnxt_ulp_irq_restart(bp, rc);
14867 return rc;
14872 int rc;
14882 rc = bnxt_init_int_mode(bp);
14883 bnxt_ulp_irq_restart(bp, rc);
14886 if (rc)
14889 rc = bnxt_open_nic(bp, true, false);
14892 return rc;
14897 int rc = 0;
14916 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
14919 return rc;
14997 int rc, max_irqs;
15031 rc = bnxt_init_board(pdev, dev);
15032 if (rc < 0)
15041 rc = bnxt_alloc_hwrm_resources(bp);
15042 if (rc)
15048 rc = bnxt_fw_init_one_p1(bp);
15049 if (rc)
15061 rc = bnxt_alloc_rss_indir_tbl(bp);
15062 if (rc)
15065 rc = bnxt_fw_init_one_p2(bp);
15066 if (rc)
15069 rc = bnxt_map_db_bar(bp);
15070 if (rc) {
15071 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
15072 rc);
15134 rc = bnxt_init_mac_addr(bp);
15135 if (rc) {
15137 rc = -EADDRNOTAVAIL;
15143 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
15150 rc = bnxt_probe_phy(bp, true);
15151 if (rc)
15164 rc = bnxt_set_dflt_rings(bp, true);
15165 if (rc) {
15166 if (BNXT_VF(bp) && rc == -ENODEV) {
15170 rc = -ENOMEM;
15182 rc = bnxt_init_int_mode(bp);
15183 if (rc)
15197 rc = -ENOMEM;
15201 rc = bnxt_init_tc(bp);
15202 if (rc)
15204 rc);
15208 rc = bnxt_dl_register(bp);
15209 if (rc)
15214 rc = register_netdev(dev);
15215 if (rc)
15250 return rc;
15286 int rc = 0;
15292 rc = bnxt_close(dev);
15298 return rc;
15305 int rc = 0;
15308 rc = pci_enable_device(bp->pdev);
15309 if (rc) {
15311 rc);
15316 rc = -ENODEV;
15319 rc = bnxt_hwrm_func_reset(bp);
15320 if (rc) {
15321 rc = -EBUSY;
15325 rc = bnxt_hwrm_func_qcaps(bp);
15326 if (rc)
15332 rc = -ENODEV;
15338 rc = bnxt_open(dev);
15339 if (!rc)
15344 bnxt_ulp_start(bp, rc);
15345 if (!rc)
15348 return rc;