Lines matching refs:bp — every source line in the bnxt Ethernet driver that references the per-device context pointer bp (struct bnxt). The number at the start of each entry is that line's position in the driver source file.

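For orientation before the raw matches: bp is the driver's private context, stored in the net_device private area and passed as the first argument to nearly every helper in the listing. The sketch below is a hypothetical, heavily simplified illustration of that pattern, not the driver's actual definitions — struct bnxt_sketch and the two functions are invented stand-ins, while netdev_priv(), netdev_dbg(), and dev_kfree_skb_any() are real kernel APIs, and the fields shown (dev, pdev, flags, state) are ones that do appear among the matches below.

/*
 * Minimal sketch (not the driver's actual code) of the pattern this
 * listing documents: a per-device context struct obtained from the
 * net_device private area and threaded through every helper.
 */
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

struct bnxt_sketch {                 /* hypothetical, simplified stand-in */
	struct net_device	*dev;    /* back-pointer, cf. bp->dev         */
	struct pci_dev		*pdev;   /* PCI device, cf. bp->pdev          */
	u32			flags;   /* BNXT_FLAG_* bits, cf. bp->flags   */
	unsigned long		state;   /* BNXT_STATE_* bits, cf. bp->state  */
};

/* Helpers take bp as the first argument, mirroring e.g. bnxt_db_nq(bp, db, idx). */
static void bnxt_sketch_helper(struct bnxt_sketch *bp)
{
	netdev_dbg(bp->dev, "flags=0x%x\n", bp->flags);
}

/* Entry points recover bp from the net_device, mirroring the
 * netdev_priv() call shown at source line 449 of the listing. */
static netdev_tx_t bnxt_sketch_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt_sketch *bp = netdev_priv(dev);

	bnxt_sketch_helper(bp);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

The real struct bnxt additionally carries ring, VNIC, HWRM, and statistics state, as the breadth of the matches below suggests.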
311 	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
315 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK | \
322 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | \
325 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
327 if (bp->flags & BNXT_FLAG_CHIP_P7)
329 else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
335 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
337 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
343 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
345 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
346 bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
352 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
354 if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
357 if (BNXT_PF(bp))
358 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
360 schedule_delayed_work(&bp->fw_reset_task, delay);
363 static void __bnxt_queue_sp_work(struct bnxt *bp)
365 if (BNXT_PF(bp))
366 queue_work(bnxt_pf_wq, &bp->sp_task);
368 schedule_work(&bp->sp_task);
371 static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
373 set_bit(event, &bp->sp_event);
374 __bnxt_queue_sp_work(bp);
377 static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
381 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
382 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
384 set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
385 __bnxt_queue_sp_work(bp);
390 void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
398 netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
403 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
438 static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
443 bnxt_db_write(bp, &txr->tx_db, prod);
449 struct bnxt *bp = netdev_priv(dev);
458 struct pci_dev *pdev = bp->pdev;
464 if (unlikely(i >= bp->tx_nr_rings)) {
471 txr = &bp->tx_ring[bp->tx_ring_map[i]];
474 free_size = bnxt_tx_avail(bp, txr);
478 netif_warn(bp, tx_err, dev,
480 if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
481 bp->tx_wake_thresh))
492 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
494 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
511 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
522 atomic_inc(&bp->ptp_cfg->tx_avail);
530 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
579 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2);
582 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
625 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);
629 &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
679 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
688 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
710 bnxt_txr_db_kick(bp, txr, prod);
712 if (free_size >= bp->tx_wake_thresh)
720 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
724 bnxt_txr_db_kick(bp, txr, prod);
727 netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
728 bp->tx_wake_thresh);
734 atomic_inc(&bp->ptp_cfg->tx_avail);
740 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
748 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
758 bnxt_txr_db_kick(bp, txr, txr->tx_prod);
764 static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
767 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
768 struct pci_dev *pdev = bp->pdev;
774 while (RING_TX(bp, cons) != hw_cons) {
779 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
785 bnxt_sched_reset_txr(bp, txr, cons);
803 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
811 if (BNXT_CHIP_P5(bp)) {
813 if (!bnxt_get_tx_ts_p5(bp, skb))
816 atomic_inc(&bp->ptp_cfg->tx_avail);
829 bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
833 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
839 if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
840 __bnxt_tx_int(bp, txr, budget);
845 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
866 static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
870 struct pci_dev *pdev = bp->pdev;
873 data = napi_alloc_frag(bp->rx_buf_size);
875 data = netdev_alloc_frag(bp->rx_buf_size);
879 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
880 bp->rx_buf_use_size, bp->rx_dir,
890 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
893 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
894 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
897 if (BNXT_RX_PAGE_MODE(bp)) {
900 __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
905 mapping += bp->rx_dma_offset;
907 rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
909 u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);
915 rx_buf->data_ptr = data + bp->rx_offset;
927 struct bnxt *bp = rxr->bnapi->bp;
930 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
938 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
939 cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)];
954 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
959 &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
966 page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
976 rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
986 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
998 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
1011 struct bnxt *bp = bnapi->bp;
1018 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1029 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
1031 agg = bnxt_get_agg(bp, cpr, idx, start + i);
1052 prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
1058 sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
1064 static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
1076 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1081 dma_addr -= bp->rx_dma_offset;
1082 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
1083 bp->rx_dir);
1084 skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
1090 skb_reserve(skb, bp->rx_offset);
1096 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
1110 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1115 dma_addr -= bp->rx_dma_offset;
1116 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
1117 bp->rx_dir);
1120 payload = eth_get_headlen(bp->dev, data_ptr, len);
1143 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
1153 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1159 skb = napi_build_skb(data, bp->rx_buf_size);
1160 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1161 bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
1167 skb_reserve(skb, bp->rx_offset);
1172 static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
1179 struct pci_dev *pdev = bp->pdev;
1185 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1197 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1199 agg = bnxt_get_agg(bp, cpr, idx, i);
1221 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
1234 bp->rx_dir);
1243 static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
1251 total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
1265 static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
1276 total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
1286 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1303 struct bnxt *bp = bnapi->bp;
1304 struct pci_dev *pdev = bp->pdev;
1311 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1312 bp->rx_dir);
1317 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1318 bp->rx_dir);
1324 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1340 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1347 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1412 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1422 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1431 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
1436 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1439 bnxt_sched_reset_rxr(bp, rxr);
1448 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
1474 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1485 cons = RING_RX(bp, NEXT_RX(cons));
1486 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
1667 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1685 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1689 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1699 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1701 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1704 return dev ? dev : bp->dev;
1707 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1716 struct net_device *dev = bp->dev;
1727 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1734 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1740 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1748 gro = !!(bp->flags & BNXT_FLAG_GRO);
1755 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1772 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1777 if (len <= bp->rx_copy_thresh) {
1788 new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
1796 tpa_info->data_ptr = new_data + bp->rx_offset;
1799 skb = napi_build_skb(data, bp->rx_buf_size);
1800 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1801 bp->rx_buf_use_size, bp->rx_dir,
1810 skb_reserve(skb, bp->rx_offset);
1815 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
1824 dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code);
1852 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1857 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1869 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1874 if (skb->dev != bp->dev) {
1876 bnxt_vf_rep_rx(bp, skb);
1883 static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags,
1890 if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags))
1939 static enum pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp,
1944 ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp);
1963 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1968 struct net_device *dev = bp->dev;
1991 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
2011 bnxt_tpa_start(bp, rxr, cmp_type,
2019 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
2028 bnxt_deliver_skb(bp, bnapi, skb);
2037 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
2041 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
2043 bnxt_sched_reset_rxr(bp, rxr);
2057 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
2077 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
2078 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
2079 netdev_warn_once(bp->dev, "RX buffer error %x\n",
2081 bnxt_sched_reset_rxr(bp, rxr);
2091 if (bnxt_xdp_attached(bp, rxr)) {
2092 bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
2094 u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
2104 if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &data_ptr, &len, event)) {
2110 if (len <= bp->rx_copy_thresh) {
2130 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
2138 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
2142 skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
2155 type = bnxt_rss_ext_op(bp, rxcmp);
2171 dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1));
2193 if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
2194 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
2197 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
2198 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2209 bnxt_deliver_skb(bp, bnapi, skb);
2218 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
2234 static int bnxt_force_rx_discard(struct bnxt *bp,
2273 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2279 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2281 struct bnxt_fw_health *fw_health = bp->fw_health;
2289 pci_read_config_dword(bp->pdev, reg_off, &val);
2295 val = readl(bp->bar0 + reg_off);
2298 val = readl(bp->bar1 + reg_off);
2306 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2310 for (i = 0; i < bp->rx_nr_rings; i++) {
2311 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2314 grp_info = &bp->grp_info[grp_idx];
2323 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2325 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
2334 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2336 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2366 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2368 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2378 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2380 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2396 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2398 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2428 static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2434 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2438 netdev_warn(bp->dev, "Pause Storm detected!\n");
2441 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
2463 netdev_err(bp->dev, "Unknown Thermal threshold type event\n");
2472 netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n",
2474 netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n",
2478 bp->thermal_threshold_type = type;
2479 set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event);
2485 netdev_err(bp->dev, "FW reported unknown error type %u\n",
2514 static int bnxt_async_event_process(struct bnxt *bp,
2521 netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
2527 struct bnxt_link_info *link_info = &bp->link_info;
2529 if (BNXT_VF(bp))
2539 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2542 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2547 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2550 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2553 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2558 if (BNXT_VF(bp))
2561 if (bp->pf.port_id != port_id)
2564 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2568 if (BNXT_PF(bp))
2570 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2575 if (!bp->fw_health)
2578 bp->fw_reset_timestamp = jiffies;
2579 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2580 if (!bp->fw_reset_min_dsecs)
2581 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2582 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2583 if (!bp->fw_reset_max_dsecs)
2584 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2586 set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
2589 bp->fw_health->fatalities++;
2590 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2594 bp->fw_health->survivals++;
2595 set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
2597 netif_warn(bp, hw, bp->dev,
2600 bp->fw_reset_min_dsecs * 100,
2601 bp->fw_reset_max_dsecs * 100);
2602 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2606 struct bnxt_fw_health *fw_health = bp->fw_health;
2615 netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
2621 bp->current_interval * 10);
2625 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2627 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2628 status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
2631 netif_info(bp, drv, bp->dev,
2645 netif_notice(bp, hw, bp->dev,
2653 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
2656 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2661 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2663 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2667 rxr = bp->bnapi[grp_idx]->rx_ring;
2668 bnxt_sched_reset_rxr(bp, rxr);
2672 struct bnxt_fw_health *fw_health = bp->fw_health;
2674 netif_notice(bp, hw, bp->dev,
2680 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2686 bnxt_ptp_pps_event(bp, data1, data2);
2690 if (bnxt_event_error_report(bp, data1, data2))
2697 if (BNXT_PTP_USE_RTC(bp)) {
2698 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2705 bnxt_ptp_update_current_time(bp);
2718 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2724 __bnxt_queue_sp_work(bp);
2729 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2739 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2745 if ((vf_id < bp->pf.first_vf_id) ||
2746 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2747 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2752 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2753 bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT);
2757 bnxt_async_event_process(bp,
2771 struct bnxt *bp = bnapi->bp;
2781 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2795 struct bnxt *bp = bnapi->bp;
2802 if (!bnxt_has_work(bp, cpr)) {
2803 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2813 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2820 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2858 txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque);
2860 bp->tx_ring_mask;
2862 if (unlikely(tx_freed >= bp->tx_wake_thresh)) {
2872 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2874 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2890 bnxt_hwrm_handler(bp, txcmp);
2910 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2918 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2922 bnapi->tx_int(bp, bnapi, budget);
2927 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2932 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2937 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2943 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2949 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2951 __bnxt_poll_work_done(bp, bnapi, budget);
2958 struct bnxt *bp = bnapi->bp;
2995 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3004 bnxt_hwrm_handler(bp, txcmp);
3006 netdev_err(bp->dev,
3017 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3020 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3024 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
3034 struct bnxt *bp = bnapi->bp;
3038 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3043 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
3051 if (!bnxt_has_work(bp, cpr)) {
3057 if (bp->flags & BNXT_FLAG_DIM) {
3069 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
3078 work_done += __bnxt_poll_work(bp, cpr2,
3086 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3104 bnxt_writeq(bp,
3111 __bnxt_poll_work_done(bp, bnapi, budget);
3120 struct bnxt *bp = bnapi->bp;
3125 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3131 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
3143 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
3172 work_done += __bnxt_poll_work(bp, cpr2,
3176 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
3180 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget);
3188 (bp->flags & BNXT_FLAG_DIM)) {
3200 static void bnxt_free_tx_skbs(struct bnxt *bp)
3203 struct pci_dev *pdev = bp->pdev;
3205 if (!bp->tx_ring)
3208 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
3209 for (i = 0; i < bp->tx_nr_rings; i++) {
3210 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3221 if (i < bp->tx_nr_rings_xdp &&
3256 int ring_idx = j & bp->tx_ring_mask;
3267 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
3271 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
3273 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
3274 struct pci_dev *pdev = bp->pdev;
3278 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
3279 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
3283 for (i = 0; i < bp->max_tpa; i++) {
3291 bp->rx_buf_use_size, bp->rx_dir,
3312 if (BNXT_RX_PAGE_MODE(bp)) {
3316 bp->rx_buf_use_size, bp->rx_dir,
3345 static void bnxt_free_rx_skbs(struct bnxt *bp)
3349 if (!bp->rx_ring)
3352 for (i = 0; i < bp->rx_nr_rings; i++)
3353 bnxt_free_one_rx_ring_skbs(bp, i);
3356 static void bnxt_free_skbs(struct bnxt *bp)
3358 bnxt_free_tx_skbs(bp);
3359 bnxt_free_rx_skbs(bp);
3379 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3381 struct pci_dev *pdev = bp->pdev;
3412 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3414 struct pci_dev *pdev = bp->pdev;
3465 static void bnxt_free_tpa_info(struct bnxt *bp)
3469 for (i = 0; i < bp->rx_nr_rings; i++) {
3470 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3475 for (j = 0; j < bp->max_tpa; j++) {
3485 static int bnxt_alloc_tpa_info(struct bnxt *bp)
3489 bp->max_tpa = MAX_TPA;
3490 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
3491 if (!bp->max_tpa_v2)
3493 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3496 for (i = 0; i < bp->rx_nr_rings; i++) {
3497 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3500 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
3505 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
3507 for (j = 0; j < bp->max_tpa; j++) {
3521 static void bnxt_free_rx_rings(struct bnxt *bp)
3525 if (!bp->rx_ring)
3528 bnxt_free_tpa_info(bp);
3529 for (i = 0; i < bp->rx_nr_rings; i++) {
3530 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3546 bnxt_free_ring(bp, &ring->ring_mem);
3549 bnxt_free_ring(bp, &ring->ring_mem);
3553 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3558 pp.pool_size = bp->rx_agg_ring_size;
3559 if (BNXT_RX_PAGE_MODE(bp))
3560 pp.pool_size += bp->rx_ring_size;
3561 pp.nid = dev_to_node(&bp->pdev->dev);
3563 pp.netdev = bp->dev;
3564 pp.dev = &bp->pdev->dev;
3565 pp.dma_dir = bp->rx_dir;
3579 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3583 if (!bp->rx_ring)
3586 if (bp->flags & BNXT_FLAG_AGG_RINGS)
3589 for (i = 0; i < bp->rx_nr_rings; i++) {
3590 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3595 rc = bnxt_alloc_rx_page_pool(bp, rxr);
3599 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3611 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3620 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3625 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3632 if (bp->flags & BNXT_FLAG_TPA)
3633 rc = bnxt_alloc_tpa_info(bp);
3637 static void bnxt_free_tx_rings(struct bnxt *bp)
3640 struct pci_dev *pdev = bp->pdev;
3642 if (!bp->tx_ring)
3645 for (i = 0; i < bp->tx_nr_rings; i++) {
3646 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3650 dma_free_coherent(&pdev->dev, bp->tx_push_size,
3657 bnxt_free_ring(bp, &ring->ring_mem);
3661 #define BNXT_TC_TO_RING_BASE(bp, tc) \
3662 ((tc) * (bp)->tx_nr_rings_per_tc)
3664 #define BNXT_RING_TO_TC_OFF(bp, tx) \
3665 ((tx) % (bp)->tx_nr_rings_per_tc)
3667 #define BNXT_RING_TO_TC(bp, tx) \
3668 ((tx) / (bp)->tx_nr_rings_per_tc)
3670 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3673 struct pci_dev *pdev = bp->pdev;
3675 bp->tx_push_size = 0;
3676 if (bp->tx_push_thresh) {
3680 bp->tx_push_thresh);
3684 bp->tx_push_thresh = 0;
3687 bp->tx_push_size = push_size;
3690 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3691 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3697 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3702 if (bp->tx_push_size) {
3709 bp->tx_push_size,
3720 qidx = bp->tc_to_qidx[j];
3721 ring->queue_id = bp->q_info[qidx].queue_id;
3723 if (i < bp->tx_nr_rings_xdp)
3725 if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1))
3755 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
3759 if (!bp->bnapi)
3761 for (i = 0; i < bp->cp_nr_rings; i++) {
3762 struct bnxt_napi *bnapi = bp->bnapi[i];
3770 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
3772 int i, n = bp->cp_nr_pages;
3774 for (i = 0; i < bp->cp_nr_rings; i++) {
3775 struct bnxt_napi *bnapi = bp->bnapi[i];
3787 static void bnxt_free_cp_rings(struct bnxt *bp)
3791 if (!bp->bnapi)
3794 for (i = 0; i < bp->cp_nr_rings; i++) {
3795 struct bnxt_napi *bnapi = bp->bnapi[i];
3806 bnxt_free_ring(bp, &ring->ring_mem);
3815 bnxt_free_ring(bp, &ring->ring_mem);
3824 static int bnxt_alloc_cp_sub_ring(struct bnxt *bp,
3831 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
3838 rmem->nr_pages = bp->cp_nr_pages;
3843 rc = bnxt_alloc_ring(bp, rmem);
3845 bnxt_free_ring(bp, rmem);
3851 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3853 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3855 int tcs = bp->num_tc;
3859 ulp_msix = bnxt_get_ulp_msix_num(bp);
3860 ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3861 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
3862 struct bnxt_napi *bnapi = bp->bnapi[i];
3875 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3884 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
3887 if (i < bp->rx_nr_rings) {
3891 if (i < bp->tx_nr_rings_xdp) {
3894 } else if ((sh && i < bp->tx_nr_rings) ||
3895 (!sh && i >= bp->rx_nr_rings)) {
3908 rc = bnxt_alloc_cp_sub_ring(bp, cpr2);
3914 bp->rx_ring[i].rx_cpr = cpr2;
3919 n = BNXT_TC_TO_RING_BASE(bp, tc) + j;
3920 bp->tx_ring[n].tx_cpr = cpr2;
3930 static void bnxt_init_ring_struct(struct bnxt *bp)
3934 for (i = 0; i < bp->cp_nr_rings; i++) {
3935 struct bnxt_napi *bnapi = bp->bnapi[i];
3948 rmem->nr_pages = bp->cp_nr_pages;
3960 rmem->nr_pages = bp->rx_nr_pages;
3964 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3969 rmem->nr_pages = bp->rx_agg_nr_pages;
3973 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3980 rmem->nr_pages = bp->tx_nr_pages;
3984 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
4012 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
4014 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
4015 struct net_device *dev = bp->dev;
4020 for (i = 0; i < bp->rx_ring_size; i++) {
4021 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
4023 ring_nr, i, bp->rx_ring_size);
4030 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
4034 for (i = 0; i < bp->rx_agg_ring_size; i++) {
4035 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
4037 ring_nr, i, bp->rx_ring_size);
4048 for (i = 0; i < bp->max_tpa; i++) {
4049 data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL);
4054 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
4061 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
4067 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
4073 rxr = &bp->rx_ring[ring_nr];
4077 netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
4080 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
4081 bpf_prog_add(bp->xdp_prog, 1);
4082 rxr->xdp_prog = bp->xdp_prog;
4089 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
4096 return bnxt_alloc_one_rx_ring(bp, ring_nr);
4099 static void bnxt_init_cp_rings(struct bnxt *bp)
4103 for (i = 0; i < bp->cp_nr_rings; i++) {
4104 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
4108 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4109 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4117 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4118 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4123 static int bnxt_init_rx_rings(struct bnxt *bp)
4127 if (BNXT_RX_PAGE_MODE(bp)) {
4128 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
4129 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
4131 bp->rx_offset = BNXT_RX_OFFSET;
4132 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
4135 for (i = 0; i < bp->rx_nr_rings; i++) {
4136 rc = bnxt_init_one_rx_ring(bp, i);
4144 static int bnxt_init_tx_rings(struct bnxt *bp)
4148 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
4151 for (i = 0; i < bp->tx_nr_rings; i++) {
4152 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4157 if (i >= bp->tx_nr_rings_xdp)
4158 netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
4166 static void bnxt_free_ring_grps(struct bnxt *bp)
4168 kfree(bp->grp_info);
4169 bp->grp_info = NULL;
4172 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
4177 bp->grp_info = kcalloc(bp->cp_nr_rings,
4180 if (!bp->grp_info)
4183 for (i = 0; i < bp->cp_nr_rings; i++) {
4185 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
4186 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4187 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
4188 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
4189 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
4194 static void bnxt_free_vnics(struct bnxt *bp)
4196 kfree(bp->vnic_info);
4197 bp->vnic_info = NULL;
4198 bp->nr_vnics = 0;
4201 static int bnxt_alloc_vnics(struct bnxt *bp)
4206 if (bp->flags & BNXT_FLAG_RFS) {
4207 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
4209 else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4210 num_vnics += bp->rx_nr_rings;
4214 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4217 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
4219 if (!bp->vnic_info)
4222 bp->nr_vnics = num_vnics;
4226 static void bnxt_init_vnics(struct bnxt *bp)
4228 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
4231 for (i = 0; i < bp->nr_vnics; i++) {
4232 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4241 if (bp->vnic_info[i].rss_hash_key) {
4246 if (!bp->rss_hash_key_valid &&
4247 !bp->rss_hash_key_updated) {
4248 get_random_bytes(bp->rss_hash_key,
4250 bp->rss_hash_key_updated = true;
4253 memcpy(vnic->rss_hash_key, bp->rss_hash_key,
4256 if (!bp->rss_hash_key_updated)
4259 bp->rss_hash_key_updated = false;
4260 bp->rss_hash_key_valid = true;
4262 bp->toeplitz_prefix = 0;
4264 bp->toeplitz_prefix <<= 8;
4265 bp->toeplitz_prefix |= key[k];
4292 void bnxt_set_tpa_flags(struct bnxt *bp)
4294 bp->flags &= ~BNXT_FLAG_TPA;
4295 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
4297 if (bp->dev->features & NETIF_F_LRO)
4298 bp->flags |= BNXT_FLAG_LRO;
4299 else if (bp->dev->features & NETIF_F_GRO_HW)
4300 bp->flags |= BNXT_FLAG_GRO;
4303 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
4306 void bnxt_set_ring_params(struct bnxt *bp)
4312 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
4317 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
4318 ring_size = bp->rx_ring_size;
4319 bp->rx_agg_ring_size = 0;
4320 bp->rx_agg_nr_pages = 0;
4322 if (bp->flags & BNXT_FLAG_TPA)
4325 bp->flags &= ~BNXT_FLAG_JUMBO;
4326 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
4329 bp->flags |= BNXT_FLAG_JUMBO;
4330 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4337 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
4338 bp->rx_ring_size, ring_size);
4339 bp->rx_ring_size = ring_size;
4343 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
4345 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
4348 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
4350 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
4353 bp->rx_agg_ring_size = agg_ring_size;
4354 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
4356 if (BNXT_RX_PAGE_MODE(bp)) {
4368 bp->rx_buf_use_size = rx_size;
4369 bp->rx_buf_size = rx_space;
4371 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
4372 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
4374 ring_size = bp->tx_ring_size;
4375 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
4376 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
4378 max_rx_cmpl = bp->rx_ring_size;
4383 if (bp->flags & BNXT_FLAG_TPA)
4384 max_rx_cmpl += bp->max_tpa;
4386 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
4387 bp->cp_ring_size = ring_size;
4389 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
4390 if (bp->cp_nr_pages > MAX_CP_PAGES) {
4391 bp->cp_nr_pages = MAX_CP_PAGES;
4392 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
4393 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
4394 ring_size, bp->cp_ring_size);
4396 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
4397 bp->cp_ring_mask = bp->cp_bit - 1;
4403 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4405 struct net_device *dev = bp->dev;
4408 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
4409 bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
4411 if (bp->xdp_prog->aux->xdp_has_frags)
4412 dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
4415 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
4417 bp->flags |= BNXT_FLAG_JUMBO;
4418 bp->rx_skb_func = bnxt_rx_multi_page_skb;
4420 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
4421 bp->rx_skb_func = bnxt_rx_page_skb;
4423 bp->rx_dir = DMA_BIDIRECTIONAL;
4427 dev->max_mtu = bp->max_mtu;
4428 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
4429 bp->rx_dir = DMA_FROM_DEVICE;
4430 bp->rx_skb_func = bnxt_rx_skb;
4435 static void bnxt_free_vnic_attributes(struct bnxt *bp)
4439 struct pci_dev *pdev = bp->pdev;
4441 if (!bp->vnic_info)
4444 for (i = 0; i < bp->nr_vnics; i++) {
4445 vnic = &bp->vnic_info[i];
4471 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
4475 struct pci_dev *pdev = bp->pdev;
4478 for (i = 0; i < bp->nr_vnics; i++) {
4479 vnic = &bp->vnic_info[i];
4506 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4510 max_rings = bp->rx_nr_rings;
4520 if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
4526 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4548 static void bnxt_free_hwrm_resources(struct bnxt *bp)
4552 dma_pool_destroy(bp->hwrm_dma_pool);
4553 bp->hwrm_dma_pool = NULL;
4556 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
4561 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
4563 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
4566 if (!bp->hwrm_dma_pool)
4569 INIT_HLIST_HEAD(&bp->hwrm_pending_list);
4574 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
4581 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
4587 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
4590 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
4607 bnxt_free_stats_mem(bp, stats);
4627 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
4635 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
4636 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4639 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
4646 resp = hwrm_req_hold(bp, req);
4647 rc = hwrm_req_send(bp, req);
4652 hwrm_req_drop(bp, req);
4656 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
4657 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
4659 static void bnxt_init_stats(struct bnxt *bp)
4661 struct bnxt_napi *bnapi = bp->bnapi[0];
4672 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
4674 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4680 if (bp->flags & BNXT_FLAG_PORT_STATS) {
4681 stats = &bp->port_stats;
4690 rc = bnxt_hwrm_port_qstats(bp, flags);
4699 bnxt_hwrm_port_qstats(bp, 0);
4702 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
4703 stats = &bp->rx_port_stats_ext;
4707 stats = &bp->tx_port_stats_ext;
4713 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4725 bnxt_hwrm_port_qstats_ext(bp, 0);
4730 static void bnxt_free_port_stats(struct bnxt *bp)
4732 bp->flags &= ~BNXT_FLAG_PORT_STATS;
4733 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
4735 bnxt_free_stats_mem(bp, &bp->port_stats);
4736 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4737 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
4740 static void bnxt_free_ring_stats(struct bnxt *bp)
4744 if (!bp->bnapi)
4747 for (i = 0; i < bp->cp_nr_rings; i++) {
4748 struct bnxt_napi *bnapi = bp->bnapi[i];
4751 bnxt_free_stats_mem(bp, &cpr->stats);
4755 static int bnxt_alloc_stats(struct bnxt *bp)
4760 size = bp->hw_ring_stats_size;
4762 for (i = 0; i < bp->cp_nr_rings; i++) {
4763 struct bnxt_napi *bnapi = bp->bnapi[i];
4767 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4774 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4777 if (bp->port_stats.hw_stats)
4780 bp->port_stats.len = BNXT_PORT_STATS_SIZE;
4781 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
4785 bp->flags |= BNXT_FLAG_PORT_STATS;
4789 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
4790 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
4793 if (bp->rx_port_stats_ext.hw_stats)
4796 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
4797 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
4803 if (bp->tx_port_stats_ext.hw_stats)
4806 if (bp->hwrm_spec_code >= 0x10902 ||
4807 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
4808 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
4809 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4814 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
4818 static void bnxt_clear_ring_indices(struct bnxt *bp)
4822 if (!bp->bnapi)
4825 for (i = 0; i < bp->cp_nr_rings; i++) {
4826 struct bnxt_napi *bnapi = bp->bnapi[i];
4854 void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
4861 list_add_tail(&fltr->list, &bp->usr_fltr_list);
4864 void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
4870 void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
4874 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
4877 bnxt_del_one_usr_fltr(bp, usr_fltr);
4881 static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
4884 bnxt_del_one_usr_fltr(bp, fltr);
4886 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
4887 bp->ntp_fltr_count--;
4892 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
4904 head = &bp->ntp_fltr_hash_tbl[i];
4906 bnxt_del_l2_filter(bp, fltr->l2_fltr);
4910 bnxt_del_fltr(bp, &fltr->base);
4916 bitmap_free(bp->ntp_fltr_bmap);
4917 bp->ntp_fltr_bmap = NULL;
4918 bp->ntp_fltr_count = 0;
4921 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4925 if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap)
4929 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4931 bp->ntp_fltr_count = 0;
4932 bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL);
4934 if (!bp->ntp_fltr_bmap)
4940 static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
4949 head = &bp->l2_fltr_hash_tbl[i];
4954 bnxt_del_fltr(bp, &fltr->base);
4959 static void bnxt_init_l2_fltr_tbl(struct bnxt *bp)
4964 INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]);
4965 get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed));
4968 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4970 bnxt_free_vnic_attributes(bp);
4971 bnxt_free_tx_rings(bp);
4972 bnxt_free_rx_rings(bp);
4973 bnxt_free_cp_rings(bp);
4974 bnxt_free_all_cp_arrays(bp);
4975 bnxt_free_ntp_fltrs(bp, false);
4976 bnxt_free_l2_filters(bp, false);
4978 bnxt_free_ring_stats(bp);
4979 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
4980 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
4981 bnxt_free_port_stats(bp);
4982 bnxt_free_ring_grps(bp);
4983 bnxt_free_vnics(bp);
4984 kfree(bp->tx_ring_map);
4985 bp->tx_ring_map = NULL;
4986 kfree(bp->tx_ring);
4987 bp->tx_ring = NULL;
4988 kfree(bp->rx_ring);
4989 bp->rx_ring = NULL;
4990 kfree(bp->bnapi);
4991 bp->bnapi = NULL;
4993 bnxt_clear_ring_indices(bp);
4997 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
5007 bp->cp_nr_rings);
5009 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
5013 bp->bnapi = bnapi;
5015 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
5016 bp->bnapi[i] = bnapi;
5017 bp->bnapi[i]->index = i;
5018 bp->bnapi[i]->bp = bp;
5019 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5021 &bp->bnapi[i]->cp_ring;
5028 bp->rx_ring = kcalloc(bp->rx_nr_rings,
5031 if (!bp->rx_ring)
5034 for (i = 0; i < bp->rx_nr_rings; i++) {
5035 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5037 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5043 rxr->rx_cpr = &bp->bnapi[i]->cp_ring;
5045 rxr->bnapi = bp->bnapi[i];
5046 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
5049 bp->tx_ring = kcalloc(bp->tx_nr_rings,
5052 if (!bp->tx_ring)
5055 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
5058 if (!bp->tx_ring_map)
5061 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5064 j = bp->rx_nr_rings;
5066 for (i = 0; i < bp->tx_nr_rings; i++) {
5067 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5070 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5073 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
5074 if (i >= bp->tx_nr_rings_xdp) {
5075 int k = j + BNXT_RING_TO_TC_OFF(bp, i);
5077 bnapi2 = bp->bnapi[k];
5078 txr->txq_index = i - bp->tx_nr_rings_xdp;
5080 BNXT_RING_TO_TC(bp, txr->txq_index);
5084 bnapi2 = bp->bnapi[j];
5091 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
5095 rc = bnxt_alloc_stats(bp);
5098 bnxt_init_stats(bp);
5100 rc = bnxt_alloc_ntp_fltrs(bp);
5104 rc = bnxt_alloc_vnics(bp);
5109 rc = bnxt_alloc_all_cp_arrays(bp);
5113 bnxt_init_ring_struct(bp);
5115 rc = bnxt_alloc_rx_rings(bp);
5119 rc = bnxt_alloc_tx_rings(bp);
5123 rc = bnxt_alloc_cp_rings(bp);
5127 bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG |
5130 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS))
5131 bp->vnic_info[BNXT_VNIC_NTUPLE].flags |=
5134 rc = bnxt_alloc_vnic_attributes(bp);
5140 bnxt_free_mem(bp, true);
5144 static void bnxt_disable_int(struct bnxt *bp)
5148 if (!bp->bnapi)
5151 for (i = 0; i < bp->cp_nr_rings; i++) {
5152 struct bnxt_napi *bnapi = bp->bnapi[i];
5157 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5161 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
5163 struct bnxt_napi *bnapi = bp->bnapi[n];
5170 static void bnxt_disable_int_sync(struct bnxt *bp)
5174 if (!bp->irq_tbl)
5177 atomic_inc(&bp->intr_sem);
5179 bnxt_disable_int(bp);
5180 for (i = 0; i < bp->cp_nr_rings; i++) {
5181 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
5183 synchronize_irq(bp->irq_tbl[map_idx].vector);
5187 static void bnxt_enable_int(struct bnxt *bp)
5191 atomic_set(&bp->intr_sem, 0);
5192 for (i = 0; i < bp->cp_nr_rings; i++) {
5193 struct bnxt_napi *bnapi = bp->bnapi[i];
5196 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
5200 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
5210 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
5220 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
5222 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
5233 if (BNXT_PF(bp)) {
5254 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
5263 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5266 !bp->ptp_cfg)
5283 resp = hwrm_req_hold(bp, req);
5284 rc = hwrm_req_send(bp, req);
5286 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
5289 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
5291 hwrm_req_drop(bp, req);
5295 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
5300 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
5303 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
5306 return hwrm_req_send(bp, req);
5309 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa);
5311 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
5317 bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
5320 bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
5323 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
5331 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
5332 bp->vxlan_port = 0;
5333 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
5336 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
5337 bp->nge_port = 0;
5338 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
5341 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id);
5342 bp->vxlan_gpe_port = 0;
5343 bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID;
5349 rc = hwrm_req_send(bp, req);
5351 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
5353 if (bp->flags & BNXT_FLAG_TPA)
5354 bnxt_set_tpa(bp, true);
5358 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
5365 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
5372 resp = hwrm_req_hold(bp, req);
5373 rc = hwrm_req_send(bp, req);
5375 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
5382 bp->vxlan_port = port;
5383 bp->vxlan_fw_dst_port_id =
5387 bp->nge_port = port;
5388 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
5391 bp->vxlan_gpe_port = port;
5392 bp->vxlan_gpe_fw_dst_port_id =
5398 if (bp->flags & BNXT_FLAG_TPA)
5399 bnxt_set_tpa(bp, true);
5402 hwrm_req_drop(bp, req);
5406 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
5409 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5412 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
5422 return hwrm_req_send_silent(bp, req);
5425 void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr)
5429 spin_lock_bh(&bp->ntp_fltr_lock);
5431 spin_unlock_bh(&bp->ntp_fltr_lock);
5435 bnxt_del_one_usr_fltr(bp, &fltr->base);
5437 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
5438 bp->ntp_fltr_count--;
5440 spin_unlock_bh(&bp->ntp_fltr_lock);
5444 static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp,
5448 struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx];
5461 static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp,
5468 fltr = __bnxt_lookup_l2_filter(bp, key, idx);
5475 #define BNXT_IPV4_4TUPLE(bp, fkeys) \
5477 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) || \
5479 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4))
5481 #define BNXT_IPV6_4TUPLE(bp, fkeys) \
5483 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) || \
5485 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6))
5487 static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys)
5490 if (BNXT_IPV4_4TUPLE(bp, fkeys))
5494 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
5499 if (BNXT_IPV6_4TUPLE(bp, fkeys))
5503 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
5510 static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys,
5513 u64 prefix = bp->toeplitz_prefix, hash = 0;
5519 len = bnxt_get_rss_flow_tuple_len(bp, fkeys);
5550 bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key)
5555 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
5557 fltr = bnxt_lookup_l2_filter(bp, key, idx);
5562 static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
5573 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
5574 bp->max_fltr, 0);
5578 bp->ntp_fltr_count++;
5580 head = &bp->l2_fltr_hash_tbl[idx];
5582 bnxt_insert_usr_fltr(bp, &fltr->base);
5588 static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
5596 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
5598 fltr = bnxt_lookup_l2_filter(bp, key, idx);
5605 spin_lock_bh(&bp->ntp_fltr_lock);
5606 rc = bnxt_init_l2_filter(bp, fltr, key, idx);
5607 spin_unlock_bh(&bp->ntp_fltr_lock);
5609 bnxt_del_l2_filter(bp, fltr);
5615 struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
5623 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
5625 spin_lock_bh(&bp->ntp_fltr_lock);
5626 fltr = __bnxt_lookup_l2_filter(bp, key, idx);
5637 rc = bnxt_init_l2_filter(bp, fltr, key, idx);
5639 spin_unlock_bh(&bp->ntp_fltr_lock);
5640 bnxt_del_l2_filter(bp, fltr);
5645 spin_unlock_bh(&bp->ntp_fltr_lock);
5660 int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr)
5667 struct bnxt_pf_info *pf = &bp->pf;
5677 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
5683 return hwrm_req_send(bp, req);
5686 int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr)
5694 struct bnxt_pf_info *pf = &bp->pf;
5701 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
5708 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
5729 resp = hwrm_req_hold(bp, req);
5730 rc = hwrm_req_send(bp, req);
5735 hwrm_req_drop(bp, req);
5739 int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
5746 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
5751 return hwrm_req_send(bp, req);
5781 bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
5785 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
5789 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
5803 int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
5814 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
5824 } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
5825 bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr->base.rxq);
5827 vnic = &bp->vnic_info[fltr->base.rxq + 1];
5861 resp = hwrm_req_hold(bp, req);
5862 rc = hwrm_req_send(bp, req);
5865 hwrm_req_drop(bp, req);
5869 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
5878 fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL);
5882 fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id;
5883 rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
5885 bnxt_del_l2_filter(bp, fltr);
5887 bp->vnic_info[vnic_id].l2_filters[idx] = fltr;
5891 static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
5897 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5902 bnxt_hwrm_l2_filter_free(bp, fltr);
5903 bnxt_del_l2_filter(bp, fltr);
5914 static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
5919 if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA))
5922 if (bp->vxlan_port)
5924 if (bp->vxlan_gpe_port)
5926 if (bp->nge_port)
5933 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
5935 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5943 rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
5948 u16 mss = bp->dev->mtu - 40;
5979 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5981 max_aggs = bp->max_tpa;
5989 bnxt_hwrm_vnic_update_tunl_tpa(bp, req);
5993 return hwrm_req_send(bp, req);
5996 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
6000 grp_info = &bp->grp_info[ring->grp_idx];
6004 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
6006 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6009 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
6012 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
6014 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6017 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
6020 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
6024 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6029 bp->rss_indir_tbl_entries = entries;
6030 bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
6032 if (!bp->rss_indir_tbl)
6037 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
6041 if (!bp->rx_nr_rings)
6044 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6045 max_rings = bp->rx_nr_rings - 1;
6047 max_rings = bp->rx_nr_rings;
6049 max_entries = bnxt_get_rxfh_indir_size(bp->dev);
6052 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
6054 pad = bp->rss_indir_tbl_entries - max_entries;
6056 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
6059 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
6063 if (!bp->rss_indir_tbl)
6066 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6068 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
6072 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
6074 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6080 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6085 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6093 j = bp->rss_indir_tbl[i];
6098 static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
6105 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6111 j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
6113 j = bp->rss_indir_tbl[i];
6114 rxr = &bp->rx_ring[j];
6118 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6124 __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
6127 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6128 bnxt_fill_hw_rss_tbl_p5(bp, vnic);
6129 if (bp->flags & BNXT_FLAG_CHIP_P7)
6132 bnxt_fill_hw_rss_tbl(bp, vnic);
6135 if (bp->rss_hash_delta) {
6136 req->hash_type = cpu_to_le32(bp->rss_hash_delta);
6137 if (bp->rss_hash_cfg & bp->rss_hash_delta)
6142 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
6149 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
6151 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
6155 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ||
6159 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6164 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6166 return hwrm_req_send(bp, req);
6169 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
6171 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
6177 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6183 return hwrm_req_send(bp, req);
6185 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6187 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
6189 hwrm_req_hold(bp, req);
6194 rc = hwrm_req_send(bp, req);
6200 hwrm_req_drop(bp, req);
6204 static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
6206 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6210 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
6216 resp = hwrm_req_hold(bp, req);
6217 if (!hwrm_req_send(bp, req)) {
6218 bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg;
6219 bp->rss_hash_delta = 0;
6221 hwrm_req_drop(bp, req);
6224 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
6226 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
6230 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
6237 if (BNXT_RX_PAGE_MODE(bp)) {
6238 req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
6244 req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
6245 req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
6248 return hwrm_req_send(bp, req);
6251 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
6256 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
6260 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
6262 hwrm_req_send(bp, req);
6263 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
6266 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
6270 for (i = 0; i < bp->nr_vnics; i++) {
6271 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6275 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
6278 bp->rsscos_nr_ctxs = 0;
6281 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
6287 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
6291 resp = hwrm_req_hold(bp, req);
6292 rc = hwrm_req_send(bp, req);
6294 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
6296 hwrm_req_drop(bp, req);
6301 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
6303 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
6308 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
6310 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6311 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
6317 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
6321 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6322 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
6327 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
6348 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
6360 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
6361 ring = bp->rx_nr_rings - 1;
6363 grp_idx = bp->rx_ring[ring].bnapi->index;
6364 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
6367 req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
6371 if (BNXT_VF(bp))
6372 def_vlan = bp->vf.vlan;
6374 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
6376 if (!vnic_id && bnxt_ulp_registered(bp->edev))
6377 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
6379 return hwrm_req_send(bp, req);
6382 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
6384 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
6387 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
6391 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
6393 hwrm_req_send(bp, req);
6394 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
6398 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
6402 for (i = 0; i < bp->nr_vnics; i++)
6403 bnxt_hwrm_vnic_free_one(bp, i);
6406 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
6411 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
6416 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
6420 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6425 grp_idx = bp->rx_ring[i].bnapi->index;
6426 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
6427 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
6431 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
6440 resp = hwrm_req_hold(bp, req);
6441 rc = hwrm_req_send(bp, req);
6444 hwrm_req_drop(bp, req);
6448 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
6454 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
6455 bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP;
6456 bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP;
6457 if (bp->hwrm_spec_code < 0x10600)
6460 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
6464 resp = hwrm_req_hold(bp, req);
6465 rc = hwrm_req_send(bp, req);
6469 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
6471 bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP;
6474 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
6480 (BNXT_CHIP_P5(bp) &&
6481 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
6482 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
6484 bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA;
6486 bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM;
6487 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
6488 if (bp->max_tpa_v2) {
6489 if (BNXT_CHIP_P5(bp))
6490 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
6492 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7;
6495 bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
6497 bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP;
6499 bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP;
6501 bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
6503 bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
6505 hwrm_req_drop(bp, req);
6509 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
6516 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6519 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
6523 resp = hwrm_req_hold(bp, req);
6524 for (i = 0; i < bp->rx_nr_rings; i++) {
6525 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
6527 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
6528 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
6529 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
6530 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
6532 rc = hwrm_req_send(bp, req);
6537 bp->grp_info[grp_idx].fw_grp_id =
6540 hwrm_req_drop(bp, req);
6544 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
6549 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
6552 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
6555 hwrm_req_hold(bp, req);
6556 for (i = 0; i < bp->cp_nr_rings; i++) {
6557 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
6560 cpu_to_le32(bp->grp_info[i].fw_grp_id);
6562 hwrm_req_send(bp, req);
6563 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
6565 hwrm_req_drop(bp, req);
6568 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
6579 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
6604 grp_info = &bp->grp_info[ring->grp_idx];
6605 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
6606 req->length = cpu_to_le32(bp->tx_ring_mask + 1);
6609 if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
6616 req->length = cpu_to_le32(bp->rx_ring_mask + 1);
6617 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6621 grp_info = &bp->grp_info[ring->grp_idx];
6622 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
6632 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6635 grp_info = &bp->grp_info[ring->grp_idx];
6645 req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
6649 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
6650 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6652 grp_info = &bp->grp_info[map_index];
6657 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
6663 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
6664 if (bp->flags & BNXT_FLAG_USING_MSIX)
6668 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
6673 resp = hwrm_req_hold(bp, req);
6674 rc = hwrm_req_send(bp, req);
6677 hwrm_req_drop(bp, req);
6681 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
6689 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
6693 if (BNXT_PF(bp)) {
6696 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
6703 return hwrm_req_send(bp, req);
6707 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
6714 return hwrm_req_send(bp, req);
6718 static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db,
6723 db->db_ring_mask = bp->tx_ring_mask;
6726 db->db_ring_mask = bp->rx_ring_mask;
6729 db->db_ring_mask = bp->rx_agg_ring_mask;
6733 db->db_ring_mask = bp->cp_ring_mask;
6736 if (bp->flags & BNXT_FLAG_CHIP_P7) {
6742 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
6745 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6763 if (bp->flags & BNXT_FLAG_CHIP_P7)
6766 db->doorbell = bp->bar1 + bp->db_offset;
6768 db->doorbell = bp->bar1 + map_idx * 0x80;
6782 bnxt_set_db_mask(bp, db, ring_type);
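
bnxt_set_db_mask() above selects the doorbell index mask from the ring type. A small sketch of that selection, assuming power-of-two ring sizes so that size minus one is the index mask; the ring-type names and mask values are illustrative only.

    /* Sketch of selecting a doorbell index mask by ring type, as in
     * bnxt_set_db_mask().  Constants and mask values are examples. */
    #include <stdio.h>

    enum ring_type { RING_TX, RING_RX, RING_RX_AGG, RING_CP };

    struct dbinfo { unsigned int db_ring_mask; };

    static void set_db_mask(struct dbinfo *db, enum ring_type type,
                            unsigned int tx_mask, unsigned int rx_mask,
                            unsigned int agg_mask, unsigned int cp_mask)
    {
        switch (type) {
        case RING_TX:     db->db_ring_mask = tx_mask;  break;
        case RING_RX:     db->db_ring_mask = rx_mask;  break;
        case RING_RX_AGG: db->db_ring_mask = agg_mask; break;
        case RING_CP:     db->db_ring_mask = cp_mask;  break;
        }
    }

    int main(void)
    {
        struct dbinfo db;

        /* ring sizes are powers of two, so size - 1 is the index mask */
        set_db_mask(&db, RING_RX, 511, 1023, 2047, 255);
        printf("rx doorbell mask: %#x\n", db.db_ring_mask);
        return 0;
    }
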
6785 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
6787 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
6791 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6795 for (i = 0; i < bp->cp_nr_rings; i++) {
6796 struct bnxt_napi *bnapi = bp->bnapi[i];
6802 vector = bp->irq_tbl[map_idx].vector;
6804 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6809 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
6810 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
6812 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
6815 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
6817 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
6822 for (i = 0; i < bp->tx_nr_rings; i++) {
6823 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
6827 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6835 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
6838 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
6840 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
6844 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6847 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
6851 for (i = 0; i < bp->rx_nr_rings; i++) {
6852 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6857 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6860 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
6863 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
6864 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
6865 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6871 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
6874 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
6876 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
6882 for (i = 0; i < bp->rx_nr_rings; i++) {
6883 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6887 u32 map_idx = grp_idx + bp->rx_nr_rings;
6889 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6893 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
6895 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
6896 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
6897 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
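
bnxt_hwrm_ring_alloc() above walks the rings in a fixed order: NQ/completion rings first (ring 0 also becomes the async event ring), then TX, then RX, then RX aggregation rings when aggregation is enabled. A sketch of that ordering with stub allocators standing in for HWRM_RING_ALLOC.

    /* Order-of-operations sketch for bnxt_hwrm_ring_alloc(): NQ/CP rings,
     * then TX, then RX, then RX-agg (only when aggregation is enabled).
     * The alloc_ring() helper is a placeholder for HWRM_RING_ALLOC. */
    #include <stdbool.h>
    #include <stdio.h>

    static int alloc_ring(const char *kind, int idx)
    {
        printf("alloc %s ring %d\n", kind, idx);
        return 0;   /* pretend firmware returned a ring id */
    }

    int main(void)
    {
        int cp_nr = 2, tx_nr = 2, rx_nr = 2;
        bool agg_rings = true;
        int i;

        for (i = 0; i < cp_nr; i++)
            alloc_ring("nq/cp", i); /* ring 0 also gets async events */
        for (i = 0; i < tx_nr; i++)
            alloc_ring("tx", i);
        for (i = 0; i < rx_nr; i++)
            alloc_ring("rx", i);
        if (agg_rings)
            for (i = 0; i < rx_nr; i++)
                alloc_ring("rx-agg", i);
        return 0;
    }
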
6904 static int hwrm_ring_free_send_msg(struct bnxt *bp,
6913 if (BNXT_NO_FW_ACCESS(bp))
6916 rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
6924 resp = hwrm_req_hold(bp, req);
6925 rc = hwrm_req_send(bp, req);
6927 hwrm_req_drop(bp, req);
6930 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
6937 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
6942 if (!bp->bnapi)
6945 for (i = 0; i < bp->tx_nr_rings; i++) {
6946 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
6950 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
6952 hwrm_ring_free_send_msg(bp, ring,
6960 for (i = 0; i < bp->rx_nr_rings; i++) {
6961 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6966 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6968 hwrm_ring_free_send_msg(bp, ring,
6973 bp->grp_info[grp_idx].rx_fw_ring_id =
6978 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6982 for (i = 0; i < bp->rx_nr_rings; i++) {
6983 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6988 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6990 hwrm_ring_free_send_msg(bp, ring, type,
6994 bp->grp_info[grp_idx].agg_fw_ring_id =
7003 bnxt_disable_int_sync(bp);
7005 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7009 for (i = 0; i < bp->cp_nr_rings; i++) {
7010 struct bnxt_napi *bnapi = bp->bnapi[i];
7021 hwrm_ring_free_send_msg(bp, ring,
7028 hwrm_ring_free_send_msg(bp, ring, type,
7031 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
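
The teardown above repeatedly frees a firmware ring id and immediately overwrites it with INVALID_HW_RING_ID so a later pass skips it. A minimal sketch of that free-then-invalidate idiom; the helper stands in for HWRM_RING_FREE.

    /* Sketch of the free-then-invalidate idiom in bnxt_hwrm_ring_free():
     * each firmware ring id is freed at most once and then overwritten
     * with an INVALID marker.  Constants and helpers are illustrative. */
    #include <stdio.h>

    #define INVALID_HW_RING_ID ((unsigned short)-1)

    static void hw_ring_free(unsigned short id)
    {
        /* stands in for an HWRM_RING_FREE request */
        printf("free fw ring %u\n", (unsigned int)id);
    }

    static void free_one(unsigned short *fw_ring_id)
    {
        if (*fw_ring_id == INVALID_HW_RING_ID)
            return;                 /* already freed or never allocated */
        hw_ring_free(*fw_ring_id);
        *fw_ring_id = INVALID_HW_RING_ID;
    }

    int main(void)
    {
        unsigned short rings[3] = { 5, INVALID_HW_RING_ID, 9 };

        for (int i = 0; i < 3; i++)
            free_one(&rings[i]);
        return 0;
    }
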
7036 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7038 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7041 static int bnxt_hwrm_get_rings(struct bnxt *bp)
7043 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7048 if (bp->hwrm_spec_code < 0x10601)
7051 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7056 resp = hwrm_req_hold(bp, req);
7057 rc = hwrm_req_send(bp, req);
7059 hwrm_req_drop(bp, req);
7064 if (BNXT_NEW_RM(bp)) {
7075 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7079 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7082 rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
7085 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7097 hwrm_req_drop(bp, req);
7101 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
7107 if (bp->hwrm_spec_code < 0x10601)
7110 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7115 resp = hwrm_req_hold(bp, req);
7116 rc = hwrm_req_send(bp, req);
7120 hwrm_req_drop(bp, req);
7124 static bool bnxt_rfs_supported(struct bnxt *bp);
7127 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7132 if (bnxt_hwrm_func_cfg_short_req_init(bp, &req))
7138 if (BNXT_NEW_RM(bp)) {
7141 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7156 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7171 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7176 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
7184 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7199 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7213 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7218 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
7223 hwrm_req_drop(bp, req);
7227 rc = hwrm_req_send(bp, req);
7231 if (bp->hwrm_spec_code < 0x10601)
7232 bp->hw_resc.resv_tx_rings = hwr->tx;
7234 return bnxt_hwrm_get_rings(bp);
7238 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7243 if (!BNXT_NEW_RM(bp)) {
7244 bp->hw_resc.resv_tx_rings = hwr->tx;
7248 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
7252 rc = hwrm_req_send(bp, req);
7256 return bnxt_hwrm_get_rings(bp);
7259 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7261 if (BNXT_PF(bp))
7262 return bnxt_hwrm_reserve_pf_rings(bp, hwr);
7264 return bnxt_hwrm_reserve_vf_rings(bp, hwr);
7267 int bnxt_nq_rings_in_use(struct bnxt *bp)
7269 int cp = bp->cp_nr_rings;
7272 ulp_msix = bnxt_get_ulp_msix_num(bp);
7274 ulp_base = bnxt_get_ulp_msix_base(bp);
7282 static int bnxt_cp_rings_in_use(struct bnxt *bp)
7286 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7287 return bnxt_nq_rings_in_use(bp);
7289 cp = bp->tx_nr_rings + bp->rx_nr_rings;
7293 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
7295 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
7296 int cp = bp->cp_nr_rings;
7301 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
7302 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
7307 static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7311 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7312 int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp);
7314 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7318 if (BNXT_VF(bp))
7320 if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp))
7328 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
7330 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7333 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
7334 hw_resc->resv_rx_rings = bp->rx_nr_rings;
7335 if (!netif_is_rxfh_configured(bp->dev))
7336 bnxt_set_dflt_rss_indir_tbl(bp);
7340 static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
7342 if (bp->flags & BNXT_FLAG_RFS) {
7343 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7345 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7351 static bool bnxt_need_reserve_rings(struct bnxt *bp)
7353 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7354 int cp = bnxt_cp_rings_in_use(bp);
7355 int nq = bnxt_nq_rings_in_use(bp);
7356 int rx = bp->rx_nr_rings, stat;
7359 if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
7360 bp->hwrm_spec_code >= 0x10601)
7368 if (!BNXT_NEW_RM(bp)) {
7369 bnxt_check_rss_tbl_no_rmgr(bp);
7373 vnic = bnxt_get_total_vnics(bp, rx);
7375 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7377 stat = bnxt_get_func_stat_ctxs(bp);
7381 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)))
7383 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) &&
7389 static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7391 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7394 if (BNXT_NEW_RM(bp)) {
7397 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7406 static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7409 hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS));
7412 static int __bnxt_reserve_rings(struct bnxt *bp)
7419 if (!bnxt_need_reserve_rings(bp))
7422 hwr.cp = bnxt_nq_rings_in_use(bp);
7423 hwr.tx = bp->tx_nr_rings;
7424 hwr.rx = bp->rx_nr_rings;
7425 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7427 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7430 hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx);
7432 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7434 hwr.grp = bp->rx_nr_rings;
7435 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
7436 hwr.stat = bnxt_get_func_stat_ctxs(bp);
7438 rc = bnxt_hwrm_reserve_rings(bp, &hwr);
7442 bnxt_copy_reserved_rings(bp, &hwr);
7445 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7449 if (netif_running(bp->dev))
7452 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
7453 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
7454 bp->dev->hw_features &= ~NETIF_F_LRO;
7455 bp->dev->features &= ~NETIF_F_LRO;
7456 bnxt_set_ring_params(bp);
7460 hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
7461 if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
7462 hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
7464 rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
7465 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7467 tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
7469 bp->tx_nr_rings = hwr.tx;
7474 if (rx_rings != bp->rx_nr_rings) {
7475 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
7476 rx_rings, bp->rx_nr_rings);
7477 if (netif_is_rxfh_configured(bp->dev) &&
7478 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
7479 bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
7480 bnxt_get_max_rss_ring(bp) >= rx_rings)) {
7481 netdev_warn(bp->dev, "RSS table entries reverting to default\n");
7482 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
7485 bp->rx_nr_rings = rx_rings;
7486 bp->cp_nr_rings = hwr.cp;
7488 if (!bnxt_rings_ok(bp, &hwr))
7491 if (!netif_is_rxfh_configured(bp->dev))
7492 bnxt_set_dflt_rss_indir_tbl(bp);
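
__bnxt_reserve_rings() above adapts to whatever firmware actually granted, and when the granted RX count falls short a user-configured RSS indirection table may revert to the default. The sketch below assumes the aggregation-ring accounting doubles the RX request and halves the granted count; that arithmetic is not visible in these matches and is stated here as an assumption.

    /* Sketch (assumed) of the aggregation-ring accounting around
     * __bnxt_reserve_rings(): with agg rings enabled the driver requests
     * 2 * rx rings and converts the granted count back by halving. */
    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        bool agg_rings = true;
        int rx_wanted = 8;
        int rx_req = agg_rings ? rx_wanted * 2 : rx_wanted;
        int rx_granted = 12;        /* pretend firmware gave us fewer */
        int rx_rings = agg_rings ? rx_granted / 2 : rx_granted;

        printf("requested %d, granted %d -> %d usable rx rings\n",
               rx_req, rx_granted, rx_rings);
        if (rx_rings < rx_wanted)
            printf("RSS indirection table may revert to default\n");
        return 0;
    }
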
7497 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7502 if (!BNXT_NEW_RM(bp))
7505 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
7512 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7516 return hwrm_req_send_silent(bp, req);
7519 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7524 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
7526 if (BNXT_NEW_RM(bp)) {
7531 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7539 return hwrm_req_send_silent(bp, req);
7542 static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7544 if (bp->hwrm_spec_code < 0x10801)
7547 if (BNXT_PF(bp))
7548 return bnxt_hwrm_check_pf_rings(bp, hwr);
7550 return bnxt_hwrm_check_vf_rings(bp, hwr);
7553 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
7555 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
7570 if (bp->hwrm_spec_code < 0x10902)
7573 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
7576 resp = hwrm_req_hold(bp, req);
7577 rc = hwrm_req_send_silent(bp, req);
7597 hwrm_req_drop(bp, req);
7600 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
7602 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
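
bnxt_usec_to_coal_tmr() converts a coalescing time in microseconds into device timer ticks using the granularity reported by HWRM_RING_AGGINT_QCAPS. A hedged sketch, assuming the reported timer_units value is nanoseconds per tick; the exact conversion is not shown in these matches.

    /* Hedged sketch of a usec -> coalescing-timer-tick conversion in the
     * spirit of bnxt_usec_to_coal_tmr().  The timer_units meaning (ns per
     * tick) and the example value are assumptions. */
    #include <stdio.h>

    struct coal_cap { unsigned int timer_units; /* ns per tick (assumed) */ };

    static unsigned short usec_to_coal_tmr(const struct coal_cap *cap,
                                           unsigned short usec)
    {
        return (unsigned short)(usec * 1000u / cap->timer_units);
    }

    int main(void)
    {
        struct coal_cap cap = { .timer_units = 80 };    /* example value */

        printf("25 usec -> %u ticks\n",
               (unsigned int)usec_to_coal_tmr(&cap, 25));
        return 0;
    }
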
7607 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
7611 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
7630 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
7648 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
7663 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
7668 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
7676 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
7684 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
7688 return hwrm_req_send(bp, req);
7691 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
7701 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
7709 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
7713 bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
7715 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
7717 return hwrm_req_send(bp, req_rx);
7721 bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
7724 u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
7727 return hwrm_req_send(bp, req);
7731 bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
7740 ring_id = bnxt_cp_ring_for_tx(bp, txr);
7742 rc = hwrm_req_send(bp, req);
7745 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7751 int bnxt_hwrm_set_coal(struct bnxt *bp)
7756 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
7760 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
7762 hwrm_req_drop(bp, req_rx);
7766 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
7767 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
7769 hwrm_req_hold(bp, req_rx);
7770 hwrm_req_hold(bp, req_tx);
7771 for (i = 0; i < bp->cp_nr_rings; i++) {
7772 struct bnxt_napi *bnapi = bp->bnapi[i];
7776 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
7778 rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx);
7782 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7786 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
7791 hw_coal = &bp->rx_coal;
7793 hw_coal = &bp->tx_coal;
7794 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
7796 hwrm_req_drop(bp, req_rx);
7797 hwrm_req_drop(bp, req_tx);
7801 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
7807 if (!bp->bnapi)
7810 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
7813 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
7815 if (BNXT_FW_MAJ(bp) <= 20) {
7816 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
7817 hwrm_req_drop(bp, req);
7820 hwrm_req_hold(bp, req0);
7822 hwrm_req_hold(bp, req);
7823 for (i = 0; i < bp->cp_nr_rings; i++) {
7824 struct bnxt_napi *bnapi = bp->bnapi[i];
7831 hwrm_req_send(bp, req0);
7833 hwrm_req_send(bp, req);
7838 hwrm_req_drop(bp, req);
7840 hwrm_req_drop(bp, req0);
7843 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
7849 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
7852 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
7856 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
7857 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
7859 resp = hwrm_req_hold(bp, req);
7860 for (i = 0; i < bp->cp_nr_rings; i++) {
7861 struct bnxt_napi *bnapi = bp->bnapi[i];
7866 rc = hwrm_req_send(bp, req);
7872 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
7874 hwrm_req_drop(bp, req);
7878 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
7885 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7890 resp = hwrm_req_hold(bp, req);
7891 rc = hwrm_req_send(bp, req);
7896 if (BNXT_VF(bp)) {
7897 struct bnxt_vf_info *vf = &bp->vf;
7901 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
7907 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
7909 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
7911 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
7912 bp->flags |= BNXT_FLAG_MULTI_HOST;
7915 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
7921 bp->port_partition_type = resp->port_partition_type;
7924 if (bp->hwrm_spec_code < 0x10707 ||
7926 bp->br_mode = BRIDGE_MODE_VEB;
7928 bp->br_mode = BRIDGE_MODE_VEPA;
7930 bp->br_mode = BRIDGE_MODE_UNDEF;
7932 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
7933 if (!bp->max_mtu)
7934 bp->max_mtu = BNXT_MAX_MTU;
7936 if (bp->db_size)
7939 bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
7940 if (BNXT_CHIP_P5(bp)) {
7941 if (BNXT_PF(bp))
7942 bp->db_offset = DB_PF_OFFSET_P5;
7944 bp->db_offset = DB_VF_OFFSET_P5;
7946 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
7948 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
7949 bp->db_size <= bp->db_offset)
7950 bp->db_size = pci_resource_len(bp->pdev, 2);
7953 hwrm_req_drop(bp, req);
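
The tail of bnxt_hwrm_func_qcfg() above sizes the doorbell region: the firmware-reported size in KiB is page-aligned and then clamped to the PCI BAR length when it is zero, larger than the BAR, or not beyond the legacy L2 offset. A standalone sketch of that clamping with example numbers.

    /* Sketch of the doorbell BAR sizing/clamping in bnxt_hwrm_func_qcfg().
     * The offset, firmware value and BAR length below are examples. */
    #include <stdio.h>

    #define PAGE_SIZE   4096u
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long db_offset = 0x10000;      /* example legacy L2 region */
        unsigned long fw_kb = 68;               /* example firmware value */
        unsigned long bar_len = 1UL << 20;      /* example BAR2 length */
        unsigned long db_size = PAGE_ALIGN(fw_kb * 1024u);

        if (!db_size || db_size > bar_len || db_size <= db_offset)
            db_size = bar_len;                  /* fall back to the full BAR */
        printf("doorbell region: %lu bytes\n", db_size);
        return 0;
    }
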
7969 static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
7971 struct bnxt_ctx_mem_info *ctx = bp->ctx;
7994 static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
8002 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
8009 bp->ctx = ctx;
8011 resp = hwrm_req_hold(bp, req);
8020 rc = hwrm_req_send(bp, req);
8045 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX);
8048 hwrm_req_drop(bp, req);
8052 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
8058 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
8061 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
8062 return bnxt_hwrm_func_backing_store_qcaps_v2(bp);
8064 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
8068 resp = hwrm_req_hold(bp, req);
8069 rc = hwrm_req_send_silent(bp, req);
8076 ctx = bp->ctx;
8083 bp->ctx = ctx;
8152 ctx->tqm_fp_rings_count = bp->max_q;
8160 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX);
8165 hwrm_req_drop(bp, req);
8194 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
8197 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8213 if (req_len > bp->hwrm_max_ext_req_len)
8215 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
8321 return hwrm_req_send(bp, req);
8324 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
8335 return bnxt_alloc_ring(bp, rmem);
8338 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
8363 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
8385 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
8394 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
8399 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
8416 bnxt_free_ring(bp, rmem2);
8424 bnxt_free_ring(bp, rmem);
8428 static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp,
8446 rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
8452 static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
8469 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
8472 hwrm_req_hold(bp, req);
8494 rc = hwrm_req_send(bp, req);
8496 hwrm_req_drop(bp, req);
8500 static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena)
8502 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8519 rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
8526 void bnxt_free_ctx_mem(struct bnxt *bp)
8528 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8544 bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]);
8552 bp->ctx = NULL;
8555 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
8569 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
8571 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
8575 ctx = bp->ctx;
8588 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
8599 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps,
8605 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl);
8610 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries +
8616 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
8621 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
8625 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
8638 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
8644 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1);
8654 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2);
8660 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2);
8667 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
8668 rc = bnxt_backing_store_cfg_v2(bp, ena);
8670 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
8672 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
8680 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
8684 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8687 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
8692 resp = hwrm_req_hold(bp, req);
8693 rc = hwrm_req_send_silent(bp, req);
8718 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
8725 if (BNXT_PF(bp)) {
8726 struct bnxt_pf_info *pf = &bp->pf;
8734 hwrm_req_drop(bp, req);
8738 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
8742 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
8747 if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5(bp)) {
8752 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
8756 req->port_id = cpu_to_le16(bp->pf.port_id);
8757 resp = hwrm_req_hold(bp, req);
8758 rc = hwrm_req_send(bp, req);
8773 ptp->bp = bp;
8774 bp->ptp_cfg = ptp;
8779 } else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
8787 rc = bnxt_ptp_init(bp, phc_cfg);
8789 netdev_warn(bp->dev, "PTP initialization failed.\n");
8791 hwrm_req_drop(bp, req);
8796 bnxt_ptp_clear(bp);
8798 bp->ptp_cfg = NULL;
8802 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
8806 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8810 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
8815 resp = hwrm_req_hold(bp, req);
8816 rc = hwrm_req_send(bp, req);
8822 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
8824 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
8826 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
8828 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
8830 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
8832 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
8834 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
8836 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
8838 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
8842 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
8843 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
8844 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
8846 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
8847 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
8848 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
8849 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
8850 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
8852 bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
8854 bp->flags |= BNXT_FLAG_TX_COAL_CMPL;
8858 bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
8860 bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
8862 bp->tx_push_thresh = 0;
8864 BNXT_FW_MAJ(bp) > 217)
8865 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
8885 if (BNXT_PF(bp)) {
8886 struct bnxt_pf_info *pf = &bp->pf;
8893 bp->flags &= ~BNXT_FLAG_WOL_CAP;
8895 bp->flags |= BNXT_FLAG_WOL_CAP;
8897 bp->fw_cap |= BNXT_FW_CAP_PTP;
8899 bnxt_ptp_clear(bp);
8900 kfree(bp->ptp_cfg);
8901 bp->ptp_cfg = NULL;
8905 struct bnxt_vf_info *vf = &bp->vf;
8913 hwrm_req_drop(bp, req);
8917 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
8923 bp->fw_dbg_cap = 0;
8924 if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
8927 rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
8932 resp = hwrm_req_hold(bp, req);
8933 rc = hwrm_req_send(bp, req);
8937 bp->fw_dbg_cap = le32_to_cpu(resp->flags);
8940 hwrm_req_drop(bp, req);
8943 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
8945 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
8949 rc = __bnxt_hwrm_func_qcaps(bp);
8953 bnxt_hwrm_dbg_qcaps(bp);
8955 rc = bnxt_hwrm_queue_qportcfg(bp);
8957 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
8960 if (bp->hwrm_spec_code >= 0x10803) {
8961 rc = bnxt_alloc_ctx_mem(bp);
8964 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
8966 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
8971 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
8978 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
8981 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
8985 resp = hwrm_req_hold(bp, req);
8986 rc = hwrm_req_send(bp, req);
8993 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
8997 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3;
9001 bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO;
9004 hwrm_req_drop(bp, req);
9008 static int __bnxt_alloc_fw_health(struct bnxt *bp)
9010 if (bp->fw_health)
9013 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
9014 if (!bp->fw_health)
9017 mutex_init(&bp->fw_health->lock);
9021 static int bnxt_alloc_fw_health(struct bnxt *bp)
9025 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
9026 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
9029 rc = __bnxt_alloc_fw_health(bp);
9031 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
9032 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
9039 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
9041 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
9046 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
9048 struct bnxt_fw_health *fw_health = bp->fw_health;
9063 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
9070 if (bp->fw_health)
9071 bp->fw_health->status_reliable = false;
9073 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
9074 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
9078 if (!bp->chip_num) {
9079 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
9080 bp->chip_num = readl(bp->bar0 +
9084 if (!BNXT_CHIP_P5_PLUS(bp))
9094 if (__bnxt_alloc_fw_health(bp)) {
9095 netdev_warn(bp->dev, "no memory for firmware status checks\n");
9099 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
9102 __bnxt_map_fw_health_reg(bp, status_loc);
9103 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
9107 bp->fw_health->status_reliable = true;
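
__bnxt_map_fw_health_reg() and bnxt_try_map_fw_health_reg() above use a windowed GRC access: the high bits of the register address are programmed as the window base, and the low bits become the offset inside the mapped BAR0 window. A sketch of that address split, assuming a 4 KiB window granularity (the real mask is defined elsewhere in the driver).

    /* Sketch of the windowed GRC register access behind the firmware
     * health mapping.  The 4 KiB granularity and addresses are assumed
     * example values, not the driver's real constants. */
    #include <stdio.h>

    #define GRC_BASE_MASK  0xfffff000u  /* assumed window granularity: 4 KiB */
    #define WIN_OFF(reg)   ((reg) & ~GRC_BASE_MASK)

    int main(void)
    {
        unsigned int status_reg = 0x0012a4c8;   /* example GRC address */
        unsigned int win_base = status_reg & GRC_BASE_MASK;
        unsigned int bar_off  = WIN_OFF(status_reg);

        printf("program window base %#x, then read BAR0 window + %#x\n",
               win_base, bar_off);
        return 0;
    }
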
9110 static int bnxt_map_fw_health_regs(struct bnxt *bp)
9112 struct bnxt_fw_health *fw_health = bp->fw_health;
9116 bp->fw_health->status_reliable = false;
9117 bp->fw_health->resets_reliable = false;
9130 bp->fw_health->status_reliable = true;
9131 bp->fw_health->resets_reliable = true;
9135 __bnxt_map_fw_health_reg(bp, reg_base);
9139 static void bnxt_remap_fw_health_regs(struct bnxt *bp)
9141 if (!bp->fw_health)
9144 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
9145 bp->fw_health->status_reliable = true;
9146 bp->fw_health->resets_reliable = true;
9148 bnxt_try_map_fw_health_reg(bp);
9152 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
9154 struct bnxt_fw_health *fw_health = bp->fw_health;
9159 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
9162 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
9166 resp = hwrm_req_hold(bp, req);
9167 rc = hwrm_req_send(bp, req);
9172 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
9209 hwrm_req_drop(bp, req);
9211 rc = bnxt_map_fw_health_regs(bp);
9213 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
9217 static int bnxt_hwrm_func_reset(struct bnxt *bp)
9222 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
9227 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
9228 return hwrm_req_send(bp, req);
9231 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
9235 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
9236 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
9241 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
9249 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
9253 resp = hwrm_req_hold(bp, req);
9254 rc = hwrm_req_send(bp, req);
9262 bp->max_tc = resp->max_configurable_queues;
9263 bp->max_lltc = resp->max_configurable_lossless_queues;
9264 if (bp->max_tc > BNXT_MAX_QUEUE)
9265 bp->max_tc = BNXT_MAX_QUEUE;
9267 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
9269 for (i = 0, j = 0; i < bp->max_tc; i++) {
9270 bp->q_info[j].queue_id = *qptr;
9271 bp->q_ids[i] = *qptr++;
9272 bp->q_info[j].queue_profile = *qptr++;
9273 bp->tc_to_qidx[j] = j;
9274 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
9275 (no_rdma && BNXT_PF(bp)))
9278 bp->max_q = bp->max_tc;
9279 bp->max_tc = max_t(u8, j, 1);
9282 bp->max_tc = 1;
9284 if (bp->max_lltc > bp->max_tc)
9285 bp->max_lltc = bp->max_tc;
9288 hwrm_req_drop(bp, req);
9292 static int bnxt_hwrm_poll(struct bnxt *bp)
9297 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
9305 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
9306 rc = hwrm_req_send(bp, req);
9310 static int bnxt_hwrm_ver_get(struct bnxt *bp)
9318 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
9322 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
9323 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
9328 resp = hwrm_req_hold(bp, req);
9329 rc = hwrm_req_send(bp, req);
9333 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
9335 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
9339 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
9342 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
9348 if (bp->hwrm_spec_code > hwrm_ver)
9349 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
9353 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
9358 if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
9370 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
9371 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
9375 int fw_ver_len = strlen(bp->fw_ver_str);
9377 snprintf(bp->fw_ver_str + fw_ver_len,
9380 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
9383 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
9384 if (!bp->hwrm_cmd_timeout)
9385 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
9386 bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
9387 if (!bp->hwrm_cmd_max_timeout)
9388 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
9389 else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT)
9390 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n",
9391 bp->hwrm_cmd_max_timeout / 1000);
9394 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
9395 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
9397 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
9398 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
9400 bp->chip_num = le16_to_cpu(resp->chip_num);
9401 bp->chip_rev = resp->chip_rev;
9402 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
9404 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
9409 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
9412 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
9416 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
9420 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
9424 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
9427 hwrm_req_drop(bp, req);
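
bnxt_hwrm_ver_get() above packs the interface version as major << 16 | minor << 8 | update, which is why gates elsewhere in this listing compare hwrm_spec_code against literals such as 0x10601 (interface 1.6.1). A small sketch of the packing and comparison.

    /* Sketch of the HWRM spec-version packing used by checks such as
     * "hwrm_spec_code < 0x10601". */
    #include <stdio.h>

    static unsigned int spec_code(unsigned int maj, unsigned int min,
                                  unsigned int upd)
    {
        return maj << 16 | min << 8 | upd;
    }

    int main(void)
    {
        unsigned int fw = spec_code(1, 10, 2);  /* example: interface 1.10.2 */

        if (fw < spec_code(1, 6, 1))
            printf("feature gated on HWRM 1.6.1 is unavailable\n");
        else
            printf("HWRM %#x supports the 1.6.1 feature\n", fw);
        return 0;
    }
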
9431 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
9438 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
9439 bp->hwrm_spec_code < 0x10400)
9443 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
9453 return hwrm_req_send(bp, req);
9494 static void bnxt_accumulate_all_stats(struct bnxt *bp)
9501 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
9504 for (i = 0; i < bp->cp_nr_rings; i++) {
9505 struct bnxt_napi *bnapi = bp->bnapi[i];
9517 if (bp->flags & BNXT_FLAG_PORT_STATS) {
9518 struct bnxt_stats_mem *stats = &bp->port_stats;
9533 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
9534 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
9535 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
9539 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
9542 struct bnxt_pf_info *pf = &bp->pf;
9545 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
9548 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
9551 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
9557 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
9559 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
9560 return hwrm_req_send(bp, req);
9563 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
9569 struct bnxt_pf_info *pf = &bp->pf;
9573 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
9576 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
9579 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
9586 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
9587 tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
9590 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
9591 resp_qs = hwrm_req_hold(bp, req_qs);
9592 rc = hwrm_req_send(bp, req_qs);
9594 bp->fw_rx_stats_ext_size =
9596 if (BNXT_FW_MAJ(bp) < 220 &&
9597 bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
9598 bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
9600 bp->fw_tx_stats_ext_size = tx_stat_size ?
9603 bp->fw_rx_stats_ext_size = 0;
9604 bp->fw_tx_stats_ext_size = 0;
9606 hwrm_req_drop(bp, req_qs);
9611 if (bp->fw_tx_stats_ext_size <=
9613 bp->pri2cos_valid = 0;
9617 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
9623 resp_qc = hwrm_req_hold(bp, req_qc);
9624 rc = hwrm_req_send(bp, req_qc);
9637 bp->pri2cos_valid = false;
9638 hwrm_req_drop(bp, req_qc);
9641 for (j = 0; j < bp->max_q; j++) {
9642 if (bp->q_ids[j] == queue_id)
9643 bp->pri2cos_idx[i] = queue_idx;
9646 bp->pri2cos_valid = true;
9648 hwrm_req_drop(bp, req_qc);
9653 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
9655 bnxt_hwrm_tunnel_dst_port_free(bp,
9657 bnxt_hwrm_tunnel_dst_port_free(bp,
9661 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
9667 tpa_flags = bp->flags & BNXT_FLAG_TPA;
9668 else if (BNXT_NO_FW_ACCESS(bp))
9670 for (i = 0; i < bp->nr_vnics; i++) {
9671 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
9673 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
9681 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
9685 for (i = 0; i < bp->nr_vnics; i++)
9686 bnxt_hwrm_vnic_set_rss(bp, i, false);
9689 static void bnxt_clear_vnic(struct bnxt *bp)
9691 if (!bp->vnic_info)
9694 bnxt_hwrm_clear_vnic_filter(bp);
9695 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) {
9697 bnxt_hwrm_clear_vnic_rss(bp);
9698 bnxt_hwrm_vnic_ctx_free(bp);
9701 if (bp->flags & BNXT_FLAG_TPA)
9702 bnxt_set_tpa(bp, false);
9703 bnxt_hwrm_vnic_free(bp);
9704 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
9705 bnxt_hwrm_vnic_ctx_free(bp);
9708 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
9711 bnxt_clear_vnic(bp);
9712 bnxt_hwrm_ring_free(bp, close_path);
9713 bnxt_hwrm_ring_grp_free(bp);
9715 bnxt_hwrm_stat_ctx_free(bp);
9716 bnxt_hwrm_free_tunnel_ports(bp);
9720 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
9733 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
9740 return hwrm_req_send(bp, req);
9743 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
9748 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
9751 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
9761 return hwrm_req_send(bp, req);
9764 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
9766 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
9773 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
9775 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
9779 bp->rsscos_nr_ctxs++;
9781 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
9782 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
9784 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
9788 bp->rsscos_nr_ctxs++;
9793 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
9795 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
9801 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
9803 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
9808 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
9809 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
9811 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
9820 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
9824 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
9826 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
9828 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
9832 bp->rsscos_nr_ctxs++;
9837 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
9839 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
9843 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
9845 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
9849 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
9850 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
9852 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
9859 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
9861 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
9862 return __bnxt_setup_vnic_p5(bp, vnic_id);
9864 return __bnxt_setup_vnic(bp, vnic_id);
9867 static int bnxt_alloc_and_setup_vnic(struct bnxt *bp, u16 vnic_id,
9872 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, start_rx_ring_idx, rx_rings);
9874 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
9878 return bnxt_setup_vnic(bp, vnic_id);
9881 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
9885 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
9886 return bnxt_alloc_and_setup_vnic(bp, BNXT_VNIC_NTUPLE, 0,
9887 bp->rx_nr_rings);
9889 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
9892 for (i = 0; i < bp->rx_nr_rings; i++) {
9897 if (vnic_id >= bp->nr_vnics)
9900 vnic = &bp->vnic_info[vnic_id];
9902 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
9904 if (bnxt_alloc_and_setup_vnic(bp, vnic_id, ring_id, 1))
9911 static bool bnxt_promisc_ok(struct bnxt *bp)
9914 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
9920 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
9924 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
9926 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
9931 rc = bnxt_hwrm_vnic_cfg(bp, 1);
9933 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
9943 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
9945 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
9947 unsigned int rx_nr_rings = bp->rx_nr_rings;
9950 rc = bnxt_hwrm_stat_ctx_alloc(bp);
9952 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
9958 rc = bnxt_hwrm_ring_alloc(bp);
9960 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
9964 rc = bnxt_hwrm_ring_grp_alloc(bp);
9966 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
9970 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
9974 rc = bnxt_hwrm_vnic_alloc(bp, BNXT_VNIC_DEFAULT, 0, rx_nr_rings);
9976 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
9980 if (BNXT_VF(bp))
9981 bnxt_hwrm_func_qcfg(bp);
9983 rc = bnxt_setup_vnic(bp, BNXT_VNIC_DEFAULT);
9986 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
9987 bnxt_hwrm_update_rss_hash_cfg(bp);
9989 if (bp->flags & BNXT_FLAG_RFS) {
9990 rc = bnxt_alloc_rfs_vnics(bp);
9995 if (bp->flags & BNXT_FLAG_TPA) {
9996 rc = bnxt_set_tpa(bp, true);
10001 if (BNXT_VF(bp))
10002 bnxt_update_vf_mac(bp);
10005 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
10007 if (BNXT_VF(bp) && rc == -ENODEV)
10008 netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
10010 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
10016 if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
10019 if (bp->dev->flags & IFF_BROADCAST)
10022 if (bp->dev->flags & IFF_PROMISC)
10025 if (bp->dev->flags & IFF_ALLMULTI) {
10028 } else if (bp->dev->flags & IFF_MULTICAST) {
10031 bnxt_mc_list_updated(bp, &mask);
10035 rc = bnxt_cfg_rx_mode(bp);
10040 rc = bnxt_hwrm_set_coal(bp);
10042 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
10045 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10046 rc = bnxt_setup_nitroa0_vnic(bp);
10048 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
10052 if (BNXT_VF(bp)) {
10053 bnxt_hwrm_func_qcfg(bp);
10054 netdev_update_features(bp->dev);
10060 bnxt_hwrm_resource_free(bp, 0, true);
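
bnxt_init_chip() above brings the datapath up in a fixed order: stats contexts, rings, ring groups, the default VNIC, L2 filter and RX mode, and finally coalescing (whose failure only warns), unwinding through a single resource-free path on error. A sketch of that ordering with stub steps.

    /* Order-of-operations sketch for bnxt_init_chip() with stub steps;
     * only the sequencing and the single error path are illustrated. */
    #include <stdio.h>

    static int step(const char *name, int rc)
    {
        printf("%s -> %d\n", name, rc);
        return rc;
    }

    int main(void)
    {
        int rc;

        if ((rc = step("alloc stat contexts", 0)))
            goto err_out;
        if ((rc = step("alloc hw rings", 0)))
            goto err_out;
        if ((rc = step("alloc ring groups", 0)))
            goto err_out;
        if ((rc = step("alloc + set up default vnic", 0)))
            goto err_out;
        if ((rc = step("set l2 filter / rx mode", 0)))
            goto err_out;
        step("set coalescing (failure only warns)", 0);
        return 0;

    err_out:
        step("free hw resources", 0);
        return rc;
    }
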
10065 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
10067 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
10071 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
10073 bnxt_init_cp_rings(bp);
10074 bnxt_init_rx_rings(bp);
10075 bnxt_init_tx_rings(bp);
10076 bnxt_init_ring_grps(bp, irq_re_init);
10077 bnxt_init_vnics(bp);
10079 return bnxt_init_chip(bp, irq_re_init);
10082 static int bnxt_set_real_num_queues(struct bnxt *bp)
10085 struct net_device *dev = bp->dev;
10087 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
10088 bp->tx_nr_rings_xdp);
10092 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
10097 if (bp->flags & BNXT_FLAG_RFS)
10098 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
10104 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
10128 static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
10133 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
10135 int tcs = bp->num_tc;
10139 return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp);
10142 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
10144 int tcs = bp->num_tc;
10146 return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
10147 bp->tx_nr_rings_xdp;
10150 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
10153 int tx_cp = bnxt_num_tx_to_cp(bp, *tx);
10158 rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh);
10162 *tx = bnxt_num_cp_to_tx(bp, tx_cp);
10165 return __bnxt_trim_rings(bp, rx, tx, max, sh);
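
bnxt_num_cp_to_tx() above converts a completion-ring count back into TX rings: XDP TX rings map one-to-one, and each remaining completion ring fans out to one TX ring per traffic class. The sketch pairs it with the assumed inverse used by bnxt_num_tx_to_cp(), which is not fully visible in these matches.

    /* Sketch of the TX <-> completion-ring count conversion around
     * bnxt_num_cp_to_tx(); tx_to_cp() is the assumed inverse. */
    #include <stdio.h>

    static int cp_to_tx(int tx_cp, int tcs, int tx_xdp)
    {
        return (tx_cp - tx_xdp) * tcs + tx_xdp;
    }

    static int tx_to_cp(int tx, int tcs, int tx_xdp)    /* assumed inverse */
    {
        return (tx - tx_xdp) / tcs + tx_xdp;
    }

    int main(void)
    {
        int tcs = 2, tx_xdp = 4, tx_cp = 8;
        int tx = cp_to_tx(tx_cp, tcs, tx_xdp);

        printf("%d cp rings -> %d tx rings -> %d cp rings\n",
               tx_cp, tx, tx_to_cp(tx, tcs, tx_xdp));
        return 0;
    }
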
10168 static void bnxt_setup_msix(struct bnxt *bp)
10170 const int len = sizeof(bp->irq_tbl[0].name);
10171 struct net_device *dev = bp->dev;
10174 tcs = bp->num_tc;
10179 count = bp->tx_nr_rings_per_tc;
10180 off = BNXT_TC_TO_RING_BASE(bp, i);
10185 for (i = 0; i < bp->cp_nr_rings; i++) {
10186 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
10189 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
10191 else if (i < bp->rx_nr_rings)
10196 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
10198 bp->irq_tbl[map_idx].handler = bnxt_msix;
10202 static void bnxt_setup_inta(struct bnxt *bp)
10204 const int len = sizeof(bp->irq_tbl[0].name);
10206 if (bp->num_tc) {
10207 netdev_reset_tc(bp->dev);
10208 bp->num_tc = 0;
10211 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
10213 bp->irq_tbl[0].handler = bnxt_inta;
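
bnxt_setup_msix() and bnxt_setup_inta() above build each vector name as "<netdev>-<attr>-<index>" with snprintf(), where the attribute is TxRx for shared rings, rx for the RX range, and tx otherwise. A standalone sketch of that naming.

    /* Sketch of the "%s-%s-%d" IRQ naming used when setting up vectors. */
    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        char name[32];
        const char *dev = "eth0";
        bool shared = false;
        int ring = 3, rx_nr_rings = 4;
        const char *attr = shared ? "TxRx" :
                           (ring < rx_nr_rings ? "rx" : "tx");

        snprintf(name, sizeof(name), "%s-%s-%d", dev, attr, ring);
        printf("irq name: %s\n", name);
        return 0;
    }
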
10216 static int bnxt_init_int_mode(struct bnxt *bp);
10218 static int bnxt_setup_int_mode(struct bnxt *bp)
10222 if (!bp->irq_tbl) {
10223 rc = bnxt_init_int_mode(bp);
10224 if (rc || !bp->irq_tbl)
10228 if (bp->flags & BNXT_FLAG_USING_MSIX)
10229 bnxt_setup_msix(bp);
10231 bnxt_setup_inta(bp);
10233 rc = bnxt_set_real_num_queues(bp);
10237 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
10239 return bp->hw_resc.max_rsscos_ctxs;
10242 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
10244 return bp->hw_resc.max_vnics;
10247 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
10249 return bp->hw_resc.max_stat_ctxs;
10252 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
10254 return bp->hw_resc.max_cp_rings;
10257 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
10259 unsigned int cp = bp->hw_resc.max_cp_rings;
10261 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
10262 cp -= bnxt_get_ulp_msix_num(bp);
10267 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
10269 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10271 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10277 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
10279 bp->hw_resc.max_irqs = max_irqs;
10282 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
10286 cp = bnxt_get_max_func_cp_rings_for_en(bp);
10287 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10288 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
10290 return cp - bp->cp_nr_rings;
10293 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
10295 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
10298 int bnxt_get_avail_msix(struct bnxt *bp, int num)
10300 int max_cp = bnxt_get_max_func_cp_rings(bp);
10301 int max_irq = bnxt_get_max_func_irqs(bp);
10302 int total_req = bp->cp_nr_rings + num;
10305 max_idx = bp->total_irqs;
10306 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
10307 max_idx = min_t(int, bp->total_irqs, max_cp);
10308 avail_msix = max_idx - bp->cp_nr_rings;
10309 if (!BNXT_NEW_RM(bp) || avail_msix >= num)
10313 num = max_irq - bp->cp_nr_rings;
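
bnxt_get_avail_msix() above budgets leftover vectors for auxiliary (ULP) use as whatever remains after the L2 completion rings, bounded by the enabled vector count. The sketch below collapses the P5+/legacy and resource-manager distinctions into a single cap, so it is a simplification of the real logic.

    /* Simplified sketch of the leftover-MSI-X budget arithmetic in
     * bnxt_get_avail_msix(); the numbers are examples. */
    #include <stdio.h>

    static int get_avail_msix(int total_irqs, int max_cp, int cp_nr_rings,
                              int wanted)
    {
        int max_idx = total_irqs < max_cp ? total_irqs : max_cp;
        int avail = max_idx - cp_nr_rings;

        return avail < wanted ? avail : wanted;
    }

    int main(void)
    {
        printf("ulp gets %d of the 4 requested vectors\n",
               get_avail_msix(16, 14, 12, 4));
        return 0;
    }
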
10320 static int bnxt_get_num_msix(struct bnxt *bp)
10322 if (!BNXT_NEW_RM(bp))
10323 return bnxt_get_max_func_irqs(bp);
10325 return bnxt_nq_rings_in_use(bp);
10328 static int bnxt_init_msix(struct bnxt *bp)
10333 total_vecs = bnxt_get_num_msix(bp);
10334 max = bnxt_get_max_func_irqs(bp);
10350 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
10353 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
10354 ulp_msix = bnxt_get_ulp_msix_num(bp);
10360 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
10361 if (bp->irq_tbl) {
10363 bp->irq_tbl[i].vector = msix_ent[i].vector;
10365 bp->total_irqs = total_vecs;
10367 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
10372 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
10373 bp->cp_nr_rings = (min == 1) ?
10374 max_t(int, tx_cp, bp->rx_nr_rings) :
10375 tx_cp + bp->rx_nr_rings;
10381 bp->flags |= BNXT_FLAG_USING_MSIX;
10386 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
10387 kfree(bp->irq_tbl);
10388 bp->irq_tbl = NULL;
10389 pci_disable_msix(bp->pdev);
10394 static int bnxt_init_inta(struct bnxt *bp)
10396 bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
10397 if (!bp->irq_tbl)
10400 bp->total_irqs = 1;
10401 bp->rx_nr_rings = 1;
10402 bp->tx_nr_rings = 1;
10403 bp->cp_nr_rings = 1;
10404 bp->flags |= BNXT_FLAG_SHARED_RINGS;
10405 bp->irq_tbl[0].vector = bp->pdev->irq;
10409 static int bnxt_init_int_mode(struct bnxt *bp)
10413 if (bp->flags & BNXT_FLAG_MSIX_CAP)
10414 rc = bnxt_init_msix(bp);
10416 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
10418 rc = bnxt_init_inta(bp);
10423 static void bnxt_clear_int_mode(struct bnxt *bp)
10425 if (bp->flags & BNXT_FLAG_USING_MSIX)
10426 pci_disable_msix(bp->pdev);
10428 kfree(bp->irq_tbl);
10429 bp->irq_tbl = NULL;
10430 bp->flags &= ~BNXT_FLAG_USING_MSIX;
10433 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
10436 int tcs = bp->num_tc;
10439 if (!bnxt_need_reserve_rings(bp))
10442 if (irq_re_init && BNXT_NEW_RM(bp) &&
10443 bnxt_get_num_msix(bp) != bp->total_irqs) {
10444 bnxt_ulp_irq_stop(bp);
10445 bnxt_clear_int_mode(bp);
10448 rc = __bnxt_reserve_rings(bp);
10451 rc = bnxt_init_int_mode(bp);
10452 bnxt_ulp_irq_restart(bp, rc);
10455 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
10458 if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
10459 bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
10460 netdev_err(bp->dev, "tx ring reservation failure\n");
10461 netdev_reset_tc(bp->dev);
10462 bp->num_tc = 0;
10463 if (bp->tx_nr_rings_xdp)
10464 bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
10466 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10472 static void bnxt_free_irq(struct bnxt *bp)
10478 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
10479 bp->dev->rx_cpu_rmap = NULL;
10481 if (!bp->irq_tbl || !bp->bnapi)
10484 for (i = 0; i < bp->cp_nr_rings; i++) {
10485 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
10487 irq = &bp->irq_tbl[map_idx];
10494 free_irq(irq->vector, bp->bnapi[i]);
10501 static int bnxt_request_irq(struct bnxt *bp)
10509 rc = bnxt_setup_int_mode(bp);
10511 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
10516 rmap = bp->dev->rx_cpu_rmap;
10518 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
10521 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
10522 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
10523 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
10526 if (rmap && bp->bnapi[i]->rx_ring) {
10529 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
10535 bp->bnapi[i]);
10539 netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector);
10543 int numa_node = dev_to_node(&bp->pdev->dev);
10550 netdev_warn(bp->dev,
10560 static void bnxt_del_napi(struct bnxt *bp)
10564 if (!bp->bnapi)
10567 for (i = 0; i < bp->rx_nr_rings; i++)
10568 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
10569 for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++)
10570 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL);
10572 for (i = 0; i < bp->cp_nr_rings; i++) {
10573 struct bnxt_napi *bnapi = bp->bnapi[i];
10583 static void bnxt_init_napi(struct bnxt *bp)
10586 unsigned int cp_nr_rings = bp->cp_nr_rings;
10589 if (bp->flags & BNXT_FLAG_USING_MSIX) {
10592 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10594 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
10597 bnapi = bp->bnapi[i];
10598 netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
10600 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10601 bnapi = bp->bnapi[cp_nr_rings];
10602 netif_napi_add(bp->dev, &bnapi->napi,
10606 bnapi = bp->bnapi[0];
10607 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll);
10611 static void bnxt_disable_napi(struct bnxt *bp)
10615 if (!bp->bnapi ||
10616 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
10619 for (i = 0; i < bp->cp_nr_rings; i++) {
10620 struct bnxt_napi *bnapi = bp->bnapi[i];
10634 static void bnxt_enable_napi(struct bnxt *bp)
10638 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
10639 for (i = 0; i < bp->cp_nr_rings; i++) {
10640 struct bnxt_napi *bnapi = bp->bnapi[i];
10656 void bnxt_tx_disable(struct bnxt *bp)
10661 if (bp->tx_ring) {
10662 for (i = 0; i < bp->tx_nr_rings; i++) {
10663 txr = &bp->tx_ring[i];
10670 netif_carrier_off(bp->dev);
10672 netif_tx_disable(bp->dev);
10675 void bnxt_tx_enable(struct bnxt *bp)
10680 for (i = 0; i < bp->tx_nr_rings; i++) {
10681 txr = &bp->tx_ring[i];
10686 netif_tx_wake_all_queues(bp->dev);
10687 if (BNXT_LINK_IS_UP(bp))
10688 netif_carrier_on(bp->dev);
10715 void bnxt_report_link(struct bnxt *bp)
10717 if (BNXT_LINK_IS_UP(bp)) {
10724 netif_carrier_on(bp->dev);
10725 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
10727 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
10730 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
10734 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
10736 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
10738 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
10742 if (bp->link_info.phy_qcfg_resp.option_flags &
10744 u8 sig_mode = bp->link_info.active_fec_sig_mode &
10760 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
10762 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
10763 netdev_info(bp->dev, "EEE is %s\n",
10764 bp->eee.eee_active ? "active" :
10766 fec = bp->link_info.fec_cfg;
10768 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
10770 bnxt_report_fec(&bp->link_info));
10772 netif_carrier_off(bp->dev);
10773 netdev_err(bp->dev, "NIC Link is Down\n");
10789 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
10791 struct bnxt_link_info *link_info = &bp->link_info;
10796 if (bp->hwrm_spec_code < 0x10201)
10799 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
10803 resp = hwrm_req_hold(bp, req);
10804 rc = hwrm_req_send(bp, req);
10808 bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
10810 struct ethtool_keee *eee = &bp->eee;
10814 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
10816 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
10820 if (bp->hwrm_spec_code >= 0x10a01) {
10823 netdev_warn(bp->dev, "Ethernet link disabled\n");
10826 netdev_info(bp->dev, "Ethernet link enabled\n");
10843 bp->port_count = resp->port_cnt;
10846 hwrm_req_drop(bp, req);
10859 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
10864 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
10885 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
10887 struct bnxt_link_info *link_info = &bp->link_info;
10894 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
10898 resp = hwrm_req_hold(bp, req);
10899 rc = hwrm_req_send(bp, req);
10901 hwrm_req_drop(bp, req);
10902 if (BNXT_VF(bp) && rc == -ENODEV) {
10903 netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
10912 if (bp->hwrm_spec_code >= 0x10800)
10922 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
10954 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
10955 struct ethtool_keee *eee = &bp->eee;
10989 if (bp->hwrm_spec_code >= 0x10504) {
11000 bnxt_report_link(bp);
11005 hwrm_req_drop(bp, req);
11007 if (!BNXT_PHY_CFG_ABLE(bp))
11012 bnxt_hwrm_set_link_setting(bp, true, false);
11016 static void bnxt_get_port_module_status(struct bnxt *bp)
11018 struct bnxt_link_info *link_info = &bp->link_info;
11022 if (bnxt_update_link(bp, true))
11030 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
11031 bp->pf.port_id);
11032 if (bp->hwrm_spec_code >= 0x10201) {
11033 netdev_warn(bp->dev, "Module part number %s\n",
11037 netdev_warn(bp->dev, "TX is disabled\n");
11039 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
11044 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
11046 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
11047 if (bp->hwrm_spec_code >= 0x10201)
11050 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
11052 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
11057 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
11059 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
11063 if (bp->hwrm_spec_code >= 0x10201) {
11071 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
11073 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
11075 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
11078 req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising);
11079 } else if (bp->link_info.advertising) {
11081 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
11083 if (bp->link_info.advertising_pam4) {
11087 cpu_to_le16(bp->link_info.advertising_pam4);
11093 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
11094 req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed);
11096 netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n",
11097 (u32)bp->link_info.req_link_speed);
11098 } else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
11099 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
11102 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
11110 int bnxt_hwrm_set_pause(struct bnxt *bp)
11115 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
11119 bnxt_hwrm_set_pause_common(bp, req);
11121 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
11122 bp->link_info.force_link_chng)
11123 bnxt_hwrm_set_link_common(bp, req);
11125 rc = hwrm_req_send(bp, req);
11126 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
11131 bp->link_info.pause =
11132 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
11133 bp->link_info.auto_pause_setting = 0;
11134 if (!bp->link_info.force_link_chng)
11135 bnxt_report_link(bp);
11137 bp->link_info.force_link_chng = false;
11141 static void bnxt_hwrm_set_eee(struct bnxt *bp,
11144 struct ethtool_keee *eee = &bp->eee;
11164 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
11169 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
11174 bnxt_hwrm_set_pause_common(bp, req);
11176 bnxt_hwrm_set_link_common(bp, req);
11179 bnxt_hwrm_set_eee(bp, req);
11180 return hwrm_req_send(bp, req);
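
The bnxt_hwrm_set_pause()/bnxt_hwrm_set_link_setting() matches above, like most entries in this listing, follow the driver's HWRM request lifecycle: hwrm_req_init() builds the message, hwrm_req_hold() pins the response buffer when the reply will be read, hwrm_req_send() issues it, and hwrm_req_drop() releases it. A minimal user-space sketch of that ordering is below; the mock_* helpers are hypothetical stand-ins so the pattern can be compiled and run outside the kernel, they are not the real bnxt_hwrm.h API.

/*
 * Illustrative only: mock_* functions are hypothetical stand-ins for the
 * driver's hwrm_req_init()/hwrm_req_hold()/hwrm_req_send()/hwrm_req_drop().
 * The point is the ordering: early return if init fails, hold only when
 * the response is read, always drop once init has succeeded.
 */
#include <stdio.h>
#include <stdlib.h>

struct mock_req { int msg_type; };
struct mock_resp { int link_up; };

static int mock_req_init(struct mock_req **req, int msg_type)
{
	*req = calloc(1, sizeof(**req));
	if (!*req)
		return -1;
	(*req)->msg_type = msg_type;
	return 0;
}

static struct mock_resp *mock_req_hold(struct mock_req *req)
{
	/* Holding keeps the response buffer valid until the drop. */
	static struct mock_resp resp = { .link_up = 1 };
	(void)req;
	return &resp;
}

static int mock_req_send(struct mock_req *req)
{
	printf("sending message type %d\n", req->msg_type);
	return 0;	/* 0 == success, mirroring the rc convention above */
}

static void mock_req_drop(struct mock_req *req)
{
	free(req);	/* releases the request and the held response */
}

int main(void)
{
	struct mock_req *req;
	struct mock_resp *resp;
	int rc;

	rc = mock_req_init(&req, 0x27 /* arbitrary message id for the demo */);
	if (rc)
		return rc;	/* nothing to drop if init failed */

	resp = mock_req_hold(req);	/* only needed when the reply is read */
	rc = mock_req_send(req);
	if (!rc)
		printf("link_up=%d\n", resp->link_up);
	mock_req_drop(req);
	return rc;
}
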
11183 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
11188 if (!BNXT_SINGLE_PF(bp))
11191 if (pci_num_vf(bp->pdev) &&
11192 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
11195 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
11200 rc = hwrm_req_send(bp, req);
11202 mutex_lock(&bp->link_lock);
11208 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
11209 mutex_unlock(&bp->link_lock);
11214 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
11220 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
11224 netdev_err(bp->dev, "OP-TEE not supported\n");
11229 static int bnxt_try_recover_fw(struct bnxt *bp)
11231 if (bp->fw_health && bp->fw_health->status_reliable) {
11236 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
11237 rc = bnxt_hwrm_poll(bp);
11245 netdev_err(bp->dev,
11251 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
11252 return bnxt_fw_reset_via_optee(bp);
11260 static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
11262 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11264 if (!BNXT_NEW_RM(bp))
11276 bp->tx_nr_rings = 0;
11277 bp->rx_nr_rings = 0;
11281 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
11285 if (!BNXT_NEW_RM(bp))
11288 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
11290 netdev_err(bp->dev, "resc_qcaps failed\n");
11292 bnxt_clear_reservations(bp, fw_reset);
11297 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
11301 bool fw_reset = !bp->irq_tbl;
11306 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
11309 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
11315 resp = hwrm_req_hold(bp, req);
11317 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
11319 rc = hwrm_req_send(bp, req);
11328 hwrm_req_drop(bp, req);
11333 rc = bnxt_try_recover_fw(bp);
11336 hwrm_req_drop(bp, req);
11341 bnxt_inv_fw_health_reg(bp);
11348 test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
11351 bnxt_remap_fw_health_regs(bp);
11353 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
11354 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
11355 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
11360 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
11361 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11362 bnxt_ulp_stop(bp);
11363 bnxt_free_ctx_mem(bp);
11364 bnxt_dcb_free(bp);
11365 rc = bnxt_fw_init_one(bp);
11367 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
11368 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
11371 bnxt_clear_int_mode(bp);
11372 rc = bnxt_init_int_mode(bp);
11374 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
11375 netdev_err(bp->dev, "init int mode failed\n");
11379 rc = bnxt_cancel_reservations(bp, fw_reset);
11384 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
11388 struct bnxt_pf_info *pf = &bp->pf;
11391 bp->num_leds = 0;
11392 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
11395 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
11400 resp = hwrm_req_hold(bp, req);
11401 rc = hwrm_req_send(bp, req);
11403 hwrm_req_drop(bp, req);
11409 bp->num_leds = resp->num_leds;
11410 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
11411 bp->num_leds);
11412 for (i = 0; i < bp->num_leds; i++) {
11413 struct bnxt_led_info *led = &bp->leds[i];
11418 bp->num_leds = 0;
11423 hwrm_req_drop(bp, req);
11427 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
11433 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
11437 req->port_id = cpu_to_le16(bp->pf.port_id);
11440 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
11442 resp = hwrm_req_hold(bp, req);
11443 rc = hwrm_req_send(bp, req);
11445 bp->wol_filter_id = resp->wol_filter_id;
11446 hwrm_req_drop(bp, req);
11450 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
11455 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
11459 req->port_id = cpu_to_le16(bp->pf.port_id);
11461 req->wol_filter_id = bp->wol_filter_id;
11463 return hwrm_req_send(bp, req);
11466 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
11473 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
11477 req->port_id = cpu_to_le16(bp->pf.port_id);
11479 resp = hwrm_req_hold(bp, req);
11480 rc = hwrm_req_send(bp, req);
11486 bp->wol = 1;
11487 bp->wol_filter_id = resp->wol_filter_id;
11491 hwrm_req_drop(bp, req);
11495 static void bnxt_get_wol_settings(struct bnxt *bp)
11499 bp->wol = 0;
11500 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
11504 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
11508 static bool bnxt_eee_config_ok(struct bnxt *bp)
11510 struct ethtool_keee *eee = &bp->eee;
11511 struct bnxt_link_info *link_info = &bp->link_info;
11513 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
11535 static int bnxt_update_phy_setting(struct bnxt *bp)
11541 struct bnxt_link_info *link_info = &bp->link_info;
11543 rc = bnxt_update_link(bp, true);
11545 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
11549 if (!BNXT_SINGLE_PF(bp))
11576 if (!BNXT_LINK_IS_UP(bp))
11579 if (!bnxt_eee_config_ok(bp))
11583 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
11585 rc = bnxt_hwrm_set_pause(bp);
11587 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
11600 static void bnxt_preset_reg_win(struct bnxt *bp)
11602 if (BNXT_PF(bp)) {
11605 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
11609 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
11611 static int bnxt_reinit_after_abort(struct bnxt *bp)
11615 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11618 if (bp->dev->reg_state == NETREG_UNREGISTERED)
11621 rc = bnxt_fw_init_one(bp);
11623 bnxt_clear_int_mode(bp);
11624 rc = bnxt_init_int_mode(bp);
11626 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
11627 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
11633 static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
11643 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
11646 if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) {
11647 bnxt_del_ntp_filter(bp, ntp_fltr);
11648 netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n",
11653 if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) {
11654 bnxt_del_l2_filter(bp, l2_fltr);
11655 netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n",
11661 static void bnxt_cfg_usr_fltrs(struct bnxt *bp)
11665 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list)
11666 bnxt_cfg_one_usr_fltr(bp, usr_fltr);
11669 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
11673 bnxt_preset_reg_win(bp);
11674 netif_carrier_off(bp->dev);
11677 rc = bnxt_init_dflt_ring_mode(bp);
11679 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
11683 rc = bnxt_reserve_rings(bp, irq_re_init);
11686 if ((bp->flags & BNXT_FLAG_RFS) &&
11687 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
11689 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
11690 bp->flags &= ~BNXT_FLAG_RFS;
11693 rc = bnxt_alloc_mem(bp, irq_re_init);
11695 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
11700 bnxt_init_napi(bp);
11701 rc = bnxt_request_irq(bp);
11703 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
11708 rc = bnxt_init_nic(bp, irq_re_init);
11710 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
11714 bnxt_enable_napi(bp);
11715 bnxt_debug_dev_init(bp);
11718 mutex_lock(&bp->link_lock);
11719 rc = bnxt_update_phy_setting(bp);
11720 mutex_unlock(&bp->link_lock);
11722 netdev_warn(bp->dev, "failed to update phy settings\n");
11723 if (BNXT_SINGLE_PF(bp)) {
11724 bp->link_info.phy_retry = true;
11725 bp->link_info.phy_retry_expires =
11732 udp_tunnel_nic_reset_ntf(bp->dev);
11734 if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
11740 set_bit(BNXT_STATE_OPEN, &bp->state);
11741 bnxt_enable_int(bp);
11743 bnxt_tx_enable(bp);
11744 mod_timer(&bp->timer, jiffies + bp->current_interval);
11746 mutex_lock(&bp->link_lock);
11747 bnxt_get_port_module_status(bp);
11748 mutex_unlock(&bp->link_lock);
11751 if (BNXT_PF(bp))
11752 bnxt_vf_reps_open(bp);
11753 if (bp->ptp_cfg)
11754 atomic_set(&bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
11755 bnxt_ptp_init_rtc(bp, true);
11756 bnxt_ptp_cfg_tstamp_filters(bp);
11757 bnxt_cfg_usr_fltrs(bp);
11761 bnxt_del_napi(bp);
11764 bnxt_free_skbs(bp);
11765 bnxt_free_irq(bp);
11766 bnxt_free_mem(bp, true);
11771 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
11775 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
11778 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
11780 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
11781 dev_close(bp->dev);
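
The __bnxt_open_nic() matches above end with a cluster of teardown calls (bnxt_del_napi, bnxt_free_skbs, bnxt_free_irq, bnxt_free_mem), the usual goto-unwind idiom for an open path that acquires resources in order and releases them in reverse on failure. A compilable sketch of that idiom follows; the step_*/undo_* helpers and label names are hypothetical and only illustrate the shape, not the driver's exact error labels.

/*
 * Goto-based error unwinding: each acquisition gets a matching label, and
 * a failure jumps to the label that releases everything acquired so far,
 * in reverse order.  step_init() fails on purpose to exercise the unwind.
 */
#include <stdio.h>

static int step_alloc(void)  { puts("alloc mem");   return 0; }
static int step_napi(void)   { puts("init napi");   return 0; }
static int step_irq(void)    { puts("request irq"); return 0; }
static int step_init(void)   { puts("init nic");    return -1; /* force unwind */ }

static void undo_irq(void)   { puts("free irq"); }
static void undo_napi(void)  { puts("del napi"); }
static void undo_alloc(void) { puts("free mem"); }

static int open_nic(void)
{
	int rc;

	rc = step_alloc();
	if (rc)
		return rc;
	rc = step_napi();
	if (rc)
		goto err_free_mem;
	rc = step_irq();
	if (rc)
		goto err_del_napi;
	rc = step_init();
	if (rc)
		goto err_free_irq;
	return 0;

err_free_irq:
	undo_irq();
err_del_napi:
	undo_napi();
err_free_mem:
	undo_alloc();
	return rc;
}

int main(void)
{
	return open_nic() ? 1 : 0;
}
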
11790 int bnxt_half_open_nic(struct bnxt *bp)
11794 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
11795 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
11800 rc = bnxt_alloc_mem(bp, true);
11802 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
11805 bnxt_init_napi(bp);
11806 set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
11807 rc = bnxt_init_nic(bp, true);
11809 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
11810 bnxt_del_napi(bp);
11811 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
11817 bnxt_free_skbs(bp);
11818 bnxt_free_mem(bp, true);
11819 dev_close(bp->dev);
11826 void bnxt_half_close_nic(struct bnxt *bp)
11828 bnxt_hwrm_resource_free(bp, false, true);
11829 bnxt_del_napi(bp);
11830 bnxt_free_skbs(bp);
11831 bnxt_free_mem(bp, true);
11832 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
11835 void bnxt_reenable_sriov(struct bnxt *bp)
11837 if (BNXT_PF(bp)) {
11838 struct bnxt_pf_info *pf = &bp->pf;
11842 bnxt_cfg_hw_sriov(bp, &n, true);
11848 struct bnxt *bp = netdev_priv(dev);
11851 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
11852 rc = bnxt_reinit_after_abort(bp);
11855 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
11857 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
11862 rc = bnxt_hwrm_if_change(bp, true);
11866 rc = __bnxt_open_nic(bp, true, true);
11868 bnxt_hwrm_if_change(bp, false);
11870 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
11871 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11872 bnxt_ulp_start(bp, 0);
11873 bnxt_reenable_sriov(bp);
11881 static bool bnxt_drv_busy(struct bnxt *bp)
11883 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
11884 test_bit(BNXT_STATE_READ_STATS, &bp->state));
11887 static void bnxt_get_ring_stats(struct bnxt *bp,
11890 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
11894 if (BNXT_PF(bp))
11895 bnxt_vf_reps_close(bp);
11898 bnxt_tx_disable(bp);
11900 clear_bit(BNXT_STATE_OPEN, &bp->state);
11902 while (bnxt_drv_busy(bp))
11906 bnxt_shutdown_nic(bp, irq_re_init);
11910 bnxt_debug_dev_exit(bp);
11911 bnxt_disable_napi(bp);
11912 del_timer_sync(&bp->timer);
11913 bnxt_free_skbs(bp);
11916 if (bp->bnapi && irq_re_init) {
11917 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
11918 bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev);
11921 bnxt_free_irq(bp);
11922 bnxt_del_napi(bp);
11924 bnxt_free_mem(bp, irq_re_init);
11927 void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
11929 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11937 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
11938 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
11942 if (bp->sriov_cfg) {
11945 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
11946 !bp->sriov_cfg,
11949 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n");
11951 netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n");
11954 __bnxt_close_nic(bp, irq_re_init, link_re_init);
11959 struct bnxt *bp = netdev_priv(dev);
11961 bnxt_close_nic(bp, true, true);
11962 bnxt_hwrm_shutdown_link(bp);
11963 bnxt_hwrm_if_change(bp, false);
11967 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
11974 if (bp->hwrm_spec_code < 0x10a00)
11977 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
11981 req->port_id = cpu_to_le16(bp->pf.port_id);
11991 resp = hwrm_req_hold(bp, req);
11992 rc = hwrm_req_send(bp, req);
11995 hwrm_req_drop(bp, req);
11999 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
12005 if (bp->hwrm_spec_code < 0x10a00)
12008 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
12012 req->port_id = cpu_to_le16(bp->pf.port_id);
12023 return hwrm_req_send(bp, req);
12030 struct bnxt *bp = netdev_priv(dev);
12035 mdio->phy_id = bp->link_info.phy_addr;
12044 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
12054 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
12070 static void bnxt_get_ring_stats(struct bnxt *bp,
12075 for (i = 0; i < bp->cp_nr_rings; i++) {
12076 struct bnxt_napi *bnapi = bp->bnapi[i];
12109 static void bnxt_add_prev_stats(struct bnxt *bp,
12112 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
12127 struct bnxt *bp = netdev_priv(dev);
12129 set_bit(BNXT_STATE_READ_STATS, &bp->state);
12134 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
12135 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
12136 *stats = bp->net_stats_prev;
12140 bnxt_get_ring_stats(bp, stats);
12141 bnxt_add_prev_stats(bp, stats);
12143 if (bp->flags & BNXT_FLAG_PORT_STATS) {
12144 u64 *rx = bp->port_stats.sw_stats;
12145 u64 *tx = bp->port_stats.sw_stats +
12165 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
12168 static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
12188 void bnxt_get_ring_err_stats(struct bnxt *bp,
12193 for (i = 0; i < bp->cp_nr_rings; i++)
12194 bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring);
12197 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
12199 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
12200 struct net_device *dev = bp->dev;
12231 static bool bnxt_uc_list_updated(struct bnxt *bp)
12233 struct net_device *dev = bp->dev;
12234 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
12252 struct bnxt *bp = netdev_priv(dev);
12258 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
12261 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
12271 uc_update = bnxt_uc_list_updated(bp);
12279 mc_update = bnxt_mc_list_updated(bp, &mask);
12285 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
12289 static int bnxt_cfg_rx_mode(struct bnxt *bp)
12291 struct net_device *dev = bp->dev;
12292 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
12298 uc_update = bnxt_uc_list_updated(bp);
12307 bnxt_hwrm_l2_filter_free(bp, fltr);
12308 bnxt_del_l2_filter(bp, fltr);
12326 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
12328 if (BNXT_VF(bp) && rc == -ENODEV) {
12329 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
12330 netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
12332 netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
12335 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
12341 if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
12342 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
12346 !bnxt_promisc_ok(bp))
12348 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
12350 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
12355 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
12358 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
12364 static bool bnxt_can_reserve_rings(struct bnxt *bp)
12367 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
12368 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12376 if (!netif_running(bp->dev))
12384 static bool bnxt_rfs_supported(struct bnxt *bp)
12386 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
12387 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
12392 if (BNXT_FW_MAJ(bp) == 212)
12394 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
12396 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
12402 static bool bnxt_rfs_capable(struct bnxt *bp)
12408 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
12411 hwr.rss_ctx = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
12415 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
12416 return bnxt_rfs_supported(bp);
12417 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
12420 hwr.vnic = 1 + bp->rx_nr_rings;
12422 max_vnics = bnxt_get_max_func_vnics(bp);
12423 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
12425 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
12426 !(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP))
12430 if (bp->rx_nr_rings > 1)
12431 netdev_warn(bp->dev,
12437 if (!BNXT_NEW_RM(bp))
12440 if (hwr.vnic == bp->hw_resc.resv_vnics &&
12441 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
12444 bnxt_hwrm_reserve_rings(bp, &hwr);
12445 if (hwr.vnic <= bp->hw_resc.resv_vnics &&
12446 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
12449 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
12452 bnxt_hwrm_reserve_rings(bp, &hwr);
12459 struct bnxt *bp = netdev_priv(dev);
12462 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
12465 if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
12485 if (BNXT_VF(bp) && bp->vf.vlan)
12491 static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init,
12494 bnxt_close_nic(bp, irq_re_init, link_re_init);
12495 bp->flags = flags;
12497 bnxt_set_ring_params(bp);
12498 return bnxt_open_nic(bp, irq_re_init, link_re_init);
12504 struct bnxt *bp = netdev_priv(dev);
12505 u32 flags = bp->flags;
12516 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
12525 bnxt_clear_usr_fltrs(bp, true);
12527 changes = flags ^ bp->flags;
12530 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
12532 (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
12542 if (flags != bp->flags) {
12543 u32 old_flags = bp->flags;
12545 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
12546 bp->flags = flags;
12548 bnxt_set_ring_params(bp);
12553 return bnxt_reinit_features(bp, true, false, flags, update_tpa);
12556 return bnxt_reinit_features(bp, false, false, flags, update_tpa);
12559 bp->flags = flags;
12560 rc = bnxt_set_tpa(bp,
12564 bp->flags = old_flags;
12570 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
12637 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
12642 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port &&
12643 udp_port != bp->vxlan_gpe_port)
12652 return bnxt_exthdr_check(bp, skb,
12659 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
12665 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
12669 return bnxt_udp_tunl_check(bp, skb);
12684 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
12694 struct bnxt *bp = netdev_priv(dev);
12703 if (bnxt_tunl_check(bp, skb, *l4_proto))
12707 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
12710 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
12717 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
12726 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
12730 dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
12739 resp = hwrm_req_hold(bp, req);
12743 rc = hwrm_req_send(bp, req);
12752 hwrm_req_drop(bp, req);
12756 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
12763 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
12769 resp = hwrm_req_hold(bp, req);
12770 rc = hwrm_req_send(bp, req);
12775 hwrm_req_drop(bp, req);
12785 netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
12798 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
12809 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
12813 static void bnxt_dbg_dump_states(struct bnxt *bp)
12818 for (i = 0; i < bp->cp_nr_rings; i++) {
12819 bnapi = bp->bnapi[i];
12820 if (netif_msg_drv(bp)) {
12828 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
12830 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
12837 rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
12845 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
12846 return hwrm_req_send_silent(bp, req);
12849 static void bnxt_reset_task(struct bnxt *bp, bool silent)
12852 bnxt_dbg_dump_states(bp);
12853 if (netif_running(bp->dev)) {
12857 bnxt_close_nic(bp, false, false);
12858 bnxt_open_nic(bp, false, false);
12860 bnxt_ulp_stop(bp);
12861 bnxt_close_nic(bp, true, false);
12862 rc = bnxt_open_nic(bp, true, false);
12863 bnxt_ulp_start(bp, rc);
12870 struct bnxt *bp = netdev_priv(dev);
12872 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
12873 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
12876 static void bnxt_fw_health_check(struct bnxt *bp)
12878 struct bnxt_fw_health *fw_health = bp->fw_health;
12879 struct pci_dev *pdev = bp->pdev;
12882 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12892 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
12900 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
12910 bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT);
12915 struct bnxt *bp = from_timer(bp, t, timer);
12916 struct net_device *dev = bp->dev;
12918 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
12921 if (atomic_read(&bp->intr_sem) != 0)
12924 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
12925 bnxt_fw_health_check(bp);
12927 if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks)
12928 bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT);
12930 if (bnxt_tc_flower_enabled(bp))
12931 bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT);
12934 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count)
12935 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
12938 if (bp->link_info.phy_retry) {
12939 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
12940 bp->link_info.phy_retry = false;
12941 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
12943 bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT);
12947 if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
12948 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
12950 if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev))
12951 bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
12954 mod_timer(&bp->timer, jiffies + bp->current_interval);
12957 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
12964 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
12968 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
12970 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
12975 static void bnxt_reset(struct bnxt *bp, bool silent)
12977 bnxt_rtnl_lock_sp(bp);
12978 if (test_bit(BNXT_STATE_OPEN, &bp->state))
12979 bnxt_reset_task(bp, silent);
12980 bnxt_rtnl_unlock_sp(bp);
12984 static void bnxt_rx_ring_reset(struct bnxt *bp)
12988 bnxt_rtnl_lock_sp(bp);
12989 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
12990 bnxt_rtnl_unlock_sp(bp);
12994 if (bp->flags & BNXT_FLAG_TPA)
12995 bnxt_set_tpa(bp, false);
12996 for (i = 0; i < bp->rx_nr_rings; i++) {
12997 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
13004 rc = bnxt_hwrm_rx_ring_reset(bp, i);
13007 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
13009 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
13011 bnxt_reset_task(bp, true);
13014 bnxt_free_one_rx_ring_skbs(bp, i);
13020 bnxt_alloc_one_rx_ring(bp, i);
13023 if (bp->flags & BNXT_FLAG_AGG_RINGS)
13024 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
13025 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
13027 if (bp->flags & BNXT_FLAG_TPA)
13028 bnxt_set_tpa(bp, true);
13029 bnxt_rtnl_unlock_sp(bp);
13032 static void bnxt_fw_fatal_close(struct bnxt *bp)
13034 bnxt_tx_disable(bp);
13035 bnxt_disable_napi(bp);
13036 bnxt_disable_int_sync(bp);
13037 bnxt_free_irq(bp);
13038 bnxt_clear_int_mode(bp);
13039 pci_disable_device(bp->pdev);
13042 static void bnxt_fw_reset_close(struct bnxt *bp)
13044 bnxt_ulp_stop(bp);
13049 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
13052 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
13054 bp->fw_reset_min_dsecs = 0;
13055 bnxt_fw_fatal_close(bp);
13057 __bnxt_close_nic(bp, true, false);
13058 bnxt_vf_reps_free(bp);
13059 bnxt_clear_int_mode(bp);
13060 bnxt_hwrm_func_drv_unrgtr(bp);
13061 if (pci_is_enabled(bp->pdev))
13062 pci_disable_device(bp->pdev);
13063 bnxt_free_ctx_mem(bp);
13066 static bool is_bnxt_fw_ok(struct bnxt *bp)
13068 struct bnxt_fw_health *fw_health = bp->fw_health;
13072 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
13076 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
13087 static void bnxt_force_fw_reset(struct bnxt *bp)
13089 struct bnxt_fw_health *fw_health = bp->fw_health;
13090 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
13093 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
13094 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
13099 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13102 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13104 bnxt_fw_reset_close(bp);
13109 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
13111 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
13113 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
13116 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
13117 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
13118 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
13121 void bnxt_fw_exception(struct bnxt *bp)
13123 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
13124 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
13125 bnxt_rtnl_lock_sp(bp);
13126 bnxt_force_fw_reset(bp);
13127 bnxt_rtnl_unlock_sp(bp);
13133 static int bnxt_get_registered_vfs(struct bnxt *bp)
13138 if (!BNXT_PF(bp))
13141 rc = bnxt_hwrm_func_qcfg(bp);
13143 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
13146 if (bp->pf.registered_vfs)
13147 return bp->pf.registered_vfs;
13148 if (bp->sriov_cfg)
13154 void bnxt_fw_reset(struct bnxt *bp)
13156 bnxt_rtnl_lock_sp(bp);
13157 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
13158 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
13159 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
13164 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13167 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13169 if (bp->pf.active_vfs &&
13170 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
13171 n = bnxt_get_registered_vfs(bp);
13173 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
13175 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13176 dev_close(bp->dev);
13181 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
13182 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
13183 bp->fw_reset_state =
13185 bnxt_queue_fw_reset_work(bp, HZ / 10);
13188 bnxt_fw_reset_close(bp);
13189 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
13190 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
13193 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
13194 tmo = bp->fw_reset_min_dsecs * HZ / 10;
13196 bnxt_queue_fw_reset_work(bp, tmo);
13199 bnxt_rtnl_unlock_sp(bp);
13202 static void bnxt_chk_missed_irq(struct bnxt *bp)
13206 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
13209 for (i = 0; i < bp->cp_nr_rings; i++) {
13210 struct bnxt_napi *bnapi = bp->bnapi[i];
13223 if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2))
13231 bnxt_dbg_hwrm_ring_info_get(bp,
13241 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
13243 struct bnxt_link_info *link_info = &bp->link_info;
13247 if (bp->hwrm_spec_code >= 0x10201) {
13266 static void bnxt_fw_echo_reply(struct bnxt *bp)
13268 struct bnxt_fw_health *fw_health = bp->fw_health;
13272 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
13277 hwrm_req_send(bp, req);
13282 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
13284 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
13286 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13287 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
13291 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
13292 bnxt_cfg_rx_mode(bp);
13294 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
13295 bnxt_cfg_ntp_filters(bp);
13296 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
13297 bnxt_hwrm_exec_fwd_req(bp);
13298 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
13299 netdev_info(bp->dev, "Receive PF driver unload event!\n");
13300 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
13301 bnxt_hwrm_port_qstats(bp, 0);
13302 bnxt_hwrm_port_qstats_ext(bp, 0);
13303 bnxt_accumulate_all_stats(bp);
13306 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
13309 mutex_lock(&bp->link_lock);
13311 &bp->sp_event))
13312 bnxt_hwrm_phy_qcaps(bp);
13314 rc = bnxt_update_link(bp, true);
13316 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
13320 &bp->sp_event))
13321 bnxt_init_ethtool_link_settings(bp);
13322 mutex_unlock(&bp->link_lock);
13324 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
13327 mutex_lock(&bp->link_lock);
13328 rc = bnxt_update_phy_setting(bp);
13329 mutex_unlock(&bp->link_lock);
13331 netdev_warn(bp->dev, "update phy settings retry failed\n");
13333 bp->link_info.phy_retry = false;
13334 netdev_info(bp->dev, "update phy settings retry succeeded\n");
13337 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
13338 mutex_lock(&bp->link_lock);
13339 bnxt_get_port_module_status(bp);
13340 mutex_unlock(&bp->link_lock);
13343 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
13344 bnxt_tc_flow_stats_work(bp);
13346 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
13347 bnxt_chk_missed_irq(bp);
13349 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
13350 bnxt_fw_echo_reply(bp);
13352 if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event))
13353 bnxt_hwmon_notify_event(bp);
13358 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
13359 bnxt_reset(bp, false);
13361 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
13362 bnxt_reset(bp, true);
13364 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
13365 bnxt_rx_ring_reset(bp);
13367 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
13368 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
13369 test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
13370 bnxt_devlink_health_fw_report(bp);
13372 bnxt_fw_reset(bp);
13375 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
13376 if (!is_bnxt_fw_ok(bp))
13377 bnxt_devlink_health_fw_report(bp);
13381 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
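
The bnxt_timer() and bnxt_sp_task() matches above show the deferred-work pattern used throughout the driver: producers set a bit in bp->sp_event (bnxt_queue_sp_work) and schedule the task, and the worker test-and-clears each bit it services while BNXT_STATE_IN_SP_TASK is held. The sketch below models that with C11 atomics; the event names and the single-threaded "worker" are hypothetical stand-ins for the kernel's set_bit()/test_and_clear_bit() and workqueue machinery.

/*
 * Event-bit dispatch sketch: queue_sp_work() atomically ORs a bit into a
 * shared word; sp_task() atomically clears each bit and handles it only
 * if it was set.  Event names are made up for the demo.
 */
#include <stdatomic.h>
#include <stdio.h>

enum { EV_LINK_CHNG, EV_PERIODIC_STATS, EV_RESET_TASK };

static atomic_ulong sp_event;

static void queue_sp_work(int event)
{
	atomic_fetch_or(&sp_event, 1UL << event);
	/* real driver: schedule_work()/queue_work() on bp->sp_task */
}

static int test_and_clear(int event)
{
	unsigned long mask = 1UL << event;

	return (atomic_fetch_and(&sp_event, ~mask) & mask) != 0;
}

static void sp_task(void)
{
	if (test_and_clear(EV_LINK_CHNG))
		puts("update link");
	if (test_and_clear(EV_PERIODIC_STATS))
		puts("collect stats");
	if (test_and_clear(EV_RESET_TASK))
		puts("reset");
}

int main(void)
{
	queue_sp_work(EV_PERIODIC_STATS);
	queue_sp_work(EV_LINK_CHNG);
	sp_task();	/* handles both queued events, in handler order */
	sp_task();	/* nothing left to do */
	return 0;
}
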
13384 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
13388 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
13398 _bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp);
13403 if (bp->flags & BNXT_FLAG_AGG_RINGS)
13411 hwr.vnic = bnxt_get_total_vnics(bp, rx);
13413 tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp);
13418 if (BNXT_NEW_RM(bp)) {
13419 hwr.cp += bnxt_get_ulp_msix_num(bp);
13420 hwr.stat += bnxt_get_ulp_stat_ctxs(bp);
13422 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
13424 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
13426 return bnxt_hwrm_check_rings(bp, &hwr);
13429 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
13431 if (bp->bar2) {
13432 pci_iounmap(pdev, bp->bar2);
13433 bp->bar2 = NULL;
13436 if (bp->bar1) {
13437 pci_iounmap(pdev, bp->bar1);
13438 bp->bar1 = NULL;
13441 if (bp->bar0) {
13442 pci_iounmap(pdev, bp->bar0);
13443 bp->bar0 = NULL;
13447 static void bnxt_cleanup_pci(struct bnxt *bp)
13449 bnxt_unmap_bars(bp, bp->pdev);
13450 pci_release_regions(bp->pdev);
13451 if (pci_is_enabled(bp->pdev))
13452 pci_disable_device(bp->pdev);
13455 static void bnxt_init_dflt_coal(struct bnxt *bp)
13457 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
13468 coal = &bp->rx_coal;
13478 coal = &bp->tx_coal;
13486 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
13490 static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
13492 u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp);
13494 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
13497 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
13503 static int bnxt_fw_init_one_p1(struct bnxt *bp)
13507 bp->fw_cap = 0;
13508 rc = bnxt_hwrm_ver_get(bp);
13514 bnxt_try_map_fw_health_reg(bp);
13516 rc = bnxt_try_recover_fw(bp);
13519 rc = bnxt_hwrm_ver_get(bp);
13524 bnxt_nvm_cfg_ver_get(bp);
13526 rc = bnxt_hwrm_func_reset(bp);
13530 bnxt_hwrm_fw_set_time(bp);
13534 static int bnxt_fw_init_one_p2(struct bnxt *bp)
13539 rc = bnxt_hwrm_func_qcaps(bp);
13541 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
13546 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
13548 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
13551 if (bnxt_alloc_fw_health(bp)) {
13552 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
13554 rc = bnxt_hwrm_error_recovery_qcfg(bp);
13556 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
13560 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
13564 if (bnxt_fw_pre_resv_vnics(bp))
13565 bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
13567 bnxt_hwrm_func_qcfg(bp);
13568 bnxt_hwrm_vnic_qcaps(bp);
13569 bnxt_hwrm_port_led_qcaps(bp);
13570 bnxt_ethtool_init(bp);
13571 if (bp->fw_cap & BNXT_FW_CAP_PTP)
13572 __bnxt_hwrm_ptp_qcfg(bp);
13573 bnxt_dcb_init(bp);
13574 bnxt_hwmon_init(bp);
13578 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
13580 bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP;
13581 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
13585 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
13586 bp->rss_hash_delta = bp->rss_hash_cfg;
13587 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
13588 bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP;
13589 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
13594 static void bnxt_set_dflt_rfs(struct bnxt *bp)
13596 struct net_device *dev = bp->dev;
13600 bp->flags &= ~BNXT_FLAG_RFS;
13601 if (bnxt_rfs_supported(bp)) {
13603 if (bnxt_rfs_capable(bp)) {
13604 bp->flags |= BNXT_FLAG_RFS;
13610 static void bnxt_fw_init_one_p3(struct bnxt *bp)
13612 struct pci_dev *pdev = bp->pdev;
13614 bnxt_set_dflt_rss_hash_type(bp);
13615 bnxt_set_dflt_rfs(bp);
13617 bnxt_get_wol_settings(bp);
13618 if (bp->flags & BNXT_FLAG_WOL_CAP)
13619 device_set_wakeup_enable(&pdev->dev, bp->wol);
13623 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
13624 bnxt_hwrm_coal_params_qcaps(bp);
13627 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
13629 int bnxt_fw_init_one(struct bnxt *bp)
13633 rc = bnxt_fw_init_one_p1(bp);
13635 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
13638 rc = bnxt_fw_init_one_p2(bp);
13640 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
13643 rc = bnxt_probe_phy(bp, false);
13646 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
13650 bnxt_fw_init_one_p3(bp);
13654 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
13656 struct bnxt_fw_health *fw_health = bp->fw_health;
13666 pci_write_config_dword(bp->pdev, reg_off, val);
13670 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
13674 writel(val, bp->bar0 + reg_off);
13677 writel(val, bp->bar1 + reg_off);
13681 pci_read_config_dword(bp->pdev, 0, &val);
13686 bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
13692 if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
13695 if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
13699 resp = hwrm_req_hold(bp, req);
13700 if (!hwrm_req_send(bp, req))
13703 hwrm_req_drop(bp, req);
13707 static void bnxt_reset_all(struct bnxt *bp)
13709 struct bnxt_fw_health *fw_health = bp->fw_health;
13712 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
13713 bnxt_fw_reset_via_optee(bp);
13714 bp->fw_reset_timestamp = jiffies;
13720 bnxt_fw_reset_writel(bp, i);
13724 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
13730 rc = hwrm_req_send(bp, req);
13733 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
13735 bp->fw_reset_timestamp = jiffies;
13738 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
13740 return time_after(jiffies, bp->fw_reset_timestamp +
13741 (bp->fw_reset_max_dsecs * HZ / 10));
13744 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
13746 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13747 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
13748 bnxt_ulp_start(bp, rc);
13749 bnxt_dl_health_fw_status_update(bp, false);
13751 bp->fw_reset_state = 0;
13752 dev_close(bp->dev);
13757 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
13760 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
13761 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
13765 switch (bp->fw_reset_state) {
13767 int n = bnxt_get_registered_vfs(bp);
13771 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
13773 bp->fw_reset_timestamp));
13776 if (bnxt_fw_reset_timeout(bp)) {
13777 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13778 bp->fw_reset_state = 0;
13779 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
13783 bnxt_queue_fw_reset_work(bp, HZ / 10);
13786 bp->fw_reset_timestamp = jiffies;
13788 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
13789 bnxt_fw_reset_abort(bp, rc);
13793 bnxt_fw_reset_close(bp);
13794 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
13795 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
13798 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
13799 tmo = bp->fw_reset_min_dsecs * HZ / 10;
13802 bnxt_queue_fw_reset_work(bp, tmo);
13808 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
13810 !bnxt_fw_reset_timeout(bp)) {
13811 bnxt_queue_fw_reset_work(bp, HZ / 5);
13815 if (!bp->fw_health->primary) {
13816 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
13818 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
13819 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
13822 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
13826 bnxt_reset_all(bp);
13827 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
13828 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
13831 bnxt_inv_fw_health_reg(bp);
13832 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
13833 !bp->fw_reset_min_dsecs) {
13836 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
13838 if (bnxt_fw_reset_timeout(bp)) {
13839 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
13843 bnxt_queue_fw_reset_work(bp, HZ / 1000);
13847 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
13848 clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
13849 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
13850 !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
13851 bnxt_dl_remote_reload(bp);
13852 if (pci_enable_device(bp->pdev)) {
13853 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
13857 pci_set_master(bp->pdev);
13858 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
13861 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
13862 rc = bnxt_hwrm_poll(bp);
13864 if (bnxt_fw_reset_timeout(bp)) {
13865 netdev_err(bp->dev, "Firmware reset aborted\n");
13868 bnxt_queue_fw_reset_work(bp, HZ / 5);
13871 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
13872 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
13876 bnxt_queue_fw_reset_work(bp, HZ / 10);
13879 rc = bnxt_open(bp->dev);
13881 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
13882 bnxt_fw_reset_abort(bp, rc);
13887 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
13888 bp->fw_health->enabled) {
13889 bp->fw_health->last_fw_reset_cnt =
13890 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
13892 bp->fw_reset_state = 0;
13895 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13896 bnxt_ulp_start(bp, 0);
13897 bnxt_reenable_sriov(bp);
13898 bnxt_vf_reps_alloc(bp);
13899 bnxt_vf_reps_open(bp);
13900 bnxt_ptp_reapply_pps(bp);
13901 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
13902 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
13903 bnxt_dl_health_fw_recovery_done(bp);
13904 bnxt_dl_health_fw_status_update(bp, true);
13912 if (bp->fw_health->status_reliable ||
13913 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
13914 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
13916 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
13920 bnxt_fw_reset_abort(bp, rc);
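
The bnxt_fw_reset_task() matches above are structured as a state machine keyed on bp->fw_reset_state: each invocation either advances the state or re-queues itself after a delay (bnxt_queue_fw_reset_work), and bnxt_fw_reset_timeout() bounds how long any polling state may spin before the reset is aborted. The following sketch reproduces that shape in a self-contained loop; the state names, delays, and poll behaviour are simplified assumptions, not the driver's actual states or timing.

/*
 * Polled state machine with a deadline: POLL_FW either advances when the
 * (pretend) firmware responds or aborts once the deadline passes.  The
 * real driver re-queues delayed work instead of looping in place.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

enum reset_state { RESET_FW, POLL_FW, ENABLE_DEV, OPENING, DONE, ABORTED };

static bool reset_timed_out(time_t deadline)
{
	return time(NULL) > deadline;
}

/* Pretend firmware poll: succeeds after a few calls. */
static int poll_fw(void)
{
	static int calls;

	return ++calls >= 3 ? 0 : -1;
}

int main(void)
{
	enum reset_state state = RESET_FW;
	time_t deadline = time(NULL) + 5;	/* analogue of fw_reset_max_dsecs */

	while (state != DONE && state != ABORTED) {
		switch (state) {
		case RESET_FW:
			puts("issue firmware reset");
			state = POLL_FW;
			break;
		case POLL_FW:
			if (!poll_fw())
				state = ENABLE_DEV;
			else if (reset_timed_out(deadline))
				state = ABORTED;
			/* else: stay in POLL_FW (driver would re-queue work) */
			break;
		case ENABLE_DEV:
			puts("re-enable PCI device");
			state = OPENING;
			break;
		case OPENING:
			puts("reopen NIC");
			state = DONE;
			break;
		default:
			break;
		}
	}
	puts(state == DONE ? "reset complete" : "reset aborted");
	return state == DONE ? 0 : 1;
}
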
13927 struct bnxt *bp = netdev_priv(dev);
13960 bp->dev = dev;
13961 bp->pdev = pdev;
13963 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
13966 bp->bar0 = pci_ioremap_bar(pdev, 0);
13967 if (!bp->bar0) {
13973 bp->bar2 = pci_ioremap_bar(pdev, 4);
13974 if (!bp->bar2) {
13980 INIT_WORK(&bp->sp_task, bnxt_sp_task);
13981 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
13983 spin_lock_init(&bp->ntp_fltr_lock);
13985 spin_lock_init(&bp->db_lock);
13988 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
13989 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
13991 timer_setup(&bp->timer, bnxt_timer, 0);
13992 bp->current_interval = BNXT_TIMER_INTERVAL;
13994 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
13995 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
13997 clear_bit(BNXT_STATE_OPEN, &bp->state);
14001 bnxt_unmap_bars(bp, pdev);
14015 struct bnxt *bp = netdev_priv(dev);
14024 rc = bnxt_approve_mac(bp, addr->sa_data, true);
14029 bnxt_clear_usr_fltrs(bp, true);
14031 bnxt_close_nic(bp, false, false);
14032 rc = bnxt_open_nic(bp, false, false);
14041 struct bnxt *bp = netdev_priv(dev);
14044 bnxt_close_nic(bp, true, false);
14047 bnxt_set_ring_params(bp);
14050 return bnxt_open_nic(bp, true, false);
14057 struct bnxt *bp = netdev_priv(dev);
14061 if (tc > bp->max_tc) {
14063 tc, bp->max_tc);
14067 if (bp->num_tc == tc)
14070 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
14073 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
14074 sh, tc, bp->tx_nr_rings_xdp);
14079 if (netif_running(bp->dev))
14080 bnxt_close_nic(bp, true, false);
14083 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
14085 bp->num_tc = tc;
14087 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
14089 bp->num_tc = 0;
14091 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
14092 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
14093 bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
14094 tx_cp + bp->rx_nr_rings;
14096 if (netif_running(bp->dev))
14097 return bnxt_open_nic(bp, true, false);
14105 struct bnxt *bp = cb_priv;
14107 if (!bnxt_tc_flower_enabled(bp) ||
14108 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
14113 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
14124 struct bnxt *bp = netdev_priv(dev);
14131 bp, bp, true);
14144 u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
14152 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
14153 return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
14156 int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
14162 spin_lock_bh(&bp->ntp_fltr_lock);
14163 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0);
14165 spin_unlock_bh(&bp->ntp_fltr_lock);
14172 head = &bp->ntp_fltr_hash_tbl[idx];
14175 bnxt_insert_usr_fltr(bp, &fltr->base);
14176 bp->ntp_fltr_count++;
14177 spin_unlock_bh(&bp->ntp_fltr_lock);
14220 bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
14226 head = &bp->ntp_fltr_hash_tbl[idx];
14238 struct bnxt *bp = netdev_priv(dev);
14247 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
14254 l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key);
14258 bnxt_del_l2_filter(bp, l2_fltr);
14264 bnxt_del_l2_filter(bp, l2_fltr);
14283 if (bp->hwrm_spec_code < 0x10601) {
14291 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
14297 idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
14299 fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
14309 rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
14311 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
14316 bnxt_del_l2_filter(bp, l2_fltr);
14322 void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr)
14324 spin_lock_bh(&bp->ntp_fltr_lock);
14326 spin_unlock_bh(&bp->ntp_fltr_lock);
14330 bnxt_del_one_usr_fltr(bp, &fltr->base);
14331 bp->ntp_fltr_count--;
14332 spin_unlock_bh(&bp->ntp_fltr_lock);
14333 bnxt_del_l2_filter(bp, fltr->l2_fltr);
14334 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
14338 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
14349 head = &bp->ntp_fltr_hash_tbl[i];
14356 if (rps_may_expire_flow(bp->dev, fltr->base.rxq,
14359 bnxt_hwrm_cfa_ntuple_filter_free(bp,
14364 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
14373 bnxt_del_ntp_filter(bp, fltr);
14382 struct bnxt *bp = netdev_priv(netdev);
14392 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
14398 struct bnxt *bp = netdev_priv(netdev);
14408 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
14436 struct bnxt *bp = netdev_priv(dev);
14438 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
14445 struct bnxt *bp = netdev_priv(dev);
14449 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
14463 if (mode == bp->br_mode)
14466 rc = bnxt_hwrm_set_br_mode(bp, mode);
14468 bp->br_mode = mode;
14477 struct bnxt *bp = netdev_priv(dev);
14479 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
14483 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
14486 ppid->id_len = sizeof(bp->dsn);
14487 memcpy(ppid->id, bp->dsn, ppid->id_len);
14528 struct bnxt *bp = netdev_priv(dev);
14532 cpr = &bp->bnapi[i]->cp_ring;
14551 struct bnxt *bp = netdev_priv(dev);
14555 bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
14573 struct bnxt *bp = netdev_priv(dev);
14575 rx->packets = bp->net_stats_prev.rx_packets;
14576 rx->bytes = bp->net_stats_prev.rx_bytes;
14577 rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards;
14579 tx->packets = bp->net_stats_prev.tx_packets;
14580 tx->bytes = bp->net_stats_prev.tx_bytes;
14592 struct bnxt *bp = netdev_priv(dev);
14594 if (BNXT_PF(bp))
14595 bnxt_sriov_disable(bp);
14597 bnxt_rdma_aux_device_uninit(bp);
14599 bnxt_ptp_clear(bp);
14601 bnxt_free_l2_filters(bp, true);
14602 bnxt_free_ntp_fltrs(bp, true);
14603 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14605 cancel_work_sync(&bp->sp_task);
14606 cancel_delayed_work_sync(&bp->fw_reset_task);
14607 bp->sp_event = 0;
14609 bnxt_dl_fw_reporters_destroy(bp);
14610 bnxt_dl_unregister(bp);
14611 bnxt_shutdown_tc(bp);
14613 bnxt_clear_int_mode(bp);
14614 bnxt_hwrm_func_drv_unrgtr(bp);
14615 bnxt_free_hwrm_resources(bp);
14616 bnxt_hwmon_uninit(bp);
14617 bnxt_ethtool_free(bp);
14618 bnxt_dcb_free(bp);
14619 kfree(bp->ptp_cfg);
14620 bp->ptp_cfg = NULL;
14621 kfree(bp->fw_health);
14622 bp->fw_health = NULL;
14623 bnxt_cleanup_pci(bp);
14624 bnxt_free_ctx_mem(bp);
14625 kfree(bp->rss_indir_tbl);
14626 bp->rss_indir_tbl = NULL;
14627 bnxt_free_port_stats(bp);
14631 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
14634 struct bnxt_link_info *link_info = &bp->link_info;
14636 bp->phy_flags = 0;
14637 rc = bnxt_hwrm_phy_qcaps(bp);
14639 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
14643 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
14644 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
14646 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
14650 mutex_lock(&bp->link_lock);
14651 rc = bnxt_update_link(bp, false);
14653 mutex_unlock(&bp->link_lock);
14654 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
14665 bnxt_init_ethtool_link_settings(bp);
14666 mutex_unlock(&bp->link_lock);
14681 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
14684 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
14689 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
14690 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
14691 bnxt_get_ulp_msix_num(bp),
14692 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
14693 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
14696 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
14700 if (bp->flags & BNXT_FLAG_AGG_RINGS)
14702 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
14705 rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
14716 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
14720 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
14726 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
14729 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
14734 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
14735 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
14737 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
14738 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
14741 bp->flags |= BNXT_FLAG_AGG_RINGS;
14744 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
14745 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
14746 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
14747 bnxt_set_ring_params(bp);
14750 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
14754 max_cp = bnxt_get_max_func_cp_rings(bp);
14755 max_stat = bnxt_get_max_func_stat_ctxs(bp);
14756 max_irq = bnxt_get_max_func_irqs(bp);
14767 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
14777 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
14779 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
14780 bp->rx_nr_rings = bp->cp_nr_rings;
14781 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
14782 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
14785 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
14789 if (!bnxt_can_reserve_rings(bp))
14793 bp->flags |= BNXT_FLAG_SHARED_RINGS;
14798 if (bp->port_count > 1) {
14800 max_t(int, num_online_cpus() / bp->port_count, 1);
14804 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
14807 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
14808 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
14810 bnxt_trim_dflt_sh_rings(bp);
14812 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
14813 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
14815 rc = __bnxt_reserve_rings(bp);
14817 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
14818 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
14820 bnxt_trim_dflt_sh_rings(bp);
14823 if (bnxt_need_reserve_rings(bp)) {
14824 rc = __bnxt_reserve_rings(bp);
14826 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
14827 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
14829 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
14830 bp->rx_nr_rings++;
14831 bp->cp_nr_rings++;
14834 bp->tx_nr_rings = 0;
14835 bp->rx_nr_rings = 0;
14840 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
14844 if (bp->tx_nr_rings)
14847 bnxt_ulp_irq_stop(bp);
14848 bnxt_clear_int_mode(bp);
14849 rc = bnxt_set_dflt_rings(bp, true);
14851 if (BNXT_VF(bp) && rc == -ENODEV)
14852 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
14854 netdev_err(bp->dev, "Not enough rings available.\n");
14857 rc = bnxt_init_int_mode(bp);
14861 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
14863 bnxt_set_dflt_rfs(bp);
14866 bnxt_ulp_irq_restart(bp, rc);
14870 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
14875 bnxt_hwrm_func_qcaps(bp);
14877 if (netif_running(bp->dev))
14878 __bnxt_close_nic(bp, true, false);
14880 bnxt_ulp_irq_stop(bp);
14881 bnxt_clear_int_mode(bp);
14882 rc = bnxt_init_int_mode(bp);
14883 bnxt_ulp_irq_restart(bp, rc);
14885 if (netif_running(bp->dev)) {
14887 dev_close(bp->dev);
14889 rc = bnxt_open_nic(bp, true, false);
14895 static int bnxt_init_mac_addr(struct bnxt *bp)
14899 if (BNXT_PF(bp)) {
14900 eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
14903 struct bnxt_vf_info *vf = &bp->vf;
14908 eth_hw_addr_set(bp->dev, vf->mac_addr);
14914 eth_hw_addr_random(bp->dev);
14916 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
14922 static void bnxt_vpd_read_info(struct bnxt *bp)
14924 struct pci_dev *pdev = bp->pdev;
14941 memcpy(bp->board_partno, &vpd_data[pos], size);
14951 memcpy(bp->board_serialno, &vpd_data[pos], size);
14956 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
14958 struct pci_dev *pdev = bp->pdev;
14963 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
14969 bp->flags |= BNXT_FLAG_DSN_VALID;
14973 static int bnxt_map_db_bar(struct bnxt *bp)
14975 if (!bp->db_size)
14977 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
14978 if (!bp->bar1)
14983 void bnxt_print_device_info(struct bnxt *bp)
14985 netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
14986 board_info[bp->board_idx].name,
14987 (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
14989 pcie_print_link_status(bp->pdev);
14996 struct bnxt *bp;
15011 dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE,
15016 bp = netdev_priv(dev);
15017 bp->board_idx = ent->driver_data;
15018 bp->msg_enable = BNXT_DEF_MSG_ENABLE;
15019 bnxt_set_max_func_irqs(bp, max_irqs);
15021 if (bnxt_vf_pciid(bp->board_idx))
15022 bp->flags |= BNXT_FLAG_VF;
15025 if (BNXT_PF(bp))
15026 SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
15029 bp->flags |= BNXT_FLAG_MSIX_CAP;
15041 rc = bnxt_alloc_hwrm_resources(bp);
15045 mutex_init(&bp->hwrm_cmd_lock);
15046 mutex_init(&bp->link_lock);
15048 rc = bnxt_fw_init_one_p1(bp);
15052 if (BNXT_PF(bp))
15053 bnxt_vpd_read_info(bp);
15055 if (BNXT_CHIP_P5_PLUS(bp)) {
15056 bp->flags |= BNXT_FLAG_CHIP_P5_PLUS;
15057 if (BNXT_CHIP_P7(bp))
15058 bp->flags |= BNXT_FLAG_CHIP_P7;
15061 rc = bnxt_alloc_rss_indir_tbl(bp);
15065 rc = bnxt_fw_init_one_p2(bp);
15069 rc = bnxt_map_db_bar(bp);
15083 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
15086 if (BNXT_SUPPORTS_TPA(bp))
15095 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
15097 if (bp->flags & BNXT_FLAG_CHIP_P7)
15105 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
15107 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
15109 if (BNXT_SUPPORTS_TPA(bp))
15122 init_waitqueue_head(&bp->sriov_cfg_wait);
15124 if (BNXT_SUPPORTS_TPA(bp)) {
15125 bp->gro_func = bnxt_gro_func_5730x;
15126 if (BNXT_CHIP_P4(bp))
15127 bp->gro_func = bnxt_gro_func_5731x;
15128 else if (BNXT_CHIP_P5_PLUS(bp))
15129 bp->gro_func = bnxt_gro_func_5750x;
15131 if (!BNXT_CHIP_P4_PLUS(bp))
15132 bp->flags |= BNXT_FLAG_DOUBLE_DB;
15134 rc = bnxt_init_mac_addr(bp);
15141 if (BNXT_PF(bp)) {
15143 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
15148 dev->max_mtu = bp->max_mtu;
15150 rc = bnxt_probe_phy(bp, true);
15154 hw_resc = &bp->hw_resc;
15155 bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
15158 if (bp->max_fltr < BNXT_MAX_FLTR)
15159 bp->max_fltr = BNXT_MAX_FLTR;
15160 bnxt_init_l2_fltr_tbl(bp);
15161 bnxt_set_rx_skb_mode(bp, false);
15162 bnxt_set_tpa_flags(bp);
15163 bnxt_set_ring_params(bp);
15164 rc = bnxt_set_dflt_rings(bp, true);
15166 if (BNXT_VF(bp) && rc == -ENODEV) {
15167 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
15169 netdev_err(bp->dev, "Not enough rings available.\n");
15175 bnxt_fw_init_one_p3(bp);
15177 bnxt_init_dflt_coal(bp);
15180 bp->flags |= BNXT_FLAG_STRIP_VLAN;
15182 rc = bnxt_init_int_mode(bp);
15189 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
15191 if (BNXT_PF(bp)) {
15201 rc = bnxt_init_tc(bp);
15207 bnxt_inv_fw_health_reg(bp);
15208 rc = bnxt_dl_register(bp);
15212 INIT_LIST_HEAD(&bp->usr_fltr_list);
15218 bnxt_dl_fw_reporters_create(bp);
15220 bnxt_rdma_aux_device_init(bp);
15222 bnxt_print_device_info(bp);
15228 bnxt_dl_unregister(bp);
15230 bnxt_shutdown_tc(bp);
15231 bnxt_clear_int_mode(bp);
15234 bnxt_hwrm_func_drv_unrgtr(bp);
15235 bnxt_free_hwrm_resources(bp);
15236 bnxt_hwmon_uninit(bp);
15237 bnxt_ethtool_free(bp);
15238 bnxt_ptp_clear(bp);
15239 kfree(bp->ptp_cfg);
15240 bp->ptp_cfg = NULL;
15241 kfree(bp->fw_health);
15242 bp->fw_health = NULL;
15243 bnxt_cleanup_pci(bp);
15244 bnxt_free_ctx_mem(bp);
15245 kfree(bp->rss_indir_tbl);
15246 bp->rss_indir_tbl = NULL;
15256 struct bnxt *bp;
15262 bp = netdev_priv(dev);
15263 if (!bp)
15269 bnxt_clear_int_mode(bp);
15273 pci_wake_from_d3(pdev, bp->wol);
15285 struct bnxt *bp = netdev_priv(dev);
15289 bnxt_ulp_stop(bp);
15294 bnxt_hwrm_func_drv_unrgtr(bp);
15295 pci_disable_device(bp->pdev);
15296 bnxt_free_ctx_mem(bp);
15304 struct bnxt *bp = netdev_priv(dev);
15308 rc = pci_enable_device(bp->pdev);
15314 pci_set_master(bp->pdev);
15315 if (bnxt_hwrm_ver_get(bp)) {
15319 rc = bnxt_hwrm_func_reset(bp);
15325 rc = bnxt_hwrm_func_qcaps(bp);
15329 bnxt_clear_reservations(bp, true);
15331 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
15336 bnxt_get_wol_settings(bp);
15344 bnxt_ulp_start(bp, rc);
15346 bnxt_reenable_sriov(bp);
15372 struct bnxt *bp = netdev_priv(netdev);
15380 bnxt_ulp_stop(bp);
15382 if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
15383 netdev_err(bp->dev, "Firmware reset already in progress\n");
15397 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
15398 bnxt_fw_fatal_close(bp);
15402 __bnxt_close_nic(bp, true, true);
15406 bnxt_free_ctx_mem(bp);
15426 struct bnxt *bp = netdev_priv(netdev);
15431 netdev_info(bp->dev, "PCI Slot Reset\n");
15449 &bp->state)) {
15452 pci_write_config_dword(bp->pdev, off, 0);
15457 bnxt_inv_fw_health_reg(bp);
15458 bnxt_try_map_fw_health_reg(bp);
15464 err = bnxt_try_recover_fw(bp);
15475 err = bnxt_hwrm_func_reset(bp);
15479 bnxt_ulp_irq_stop(bp);
15480 bnxt_clear_int_mode(bp);
15481 err = bnxt_init_int_mode(bp);
15482 bnxt_ulp_irq_restart(bp, err);
15486 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
15487 bnxt_clear_reservations(bp, true);
15503 struct bnxt *bp = netdev_priv(netdev);
15506 netdev_info(bp->dev, "PCI Slot Resume\n");
15509 err = bnxt_hwrm_func_qcaps(bp);
15513 bnxt_ulp_start(bp, err);
15515 bnxt_reenable_sriov(bp);