Lines matching refs:cpr — cross-reference listing of every line in the bnxt driver that touches struct bnxt_cp_ring_info *cpr (the per-NAPI completion-ring state). The number at the start of each entry is the line number in the driver source. A hedged sketch of the recurring CP_RING()/CP_IDX() indexing idiom follows the listing.

989 struct bnxt_cp_ring_info *cpr,
996 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1009 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
1012 struct bnxt_napi *bnapi = cpr->bnapi;
1033 agg = bnxt_get_agg(bp, cpr, idx, start + i);
1175 struct bnxt_cp_ring_info *cpr,
1180 struct bnxt_napi *bnapi = cpr->bnapi;
1201 agg = bnxt_get_agg(bp, cpr, idx, i);
1231 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1246 struct bnxt_cp_ring_info *cpr,
1253 total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
1268 struct bnxt_cp_ring_info *cpr,
1278 total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
1288 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1297 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1359 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1382 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1529 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1532 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1743 struct bnxt_cp_ring_info *cpr,
1749 struct bnxt_napi *bnapi = cpr->bnapi;
1762 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1790 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1805 bnxt_abort_tpa(cpr, idx, agg_bufs);
1815 bnxt_abort_tpa(cpr, idx, agg_bufs);
1816 cpr->sw_stats->rx.rx_oom_discards += 1;
1825 bnxt_abort_tpa(cpr, idx, agg_bufs);
1826 cpr->sw_stats->rx.rx_oom_discards += 1;
1841 bnxt_abort_tpa(cpr, idx, agg_bufs);
1842 cpr->sw_stats->rx.rx_oom_discards += 1;
1850 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
1853 cpr->sw_stats->rx.rx_oom_discards += 1;
1998 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2001 struct bnxt_napi *bnapi = cpr->bnapi;
2021 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2033 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2054 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
2072 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
2092 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
2106 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
2129 u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
2154 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
2176 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
2251 cpr->rx_packets += 1;
2252 cpr->rx_bytes += len;
2264 cpr->sw_stats->rx.rx_oom_discards += 1;
2273 struct bnxt_cp_ring_info *cpr,
2285 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2290 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2311 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2313 cpr->sw_stats->rx.rx_netpoll_discards += 1;
2813 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2814 u32 cons = RING_CMP(cpr->cp_raw_cons);
2816 cpr->event_ctr++;
2817 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2822 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2824 u32 raw_cons = cpr->cp_raw_cons;
2828 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2837 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2838 u32 cons = RING_CMP(cpr->cp_raw_cons);
2841 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2843 if (!bnxt_has_work(bp, cpr)) {
2846 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2851 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2861 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2864 struct bnxt_napi *bnapi = cpr->bnapi;
2865 u32 raw_cons = cpr->cp_raw_cons;
2871 cpr->has_more_work = 0;
2872 cpr->had_work_done = 1;
2878 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2907 cpr->has_more_work = 1;
2913 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2915 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2936 cpr->has_more_work = 1;
2954 cpr->cp_raw_cons = raw_cons;
2978 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2981 struct bnxt_napi *bnapi = cpr->bnapi;
2984 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2990 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
3000 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3005 u32 raw_cons = cpr->cp_raw_cons;
3014 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3027 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3036 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3056 cpr->cp_raw_cons = raw_cons;
3057 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
3065 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
3067 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3076 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3084 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
3088 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3092 if (!bnxt_has_work(bp, cpr)) {
3094 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3101 dim_update_sample(cpr->event_ctr,
3102 cpr->rx_packets,
3103 cpr->rx_bytes,
3105 net_dim(&cpr->dim, dim_sample);
3112 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3115 for (i = 0; i < cpr->cp_ring_count; i++) {
3116 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3121 cpr->has_more_work |= cpr2->has_more_work;
3130 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3133 for (i = 0; i < cpr->cp_ring_count; i++) {
3134 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3158 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3160 u32 raw_cons = cpr->cp_raw_cons;
3170 if (cpr->has_more_work) {
3171 cpr->has_more_work = 0;
3178 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3181 if (cpr->has_more_work)
3186 cpr->cp_raw_cons = raw_cons;
3188 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
3189 cpr->cp_raw_cons);
3210 cpr2 = &cpr->cp_ring_arr[idx];
3215 cpr->has_more_work |= cpr2->has_more_work;
3222 if (raw_cons != cpr->cp_raw_cons) {
3223 cpr->cp_raw_cons = raw_cons;
3224 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
3227 cpr_rx = &cpr->cp_ring_arr[0];
3232 dim_update_sample(cpr->event_ctr,
3236 net_dim(&cpr->dim, dim_sample);
3779 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
3781 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3783 kfree(cpr->cp_desc_ring);
3784 cpr->cp_desc_ring = NULL;
3786 kfree(cpr->cp_desc_mapping);
3787 cpr->cp_desc_mapping = NULL;
3791 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
3793 cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
3794 if (!cpr->cp_desc_ring)
3796 cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
3798 if (!cpr->cp_desc_mapping)
3844 struct bnxt_cp_ring_info *cpr;
3851 cpr = &bnapi->cp_ring;
3852 ring = &cpr->cp_ring_struct;
3856 if (!cpr->cp_ring_arr)
3859 for (j = 0; j < cpr->cp_ring_count; j++) {
3860 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
3866 kfree(cpr->cp_ring_arr);
3867 cpr->cp_ring_arr = NULL;
3868 cpr->cp_ring_count = 0;
3873 struct bnxt_cp_ring_info *cpr)
3879 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
3881 bnxt_free_cp_arrays(cpr);
3884 ring = &cpr->cp_ring_struct;
3888 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3889 rmem->dma_arr = cpr->cp_desc_mapping;
3894 bnxt_free_cp_arrays(cpr);
3910 struct bnxt_cp_ring_info *cpr, *cpr2;
3918 cpr = &bnapi->cp_ring;
3919 cpr->bnapi = bnapi;
3920 ring = &cpr->cp_ring_struct;
3944 cpr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
3946 if (!cpr->cp_ring_arr)
3948 cpr->cp_ring_count = cp_count;
3951 cpr2 = &cpr->cp_ring_arr[k];
3956 cpr2->sw_stats = cpr->sw_stats;
3982 struct bnxt_cp_ring_info *cpr;
3990 cpr = &bnapi->cp_ring;
3991 ring = &cpr->cp_ring_struct;
3995 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3996 rmem->dma_arr = cpr->cp_desc_mapping;
4149 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
4150 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4153 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4154 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4155 if (!cpr->cp_ring_arr)
4157 for (j = 0; j < cpr->cp_ring_count; j++) {
4158 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4708 struct bnxt_cp_ring_info *cpr;
4716 cpr = &bnapi->cp_ring;
4717 stats = &cpr->stats;
4795 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4797 bnxt_free_stats_mem(bp, &cpr->stats);
4799 kfree(cpr->sw_stats);
4800 cpr->sw_stats = NULL;
4813 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4815 cpr->sw_stats = kzalloc(sizeof(*cpr->sw_stats), GFP_KERNEL);
4816 if (!cpr->sw_stats)
4819 cpr->stats.len = size;
4820 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4824 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4880 struct bnxt_cp_ring_info *cpr;
4887 cpr = &bnapi->cp_ring;
4888 cpr->cp_raw_cons = 0;
5073 struct bnxt_cp_ring_info *cpr =
5076 cpr->cp_ring_struct.ring_mem.flags =
5206 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5207 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5210 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5217 struct bnxt_cp_ring_info *cpr;
5219 cpr = &bnapi->cp_ring;
5220 return cpr->cp_ring_struct.map_idx;
5247 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5249 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
6876 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6877 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
6888 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
6889 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
7090 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7094 for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) {
7095 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
7105 ring = &cpr->cp_ring_struct;
7755 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7768 req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
7782 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7791 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
7792 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
7913 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7915 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
7916 req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
7923 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
7950 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7952 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
7958 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
7960 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
9594 struct bnxt_cp_ring_info *cpr;
9597 cpr = &bnapi->cp_ring;
9598 stats = &cpr->stats;
10813 struct bnxt_cp_ring_info *cpr;
10815 cpr = &bnapi->cp_ring;
10817 cpr->sw_stats->tx.tx_resets++;
10819 cpr->sw_stats->rx.rx_resets++;
10822 cancel_work_sync(&cpr->dim.work);
10833 struct bnxt_cp_ring_info *cpr;
10837 cpr = &bnapi->cp_ring;
10841 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
10842 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
12316 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
12317 u64 *sw = cpr->stats.sw_stats;
12343 cpr->sw_stats->rx.rx_netpoll_discards +
12344 cpr->sw_stats->rx.rx_oom_discards;
12409 struct bnxt_cp_ring_info *cpr)
12411 struct bnxt_sw_stats *sw_stats = cpr->sw_stats;
12412 u64 *hw_stats = cpr->stats.sw_stats;
13038 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
13042 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
13065 struct bnxt_cp_ring_info *cpr;
13073 cpr = &bnapi->cp_ring;
13074 cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
13221 struct bnxt_cp_ring_info *cpr;
13244 cpr = &rxr->bnapi->cp_ring;
13245 cpr->sw_stats->rx.rx_resets++;
13435 struct bnxt_cp_ring_info *cpr;
13442 cpr = &bnapi->cp_ring;
13443 for (j = 0; j < cpr->cp_ring_count; j++) {
13444 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
13458 cpr->sw_stats->cmn.missed_irqs++;
14763 struct bnxt_cp_ring_info *cpr;
14766 cpr = &bp->bnapi[i]->cp_ring;
14767 sw = cpr->stats.sw_stats;
14779 stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards;
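
Almost every entry above revolves around the same idiom: a free-running consumer index (cpr->cp_raw_cons) is masked down to a ring position with RING_CMP(), and the completion descriptor is then fetched through two-level indexing, cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)], because the ring is allocated as an array of descriptor pages (see the kcalloc of cp_desc_ring and cp_desc_mapping around source lines 3793-3796). The sketch below is a minimal, self-contained illustration of that paged-ring indexing; every SKETCH_* name, size, and macro body is an assumption made for the example and is not the driver's actual definition (those live in the driver's headers).

/*
 * Minimal sketch of paged completion-ring indexing.  All SKETCH_*
 * names, sizes, and macro bodies are assumptions for illustration
 * only; the real CP_RING()/CP_IDX()/RING_CMP() macros are defined
 * in the driver's headers and may differ.
 */
#define SKETCH_CP_DESC_PER_PAGE  256	/* assumed descriptors per page */
#define SKETCH_CP_PAGES          4	/* assumed pages in the ring */
#define SKETCH_CP_RING_SIZE      (SKETCH_CP_DESC_PER_PAGE * SKETCH_CP_PAGES)

/* Page selector and in-page index for an already-masked consumer. */
#define SKETCH_CP_RING(idx)      ((idx) / SKETCH_CP_DESC_PER_PAGE)
#define SKETCH_CP_IDX(idx)       ((idx) % SKETCH_CP_DESC_PER_PAGE)
/* Mask a free-running "raw" consumer down to a valid ring position. */
#define SKETCH_RING_CMP(raw)     ((raw) & (SKETCH_CP_RING_SIZE - 1))

struct sketch_cmpl_desc {
	unsigned int type_flags;	/* completion type, valid bit, ... */
	unsigned int opaque;
};

struct sketch_cp_ring {
	/* one pointer per descriptor page, like cpr->cp_desc_ring[] */
	struct sketch_cmpl_desc *desc_pages[SKETCH_CP_PAGES];
	unsigned int raw_cons;		/* free-running, like cpr->cp_raw_cons */
};

/* Fetch the descriptor the consumer currently points at. */
static struct sketch_cmpl_desc *
sketch_get_cmpl(struct sketch_cp_ring *ring)
{
	unsigned int cons = SKETCH_RING_CMP(ring->raw_cons);

	return &ring->desc_pages[SKETCH_CP_RING(cons)][SKETCH_CP_IDX(cons)];
}

Two-level indexing of this kind lets the completion ring span several descriptor pages instead of one large contiguous allocation, which is consistent with the per-page DMA mapping array (cp_desc_mapping) allocated alongside cp_desc_ring in the listing above.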